Epitrochoid in Python is not giving the proper plot

I wrote the following code in C#:
using System;
using System.Drawing;
using ZedGraph;

namespace _1_12_Epitrocoid
{
    class Epitrocoid
    {
        double A = 1.0;
        double a = 0.4;
        double λ = 1.4;

        public double X(double ϕ)
        {
            return (A + a) * Math.Cos(ϕ) - λ * a * Math.Cos(((A + a) / a) * ϕ);
        }

        public double Y(double ϕ)
        {
            return (A + a) * Math.Sin(ϕ) - λ * a * Math.Sin(((A + a) / a) * ϕ);
        }
    }

    class Program
    {
        static void Main(string[] args)
        {
            Epitrocoid e = new Epitrocoid();
            PointPairList list = new PointPairList();

            for (double ϕ = 0; ϕ < 10; ϕ += 0.01)
            {
                double x = e.X(ϕ);
                double y = e.Y(ϕ);
                list.Add(x, y);
            }

            PlotForm f = new PlotForm("Epitrocoid");
            f.Add(list, "Epitrocoid", Color.Black);
            f.AxisChange();
            f.ShowDialog();
            Console.ReadLine();
        }
    }
}
I converted this code to Python as follows:
import math
import matplotlib.pyplot as plt

A = 1.0
a = 0.4
λ = 1.4

def X(ϕ):
    return (A + a) * math.cos(ϕ) - λ * a * math.cos(((A + a) / a) * ϕ)

def Y(ϕ):
    return (A + a) * math.sin(ϕ) - λ * a * math.sin(((A + a) / a) * ϕ)

x_list = []
y_list = []

for i in range(0, 1001, 1):
    ϕ = i / 1000.0
    x_list.append(X(ϕ))
    y_list.append(Y(ϕ))

print(len(x_list))
print(len(y_list))

plt.plot(x_list, y_list)
Can someone tell me what is going wrong here?

You didn't construct your iterator the same way in Python (ϕ only runs up to 1) as you did in C# (ϕ runs up to 10).
Using the right ϕ range gets you there in Python. Using numpy also simplifies things quite a bit (a minimal fix to your original loop-based code follows the numpy version below).
import numpy
from matplotlib import pyplot

A = 1.0
a = 0.4
λ = 1.4

def X(ϕ):
    return (A + a) * numpy.cos(ϕ) - λ * a * numpy.cos(((A + a) / a) * ϕ)

def Y(ϕ):
    return (A + a) * numpy.sin(ϕ) - λ * a * numpy.sin(((A + a) / a) * ϕ)

ϕ = numpy.arange(0, 10, 0.01)
x = X(ϕ)
y = Y(ϕ)

fig, ax = pyplot.subplots()
ax.plot(x, y)
ax.set_aspect('equal')
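For reference, a minimal fix to the loop-based script above (no numpy): it keeps the 0.01 step but lets ϕ run up to 10 as in the C# loop, and adds the plt.show() call that a plain, non-interactive script needs.
import math
import matplotlib.pyplot as plt

A = 1.0
a = 0.4
λ = 1.4

def X(ϕ):
    return (A + a) * math.cos(ϕ) - λ * a * math.cos(((A + a) / a) * ϕ)

def Y(ϕ):
    return (A + a) * math.sin(ϕ) - λ * a * math.sin(((A + a) / a) * ϕ)

# Match the C# loop: ϕ from 0 to just under 10 in steps of 0.01, i.e. 1000 points.
x_list = []
y_list = []
for i in range(1000):
    ϕ = i * 0.01
    x_list.append(X(ϕ))
    y_list.append(Y(ϕ))

plt.plot(x_list, y_list)
plt.gca().set_aspect('equal')
plt.show()  # required when running as a script rather than in an interactive session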

Related

How to generate random points within a circular area, with higher density near the center?

We want the probability of a point being generated to be higher the closer to the center the point is. Therefore, most of the points would gather around the center of the circle.
So far, I have done something like this:
t = random.random() * 2 * math.pi
r = random.random() * radius
new_x = x_center + r * math.cos(t)
new_y = y_center + r * math.sin(t)
Your algorithm already produces a higher density at the center: r is drawn uniformly, so the same number of points falls on every ring and the areal density drops off like 1/r (a uniform disc would instead need r = radius * math.sqrt(random.random())).
You can push the density even further toward the center by raising random.random() to a power greater than 1 where you use it to define r (a self-contained Python sketch follows the snippet below).
So for instance, you could do this:
attraction = 3 # play with this value
t = random.random() * 2 * math.pi
r = random.random() ** attraction * radius # <--
new_x = x_center + r * math.cos(t)
new_y = y_center + r * math.sin(t)
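For a self-contained Python version of the same idea, here is a quick sketch; the center, radius, attraction value and point count are just illustrative.
import math
import random
import matplotlib.pyplot as plt

x_center, y_center, radius = 0.0, 0.0, 1.0
attraction = 3  # exponents > 1 pull the points toward the center

xs, ys = [], []
for _ in range(10000):
    t = random.random() * 2 * math.pi
    r = random.random() ** attraction * radius
    xs.append(x_center + r * math.cos(t))
    ys.append(y_center + r * math.sin(t))

plt.scatter(xs, ys, s=1)
plt.gca().set_aspect('equal')
plt.show()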
Here is an interactive demo of the algorithm in JavaScript, where you can play with the additional parameter and see what it gives:
let x_center = 80,
    y_center = 80,
    radius = 80;

function refresh(attraction = 1) {
    clear();
    // Plot 10^4 random points in a disc
    for (let i = 0; i < 10000; i++) {
        let t = Math.random() * 2 * Math.PI;
        let r = Math.random() ** attraction * radius;
        let new_x = x_center + r * Math.cos(t);
        let new_y = y_center + r * Math.sin(t);
        plot(new_x, new_y);
    }
}

// I/O handling (would be different in Python)
let input = document.querySelector("input");
let canvas = document.querySelector("canvas");
let ctx = canvas.getContext("2d");

function plot(x, y) {
    ctx.beginPath();
    ctx.moveTo(x, y);
    ctx.lineTo(x + 0.5, y + 0.5);
    ctx.stroke();
}

function clear() {
    ctx.clearRect(0, 0, canvas.width, canvas.height);
}

input.addEventListener("input", () => refresh(input.value));
refresh(input.value);
input { width: 4em }
div { text-align: center }
<input type="number" value="1.0" step="0.1"><br>
<div><canvas width="160" height="160"></canvas></div>

Random number generators C++ and Python

I have programmed a model in both C++ and Python. This model has a noisy-input component, which looks like this in C++:
double doubleRand() {
    thread_local std::mt19937 generator(std::random_device{}());
    std::normal_distribution<double> distribution(0.0, 1.0);
    return distribution(generator);
}
Or this Python:
Inoise = (np.random.normal(0, 1) * knoise * np.sqrt(gNa * A))
IIon = ((iNa + iK + iL) * A) + Inoise #
# Compute change of voltage
v[i + 1] = (vT + ((-IIon + IStim) / C) * dt)[0]
The following is very strange:
If I omit the noisy component (Inoise = 0), then both models (C++ as well as Python) give exactly the same result. If I only introduce the noisy component (IStim = 0), then both models give results that hardly differ from each other over 1000 runs (just natural fluctuations). However, if I choose IStim = 0.000001 and add noise, the results differ by 30%. How is that possible?
Here is the full code. C++:
#include <math.h>
#include <iostream>
#include <random>
#include <vector>
#include <algorithm>
#include <fstream>
#include <omp.h>
#include <iomanip>
#include <assert.h>

// parameters
constexpr double v_Rest = -65.0;
constexpr double gNa = 1200.0;
constexpr double gK = 360.0;
constexpr double gL = 3.0;
constexpr double vNa = 115.0;
constexpr double vK = -12.0;
constexpr double vL = 10.6;
constexpr double c = 1.0;
constexpr double knoise = 0.0005;

bool print = false;
bool bisection = false;
bool test = true;

// stepsize PFs
constexpr int steps = 5;
double store[steps];
int prob[steps];
double step[steps];

// time constants
constexpr double t_end = 1.0;
constexpr double delay = 0.1;
constexpr double duration = 0.1;
constexpr double dt = 0.0025;
constexpr int t_steps = t_end / dt;
constexpr int runs = 1000;

double voltage[t_steps];

double doubleRand() {
    thread_local std::mt19937 engine(std::random_device{}());
    std::normal_distribution<double> distribution(0.0, 1.0);
    return distribution(engine);
}

double alphaM(const double v) { return 12.0 * ((2.5 - 0.1 * (v)) / (exp(2.5 - 0.1 * (v)) - 1.0)); }
double betaM(const double v)  { return 12.0 * (4.0 * exp(-(v) / 18.0)); }
double betaH(const double v)  { return 12.0 * (1.0 / (exp(3.0 - 0.1 * (v)) + 1.0)); }
double alphaH(const double v) { return 12.0 * (0.07 * exp(-(v) / 20.0)); }
double alphaN(const double v) { return 12.0 * ((1.0 - 0.1 * (v)) / (10.0 * (exp(1.0 - 0.1 * (v)) - 1.0))); }
double betaN(const double v)  { return 12.0 * (0.125 * exp(-(v) / 80.0)); }

double HH_model(const double I, const double area_factor) {
    const double A = 1.0e-8 * area_factor;
    const double C = c * A;
    const double v0 = 0.0;
    const double m0 = alphaM(v0) / (alphaM(v0) + betaM(v0));
    const double h0 = alphaH(v0) / (alphaH(v0) + betaH(v0));
    const double n0 = alphaN(v0) / (alphaN(v0) + betaN(v0));
    int count = 0;

    for (int j = 0; j < runs; j++) {
        double vT = v0;
        double mT = m0;
        double hT = h0;
        double nT = n0;

        for (int i = 0; i < t_steps; i++) {
            double IStim = 0.0;
            if ((delay / dt <= (double)i) && ((double)i <= (delay + duration) / dt))
                IStim = I;

            mT = (mT + dt * alphaM(vT)) / (1.0 + dt * (alphaM(vT) + betaM(vT)));
            hT = (hT + dt * alphaH(vT)) / (1.0 + dt * (alphaH(vT) + betaH(vT)));
            nT = (nT + dt * alphaN(vT)) / (1.0 + dt * (alphaN(vT) + betaN(vT)));

            const double iNa = gNa * pow(mT, 3.0) * hT * (vT - vNa);
            const double iK = gK * pow(nT, 4.0) * (vT - vK);
            const double iL = gL * (vT - vL);
            const double Inoise = (doubleRand() * knoise * sqrt(gNa * A));
            const double IIon = ((iNa + iK + iL) * A) + Inoise;

            vT += ((-IIon + IStim) / C) * dt;
            voltage[i] = vT;

            if (vT > 60.0) {
                count++;
                break;
            }
        }
    }
    return count;
}

int main() {
    std::cout << HH_model(1.0e-6, 1) << std::endl;
}
Python:
import matplotlib.pyplot as py
import numpy as np
import scipy.optimize as optimize
from tqdm import tqdm
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

# HH parameters
v_Rest = -65  # in mV
gNa = 1200    # in mS/cm^2
gK = 360      # in mS/cm^2
gL = 0.3*10   # in mS/cm^2
vNa = 115     # in mV
vK = -12      # in mV
vL = 10.6     # in mV

# Number of runs
runs = 1000

c = 1  # in uF/cm^2

def alphaM(v): return 12 * ((2.5 - 0.1 * (v)) / (np.exp(2.5 - 0.1 * (v)) - 1))
def betaM(v): return 12 * (4 * np.exp(-(v) / 18))
def betaH(v): return 12 * (1 / (np.exp(3 - 0.1 * (v)) + 1))
def alphaH(v): return 12 * (0.07 * np.exp(-(v) / 20))
def alphaN(v): return 12 * ((1 - 0.1 * (v)) / (10 * (np.exp(1 - 0.1 * (v)) - 1)))
def betaN(v): return 12 * (0.125 * np.exp(-(v) / 80))

def HH_model(I, area_factor):
    count = 0
    t_end = 1       # in ms
    delay = 0.1     # in ms
    duration = 0.1  # in ms
    dt = 0.0025     # in ms
    area_factor = area_factor
    I = I
    A = 1.0e-8 * area_factor  # membrane area; missing from the original listing, taken from the C++ version
    C = c*A  # uF
    for j in tqdm(range(0, runs), total=runs):
        # Introduction of equations and channels
        # compute the timesteps
        t_steps = t_end/dt + 1
        # Compute the initial values
        v0 = 0
        m0 = alphaM(v0)/(alphaM(v0)+betaM(v0))
        h0 = alphaH(v0)/(alphaH(v0)+betaH(v0))
        n0 = alphaN(v0)/(alphaN(v0)+betaN(v0))
        # Allocate memory for v, m, h, n
        v = np.zeros((int(t_steps), 1))
        m = np.zeros((int(t_steps), 1))
        h = np.zeros((int(t_steps), 1))
        n = np.zeros((int(t_steps), 1))
        # Set initial values
        v[:, 0] = v0
        m[:, 0] = m0
        h[:, 0] = h0
        n[:, 0] = n0
        ### Noise component
        knoise = 0.0005  # uA/(mS)^1/2
        ### --------- Step3: SOLVE
        for i in range(0, int(t_steps)-1, 1):
            # Get current states
            vT = v[i]
            mT = m[i]
            hT = h[i]
            nT = n[i]
            # Stimulus current
            IStim = 0
            if delay / dt <= i <= (delay + duration) / dt:
                IStim = I  # in uA
            else:
                IStim = 0
                # Compute change of m, h and n
                m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
                h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
                n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))
            # Ionic currents
            iNa = gNa * m[i + 1] ** 3. * h[i + 1] * (vT - vNa)
            iK = gK * n[i + 1] ** 4. * (vT - vK)
            iL = gL * (vT-vL)
            Inoise = (np.random.normal(0, 1) * knoise * np.sqrt(gNa * A))
            IIon = ((iNa + iK + iL) * A) + Inoise
            # Compute change of voltage
            v[i + 1] = (vT + ((-IIon + IStim) / C) * dt)[0]  # in ((uA / cm^2) / (uF / cm^2)) * ms == mV
        # adjust the voltage to the resting potential
        v = v + v_Rest
        # test if there was a spike
        if max(v[:]-v_Rest) > 60:
            count += 1
    return count
You've messed up indents in the Python code. These lines
m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))
do not execute when the condition delay / dt <= i <= (delay + duration) / dt is True, because they sit inside the else branch.
After the indentation is fixed, the Python code produces 866, which nearly matches 876, the result of the C++ code.
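For clarity, this is the corrected structure of that part of the inner loop, with the three gating-variable updates moved out of the else branch so they run on every time step (the redundant else: IStim = 0 can simply be dropped):
# Stimulus current
IStim = 0
if delay / dt <= i <= (delay + duration) / dt:
    IStim = I  # in uA
# Compute change of m, h and n -- on every step, not only outside the stimulus window
m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))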

Mandelbrot in a Pixel Shader

I've been working for some days now on a DirectX 11 version of the Mandelbrot set. What I've done so far is create a quad with a texture on it. I can color the points with a pixel shader, but for some reason the Mandelbrot set in the pixel shader does not return the expected result. I tested the logic in plain C++ code and I get the same erroneous result. Any idea what's wrong with the code? I have a working version in Python and I just replicated the code, but it seems something is missing.
The width of the set is 2.5 (which stretches the image a bit). It assumes a 1024x960 window and a maximum of 1000 iterations. I compiled with Shader Model 5.0. It starts from the default view of the set, with
RealStart = -2.0;
ImagStart = -1.25;
Passed via the constant buffer
cbuffer cBuffer
{
    double RealStart;   // equals -2.5 from the default view of the set
    double ImagStart;   // equals -1.25 from the default view of the set
};

// Pixel Shader
float4 main(float4 position : SV_POSITION) : SV_TARGET
{
    double real, imag;
    double real2, imag2;
    int ite = 0;
    float4 CalcColor = { 1.0f, 1.0f, 1.0f, 1.0f };

    // position is the position of the pixel from 1.0f to 0.0f
    real = RealStart + (double) position.x / 1024 * 2.5;
    imag = ImagStart + (double) position.y / 960 * 2.5;

    for (int i = 0; i < 1000; i++)
    {
        // breaking down the complex number by its constituents
        real2 = real * real;
        imag2 = imag * imag;

        if (real2 + imag2 > 4.0)
        {
            break;
        }
        else {
            imag = 2 * real * imag + ImagStart;
            real = real2 - imag2 + RealStart;
            ite++;
        }
    }

    CalcColor[0] = (float) (ite % 333) / 333;
    CalcColor[1] = (float) (ite % 666) / 666;
    CalcColor[2] = (float) (ite % 1000) / 1000;

    return CalcColor;
}
Edit: here is the Python version:
def Mandelbrot(creal, cimag, maxNumberOfIterations):
    real = creal
    imag = cimag
    for numberOfIterations in range(maxNumberOfIterations):
        real2 = real * real
        imag2 = imag * imag
        if real2 + imag2 > 4.0:
            return numberOfIterations
        imag = 2 * real * imag + cimag
        real = real2 - imag2 + creal
    return maxNumberOfIterations
The creal and cimag values are created like this and then simply looped over:
realAxis = np.linspace(realStart, realStart + width, dim)
imagAxis = np.linspace(imagStart, imagStart + width, dim)
The iteration counts returned by Mandelbrot() are collected in a two-dimensional array, which is plotted to draw the Mandelbrot set.
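A sketch of how such a driver loop might look, reusing the Mandelbrot() function above; dim is an illustrative resolution, and the start and width values are the ones mentioned in the question.
import numpy as np
import matplotlib.pyplot as plt

realStart, imagStart, width = -2.0, -1.25, 2.5
dim = 512                     # illustrative resolution
maxNumberOfIterations = 1000

realAxis = np.linspace(realStart, realStart + width, dim)
imagAxis = np.linspace(imagStart, imagStart + width, dim)

# Fill a 2-D array with the iteration count for every (creal, cimag) pair
counts = np.empty((dim, dim), dtype=int)
for iy, cimag in enumerate(imagAxis):
    for ix, creal in enumerate(realAxis):
        counts[iy, ix] = Mandelbrot(creal, cimag, maxNumberOfIterations)

plt.imshow(counts, origin='lower',
           extent=(realStart, realStart + width, imagStart, imagStart + width))
plt.show()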
The error was that ImagStart and RealStart in the else branch needed to be scaled as well; the iteration has to add the pixel's own scaled constant, not the raw start values. The code in the shader has been modified as follows:
cbuffer cBuffer
{
    double2 C;
    float2 Param;
    float MaxIt;
};

// Pixel Shader
float4 main(float4 position : SV_POSITION, float2 texcoord : TEXCOORD) : SV_TARGET
{
    double real, imag;
    double real2, imag2;
    uint ite = 0;
    float4 CalcColor = { 1.0f, 1.0f, 1.0f, 1.0f };

    real = C.x + ((double) texcoord.x - 0.5) * 2.0 * 2.5;
    imag = C.y + ((double) texcoord.y - 0.5) * 2.0 * 2.5;

    for (int i = 0; i < 100; i++)
    {
        real2 = real * real;
        imag2 = imag * imag;

        if (real2 + imag2 > 4.0)
        {
            break;
        }
        else {
            imag = 2 * real * imag + C.y + ((double) texcoord.y - 0.5) * 2.0 * 2.5;
            real = real2 - imag2 + C.x + ((double) texcoord.x - 0.5) * 2.0 * 2.5;
            ite++;
        }
    }

    if (ite > 100)
        ite = 100;

    CalcColor[0] = (float)(ite % 33) / 33;
    CalcColor[1] = (float)(ite % 66) / 66;
    CalcColor[2] = (float)(ite % 100) / 100;

    return CalcColor;
}
The Mandelbrot set is now drawn correctly.

PySide2 Qt Surface Example

I would like to reimplement the Qt C++ "Surface" example (Q3DSurface) in PySide2 but QSurfaceDataArray and QSurfaceDataRow are not available.
void SurfaceGraph::fillSqrtSinProxy()
{
    float stepX = (sampleMax - sampleMin) / float(sampleCountX - 1);
    float stepZ = (sampleMax - sampleMin) / float(sampleCountZ - 1);

    QSurfaceDataArray *dataArray = new QSurfaceDataArray;
    dataArray->reserve(sampleCountZ);
    for (int i = 0 ; i < sampleCountZ ; i++) {
        QSurfaceDataRow *newRow = new QSurfaceDataRow(sampleCountX);
        // Keep values within range bounds, since just adding step can cause minor drift due
        // to the rounding errors.
        float z = qMin(sampleMax, (i * stepZ + sampleMin));
        int index = 0;
        for (int j = 0; j < sampleCountX; j++) {
            float x = qMin(sampleMax, (j * stepX + sampleMin));
            float R = qSqrt(z * z + x * x) + 0.01f;
            float y = (qSin(R) / R + 0.24f) * 1.61f;
            (*newRow)[index++].setPosition(QVector3D(x, y, z));
        }
        *dataArray << newRow;
    }
    m_sqrtSinProxy->resetArray(dataArray);
}
Is there a way to use a QVector<QSurfaceDataItem> in PySide2?
from PySide2.QtDataVisualization import QtDataVisualization as QDV
data_item = QDV.QSurfaceDataItem()
data_item.setPosition(QVector3D(x, y, z))
QSurfaceDataItem is available, but I can't pass the objects to QSurfaceDataProxy without QVector.
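What usually works is to build the rows as plain Python lists of QSurfaceDataItem and pass a nested list to QSurfaceDataProxy.resetArray(), since PySide2 generally maps QVector parameters to Python lists. This is only a sketch of that pattern, not a tested answer: whether resetArray() accepts the nested list may depend on the PySide2 version, and the sample bounds and counts are simply taken over from the Qt Surface example.
import math

from PySide2.QtDataVisualization import QtDataVisualization as QDV
from PySide2.QtGui import QVector3D

# Values from the Qt "Surface" example (assumed, for illustration)
sample_min, sample_max = -8.0, 8.0
sample_count_x = sample_count_z = 50

def fill_sqrt_sin_proxy(proxy):
    step_x = (sample_max - sample_min) / float(sample_count_x - 1)
    step_z = (sample_max - sample_min) / float(sample_count_z - 1)
    data_array = []                      # plays the role of QSurfaceDataArray
    for i in range(sample_count_z):
        z = min(sample_max, i * step_z + sample_min)
        row = []                         # plays the role of QSurfaceDataRow
        for j in range(sample_count_x):
            x = min(sample_max, j * step_x + sample_min)
            R = math.sqrt(z * z + x * x) + 0.01
            y = (math.sin(R) / R + 0.24) * 1.61
            item = QDV.QSurfaceDataItem()
            item.setPosition(QVector3D(x, y, z))
            row.append(item)
        data_array.append(row)
    proxy.resetArray(data_array)         # assumption: accepts a list of lists in place of QVector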

OpenCV how to smooth contour, reducing noise

I extracted the contours of an image, which you can see here:
However, it has some noise.
How can I smooth out the noise? I did a close-up to make clearer what I mean.
Original image that I used:
Code:
rMaskgray = cv2.imread('redmask.jpg', cv2.CV_LOAD_IMAGE_GRAYSCALE)
(thresh, binRed) = cv2.threshold(rMaskgray, 50, 255, cv2.THRESH_BINARY)

Rcontours, hier_r = cv2.findContours(binRed, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
r_areas = [cv2.contourArea(c) for c in Rcontours]
max_rarea = np.max(r_areas)
CntExternalMask = np.ones(binRed.shape[:2], dtype="uint8") * 255

for c in Rcontours:
    if (cv2.contourArea(c) > max_rarea * 0.70) and (cv2.contourArea(c) < max_rarea):
        cv2.drawContours(CntExternalMask, [c], -1, 0, 1)

cv2.imwrite('contour1.jpg', CntExternalMask)
Try an upgrade to OpenCV 3.1.0. After some code adaptations for the new version as shown below, I tried it out with OpenCV version 3.1.0 and did not see any of the effects you are describing.
import cv2
import numpy as np

print(cv2.__version__)

rMaskgray = cv2.imread('5evOn.jpg', 0)
(thresh, binRed) = cv2.threshold(rMaskgray, 50, 255, cv2.THRESH_BINARY)

_, Rcontours, hier_r = cv2.findContours(binRed, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
r_areas = [cv2.contourArea(c) for c in Rcontours]
max_rarea = np.max(r_areas)
CntExternalMask = np.ones(binRed.shape[:2], dtype="uint8") * 255

for c in Rcontours:
    if (cv2.contourArea(c) > max_rarea * 0.70) and (cv2.contourArea(c) < max_rarea):
        cv2.drawContours(CntExternalMask, [c], -1, 0, 1)

cv2.imwrite('contour1.jpg', CntExternalMask)
I don't know if it is OK to provide Java code, but I implemented Gaussian smoothing for an OpenCV contour (a short Python equivalent follows the Java class below). The logic and theory are taken from https://www.morethantechnical.com/2012/12/07/resampling-smoothing-and-interest-points-of-curves-via-css-in-opencv-w-code/
package CurveTools;
import org.apache.log4j.Logger;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import java.util.ArrayList;
import java.util.List;
import static org.opencv.core.CvType.CV_64F;
import static org.opencv.imgproc.Imgproc.getGaussianKernel;
class CurveSmoother {
private double[] g, dg, d2g, gx, dx, d2x;
private double gx1, dgx1, d2gx1;
public double[] kappa, smoothX, smoothY;
public double[] contourX, contourY;
/* 1st and 2nd derivative of 1D gaussian */
void getGaussianDerivs(double sigma, int M) {
int L = (M - 1) / 2;
double sigma_sq = sigma * sigma;
double sigma_quad = sigma_sq * sigma_sq;
dg = new double[M];
d2g = new double[M];
g = new double[M];
Mat tmpG = getGaussianKernel(M, sigma, CV_64F);
for (double i = -L; i < L + 1.0; i += 1.0) {
int idx = (int) (i + L);
g[idx] = tmpG.get(idx, 0)[0];
// from http://www.cedar.buffalo.edu/~srihari/CSE555/Normal2.pdf
dg[idx] = -i * g[idx] / sigma_sq;
d2g[idx] = (-sigma_sq + i * i) * g[idx] / sigma_quad;
}
}
/* 1st and 2nd derivative of smoothed curve point */
void getdX(double[] x, int n, double sigma, boolean isOpen) {
int L = (g.length - 1) / 2;
gx1 = dgx1 = d2gx1 = 0.0;
for (int k = -L; k < L + 1; k++) {
double x_n_k;
if (n - k < 0) {
if (isOpen) {
//open curve - mirror values on border
x_n_k = x[-(n - k)];
} else {
//closed curve - take values from end of curve
x_n_k = x[x.length + (n - k)];
}
} else if (n - k > x.length - 1) {
if (isOpen) {
//mirror value on border
x_n_k = x[n + k];
} else {
x_n_k = x[(n - k) - x.length];
}
} else {
x_n_k = x[n - k];
}
gx1 += x_n_k * g[k + L]; //gaussians go [0 -> M-1]
dgx1 += x_n_k * dg[k + L];
d2gx1 += x_n_k * d2g[k + L];
}
}
/* 0th, 1st and 2nd derivatives of whole smoothed curve */
void getdXcurve(double[] x, double sigma, boolean isOpen) {
gx = new double[x.length];
dx = new double[x.length];
d2x = new double[x.length];
for (int i = 0; i < x.length; i++) {
getdX(x, i, sigma, isOpen);
gx[i] = gx1;
dx[i] = dgx1;
d2x[i] = d2gx1;
}
}
/*
compute curvature of curve after gaussian smoothing
from "Shape similarity retrieval under affine transforms", Mokhtarian & Abbasi 2002
curvex - x position of points
curvey - y position of points
kappa - curvature coeff for each point
sigma - gaussian sigma
*/
void computeCurveCSS(double[] curvex, double[] curvey, double sigma, boolean isOpen) {
int M = (int) Math.round((10.0 * sigma + 1.0) / 2.0) * 2 - 1;
assert (M % 2 == 1); //M is an odd number
getGaussianDerivs(sigma, M);//, g, dg, d2g
double[] X, XX, Y, YY;
getdXcurve(curvex, sigma, isOpen);
smoothX = gx.clone();
X = dx.clone();
XX = d2x.clone();
getdXcurve(curvey, sigma, isOpen);
smoothY = gx.clone();
Y = dx.clone();
YY = d2x.clone();
kappa = new double[curvex.length];
for (int i = 0; i < curvex.length; i++) {
// Mokhtarian 02' eqn (4)
kappa[i] = (X[i] * YY[i] - XX[i] * Y[i]) / Math.pow(X[i] * X[i] + Y[i] * Y[i], 1.5);
}
}
/* find zero crossings on curvature */
ArrayList<Integer> findCSSInterestPoints() {
assert (kappa != null);
ArrayList<Integer> crossings = new ArrayList<>();
for (int i = 0; i < kappa.length - 1; i++) {
if ((kappa[i] < 0.0 && kappa[i + 1] > 0.0) || kappa[i] > 0.0 && kappa[i + 1] < 0.0) {
crossings.add(i);
}
}
return crossings;
}
public void polyLineSplit(MatOfPoint pl) {
contourX = new double[pl.height()];
contourY = new double[pl.height()];
for (int j = 0; j < contourX.length; j++) {
contourX[j] = pl.get(j, 0)[0];
contourY[j] = pl.get(j, 0)[1];
}
}
public MatOfPoint polyLineMerge(double[] xContour, double[] yContour) {
assert (xContour.length == yContour.length);
MatOfPoint pl = new MatOfPoint();
List<Point> list = new ArrayList<>();
for (int j = 0; j < xContour.length; j++)
list.add(new Point(xContour[j], yContour[j]));
pl.fromList(list);
return pl;
}
MatOfPoint smoothCurve(MatOfPoint curve, double sigma) {
int M = (int) Math.round((10.0 * sigma + 1.0) / 2.0) * 2 - 1;
assert (M % 2 == 1); //M is an odd number
//create kernels
getGaussianDerivs(sigma, M);
polyLineSplit(curve);
getdXcurve(contourX, sigma, false);
smoothX = gx.clone();
getdXcurve(contourY, sigma, false);
smoothY = gx;
Logger.getRootLogger().info("Smooth curve len: " + smoothX.length);
return polyLineMerge(smoothX, smoothY);
}
}
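For a quick Python take on the same approach, here is a small sketch that smooths the x and y coordinate sequences of a closed contour (as returned by cv2.findContours) with a 1-D Gaussian, using wrap-around boundaries for the closed curve; the sigma value is just illustrative.
import numpy as np
from scipy.ndimage import gaussian_filter1d

def smooth_contour(contour, sigma=3.0):
    # contour has shape (N, 1, 2), as returned by cv2.findContours
    pts = contour[:, 0, :].astype(float)
    x = gaussian_filter1d(pts[:, 0], sigma, mode='wrap')  # 'wrap' treats the curve as closed
    y = gaussian_filter1d(pts[:, 1], sigma, mode='wrap')
    smoothed = np.stack([x, y], axis=1).round().astype(np.int32)
    return smoothed.reshape(-1, 1, 2)    # same layout as the input, usable with cv2.drawContours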
