I'm trying to implement NumPy-style array addition with broadcasting in C. The code below works for arrays with the same shape; how do I make it support broadcasting?
I read the input data from files via load_data().
#include <stdlib.h>

void load_data(const char *name, int *data);  /* provided elsewhere */

typedef struct tensor {
    int *data;
    int shape[4];
} Tensor;

int main(int argc, char const *argv[]) {
    Tensor input_1, input_2, output;

    input_1.data = malloc(256 * sizeof(int));
    load_data("input_1", input_1.data);
    input_1.shape[0] = 1;
    input_1.shape[1] = 16;
    input_1.shape[2] = 4;
    input_1.shape[3] = 4;

    input_2.data = malloc(256 * sizeof(int));
    load_data("input_2", input_2.data);
    input_2.shape[0] = 1;
    input_2.shape[1] = 16;
    input_2.shape[2] = 1;
    input_2.shape[3] = 1;

    output.data = malloc(256 * sizeof(int));
    output.shape[0] = 1;
    output.shape[1] = 16;
    output.shape[2] = 4;
    output.shape[3] = 4;

    int total_elements =
        output.shape[0] * output.shape[1] * output.shape[2] * output.shape[3];

    // works when shapes are the same for both inputs
    for (int x = 0; x < output.shape[0]; x++) {
        for (int y = 0; y < output.shape[1]; y++) {
            for (int z = 0; z < output.shape[2]; z++) {
                for (int w = 0; w < output.shape[3]; w++) {
                    // row-major offset: each stride is the product of the trailing dimensions
                    int index =
                        x * (output.shape[1] * output.shape[2] * output.shape[3]) +
                        y * (output.shape[2] * output.shape[3]) +
                        z * output.shape[3] +
                        w;
                    output.data[index] = input_1.data[index] + input_2.data[index];
                }
            }
        }
    }
    return 0;
}
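One way to support NumPy-style broadcasting (a sketch, assuming every input dimension is either equal to the corresponding output dimension or 1, which is NumPy's broadcasting rule; the helper names add_broadcast and strides4 are made up) is to compute per-input strides and set the stride to 0 along any axis of size 1, so the single element is reused along that axis:

static void strides4(const int shape[4], int s[4]) {
    /* Contiguous row-major strides for a 4-D shape. */
    s[3] = 1;
    for (int d = 2; d >= 0; d--)
        s[d] = s[d + 1] * shape[d + 1];
}

/* Sketch of broadcast-aware element-wise addition; no shape validation. */
void add_broadcast(const Tensor *a, const Tensor *b, Tensor *out) {
    int sa[4], sb[4];
    strides4(a->shape, sa);
    strides4(b->shape, sb);
    for (int d = 0; d < 4; d++) {
        if (a->shape[d] == 1) sa[d] = 0;  /* size-1 axis: reuse the single element */
        if (b->shape[d] == 1) sb[d] = 0;
    }
    int o = 0;  /* output is written contiguously in row-major order */
    for (int x = 0; x < out->shape[0]; x++)
        for (int y = 0; y < out->shape[1]; y++)
            for (int z = 0; z < out->shape[2]; z++)
                for (int w = 0; w < out->shape[3]; w++, o++)
                    out->data[o] = a->data[x * sa[0] + y * sa[1] + z * sa[2] + w * sa[3]]
                                 + b->data[x * sb[0] + y * sb[1] + z * sb[2] + w * sb[3]];
}

With your shapes, input_2 (1,16,1,1) gets strides (0,1,0,0), so every element of each 4x4 block of input_1 is paired with the same input_2 value; a call like add_broadcast(&input_1, &input_2, &output) would replace the nested loops in main.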
I've been working for some days now on a DirectX 11 version of the Mandelbrot set. What I've done so far is create a quad with a texture on it. I can color the points with a pixel shader, but for some reason the Mandelbrot set in the pixel shader does not return the expected result. I tested the logic in plain C++ code and I get the same erroneous result. Any idea what's wrong with the code? I have a proper version working in Python and I just replicated the code, but it seems something is missing.
The width of the set is 2.5 (which will stretch the image a bit). It assumes a 1024*960 window and a maximum of 1000 iterations. I compiled with Shader Model 5.0. It starts with the default view of the set with
RealStart = -2.0;
ImagStart = -1.25;
Passed via the constant buffer
cbuffer cBuffer
{
double RealStart; // equals -2.0 from the default view of the set
double ImagStart; // equals -1.25 from the default view of the set
};
// Pixel Shader
float4 main(float4 position : SV_POSITION) : SV_TARGET
{
double real, imag;
double real2, imag2;
int ite = 0;
float4 CalcColor = { 1.0f , 1.0f, 1.0f, 1.0f };
// position is the position of the pixel from 1.0f to 0.0f
real = RealStart + (double) position.x / 1024 * 2.5;
imag = ImagStart + (double) position.y / 960 * 2.5;
for (int i = 0; i < 1000; i++)
{
// breaking the complex number down into its constituents
real2 = real * real;
imag2 = imag * imag;
if (real2 + imag2 > 4.0)
{
break;
}
else {
imag = 2 * real * imag + ImagStart;
real = real2 - imag2 + RealStart;
ite++;
}
}
CalcColor[0] = (float) (ite % 333) / 333 ;
CalcColor[1] = (float) (ite % 666) / 666 ;
CalcColor[2] = (float) (ite % 1000) / 1000;
return CalcColor;
}
Edit: Python version
def Mandelbrot(creal, cimag, maxNumberOfIterations):
    real = creal
    imag = cimag
    for numberOfIterations in range(maxNumberOfIterations):
        real2 = real * real
        imag2 = imag * imag
        if real2 + imag2 > 4.0:
            return numberOfIterations
        imag = 2 * real * imag + cimag
        real = real2 - imag2 + creal
    return maxNumberOfIterations
The creal and cimag values are created like this and then just looped over:
realAxis = np.linspace(realStart, realStart + width, dim)
imagAxis = np.linspace(imagStart, imagStart + width, dim)
It returns the number of iterations into a two-dimensional array, which is plotted to draw the Mandelbrot set.
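Roughly, the driver loop looks like this (a sketch; the value of dim and the matplotlib plotting call are assumptions on my part, the rest follows the description above):

import numpy as np
import matplotlib.pyplot as plt

realStart, imagStart, width, dim = -2.0, -1.25, 2.5, 1024
realAxis = np.linspace(realStart, realStart + width, dim)
imagAxis = np.linspace(imagStart, imagStart + width, dim)

# fill a 2-D array with the iteration count for every point c = creal + i*cimag
image = np.empty((dim, dim), dtype=np.int32)
for row, cimag in enumerate(imagAxis):
    for col, creal in enumerate(realAxis):
        image[row, col] = Mandelbrot(creal, cimag, 1000)

plt.imshow(image, extent=(realStart, realStart + width, imagStart, imagStart + width))
plt.show()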
The error was that the ImagStart and RealStart used inside the else branch need the same per-pixel scaling as well. The shader code has been modified as follows:
cbuffer cBuffer
{
double2 C;
float2 Param;
float MaxIt;
};
// Pixel Shader
float4 main(float4 position : SV_POSITION, float2 texcoord : TEXCOORD) : SV_TARGET
{
double real, imag;
double real2, imag2;
uint ite = 0;
float4 CalcColor = { 1.0f , 1.0f, 1.0f, 1.0f };
real = C.x + ((double) texcoord.x - 0.5) * 2.0 * 2.5;
imag = C.y + ((double) texcoord.y - 0.5) * 2.0 * 2.5;
for (int i = 0; i < 100; i++)
{
real2 = real * real;
imag2 = imag * imag;
if (real2 + imag2 > 4.0)
{
break;
}
else {
imag = 2 * real * imag + C.y + ((double) texcoord.y - 0.5) * 2.0 * 2.5;
real = real2 - imag2 + C.x + ((double) texcoord.x - 0.5) * 2.0 * 2.5;
ite++;
}
}
if (ite > 100)
ite = 100;
CalcColor[0] = (float)(ite % 33) / 33;
CalcColor[1] = (float)(ite % 66) / 66;
CalcColor[2] = (float)(ite % 100) / 100;
return CalcColor;
}
The Mandelbrot set is now drawn correctly.
I would like to reimplement the Qt C++ "Surface" example (Q3DSurface) in PySide2 but QSurfaceDataArray and QSurfaceDataRow are not available.
void SurfaceGraph::fillSqrtSinProxy()
{
float stepX = (sampleMax - sampleMin) / float(sampleCountX - 1);
float stepZ = (sampleMax - sampleMin) / float(sampleCountZ - 1);
QSurfaceDataArray *dataArray = new QSurfaceDataArray;
dataArray->reserve(sampleCountZ);
for (int i = 0 ; i < sampleCountZ ; i++) {
QSurfaceDataRow *newRow = new QSurfaceDataRow(sampleCountX);
// Keep values within range bounds, since just adding step can cause minor drift due
// to the rounding errors.
float z = qMin(sampleMax, (i * stepZ + sampleMin));
int index = 0;
for (int j = 0; j < sampleCountX; j++) {
float x = qMin(sampleMax, (j * stepX + sampleMin));
float R = qSqrt(z * z + x * x) + 0.01f;
float y = (qSin(R) / R + 0.24f) * 1.61f;
(*newRow)[index++].setPosition(QVector3D(x, y, z));
}
*dataArray << newRow;
}
m_sqrtSinProxy->resetArray(dataArray);
}
Is there a way to use a QVector<QSurfaceDataItem> in PySide2?
from PySide2.QtDataVisualization import QtDataVisualization as QDV
data_item = QDV.QSurfaceDataItem()
data_item.setPosition(QVector3D(x, y, z))
The QSurfaceDataItem is available but I can't pass the objects to QSurfaceDataProxy without QVector.
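A minimal sketch of fillSqrtSinProxy in PySide2, assuming PySide2 maps QVector parameters to plain Python lists, so resetArray() can be given a list of rows where each row is a list of QSurfaceDataItem (the function name and the sample range/count defaults below are placeholders):

import math
from PySide2.QtGui import QVector3D
from PySide2.QtDataVisualization import QtDataVisualization as QDV

def fill_sqrt_sin_proxy(proxy, sample_min=-8.0, sample_max=8.0,
                        sample_count_x=50, sample_count_z=50):
    step_x = (sample_max - sample_min) / float(sample_count_x - 1)
    step_z = (sample_max - sample_min) / float(sample_count_z - 1)
    data_array = []                                   # plays the role of QSurfaceDataArray
    for i in range(sample_count_z):
        z = min(sample_max, i * step_z + sample_min)  # clamp to avoid rounding drift
        row = []                                      # plays the role of QSurfaceDataRow
        for j in range(sample_count_x):
            x = min(sample_max, j * step_x + sample_min)
            r = math.sqrt(z * z + x * x) + 0.01
            y = (math.sin(r) / r + 0.24) * 1.61
            item = QDV.QSurfaceDataItem()
            item.setPosition(QVector3D(x, y, z))
            row.append(item)
        data_array.append(row)
    proxy.resetArray(data_array)

Here proxy would be the QSurfaceDataProxy attached to your Q3DSurface series.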
I have a few questions about an optical flow project. I use Python 2 (planning to use Lasagne for deep learning to learn optical flow), and I don't know how to convert the C++ visualization functions to Python.
I downloaded some image pairs (from http://vision.middlebury.edu/flow/data/comp/zip/other-gt-flow.zip) for which I have to estimate the optical flow, together with their ground-truth flow (.flo file). The problem is that when I read the .flo file into the program, I just get a vector of values. How do I view them the way they are shown on the webpage (http://vision.middlebury.edu/flow/data/)? I read various sources and tried the following, but it doesn't work.
Also, when evaluating EPE (endpoint error), in what form should my prediction be so it can be compared with the .flo file?
The code:
################################ Reading flow file ################################
f = open('flow10.flo', 'rb')
x = np.fromfile(f, np.int32, count=1) # not sure what this gives
w = np.fromfile(f, np.int32, count=1) # width
h = np.fromfile(f, np.int32, count=1) # height
print 'x %d, w %d, h %d flo file' % (x, w, h)
data = np.fromfile(f, np.float32) # vector
data_2D = np.reshape(data, newshape=(388,584,2)); # convert to x,y - flow
x = data_2D[...,0]; y = data_2D[...,1];
################################ visualising flow file ################################
mag, ang = cv2.cartToPolar(x,y)
hsv = np.zeros_like(x)
hsv = np.array([ hsv,hsv,hsv ])
hsv = np.reshape(hsv, (388,584,3)); # having rgb channel
hsv[...,1] = 255; # full saturation
hsv[...,0] = ang*180/np.pi/2 # hue from the flow direction, mapped to [0, 180)
hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX) # magnitude [0,255]
bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
bgr = draw_hsv(data_2D)
cv2.imwrite('opticalhsv.png',bgr)
On Middlebury's page there is a zip file called flow-code (http://vision.middlebury.edu/flow/code/flow-code.zip), which provides a tool called color_flow to convert those .flo files to color images.
On the other hand, if you want to implement your own code to do the transformation, I have this piece of code (I cannot credit the original author; it has been some time) that first computes the color for a single flow vector:
static Vec3b computeColor(float fx, float fy)
{
static bool first = true;
// relative lengths of color transitions:
// these are chosen based on perceptual similarity
// (e.g. one can distinguish more shades between red and yellow
// than between yellow and green)
const int RY = 15;
const int YG = 6;
const int GC = 4;
const int CB = 11;
const int BM = 13;
const int MR = 6;
const int NCOLS = RY + YG + GC + CB + BM + MR;
static Vec3i colorWheel[NCOLS];
if (first)
{
int k = 0;
for (int i = 0; i < RY; ++i, ++k)
colorWheel[k] = Vec3i(255, 255 * i / RY, 0);
for (int i = 0; i < YG; ++i, ++k)
colorWheel[k] = Vec3i(255 - 255 * i / YG, 255, 0);
for (int i = 0; i < GC; ++i, ++k)
colorWheel[k] = Vec3i(0, 255, 255 * i / GC);
for (int i = 0; i < CB; ++i, ++k)
colorWheel[k] = Vec3i(0, 255 - 255 * i / CB, 255);
for (int i = 0; i < BM; ++i, ++k)
colorWheel[k] = Vec3i(255 * i / BM, 0, 255);
for (int i = 0; i < MR; ++i, ++k)
colorWheel[k] = Vec3i(255, 0, 255 - 255 * i / MR);
first = false;
}
const float rad = sqrt(fx * fx + fy * fy);
const float a = atan2(-fy, -fx) / (float)CV_PI;
const float fk = (a + 1.0f) / 2.0f * (NCOLS - 1);
const int k0 = static_cast<int>(fk);
const int k1 = (k0 + 1) % NCOLS;
const float f = fk - k0;
Vec3b pix;
for (int b = 0; b < 3; b++)
{
const float col0 = colorWheel[k0][b] / 255.f;
const float col1 = colorWheel[k1][b] / 255.f;
float col = (1 - f) * col0 + f * col1;
if (rad <= 1)
col = 1 - rad * (1 - col); // increase saturation with radius
else
col *= .75; // out of range
pix[2 - b] = static_cast<uchar>(255.f * col);
}
return pix;
}
Then the following function calls computeColor for all the pixels:
// isFlowCorrect was not part of the original snippet; a typical check simply
// rejects NaN and absurdly large values:
static bool isFlowCorrect(Point2f u)
{
    return !std::isnan(u.x) && !std::isnan(u.y) && fabs(u.x) < 1e9 && fabs(u.y) < 1e9;
}
static void drawOpticalFlow(const Mat_<Point2f>& flow, Mat& dst, float maxmotion = -1)
{
dst.create(flow.size(), CV_8UC3);
dst.setTo(Scalar::all(0));
// determine motion range:
float maxrad = maxmotion;
if (maxmotion <= 0)
{
maxrad = 1;
for (int y = 0; y < flow.rows; ++y)
{
for (int x = 0; x < flow.cols; ++x)
{
Point2f u = flow(y, x);
if (!isFlowCorrect(u))
continue;
maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y));
}
}
}
for (int y = 0; y < flow.rows; ++y)
{
for (int x = 0; x < flow.cols; ++x)
{
Point2f u = flow(y, x);
if (isFlowCorrect(u))
dst.at<Vec3b>(y, x) = computeColor(u.x / maxrad, u.y / maxrad);
}
}
}
This is written for OpenCV, but the code should help anyone who wants to achieve something similar.
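If you prefer to stay in Python, here is a rough sketch of reading a .flo file and visualizing it with OpenCV's HSV mapping (the function names are made up; the .flo layout is a float32 tag of 202021.25, then int32 width and height, then interleaved float32 u/v values):

import numpy as np
import cv2

def read_flo(path):
    # .flo layout: float32 tag (202021.25), int32 width, int32 height,
    # then width*height*2 float32 values interleaved as (u, v).
    with open(path, 'rb') as f:
        tag = np.fromfile(f, np.float32, count=1)[0]
        assert tag == 202021.25, 'invalid .flo file'
        w = int(np.fromfile(f, np.int32, count=1)[0])
        h = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * w * h)
    return data.reshape(h, w, 2)

def flow_to_bgr(flow):
    # Simple HSV mapping: hue = direction, value = magnitude.
    u, v = flow[..., 0], flow[..., 1]
    mag, ang = cv2.cartToPolar(u, v)
    hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
    hsv[..., 0] = ang * 180 / np.pi / 2          # hue in [0, 180)
    hsv[..., 1] = 255                            # full saturation
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

flow = read_flo('flow10.flo')
cv2.imwrite('opticalhsv.png', flow_to_bgr(flow))

This reproduces the same idea as the C++ code above, just with OpenCV's built-in HSV conversion instead of the Middlebury color wheel.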
The same XorShift functions written in C and Python give different results. Can you explain it?
The XorShift function generates numbers in the following way:
x(0) = 123456789
y(0) = 362436069
z(0) = 521288629
w(0) = 88675123
x(n+1) = y(n)
y(n+1) = z(n)
z(n+1) = w(n)
w(n+1) = w(n) ^ (w(n)>>19) ^ (x(n)^(x(n)<<11)) ^ ((x(n)^(x(n)<<11)) >> 8)
I wrote this function in Python to generate subsequent values of w:
X = 123456789
Y = 362436069
Z = 521288629
W = 88675123
def xor_shift():
    global X, Y, Z, W
    t = X ^ (X << 11)
    X = Y
    Y = Z
    Z = W
    W = W ^ (W >> 19) ^ t ^ (t >> 8)
    return W
W1 = xor_shift() # 252977563114
W2 = xor_shift() # 646616338854
W3 = xor_shift() # 476657867818
The same code written in C (it can be found on Wikipedia http://en.wikipedia.org/wiki/Xorshift) gives different results:
#include <stdint.h>
uint32_t xor128(void) {
static uint32_t x = 123456789;
static uint32_t y = 362436069;
static uint32_t z = 521288629;
static uint32_t w = 88675123;
uint32_t t;
t = x ^ (x << 11);
x = y; y = z; z = w;
return w = w ^ (w >> 19) ^ t ^ (t >> 8);
}
cout << xor128() <<'\n'; // result W1 = 3701687786
cout << xor128() <<'\n'; // result W2 = 458299110
cout << xor128() <<'\n'; // result W3 = 2500872618
I suppose that there is a problem with my Python code or my use of cout (I am not very good at C++).
EDIT: Working solution:
You need to change the types from uint32_t to uint64_t:
#include <stdint.h>
uint64_t xor128(void) {
static uint64_t x = 123456789;
static uint64_t y = 362436069;
static uint64_t z = 521288629;
static uint64_t w = 88675123;
uint64_t t;
t = x ^ (x << 11);
x = y; y = z; z = w;
return w = w ^ (w >> 19) ^ t ^ (t >> 8);
}
Change all your uint32_t types to uint64_t and you'll get the same result. The difference comes from the 32-bit precision of uint32_t versus the unlimited precision of Python's integers.
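Alternatively (a sketch of the opposite fix), you can keep the canonical 32-bit C version and truncate in Python instead, masking each intermediate result to 32 bits; this reproduces the uint32_t outputs shown above:

X = 123456789
Y = 362436069
Z = 521288629
W = 88675123

def xor_shift32():
    # Same recurrence, but masked to 32 bits like the C uint32_t version.
    global X, Y, Z, W
    t = (X ^ (X << 11)) & 0xFFFFFFFF
    X, Y, Z = Y, Z, W
    W = (W ^ (W >> 19) ^ t ^ (t >> 8)) & 0xFFFFFFFF
    return W

W1 = xor_shift32()  # 3701687786
W2 = xor_shift32()  # 458299110
W3 = xor_shift32()  # 2500872618

Note that the uint64_t variant should match the unbounded Python version only until the intermediate values outgrow 64 bits, whereas the masked version stays in lockstep with the 32-bit C generator.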