I need to compare some theoretical data with real data in Python.
The theoretical data comes from solving an equation.
To improve the comparison I would like to remove data points that fall far from the theoretical curve; that is, I want to remove the points below and above the red dashed lines in the figure (made with matplotlib).
Both the theoretical curves and the data points are arrays of different lengths.
I can remove the points roughly by eye; for example, the first upper point can be detected using:
data2[(data2.redshift < 0.4) & (data2.dmodulus > 1)]
rec.array([('1997o', 0.374, 1.0203223485103787, 0.44354759972859786)], dtype=[('SN_name', '|S10'), ('redshift', '<f8'), ('dmodulus', '<f8'), ('dmodulus_error', '<f8')])
But I would like a less by-eye approach.
So, can anyone help me find an easy way of removing the problematic points?
Thank you!
This might be overkill, and it is based on your comment:
Both the theoretical curves and the data points are arrays of
different length.
I would do the following:
Truncate the data set so that its x values lie within the max and min values of the theoretical set.
Interpolate the theoretical curve using scipy.interpolate.interp1d and the above truncated data x values. The reason for step (1) is to satisfy the constraints of interp1d.
Use numpy.where to find data y values that are outside the range of acceptable theory values.
Don't discard these values, as was suggested in comments and other answers. If you want, for clarity, point them out by plotting the 'inliers' in one color and the 'outliers' in another color.
Here's a script that is close to what you are looking for, I think. It hopefully will help you accomplish what you want:
import numpy as np
import scipy.interpolate as interpolate
import matplotlib.pyplot as plt
# make up data
def makeUpData():
'''Make many more data points (x,y,yerr) than theory (x,y),
with theory yerr corresponding to a constant "sigma" in y,
about x,y value'''
NX= 150
dataX = (np.random.rand(NX)*1.1)**2
dataY = (1.5*dataX+np.random.rand(NX)**2)*dataX
dataErr = np.random.rand(NX)*dataX*1.3
theoryX = np.arange(0,1,0.1)
theoryY = theoryX*theoryX*1.5
theoryErr = 0.5
return dataX,dataY,dataErr,theoryX,theoryY,theoryErr
def makeSameXrange(theoryX,dataX,dataY):
'''
Truncate the dataX and dataY ranges so that dataX min and max are within
the max and min of theoryX.
'''
minT,maxT = theoryX.min(),theoryX.max()
goodIdxMax = np.where(dataX<maxT)
goodIdxMin = np.where(dataX[goodIdxMax]>minT)
return (dataX[goodIdxMax])[goodIdxMin],(dataY[goodIdxMax])[goodIdxMin]
# take 'theory' and get values at every 'data' x point
def theoryYatDataX(theoryX,theoryY,dataX):
'''For every dataX point, find the interpolated theoryY value. theoryX is needed
for interpolation.'''
f = interpolate.interp1d(theoryX,theoryY)
return f(dataX[np.where(dataX<np.max(theoryX))])
# collect valid points
def findInlierSet(dataX,dataY,interpTheoryY,theoryErr):
'''Find where theoryY-theoryErr < dataY < theoryY+theoryErr and return
the valid points.'''
withinUpper = np.where(dataY<(interpTheoryY+theoryErr))
withinLower = np.where(dataY[withinUpper]
>(interpTheoryY[withinUpper]-theoryErr))
return (dataX[withinUpper])[withinLower],(dataY[withinUpper])[withinLower]
def findOutlierSet(dataX,dataY,interpTheoryY,theoryErr):
'''Find where dataY > theoryY+theoryErr or dataY < theoryY-theoryErr and
return those outlying points.'''
withinUpper = np.where(dataY>(interpTheoryY+theoryErr))
withinLower = np.where(dataY<(interpTheoryY-theoryErr))
return (dataX[withinUpper],dataY[withinUpper],
dataX[withinLower],dataY[withinLower])
if __name__ == "__main__":
dataX,dataY,dataErr,theoryX,theoryY,theoryErr = makeUpData()
TruncDataX,TruncDataY = makeSameXrange(theoryX,dataX,dataY)
interpTheoryY = theoryYatDataX(theoryX,theoryY,TruncDataX)
inDataX,inDataY = findInlierSet(TruncDataX,TruncDataY,interpTheoryY,
theoryErr)
outUpX,outUpY,outDownX,outDownY = findOutlierSet(TruncDataX,
TruncDataY,
interpTheoryY,
theoryErr)
#print inlierIndex
fig = plt.figure()
ax = fig.add_subplot(211)
ax.errorbar(dataX,dataY,dataErr,fmt='.',color='k')
ax.plot(theoryX,theoryY,'r-')
ax.plot(theoryX,theoryY+theoryErr,'r--')
ax.plot(theoryX,theoryY-theoryErr,'r--')
ax.set_xlim(0,1.4)
ax.set_ylim(-.5,3)
ax = fig.add_subplot(212)
ax.plot(inDataX,inDataY,'ko')
ax.plot(outUpX,outUpY,'bo')
ax.plot(outDownX,outDownY,'ro')
ax.plot(theoryX,theoryY,'r-')
ax.plot(theoryX,theoryY+theoryErr,'r--')
ax.plot(theoryX,theoryY-theoryErr,'r--')
ax.set_xlim(0,1.4)
ax.set_ylim(-.5,3)
fig.savefig('findInliers.png')
This figure is the result:
In the end I used some of Yann's code:
def theoryYatDataX(theoryX,theoryY,dataX):
'''For every dataX point, find the interpolated theoryY value. theoryX is needed
for interpolation.'''
f = interpolate.interp1d(theoryX,theoryY)
return f(dataX[np.where(dataX<np.max(theoryX))])
def findOutlierSet(data,interpTheoryY,theoryErr):
'''Find where data.dmodulus falls outside theoryY +- theoryErr and split the
data into inliers and outliers.'''
up = np.where(data.dmodulus > (interpTheoryY+theoryErr))
low = np.where(data.dmodulus < (interpTheoryY-theoryErr))
# join all the index together in a flat array
out = np.hstack([up,low]).ravel()
index = np.array(np.ones(len(data),dtype=bool))
index[out]=False
datain = data[index]
dataout = data[out]
return datain, dataout
def selectdata(data,theoryX,theoryY):
"""
Data selection: z<1 and +-0.5 LFLRW separation
"""
# Select data with redshift z<1
data1 = data[data.redshift < 1]
# From modulus to light distance:
data1.dmodulus, data1.dmodulus_error = modulus2distance(data1.dmodulus,data1.dmodulus_error)
# redshift data order
data1.sort(order='redshift')
# Outliers: distance to LFLRW curve bigger than +-0.5
theoryErr = 0.5
# Theory curve Interpolation to get the same points as data
interpy = theoryYatDataX(theoryX,theoryY,data1.redshift)
datain, dataout = findOutlierSet(data1,interpy,theoryErr)
return datain, dataout
Using those functions I can finally obtain:
Thank you all for your help.
Just look at the difference between the red curve and the points; if it is bigger than the difference between the red curve and the dashed red curve, remove the point.
diff = np.abs(points - red_curve)
index = diff > (dashed_curve - red_curve)   # points that lie outside the dashed band
filtered = points[~index]                   # keep only the points inside the band
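Note that the theoretical curves and the data points have different lengths here, so before taking that difference you would first evaluate the curves at the data x values, e.g. with np.interp. A minimal sketch with illustrative names (data_x, theory_x, theory_y are assumptions; the 0.5 band half-width is the value used elsewhere in this thread, and theory_x must be sorted):
import numpy as np

# put the solid and dashed curves on the same grid as the data (e.g. the redshifts)
red_curve = np.interp(data_x, theory_x, theory_y)
dashed_curve = np.interp(data_x, theory_x, theory_y + 0.5)  # assumed +-0.5 band
After that, the three lines above work element-wise, with points being the data y values.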
But please take the comment from NickLH seriously. Your data looks pretty good without any filtering; your "outliers" all have a very big error and won't affect the fit much.
You could either use numpy.where() to identify which x,y pairs meet your plotting criteria, or perhaps use enumerate to do pretty much the same thing. Example:
x_list = [ 1, 2, 3, 4, 5, 6 ]
y_list = ['f','o','o','b','a','r']
result = [y_list[i] for i, x in enumerate(x_list) if 2 <= x < 5]
print result
I'm sure you could change the conditions so that '2' and '5' in the above example are replaced by functions of your curves.
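And a rough numpy.where equivalent of the same filtering, as a sketch, assuming the lists are converted to arrays first:
import numpy as np

x = np.array([1, 2, 3, 4, 5, 6])
y = np.array(['f', 'o', 'o', 'b', 'a', 'r'])

idx = np.where((x >= 2) & (x < 5))   # indices where the condition holds
print(y[idx])                        # ['o' 'o' 'b']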
Related
I have a 3D data matrix of sea level data (time, y, x) and I found the power spectrum by taking the square of the FFT, but there are low frequencies that are really dominant. I want to get rid of those low frequencies by applying a high-pass filter... how would I go about doing that?
Example of data set and structure/code is below:
This is the data set and creating the arrays:
Yearmin = 2018
Yearmax = 2019
year_len = Yearmax - Yearmin + 1.0 # number of years
direcInput = "filepath"
a = s.Dataset(direcInput+"test.nc", mode='r')
#creating arrays
lat = a.variables["latitude"][:]
lon = a.variables["longitude"][:]
time1 = a.variables["time"][:] #DAYS SINCE JAN 1ST 1950
sla = a.variables["sla"][:,:,:] #t, y, x
time = Yearmin + (year_len * (time1 - np.min(time1)) / ( np.max(time1) - np.min(time1)))
#detrending and normalizing data
def standardize(y, detrend = True, normalize = True):
if detrend == True:
y = signal.detrend(y, axis=0)
y = (y - np.mean(y, axis=0))
if normalize == True:
y = y / np.std(y, axis=0)
return y
sla_standard = standardize(sla)
print(sla_standard.shape)  # (710, 81, 320)
#fft
fft = np.fft.rfft(sla_standard, axis=0)
spec = np.square(abs(fft))
frequencies = np.arange(0, nyquist, df)
#PLOTTING THE FREQUENCIES VS SPECTRUM FOR A FEW DIFFERENT SPATIAL LOCATIONS
plt.plot(frequencies, spec[:, 68,85])
plt.plot(frequencies, spec[:, 23,235])
plt.plot(frequencies, spec[:, 39,178])
plt.plot(frequencies, spec[:, 30,149])
plt.xlim(0,.05)
plt.show()
My goal is to make a high pass filter of the ORIGINAL time series (sla_standard) to remove the two really big peaks. Which type of filter should I use? Thank you!
Use .axes.Axes.set_ylim to set the y-axis limit.
Axes.set_ylim(self, bottom=None, top=None, emit=True, auto=False, *, ymin=None, ymax=None)
So in your case ymin=None and you set ymax for example to ymax=60000 before you start plotting.
Thus plt.ylim(ymin=None, ymax=60000).
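Applied to the plotting code in the question, a minimal sketch (60000 is just an example cap):
plt.plot(frequencies, spec[:, 68, 85])
plt.ylim(top=60000)   # same as ymax=60000; the bottom limit is left untouched
plt.show()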
Taking out data should not be done here because it amounts to "falsifying results". What you actually want is to zoom in on the chart. Someone who reads the chart independently of you would misinterpret the data if not made aware of the removal in advance. Peaks that go off the chart are okay, because everybody understands that.
Or:
Direct replacement of certain values in an array (arr):
arr[arr > ori] = dest
For example, in your case ori=60000 and dest=1.
All values larger (">") than 60k are replaced by 1.
The different filters: as you state, a filter acts on the frequencies of your signal. Different filter shapes exist, and some of them have complex expressions because they need to be implemented in real-time processing (causal). In your case, however, you seem to post-process the data, so you can use the Fourier transform, which requires all the data (non-causal).
The filter to choose: consequently, you can perform your filtering operation directly in the Fourier domain by applying a mask to your frequencies. If you want to remove frequencies, I recommend using a binary mask made of 0s and 1s. Why? Because it is the simplest filter you can think of. It is scientifically defensible to state that you completely removed some frequencies (say it and justify it); it is more difficult to claim that you kept some, attenuated others a little, and chose the attenuation factor arbitrarily...
Python implementation
signal_fft = np.fft.rfft(sla_standard, axis=0)
mask = np.ones(signal_fft.shape)               # one weight per frequency bin
mask[freq_to_filter, ...] = 0.0                # define here the frequencies to filter
filtered_signal = np.fft.irfft(mask*signal_fft, n=sla_standard.shape[0], axis=0)
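freq_to_filter is left for you to define above; one way to build it, as a sketch, is from np.fft.rfftfreq, assuming dt is the sampling interval of your time axis (an assumed name, in whatever time units you want the frequencies in):
# frequency corresponding to each row of signal_fft
freqs = np.fft.rfftfreq(sla_standard.shape[0], d=dt)

# high-pass: pick the low-frequency bins to zero out (the cutoff value is an assumption)
cutoff = 0.05
freq_to_filter = np.where(freqs < cutoff)[0]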
I need to create a set of 100 random 2D points with two requirements.
A: the points must be inside a rectangle with specific dimensions.
B: the points must satisfy a condition; for example, given the coordinates x and y of a certain generated point, x+y<2.
I'm able to generate a set of points inside a rectangle:
xyMin = [xMin, yMin]
xyMax = [xMax, yMax]
data = np.random.uniform(low=xyMin, high=xyMax, size=(100,2))
How can I add the second condition? I could use a while loop, generating one point per iteration and checking the condition: if the condition is satisfied, increase the counter and go to the next point until the index reaches 100; if not, try again in the next iteration without increasing the index.
Is it possible to achieve the same result using list comprehension?
Here's a faster way than generating pairs one at a time. It just re-generates all pairs which fail the second condition until there are no failures left.
xyMin = 0.9
xyMax = 1.1
data = np.random.uniform(low=xyMin, high=xyMax, size=(100,2))
while True:
    failures = data.sum(axis=1) >= 2
    n = failures.sum()
    if n > 0:
        data[failures] = np.random.uniform(low=xyMin, high=xyMax, size=(n,2))
    else:
        break
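As for the list comprehension mentioned in the question, a sketch of that variant (it filters a single batch of candidates, so it may return fewer than 100 points and you would still have to top it up):
candidates = np.random.uniform(low=xyMin, high=xyMax, size=(100, 2))
kept = [(x, y) for x, y in candidates if x + y < 2]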
That said, study this question from stackexchange mathematics. There's going to be a much better way. You can generate points in the triangle x+y<2 like this:
A = np.array([0,0])
B = np.array([2,0])
C = np.array([0,2])
r1,r2 = np.random.random(size=(2,100,1))
points = (1-np.sqrt(r1))*A + (np.sqrt(r1)*(1-r2))*B + r2*np.sqrt(r1)*C
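Note this samples the triangle with vertices (0,0), (2,0) and (0,2) uniformly; if your rectangle does not contain that whole triangle, you would still need to intersect the two conditions.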
Here is a sample answer using some guessed values:
import numpy as np
import matplotlib.pyplot as plt
xyMin = [0, 0]
xyMax = [3, 3]
data = np.random.uniform(low=xyMin, high=xyMax, size=(10000,2))
mask = (np.sum(data, axis=1)<2)
data = data[mask]
plt.scatter(data[:, 0], data[:,1])
plt.show()
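If you need exactly 100 points, oversampling like this and then keeping the first 100 (data = data[:100] after applying the mask) would work, assuming enough points survive the cut.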
This is in astronomy, but I think my question is probably very elementary - I'm not very experienced, I apologise.
I am plotting the relationship between the colour of a star-forming galaxy (y axis) and the redshift (x axis). The plot is a line that rises from around 0 up to maybe 9, then decays again to about -2. The peak (~9 in colour) is at a redshift of around 4, and I want to find the peak more exactly. The colour is given by quite a confusing function of redshift, and I can't figure out how to differentiate it, or else I would just do that.
Could I maybe differentiate the complicated redshift (z) function? If so, how?
If not, how could I estimate a peak graphically/numerically?
Sorry for the very basic question and thank you very much in advance. My code is below.
import numpy as np
import matplotlib.pyplot as plt
import IGM
import scipy.integrate as integrate
SF = np.load('StarForming.npy')
lam = SF[0]
SED = SF[1]
filters = ['f435w','f606w','f814w','f105w','f125w','f140w','f160w']
filters_wl = {'f435w':0.435,'f606w':0.606,'f814w':0.814,'f105w':1.05,'f125w':1.25,'f140w':1.40,'f160w':1.60} # filter dictionary to give wavelengths of filters in microns
fT = {} # this is a dictionary
for f in filters:
data = np.loadtxt(f+'.txt').T
fT[f]= data
fluxes = {}
for f in filters: fluxes[f] = [] # make empty list for each
redshifts = np.arange(0.0,10.0,0.1) # redshifts going from 0 to 10
for z in redshifts:
lamz = lam * (1. + z)
obsSED = SED * IGM.madau(lamz, z)
for f in filters:
newT = np.interp(lamz,fT[f][0],fT[f][1]) # for each filter, refer back
bb_flux = integrate.trapz((1./lamz)*obsSED*newT,x=lamz)/integrate.trapz((1./lamz)*newT,x=lamz)
# 1st bit integrates, 2nd bit divides by area under filter to normalise filter
# loops over all z, for all z it creates a new SED, redshift wl grid
fluxes[f].append(bb_flux)
for f in filters: fluxes[f] = np.array(fluxes[f])
colour = -2.5*np.log10(fluxes['f435w']/fluxes['f606w'])
plt.plot(redshifts,colour)
plt.xlabel('Redshift')
plt.ylabel('Colour')
plt.show()
I do not have a high enough reputation to comment, but this may solve your problem, so I guess it's an answer. Store all your y-coordinates in a list, then use the max(list) function to find the maximum. If you want an ordered pair, store your coordinates as (y, x) tuples and use max(list):
lst = [(3,2), (4,1), (1, 200)]
max(lst)
yields (4,1)
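Alternatively, since the question already builds numpy arrays, a sketch of the same idea with np.argmax, using the colour and redshifts arrays from the question's code:
peak_index = np.argmax(colour)          # position of the largest colour value
print(redshifts[peak_index])            # redshift at which the colour peaks
For a finer estimate you could simply rerun the loop with a smaller redshift step, e.g. np.arange(0.0, 10.0, 0.01).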
I have a problem in which I have a bunch of images for which I have to generate histograms, but I have to generate a histogram for each pixel. That is, for a collection of n images, I have to count the values that pixel (0,0) assumed and generate a histogram, then do the same for (0,1), (0,2) and so on. I coded the following method to do this:
class ImageData:
def generate_pixel_histogram(self, images, bins):
"""
Generate a histogram of the image for each pixel, counting
the values assumed for each pixel in a specified bins
"""
max_value = 0.0
min_value = 0.0
for i in range(len(images)):
image = images[i]
max_entry = max(max(p[1:]) for p in image.data)
min_entry = min(min(p[1:]) for p in image.data)
if max_entry > max_value:
max_value = max_entry
if min_entry < min_value:
min_value = min_entry
interval_size = (math.fabs(min_value) + math.fabs(max_value))/bins
for x in range(self.width):
for y in range(self.height):
pixel_histogram = {}
for i in range(bins+1):
key = round(min_value+(i*interval_size), 2)
pixel_histogram[key] = 0.0
for i in range(len(images)):
image = images[i]
value = round(Utils.get_bin(image.data[x][y], interval_size), 2)
pixel_histogram[value] += 1.0/len(images)
self.data[x][y] = pixel_histogram
Here each position of the matrix stores a dictionary representing a histogram. But since I do this for each pixel and the calculation takes a considerable time, this seems to me a good problem to parallelize. However, I don't have experience with this and I don't know how to do it.
EDIT:
I tried what @Eelco Hoogendoorn told me and it works perfectly. But applying it to my code, where the data are a large number of images generated with this constructor (after the values are calculated and are no longer just the initial values), I just got h as an array of zeros [0 0 0]. What I pass to the histogram method is an array of ImageData.
class ImageData(object):
def __init__(self, width=5, height=5, range_min=-1, range_max=1):
"""
The ImageData constructor
"""
self.width = width
self.height = height
#The values range each pixel can assume
self.range_min = range_min
self.range_max = range_max
self.data = np.arange(width*height).reshape(height, width)
#Another class, just the method here
def generate_pixel_histogram(realizations, bins):
"""
Generate a histogram of the image for each pixel, counting
the values assumed for each pixel in a specified bins
"""
data = np.array([image.data for image in realizations])
min_max_range = data.min(), data.max()+1
bin_boundaries = np.empty(bins+1)
# Function to wrap np.histogram, passing on only the first return value
def hist(pixel):
h, b = np.histogram(pixel, bins=bins, range=min_max_range)
bin_boundaries[:] = b
return h
# Apply this for each pixel
hist_data = np.apply_along_axis(hist, 0, data)
print hist_data
print bin_boundaries
Now I get:
hist_data = np.apply_along_axis(hist, 0, data)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/numpy/lib/shape_base.py", line 104, in apply_along_axis
outshape[axis] = len(res)
TypeError: object of type 'NoneType' has no len()
Any help would be appreciated.
Thanks in advance.
As noted by john, the most obvious solution to this is to look for library functionality that will do this for you. It exists, and it will be orders of magnitude more efficient than what you are doing here.
Standard numpy has a histogram function that can be used for this purpose. If you have only a few values per pixel, it will be relatively inefficient, and it creates a dense histogram vector rather than the sparse one you produce here. Still, chances are good the code below solves your problem efficiently.
import numpy as np
#some example data; 128 images of 4x4 pixels
voxeldata = np.random.randint(0,100, (128, 4,4))
#we need to apply the same binning range to each pixel to get sensible output
globalminmax = voxeldata.min(), voxeldata.max()+1
#number of output bins
bins = 20
bin_boundaries = np.empty(bins+1)
#function to wrap np.histogram, passing on only the first return value
def hist(pixel):
h, b = np.histogram(pixel, bins=bins, range=globalminmax)
bin_boundaries[:] = b #simply overwrite; result should be identical each time
return h
#apply this for each pixel
histdata = np.apply_along_axis(hist, 0, voxeldata)
print bin_boundaries
print histdata[:,0,0] #print the histogram of an arbitrary pixel
But the more general message I'd like to convey, looking at your code sample and the type of problem you are working on, is: do yourself a favor, and learn numpy.
Parallelization certainly would not be my first port of call in optimizing this kind of thing. Your main problem is that you're doing lots of looping at the Python level. Python is inherently slow at this kind of thing.
One option would be to learn how to write Cython extensions and write the histogram bit in Cython. This might take you a while.
Actually, taking a histogram of pixel values is a very common task in computer vision and it has already been efficiently implemented in OpenCV (which has python wrappers). There are also several functions for taking histograms in the numpy python package (though they are slower than the OpenCV implementations).
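For reference, a minimal sketch of both on a single fake 8-bit image (the image and the 20 bins are illustrative assumptions, and these are whole-image histograms rather than the per-pixel ones asked about):
import numpy as np
import cv2

img = np.random.randint(0, 256, (4, 4), dtype=np.uint8)  # fake 8-bit grayscale image

# OpenCV: histogram of channel 0 over the whole image, 20 bins covering [0, 256)
hist_cv = cv2.calcHist([img], [0], None, [20], [0, 256])

# numpy equivalent
hist_np, edges = np.histogram(img, bins=20, range=(0, 256))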
I use matplotlib's hexbin method to compute 2D histograms of my data.
But I would like to get the coordinates of the centers of the hexagons in order to further process the results.
I got the values using get_array() method on the result, but I cannot figure out how to get the bins coordinates.
I tried to compute them given the number of bins and the extent of my data, but I don't know the exact number of bins in each direction. gridsize=(10,2) should do the trick, but it does not seem to work.
Any idea?
I think this works.
from __future__ import division
import numpy as np
import math
import matplotlib.pyplot as plt
def generate_data(n):
"""Make random, correlated x & y arrays"""
points = np.random.multivariate_normal(mean=(0,0),
cov=[[0.4,9],[9,10]],size=int(n))
return points
if __name__ =='__main__':
color_map = plt.cm.Spectral_r
n = 1e4
points = generate_data(n)
xbnds = np.array([-20.0,20.0])
ybnds = np.array([-20.0,20.0])
extent = [xbnds[0],xbnds[1],ybnds[0],ybnds[1]]
fig=plt.figure(figsize=(10,9))
ax = fig.add_subplot(111)
x, y = points.T
# Set gridsize just to make them visually large
image = plt.hexbin(x,y,cmap=color_map,gridsize=20,extent=extent,mincnt=1,bins='log')
# Note that mincnt=1 means only cells containing at least one point are shown
counts = image.get_array()
ncnts = np.count_nonzero(np.power(10,counts))
verts = image.get_offsets()
for offc in xrange(verts.shape[0]):
binx,biny = verts[offc][0],verts[offc][1]
if counts[offc]:
plt.plot(binx,biny,'k.',zorder=100)
ax.set_xlim(xbnds)
ax.set_ylim(ybnds)
plt.grid(True)
cb = plt.colorbar(image,spacing='uniform',extend='max')
plt.show()
I would love to confirm that the code by Hooked using get_offsets() works, but I tried several iterations of the code mentioned above to retrieve center positions and, as Dave mentioned, get_offsets() remains empty. The workaround that I found is to use the non-empty image.get_paths() option. My code takes the mean of each path's vertices to find the center, which makes it just a smidge longer, but it does work.
The get_paths() option returns a set of embedded x,y coordinates that can be looped over and then averaged to return the center position of each hexagon.
The code that I have is as follows:
counts=image.get_array() #counts in each hexagon, works great
verts=image.get_offsets() #empty, don't use this
b=image.get_paths() #this does work, gives Path([[]][]) which can be plotted
for x in xrange(len(b)):
xav=np.mean(b[x].vertices[0:6,0]) #center in x (RA)
yav=np.mean(b[x].vertices[0:6,1]) #center in y (DEC)
plt.plot(xav,yav,'k.',zorder=100)
I had this same problem. I think what needs to be developed is a framework with a HexagonalGrid object which can then be applied to many different data sets (and it would be awesome to do it for N dimensions). This is possible, and it surprises me that neither Scipy nor Numpy has anything for it (furthermore, there seems to be nothing else like it except perhaps binify).
That said, I assume you want to use hexbinning to compare multiple binned data sets. This requires some common base. I got this to work using matplotlib's hexbin the following way:
import numpy as np
import matplotlib.pyplot as plt
def get_data (mean,cov,n=1e3):
"""
Quick fake data builder
"""
np.random.seed(101)
points = np.random.multivariate_normal(mean=mean,cov=cov,size=int(n))
x, y = points.T
return x,y
def get_centers (hexbin_output):
"""
about 40% faster than the previous post, only because you're not calculating
the min/max every time
"""
paths = hexbin_output.get_paths()
v = paths[0].vertices[:-1] # the vertex list ends with a closing [0,0]; drop it
vx,vy = v.T
idx = [3,0,5,2] # index for [xmin,xmax,ymin,ymax]
xmin,xmax,ymin,ymax = vx[idx[0]],vx[idx[1]],vy[idx[2]],vy[idx[3]]
half_width_x = abs(xmax-xmin)/2.0
half_width_y = abs(ymax-ymin)/2.0
centers = []
for i in xrange(len(paths)):
cx = paths[i].vertices[idx[0],0]+half_width_x
cy = paths[i].vertices[idx[2],1]+half_width_y
centers.append((cx,cy))
return np.asarray(centers)
# important parts ==>
class Hexagonal2DGrid (object):
"""
Used to fix the gridsize, extent, and bins
"""
def __init__ (self,gridsize,extent,bins=None):
self.gridsize = gridsize
self.extent = extent
self.bins = bins
def hexbin (x,y,hexgrid):
"""
To hexagonally bin the data in 2 dimensions
"""
fig = plt.figure()
ax = fig.add_subplot(111)
# Note mincnt=0 so that it will return a value for every point in the
# hexgrid, not just those with count>mincnt
# Basically you fix the gridsize, extent, and bins to keep them the same
# then the resulting count array is the same
hexbin = plt.hexbin(x,y, mincnt=0,
gridsize=hexgrid.gridsize,
extent=hexgrid.extent,
bins=hexgrid.bins)
# you could close the figure if you don't want it
# plt.close(fig.number)
counts = hexbin.get_array().copy()
return counts, hexbin
# Example ===>
if __name__ == "__main__":
hexgrid = Hexagonal2DGrid((21,5),[-70,70,-20,20])
x_data,y_data = get_data((0,0),[[-40,95],[90,10]])
x_model,y_model = get_data((0,10),[[100,30],[3,30]])
counts_data, hexbin_data = hexbin(x_data,y_data,hexgrid)
counts_model, hexbin_model = hexbin(x_model,y_model,hexgrid)
# if you want the centers, they will be the same for both
centers = get_centers(hexbin_data)
# if you want to ignore the cells with zeros then use the following mask.
# But if you want zeros for some bins and not others, I'm not sure of an elegant way
# to do this without using the centers
nonzero = counts_data != 0
# now you can compare the two data sets
variance_data = counts_data[nonzero]
square_diffs = (counts_data[nonzero]-counts_model[nonzero])**2
chi2 = np.sum(square_diffs/variance_data)
print(" chi2={}".format(chi2))