I am trying to build a logistic regression model for a dataset consisting of two parameters, x1 and x2, but instead of using just the two of them I have also added their second-order terms as features: x1^2, x2^2 and x1*x2.
At first glance everything looks fine and the error function is decreasing, but while drawing the plot of the decision boundary I noticed that after roughly 500 iterations something strange happens to it.
Here is an animation of the error function over the iterations, together with the corresponding plot of the decision boundary:
Now, I interpret the decision boundary as a quadratic function x2 = f(x1), where the relation between the two parameters is given by:
0.5 = θ0 + θ1*x1 + θ2*x2 + θ3*x1^2 + θ4*x1*x2 + θ5*x2^2
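To make that concrete, here is a small helper (a sketch only, not part of my script; the name boundary_x2 is just for illustration) showing how I solve this equation for x2 with the quadratic formula, treating x1 as fixed. The Theta indexing matches the parameter vector in the code below:
import numpy as np
def boundary_x2(theta, x1):
    # coefficients of theta[5]*x2^2 + (theta[4]*x1 + theta[2])*x2 + (theta[3]*x1^2 + theta[1]*x1 + theta[0] - 0.5) = 0
    a = theta[5]
    b = theta[4]*x1 + theta[2]
    c = theta[3]*x1**2 + theta[1]*x1 + theta[0] - 0.5
    disc = b**2 - 4*a*c
    if a == 0 or disc < 0:
        # the boundary has no real x2 for this x1
        return None, None
    return (-b + np.sqrt(disc)) / (2*a), (-b - np.sqrt(disc)) / (2*a)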
Here is the python code I use to do everything:
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from math import log
from matplotlib.animation import FuncAnimation
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))
def loadData(filepath):
    source = ""
    try:
        f = open(filepath, "r")
        source = f.read()
        f.close()
    except IOError:
        print("Error while reading file (" + filepath + ")")
        return ""
    raw_data = source.split("\n")
    raw_data = [x.split(",") for x in raw_data if x != ""]
    raw_data = np.matrix(raw_data).astype(float)
    return (raw_data[:, :np.size(raw_data, 1)-1], raw_data[:, np.size(raw_data, 1)-1:])
def standardize(dataset, skipfirst=True):
    means = np.amin(dataset, 0)
    deviation = np.std(dataset, 0)
    if skipfirst:
        dataset[:, 1:] -= means[:, 1:]
        dataset[:, 1:] /= deviation[:, 1:]
        return dataset
    else:
        dataset -= means
        dataset /= deviation
        return dataset
def error(X, Y, Theta):
    "Calculates error values"
    v_sigm = np.vectorize(sigmoid)
    h_x = X @ Theta
    sigmo = v_sigm(h_x)
    partial_vect = (Y - 1).T @ np.log(1 - sigmo) - Y.T @ np.log(sigmo)
    return 1 / (2 * np.size(Y, axis=0)) * np.sum(partial_vect)
def gradientStep(X, Y, Theta, LR):
    "Returns new theta values"
    v_sigm = np.vectorize(sigmoid)
    h_x = X @ Theta
    modif = -1 * LR / np.size(Y, 0) * (h_x - Y)
    sums = np.sum(modif.T @ X, axis=0)
    return Theta + sums.T
X, Y = loadData("ex2data1.txt")
#add bias to X
X = np.append(np.ones((np.size(X, 0), 1)), X, axis=1)
added_params = [[x[1]**2, x[1]*x[2], x[2]**2] for x in np.array(X)]
X = np.append(X, np.matrix(added_params), axis=1)
#standardize X
X = standardize(X)
#create vector of parameters
Theta=np.zeros((np.size(X, 1), 1))
iterations = 3000
Theta_vals = []
Error_vals = []
for i in range(0, iterations):
    Theta_vals.append(np.asarray(Theta).flatten())
    Error_vals.append(error(X, Y, Theta))
    Theta = gradientStep(X, Y, Theta, 0.07)
#CALCULATING FINISHES HERE
#plot data:
fig = plt.figure()
def_ax = fig.add_subplot(211)
def_ax.set_xlim(np.amin(X[:,1:2]), np.amax(X[:,1:2]))
def_ax.set_ylim(np.amin(X[:,2:3]), np.amax(X[:,2:3]))
err_ax = fig.add_subplot(212)
err_ax.set_ylim(0, error(X, Y, Theta))
err_ax.set_xlim(0, iterations)
positive_X1 = []
positive_X2 = []
negative_X1 = []
negative_X2 = []
for i in range(0, np.size(Y, 0)):
    if Y[i, 0] == 1:
        positive_X1.append(X[i, 1])
        positive_X2.append(X[i, 2])
    else:
        negative_X1.append(X[i, 1])
        negative_X2.append(X[i, 2])
err_ax.set_ylim(np.amin(Error_vals), np.amax(Error_vals))
def animation(frame):
    global Theta_vals, Error_vals, def_ax, err_ax, positive_X1, positive_X2, negative_X1, negative_X2
    def_limX = def_ax.get_xlim()
    def_limY = def_ax.get_ylim()
    err_limX = err_ax.get_xlim()
    err_limY = err_ax.get_ylim()
    def_ax.clear()
    err_ax.clear()
    def_ax.set_xlim(def_limX)
    def_ax.set_ylim(def_limY)
    err_ax.set_xlim(err_limX)
    err_ax.set_ylim(err_limY)
    def_ax.scatter(positive_X1, positive_X2, marker="^")
    def_ax.scatter(negative_X1, negative_X2, marker="o")
    Theta = Theta_vals[frame]
    res_x = np.linspace(*def_ax.get_xlim(), num=5)
    # discriminant of the quadratic in x2 for each sampled x1
    delta_x = [(Theta[4]*x+Theta[2])**2-4*Theta[5]*(Theta[3]*x**2+Theta[1]*x+Theta[0]-0.5) for x in res_x]
    delta_x = [np.sqrt(x) if x >= 0 else 0 for x in delta_x]
    minb = [-(Theta[4]*x+Theta[2]) for x in res_x]
    res_1 = []
    res_2 = []
    # the two branches of the quadratic solution for x2
    for i in range(0, len(res_x)):
        if Theta[5] == 0:
            res_1.append(0)
            res_2.append(0)
        else:
            res_1.append((minb[i]+delta_x[i])/(2*Theta[5]))
            res_2.append((minb[i]-delta_x[i])/(2*Theta[5]))
    def_ax.plot(res_x, res_1)
    def_ax.plot(res_x, res_2)
    err_x = np.linspace(0, frame, frame)
    err_y = Error_vals[0:frame]
    err_ax.plot(err_x, err_y)
anim = FuncAnimation(fig, animation, frames=iterations, interval=3, repeat_delay=2000)
print(error(X, Y, Theta))
anim.save("anim.mp4")
What could be the reason for such strange behaviour?
My timings show that k-means is consistently slower than a Gaussian mixture model that is itself initialized using k-means.
What's the explanation for this? Is the GMM using a different k-means algorithm? Am I misunderstanding how it works? Does it use a differently sized dataset (smaller than the one I'm drawing from)?
import sklearn.cluster
import sklearn.mixture
import numpy as np
import time
import matplotlib.pyplot as plt
k = 3
N = 100
def clust():
    m = sklearn.cluster.KMeans(n_clusters = k)
    m.fit(X.reshape(-1, 1))
    return m.cluster_centers_
def fit():
    m = sklearn.mixture.GaussianMixture(n_components = k, init_params = "kmeans")
    m.fit(X.reshape(-1, 1))
    return m.means_
duration_clust = []
duration_fit = []
ctrs_clust = []
ctrs_fit = []
for i in range(N):
    _1 = np.random.normal(0.25, 0.15, 50)
    _2 = np.random.normal(0.50, 0.15, 50)
    _3 = np.random.normal(0.75, 0.15, 50)
    X = np.concatenate((_1, _2, _3)).reshape(-1, 1)
    ts = time.time()
    c = clust()
    te = time.time()
    time_clust = (te - ts) * 1e3
    ts = time.time()
    f = fit()
    te = time.time()
    time_fit = (te - ts) * 1e3
    duration_clust.append(time_clust)
    duration_fit.append(time_fit)
    ctrs_clust.append(c)
    ctrs_fit.append(f)
bins0 = np.arange(0, 20, 1)
bins1 = np.linspace(0,1,30)
fig, ax = plt.subplots(nrows = 2)
ax[0].hist(duration_clust, label = "Kmeans", bins = bins0, alpha = 0.5)
ax[0].hist(duration_fit, label = "GMM with Kmeans", bins = bins0, alpha = 0.5)
ax[0].set_xlabel("duration (ms)")
ax[0].legend(loc = "upper right")
ax[1].hist(np.ravel(ctrs_clust), label = "Kmeans centers", bins = bins1, alpha = 0.5)
ax[1].hist(np.ravel(ctrs_fit), label = "GMM centers", bins = bins1, alpha = 0.5)
ax[1].set_xlabel("Center location")
ax[1].axvline([0.25], label = "Truth", color = "black")
ax[1].axvline([0.50], color = "black")
ax[1].axvline([0.75], color = "black")
ax[1].legend(loc = "upper right")
plt.tight_layout()
plt.show()
I'm trying to run the following Python code from MATLAB:
from numba import autojit
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.integrate import quad
from scipy.special import jv
def make_p(p_max):
    p_list = []
    for p in range(p_max+1):
        temp = []
        for u in range(p+1):
            for r in range(p+1):
                for i in range(p+1):
                    if u+r+i == p:
                        temp.append([u,r,i])
        p_list.append(temp)
    return p_list
@autojit
def p_find(e,Ae,Aho,k,q,p_max,p_list):
    bessels_sqr_0 = (jv(0,Ae*k*q/2)*jv(0,Aho*k*q/4)*jv(0,Aho*k*q/4))**2
    if bessels_sqr_0 > 0.99:
        return 0
    else:
        for i in range(1,len(p_list)):
            bessels_sqr = 0
            for triplets in p_list[i]:
                n,j,l = triplets[0], triplets[1], triplets[2]
                bessels_sqr += (jv(n,Ae*k*q/2)*jv(j,Aho*k*q/4)*jv(l,Aho*k*q/4))**2
            if bessels_sqr_0 + bessels_sqr > 0.99:
                return i
        return p_max
@autojit
def integrand1(x,Dm,O_sqr,G_sqr,Dl,h,Ae,Aho,o_rf,o_ho,k,q):
    sum_func = 0
    for u in h:
        n,j,l = u
        sum_func += (O_sqr*(jv(n,Ae*k*q/2)*jv(j,Aho*k*q/4)*jv(l,Aho*k*q/4))**2)/(G_sqr+2*O_sqr*((jv(n,Ae*k*q/2)*jv(j,Aho*k*q/4)*jv(l,Aho*k*q/4))**2)+4*(Dl-o_rf*(n-l+j)-o_ho*(l+j)+Dm*x)**2)
    return (x/(np.pi*np.sqrt(1-x**2)))*sum_func
@autojit
def dEdt1(O_sqr,G_sqr,Dl,Dm,hb,h,Ae,Aho,o_rf,o_ho,k,q):
    return -hb*np.sqrt(G_sqr)*Dm*quad(integrand1,-1,1,args=(Dm,O_sqr,G_sqr,Dl,h,Ae,Aho,o_rf,o_ho,k,q), epsabs = 1e-35)[0]
@autojit
def integrand2(x,Dm,O_sqr,G_sqr,Dl):
    return (x/(np.pi*np.sqrt(1-x**2)))*(O_sqr/(G_sqr+2*O_sqr+4*(Dl+Dm*x)**2))
@autojit
def dEdt2(O_sqr,G_sqr,Dl,Dm,hb):
    return -hb*np.sqrt(G_sqr)*Dm*quad(integrand2,-1,1,args=(Dm,O_sqr,G_sqr,Dl), epsabs = 1e-35)[0]
def main():
    kb = 1.38064852e-23
    hb = 1.054571800e-34
    s = 0.5
    G_sqr = (2*np.pi*21.5e6)**2
    k = 2*np.pi/(422e-9)
    O_sqr = s*G_sqr/2
    dt = 1e-5
    m = 87.9*1.660539040e-27
    max_t = 2e-3
    tt = np.arange(0,max_t,dt)
    Dl = -2*np.pi*10e6
    T0 = 1
    E_array = np.zeros(len(tt))
    E_array[0] = kb*T0
    p_max = 5
    o_rf = 2*np.pi*26.51e6
    a = 0
    q = 0.1
    o_ho = o_rf/(2*np.sqrt(a+q**2/2))
    Ae = 1e-9
    p_list = make_p(p_max)
    for i in range(len(E_array)-1):
        if i%int(len(E_array)/10) == 0:
            print(i/(len(E_array)-1)/10)
        Aho = np.sqrt(2*E_array[i]/(m*o_ho**2))
        p_min = p_find(E_array[i],Ae,Aho,k,q,p_max,p_list)
        if p_min > 0:
            print(p_min)
            for h in p_list[:p_min+1]:
                if i < len(E_array):
                    k1 = dEdt1(O_sqr,G_sqr,Dl,k*np.sqrt(2*E_array[i]/m),hb,h,Ae,Aho,o_rf,o_ho,k,q)
                    k2 = dEdt1(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+(dt/2)*k1)/m),hb,h,Ae,Aho,o_rf,o_ho,k,q)
                    k3 = dEdt1(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+(dt/2)*k2)/m),hb,h,Ae,Aho,o_rf,o_ho,k,q)
                    k4 = dEdt1(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+dt*k3)/m),hb,h,Ae,Aho,o_rf,o_ho,k,q)
                    E_array[i+1] = E_array[i]+dt/6*(k1+2*k2+2*k3+k4)
        else:
            if i < len(E_array):
                k1 = dEdt2(O_sqr,G_sqr,Dl,k*np.sqrt(2*E_array[i]/m),hb)
                k2 = dEdt2(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+(dt/2)*k1)/m),hb)
                k3 = dEdt2(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+(dt/2)*k2)/m),hb)
                k4 = dEdt2(O_sqr,G_sqr,Dl,k*np.sqrt(2*(E_array[i]+dt*k3)/m),hb)
                E_array[i+1] = E_array[i]+dt/6*(k1+2*k2+2*k3+k4)
    return E_array, tt, kb
start = time.time()
E_array, tt, kb = main()
end = time.time()
print('runtime: {:.3f}'.format(end-start))
plt.figure()
plt.title('Temperature vs time')
plt.plot(tt*1e3,E_array/kb, linestyle = '', marker = 'o', color='black')
plt.show()
The problem is, when I try to call py.matcode.main(), I get the following error message: Python Error:
main() missing 14 required positional arguments: 'E_array', 'k', 'm',
'O_sqr', 'G_sqr', 'Dl', 'hb', 'kb', 'TD', 'method', 'p_list', 'o_ho',
'Ae', and 'p_max'
Even though MATLAB reports that Python has a problem running my code, the code runs fine when I execute it in Python directly.
Moreover, I am able to call py.matcode.make_p(py.int(5)), which gives the correct result.
Can anyone help me find what I'm doing wrong?
Change the bottom of your code so that the script-level part only runs when the file is executed directly, not when MATLAB (or anything else) imports it as a module:
if __name__ == "__main__":
    start = time.time()
    E_array, tt, kb = main()
    end = time.time()
    print('runtime: {:.3f}'.format(end-start))
    plt.figure()
    plt.title('Temperature vs time')
    plt.plot(tt*1e3,E_array/kb, linestyle = '', marker = 'o', color='black')
    plt.show()
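With that guard in place, importing the module no longer executes the simulation and plotting code at the top level. As a quick sanity check from plain Python (assuming the file is named matcode.py, as the py.matcode calls suggest):
import matcode                      # importing should no longer plot or run the simulation
E_array, tt, kb = matcode.main()    # the function can still be called explicitly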
I have been studying the 1D wave equation and making an animation of its solution, but I ran into problems when using anim.save from matplotlib.animation to save the video file. I have already installed ffmpeg on my computer (a Windows machine) and set the environment variables, but it keeps telling me this:
UserWarning: MovieWriter ffmpeg unavailable
...
RuntimeError: Error creating movie, return code: 4 Try running with --verbose-debug
Here is my code:
from numpy import zeros,linspace,sin,pi
import matplotlib.pyplot as mpl
from matplotlib import animation
mpl.rcParams['animation.ffmpeg_path'] = 'C:\\ffmpeg\\bin\\ffmpeg.exe'
def I(x):
    return sin(2*x*pi/L)
def f(x,t):
    return sin(0.5*x*t)
def solver0(I,f,c,L,n,dt,t):
    # f is a function of x and t, I is a function of x
    x = linspace(0,L,n+1)
    dx = L/float(n)
    if dt <= 0:
        dt = dx/float(c)
    C2 = (c*dt/dx)**2
    dt2 = dt*dt
    up = zeros(n+1)
    u = up.copy()
    um = up.copy()
    for i in range(0,n):
        u[i] = I(x[i])
    for i in range(1,n-1):
        um[i] = u[i]+0.5*C2*(u[i-1] - 2*u[i] + u[i+1]) + dt2*f(x[i],t)
    um[0] = 0
    um[n] = 0
    while t <= tstop:
        t_old = t
        t += dt
        #update all inner points:
        for i in range(1,n-1):
            up[i] = -um[i] + 2*u[i] + C2*(u[i-1] - 2*u[i] + u[i+1]) + dt2*f(x[i],t_old)
        #insert boundary conditions:
        up[0] = 0
        up[n] = 0
        #update data structures for next step
        um = u.copy()
        u = up.copy()
    return u
c = 1.0
L = 10
n = 100
dt = 0
tstop = 40
fig = mpl.figure()
ax = mpl.axes(xlim=(0,10),ylim=(-1.0,1.0))
line, = ax.plot([],[],lw=2)
def init():
    line.set_data([],[])
    return line,
def animate(t):
    x = linspace(0,L,n+1)
    y = solver0(I, f, c, L, n, dt, t)
    line.set_data(x,y)
    return line,
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=200, interval=20, blit=True)
anim.save('seawave_1d_ani.mp4',writer='ffmpeg',fps=30)
mpl.show()
I believe the problem is in the part of the animation instead of the three functions above. Please help me find what mistake I have made.
I finally worked it out. It seems that I missed two important lines.
The line mpl.rcParams['animation.ffmpeg_path'] = 'C:\\ffmpeg\\bin\\ffmpeg.exe' is correct; both '\\' and '/' work on Windows, as far as I've tried.
I added mywriter = animation.FFMpegWriter() at the bottom and changed anim.save('seawave_1d_ani.mp4',writer='ffmpeg',fps=30) to anim.save('seawave_1d_ani.mp4',writer=mywriter,fps=30). It finally worked, thanks to Using FFmpeg and IPython.
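Put together, the save step now looks like this (only these two lines changed from my original code):
mywriter = animation.FFMpegWriter()
anim.save('seawave_1d_ani.mp4', writer=mywriter, fps=30)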
I'm also grateful to everyone who helped me with this problem.
Can anybody help me optimize the plot function in Python? I use Matplotlib to plot financial data. Here is a small function for plotting OHLC data. The time increases significantly if I add indicators or other data.
import numpy as np
import datetime
from matplotlib.collections import LineCollection
from pylab import *
import urllib2
def test_plot(OHLCV):
    bar_width = 1.3
    date_offset = 0.5
    fig = figure(figsize=(50, 20), facecolor='w')
    ax = fig.add_subplot(1, 1, 1)
    labels = ax.get_xmajorticklabels()
    setp(labels, rotation=0)
    month = MonthLocator()
    day = DayLocator()
    timeFmt = DateFormatter('%Y-%m-%d')
    colormap = OHLCV[:,1] < OHLCV[:,4]
    color = np.zeros(colormap.__len__(), dtype = np.dtype('|S5'))
    color[:] = 'red'
    color[np.where(colormap)] = 'green'
    dates = date2num( OHLCV[:,0])
    lines_hl = LineCollection( zip(zip(dates, OHLCV[:,2]), zip(dates, OHLCV[:,3])))
    lines_hl.set_color(color)
    lines_hl.set_linewidth(bar_width)
    lines_op = LineCollection( zip(zip((np.array(dates) - date_offset).tolist(), OHLCV[:,1]), zip((np.array(dates)).tolist(), OHLCV[:,1])))
    lines_op.set_color(color)
    lines_op.set_linewidth(bar_width)
    lines_cl = LineCollection( zip(zip((np.array(dates) + date_offset).tolist(), OHLCV[:,4]), zip((np.array(dates)).tolist(), OHLCV[:,4])))
    lines_cl.set_color(color)
    lines_cl.set_linewidth(bar_width)
    ax.add_collection(lines_hl, autolim=True)
    ax.add_collection(lines_cl, autolim=True)
    ax.add_collection(lines_op, autolim=True)
    ax.xaxis.set_major_locator(month)
    ax.xaxis.set_major_formatter(timeFmt)
    ax.xaxis.set_minor_locator(day)
    ax.autoscale_view()
    ax.xaxis.grid(True, 'major')
    ax.grid(True)
    ax.set_title('EOD test plot')
    ax.set_xlabel('Date')
    ax.set_ylabel('Price , $')
    fig.savefig('test.png', dpi = 50, bbox_inches='tight')
    close()
if __name__=='__main__':
    data_table = urllib2.urlopen(r"http://ichart.finance.yahoo.com/table.csv?s=IBM&a=00&b=1&c=2012&d=00&e=15&f=2013&g=d&ignore=.csv").readlines()[1:][::-1]
    parsed_table = []
    #Format: Date, Open, High, Low, Close, Volume
    dtype = (lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(),float, float, float, float, int)
    for row in data_table:
        field = row.strip().split(',')[:-1]
        data_tmp = [i(j) for i,j in zip(dtype, field)]
        parsed_table.append(data_tmp)
    parsed_table = np.array(parsed_table)
    import time
    bf = time.time()
    count = 100
    for i in xrange(count):
        test_plot(parsed_table)
    print('Plot time: %s' % ((time.time() - bf) / count))
The result looks something like this. The average execution time per plot is approximately 2.6 s. Charting in R is much faster, but I didn't measure its performance and I don't want to use Rpy, so I believe my code is inefficient.
This solution reuses a Figure instance and saves the plots asynchronously. You could change this to have as many figures as there are processors and do that many plots asynchronously; a rough sketch of that idea is at the very end, after the code. As it is, this takes ~1s per plot, down from 2.6s on my machine.
import numpy as np
import datetime
import urllib2
import time
import multiprocessing as mp
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pylab import *
from matplotlib.collections import LineCollection
class AsyncPlotter():
    def __init__(self, processes=mp.cpu_count()):
        self.manager = mp.Manager()
        self.nc = self.manager.Value('i', 0)
        self.pids = []
        self.processes = processes
    def async_plotter(self, nc, fig, filename, processes):
        while nc.value >= processes:
            time.sleep(0.1)
        nc.value += 1
        print "Plotting " + filename
        fig.savefig(filename)
        plt.close(fig)
        nc.value -= 1
    def save(self, fig, filename):
        p = mp.Process(target=self.async_plotter,
                       args=(self.nc, fig, filename, self.processes))
        p.start()
        self.pids.append(p)
    def join(self):
        for p in self.pids:
            p.join()
class FinanceChart():
    def __init__(self, async_plotter):
        self.async_plotter = async_plotter
        self.bar_width = 1.3
        self.date_offset = 0.5
        self.fig = plt.figure(figsize=(50, 20), facecolor='w')
        self.ax = self.fig.add_subplot(1, 1, 1)
        self.labels = self.ax.get_xmajorticklabels()
        setp(self.labels, rotation=0)
        line_hl = LineCollection(([[(734881,1), (734882,5), (734883,9), (734889,5)]]))
        line_op = LineCollection(([[(734881,1), (734882,5), (734883,9), (734889,5)]]))
        line_cl = LineCollection(([[(734881,1), (734882,5), (734883,9), (734889,5)]]))
        self.lines_hl = self.ax.add_collection(line_hl, autolim=True)
        self.lines_op = self.ax.add_collection(line_cl, autolim=True)
        self.lines_cl = self.ax.add_collection(line_op, autolim=True)
        self.ax.set_title('EOD test plot')
        self.ax.set_xlabel('Date')
        self.ax.set_ylabel('Price , $')
        month = MonthLocator()
        day = DayLocator()
        timeFmt = DateFormatter('%Y-%m-%d')
        self.ax.xaxis.set_major_locator(month)
        self.ax.xaxis.set_major_formatter(timeFmt)
        self.ax.xaxis.set_minor_locator(day)
    def test_plot(self, OHLCV, i):
        colormap = OHLCV[:,1] < OHLCV[:,4]
        color = np.zeros(colormap.__len__(), dtype = np.dtype('|S5'))
        color[:] = 'red'
        color[np.where(colormap)] = 'green'
        dates = date2num( OHLCV[:,0])
        date_array = np.array(dates)
        xmin = min(dates)
        xmax = max(dates)
        ymin = min(OHLCV[:,1])
        ymax = max(OHLCV[:,1])
        self.lines_hl.set_segments( zip(zip(dates, OHLCV[:,2]), zip(dates, OHLCV[:,3])))
        self.lines_hl.set_color(color)
        self.lines_hl.set_linewidth(self.bar_width)
        self.lines_op.set_segments( zip(zip((date_array - self.date_offset).tolist(), OHLCV[:,1]), zip(date_array.tolist(), OHLCV[:,1])))
        self.lines_op.set_color(color)
        self.lines_op.set_linewidth(self.bar_width)
        self.lines_cl.set_segments( zip(zip((date_array + self.date_offset).tolist(), OHLCV[:,4]), zip(date_array.tolist(), OHLCV[:,4])))
        self.lines_cl.set_color(color)
        self.lines_cl.set_linewidth(self.bar_width)
        self.ax.set_xlim(xmin,xmax)
        self.ax.set_ylim(ymin,ymax)
        self.ax.xaxis.grid(True, 'major')
        self.ax.grid(True)
        self.async_plotter.save(self.fig, '%04i.png'%i)
if __name__=='__main__':
    print "Starting"
    data_table = urllib2.urlopen(r"http://ichart.finance.yahoo.com/table.csv?s=IBM&a=00&b=1&c=2012&d=00&e=15&f=2013&g=d&ignore=.csv").readlines()[1:][::-1]
    parsed_table = []
    #Format: Date, Open, High, Low, Close, Volume
    dtype = (lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(),float, float, float, float, int)
    for row in data_table:
        field = row.strip().split(',')[:-1]
        data_tmp = [i(j) for i,j in zip(dtype, field)]
        parsed_table.append(data_tmp)
    parsed_table = np.array(parsed_table)
    import time
    bf = time.time()
    count = 10
    a = AsyncPlotter()
    _chart = FinanceChart(a)
    print "Done with startup tasks"
    for i in xrange(count):
        _chart.test_plot(parsed_table, i)
    a.join()
    print('Plot time: %.2f' %(float(time.time() - bf) / float(count)))
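To sketch the "as many figures as there are processors" variant mentioned above (untested, reusing the classes from this answer; the round-robin indexing is just one way to do it), the plotting loop in the __main__ block could become:
n_figs = mp.cpu_count()
a = AsyncPlotter(processes=n_figs)
charts = [FinanceChart(a) for _ in range(n_figs)]   # one reusable Figure per worker slot
for i in xrange(count):
    charts[i % n_figs].test_plot(parsed_table, i)   # hand the plots out round-robin
a.join()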