negative dimensions are not allowed in GIS - python

I define bounding-box coordinates for several cities. The filter worked for one city, but not for the others:
almaty = df.loc[(df['latitude'] >= 43.117971) & (df['latitude'] <= 51.21465) & (df['longitude'] >= 76.3) & (df['longitude'] <= 77.017043)]
astana = df.loc[(df['latitude'] >= 43.67) & (df['latitude'] <= 49.33) & (df['longitude'] >= 71.4) & (df['longitude'] <= 71.7)]
shymkent = df.loc[(df['latitude'] >= 43.3006564195014) & (df['latitude'] <= 43.393584) & (df['longitude'] >= 69.521168) & (df['longitude'] <= 69.796007)]
karaganda = df.loc[(df['latitude'] >= 49.180286) & (df['latitude'] <= 49.975897) & (df['longitude'] >= 73.007664) & (df['longitude'] <= 73.216594)]
aktobe = df.loc[(df['latitude'] >= 43.157950342428) & (df['latitude'] <= 43.202757) & (df['longitude'] >= 57.037496) & (df['longitude'] <= 57.309703)]
aktay = df.loc[(df['latitude'] >= 41.091843) & (df['latitude'] <= 42.321688) & (df['longitude'] >= 51.0684404142448) & (df['longitude'] <= 51.242566)]
atyray = df.loc[(df['latitude'] >= 42.373844) & (df['latitude'] <= 42.878625) & (df['longitude'] >= 51.829899) & (df['longitude'] <= 51.976558)]
taraz = df.loc[(df['latitude'] >= 43.638035) & (df['latitude'] <= 43.860407) & (df['longitude'] >= 71.289942) & (df['longitude'] <= 71.383097)]
oral = df.loc[(df['latitude'] >= 53.218562) & (df['latitude'] <= 53.91340984) & (df['longitude'] >= 51.268246) & (df['longitude'] <= 51.5)]
kostanay = df.loc[(df['latitude'] >= 43.226135) & (df['latitude'] <= 43.231757) & (df['longitude'] >= 63.539098) & (df['longitude'] <= 63.682181)]
ust_kam = df.loc[(df['latitude'] >= 53.218562) & (df['latitude'] <= 53.91340984) & (df['longitude'] >= 82.548347) & (df['longitude'] <= 82.775019)]
turkestan = df.loc[(df['latitude'] >= 43.253377) & (df['latitude'] <= 43.26444) & (df['longitude'] >= 68.193404) & (df['longitude'] <= 68.340979)]
kzyl_orda = df.loc[(df['latitude'] >= 43.235554) & (df['latitude'] <= 43.241596) & (df['longitude'] >= 65.357) & (df['longitude'] <= 65.5872052968506)]
semei = df.loc[(df['latitude'] >= 52.305741) & (df['latitude'] <= 52.910023) & (df['longitude'] >= 80.421254) & (df['longitude'] <= 81.068463)]
Almaty works, but the others don't.
fig = ff.create_hexbin_mapbox(
    data_frame=almaty, lat=almaty.latitude, lon=almaty.longitude,
    nx_hexagon=60, opacity=0.5, labels={"color": "Point Count"},
    min_count=5, color_continuous_scale="Viridis",
    show_original_data=True, zoom=10,
    original_data_marker=dict(size=4, opacity=0.6, color="deeppink"),
)
fig.update_layout(mapbox_style="open-street-map")
fig.show()
Working
fig = ff.create_hexbin_mapbox(
    data_frame=astana, lat=astana.latitude, lon=astana.longitude,
    nx_hexagon=10, opacity=0.5, labels={"color": "Point Count"},
    min_count=5, color_continuous_scale="Viridis",
    show_original_data=True, zoom=10,
    original_data_marker=dict(size=4, opacity=0.6, color="deeppink"),
)
fig.update_layout(mapbox_style="open-street-map")
fig.show()
Not working
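A debugging sketch (my own, not from the original post): if a bounding box matches no rows, create_hexbin_mapbox ends up binning an empty frame, which is a plausible source of a "negative dimensions are not allowed" error. Note, for example, that the astana filter keeps latitudes between 43.67 and 49.33, while Astana itself sits near 51.17° N, so that subset is likely empty. Printing the row count per city shows which filters are the problem; Series.between also keeps the bounds easier to read:
# Hedged debugging sketch: report how many rows each bounding box matches.
# Bounds are copied from the question; add the remaining cities the same way.
city_bounds = {
    'almaty': (43.117971, 51.21465, 76.3, 77.017043),
    'astana': (43.67, 49.33, 71.4, 71.7),
}
for name, (lat_min, lat_max, lon_min, lon_max) in city_bounds.items():
    subset = df.loc[df['latitude'].between(lat_min, lat_max)
                    & df['longitude'].between(lon_min, lon_max)]
    print(f"{name}: {len(subset)} rows")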


Add limit orders on side of the chart Plotly

I'm trying to add limit orders on the right side of the chart, showing where each limit order was placed and its volume: red for sell orders, green for buy orders, with a longer line for a larger volume. Here is a quick example of what I'm looking for.
Here is full code that I have with chart data: https://textbin.net/noz678jlue
chartData = {'Price': [0.965879, 0.964773, 0.96447, 0.961223, 0.958788, 0.956747, 0.958788, 0.959872, 0.959868, 0.960104, 0.961375, 0.962256, 0.963297, 0.963315, 0.964611, 0.964513, 0.963761, 0.963763, 0.963922, 0.963907, 0.963857, 0.963265, 0.963293, 0.963171, 0.96318, 0.963707, 0.964389, 0.964352, 0.963077, 0.961785, 0.959572, 0.958703, 0.959223, 0.95819, 0.952994, 0.95124, 0.950693, 0.950906, 0.95184, 0.951838, 1.053997, 1.060501, 1.060672, 1.060486, 1.060171, 1.060241, 1.059292, 1.059263, 1.059205, 0.95906, 0.954787, 0.954901, 0.954993, 0.955447, 0.955465, 0.955626, 0.953638, 0.952751, 0.951972, 0.950729, 0.950532, 0.952849, 0.952773, 0.952682, 0.952351, 0.948383, 0.94847, 0.948451, 0.95198, 0.952234, 0.951982, 0.952163, 0.952301, 0.952407, 0.955843, 0.956628, 0.957734, 0.957548, 0.95771, 0.956813, 0.958674, 0.958295, 0.954697, 0.953861, 0.955926, 0.953264, 0.951443, 0.950245, 0.949453, 0.949492, 0.948764, 0.946932, 0.949487, 0.950302, 0.950381, 0.949979, 0.948601, 0.949252, 0.949217, 0.949271, 0.947859, 0.947683, 0.947763, 0.947593, 0.948247, 0.9483, 0.948568, 0.947236, 0.946515, 0.946128, 0.946793, 0.946244, 0.951683, 0.951324, 0.950662, 0.949001, 0.947648, 0.946191, 0.946928, 0.933038, 0.92239, 0.923197, 0.925719, 0.937193, 0.93354, 0.932933, 0.932073, 0.931954, 0.932393, 0.931602, 0.932908, 0.932966, 0.933866, 0.931223, 0.929834, 0.933195, 0.936534, 0.935959, 0.932762, 0.931187, 0.937434, 0.937664, 0.936378, 0.934742, 0.934742], 'Date': [1652117700000, 1652118000000, 1652118300000, 1652118600000, 1652118900000, 1652119200000, 1652119500000, 1652119800000, 1652120100000, 1652120400000, 1652120700000, 1652121000000, 1652121300000, 1652121600000, 1652121900000, 1652122200000, 1652122500000, 1652122800000, 1652123100000, 1652123400000, 1652123700000, 1652124000000, 1652124300000, 1652124600000, 1652124900000, 1652125200000, 1652125500000, 1652125800000, 1652126100000, 1652126400000, 1652126700000, 1652127000000, 1652127300000, 1652127600000, 1652127900000, 1652128200000, 1652128500000, 1652128800000, 1652129100000, 1652129400000, 1652129700000, 1652130000000, 1652130300000, 1652130600000, 1652130900000, 1652131200000, 1652131500000, 1652131800000, 1652132100000, 1652132400000, 1652132700000, 1652133000000, 1652133300000, 1652133600000, 1652133900000, 1652134200000, 1652134500000, 1652134800000, 1652135100000, 1652135400000, 1652135700000, 1652136000000, 1652136300000, 1652136600000, 1652136900000, 1652137200000, 1652137500000, 1652137800000, 1652138100000, 1652138400000, 1652138700000, 1652139000000, 1652139300000, 1652139600000, 1652139900000, 1652140200000, 1652140500000, 1652140800000, 1652141100000, 1652141400000, 1652141700000, 1652142000000, 1652142300000, 1652142600000, 1652142900000, 1652143200000, 1652143500000, 1652143800000, 1652144100000, 1652144400000, 1652144700000, 1652145000000, 1652145300000, 1652145600000, 1652145900000, 1652146200000, 1652146500000, 1652146800000, 1652147100000, 1652147400000, 1652147700000, 1652148000000, 1652148300000, 1652148600000, 1652148900000, 1652149200000, 1652149500000, 1652149800000, 1652150100000, 1652150400000, 1652150700000, 1652151000000, 1652151300000, 1652151600000, 1652151900000, 1652152200000, 1652152500000, 1652152800000, 1652153100000, 1652153400000, 1652153700000, 1652154000000, 1652154300000, 1652154600000, 1652154900000, 1652155200000, 1652155500000, 1652155800000, 1652156100000, 1652156400000, 1652156700000, 1652157000000, 1652157300000, 1652157600000, 1652157900000, 1652158200000, 1652158500000, 1652158800000, 1652159100000, 
1652159400000, 1652159700000, 1652160000000, 1652160300000, 1652160600000, 1652160636000]}
limitOrders = {"BUY":{"0.98": 50000, "0.93": 5555, "0.67": 300000, "0.85": 5555, "0.47": 300000, '0.57': 300000, "0.95": 5555}, "SELL":{"1.00": 50000, "0.83": 5555, "0.67": 300000, "0.75": 5555, "0.57": 300000, '0.67': 300000, "0.85": 5555}}
eastern = pytz.timezone('US/Eastern')
df = pd.DataFrame.from_dict(chartData).fillna(method="backfill")
df['Date'] = pd.to_datetime(df['Date'], unit='ms').dt.tz_localize('UTC').dt.tz_convert(eastern)
x = df['Date']
y = df['Price']
layout = Layout(
    autosize=True,
    width=1980,
    height=1080,
    margin=dict(l=10, r=10, t=80, b=10),
    title="<b>TEST</b>",
    paper_bgcolor='rgb(0.03,0.00,0.07)',
    plot_bgcolor='rgb(0.03,0.00,0.07)',
    yaxis_tickformat=".3f",
    title_x=0.5,
    font=dict(
        family="Amarante,cursive",
        size=25,
        color="White")
)
fig = go.Figure([
    go.Scatter(x=x, y=1.01 * np.ones_like(y), opacity=0.5, line_width=0, showlegend=False),
    go.Scatter(x=x, y=y, fill='tonexty', fillcolor="#240050", line=dict(color="#940099"),
               line_shape='spline', opacity=0, showlegend=False)
], layout=layout)
fig.show()
You can use Plotly shapes to draw the line segments representing the limit orders, and annotations to place the text with the corresponding volume amount. However, you will need to increase the right margin so the annotations are visible.
import pytz
import numpy as np
import pandas as pd
import plotly.graph_objects as go
chartData = {'Price': [0.965879, 0.964773, 0.96447, 0.961223, 0.958788, 0.956747, 0.958788, 0.959872, 0.959868, 0.960104, 0.961375, 0.962256, 0.963297, 0.963315, 0.964611, 0.964513, 0.963761, 0.963763, 0.963922, 0.963907, 0.963857, 0.963265, 0.963293, 0.963171, 0.96318, 0.963707, 0.964389, 0.964352, 0.963077, 0.961785, 0.959572, 0.958703, 0.959223, 0.95819, 0.952994, 0.95124, 0.950693, 0.950906, 0.95184, 0.951838, 1.053997, 1.060501, 1.060672, 1.060486, 1.060171, 1.060241, 1.059292, 1.059263, 1.059205, 0.95906, 0.954787, 0.954901, 0.954993, 0.955447, 0.955465, 0.955626, 0.953638, 0.952751, 0.951972, 0.950729, 0.950532, 0.952849, 0.952773, 0.952682, 0.952351, 0.948383, 0.94847, 0.948451, 0.95198, 0.952234, 0.951982, 0.952163, 0.952301, 0.952407, 0.955843, 0.956628, 0.957734, 0.957548, 0.95771, 0.956813, 0.958674, 0.958295, 0.954697, 0.953861, 0.955926, 0.953264, 0.951443, 0.950245, 0.949453, 0.949492, 0.948764, 0.946932, 0.949487, 0.950302, 0.950381, 0.949979, 0.948601, 0.949252, 0.949217, 0.949271, 0.947859, 0.947683, 0.947763, 0.947593, 0.948247, 0.9483, 0.948568, 0.947236, 0.946515, 0.946128, 0.946793, 0.946244, 0.951683, 0.951324, 0.950662, 0.949001, 0.947648, 0.946191, 0.946928, 0.933038, 0.92239, 0.923197, 0.925719, 0.937193, 0.93354, 0.932933, 0.932073, 0.931954, 0.932393, 0.931602, 0.932908, 0.932966, 0.933866, 0.931223, 0.929834, 0.933195, 0.936534, 0.935959, 0.932762, 0.931187, 0.937434, 0.937664, 0.936378, 0.934742, 0.934742], 'Date': [1652117700000, 1652118000000, 1652118300000, 1652118600000, 1652118900000, 1652119200000, 1652119500000, 1652119800000, 1652120100000, 1652120400000, 1652120700000, 1652121000000, 1652121300000, 1652121600000, 1652121900000, 1652122200000, 1652122500000, 1652122800000, 1652123100000, 1652123400000, 1652123700000, 1652124000000, 1652124300000, 1652124600000, 1652124900000, 1652125200000, 1652125500000, 1652125800000, 1652126100000, 1652126400000, 1652126700000, 1652127000000, 1652127300000, 1652127600000, 1652127900000, 1652128200000, 1652128500000, 1652128800000, 1652129100000, 1652129400000, 1652129700000, 1652130000000, 1652130300000, 1652130600000, 1652130900000, 1652131200000, 1652131500000, 1652131800000, 1652132100000, 1652132400000, 1652132700000, 1652133000000, 1652133300000, 1652133600000, 1652133900000, 1652134200000, 1652134500000, 1652134800000, 1652135100000, 1652135400000, 1652135700000, 1652136000000, 1652136300000, 1652136600000, 1652136900000, 1652137200000, 1652137500000, 1652137800000, 1652138100000, 1652138400000, 1652138700000, 1652139000000, 1652139300000, 1652139600000, 1652139900000, 1652140200000, 1652140500000, 1652140800000, 1652141100000, 1652141400000, 1652141700000, 1652142000000, 1652142300000, 1652142600000, 1652142900000, 1652143200000, 1652143500000, 1652143800000, 1652144100000, 1652144400000, 1652144700000, 1652145000000, 1652145300000, 1652145600000, 1652145900000, 1652146200000, 1652146500000, 1652146800000, 1652147100000, 1652147400000, 1652147700000, 1652148000000, 1652148300000, 1652148600000, 1652148900000, 1652149200000, 1652149500000, 1652149800000, 1652150100000, 1652150400000, 1652150700000, 1652151000000, 1652151300000, 1652151600000, 1652151900000, 1652152200000, 1652152500000, 1652152800000, 1652153100000, 1652153400000, 1652153700000, 1652154000000, 1652154300000, 1652154600000, 1652154900000, 1652155200000, 1652155500000, 1652155800000, 1652156100000, 1652156400000, 1652156700000, 1652157000000, 1652157300000, 1652157600000, 1652157900000, 1652158200000, 1652158500000, 1652158800000, 1652159100000, 
1652159400000, 1652159700000, 1652160000000, 1652160300000, 1652160600000, 1652160636000]}
limitOrders = {"BUY":{"0.98": 50000, "0.93": 5555, "0.67": 300000, "0.85": 5555, "0.47": 300000, '0.57': 300000, "0.95": 5555}, "SELL":{"1.00": 50000, "0.83": 5555, "0.67": 300000, "0.75": 5555, "0.57": 300000, '0.67': 300000, "0.85": 5555}}
eastern = pytz.timezone('US/Eastern')
df = pd.DataFrame.from_dict(chartData).fillna(method="backfill")
df['Date'] = pd.to_datetime(df['Date'], unit='ms').dt.tz_localize('UTC').dt.tz_convert(eastern)
x = df['Date']
y = df['Price']
layout = dict(
    autosize=True,
    width=1980,
    height=1080,
    margin=dict(l=10, r=200, t=80, b=10),
    title="<b>TEST</b>",
    paper_bgcolor='rgb(0.03,0.00,0.07)',
    plot_bgcolor='rgb(0.03,0.00,0.07)',
    yaxis_tickformat=".3f",
    title_x=0.5,
    font=dict(
        family="Amarante,cursive",
        size=25,
        color="White")
)
fig = go.Figure([
    go.Scatter(x=x, y=1.01 * np.ones_like(y), opacity=0.5, line_width=0, showlegend=False),
    go.Scatter(x=x, y=y, fill='tonexty', fillcolor="#240050", line=dict(color="#940099"),
               line_shape='spline', opacity=0, showlegend=False)
], layout=layout)
## add limit orders using shapes and annotations
## use paper coordinates to determine length in the x direction
max_limit_volume = 500000
max_limit_volume_length = 0.25
for limit_order_name, limit_order_info in limitOrders.items():
    color = "green" if limit_order_name == "BUY" else "red"  # buys green, sells red
    for y_value, volume in limit_order_info.items():
        y_value = float(y_value)
        # line length scales with the order's volume relative to max_limit_volume
        fig.add_shape(type="line",
                      x0=1, y0=y_value,
                      x1=1 - max_limit_volume_length * volume / max_limit_volume, y1=y_value,
                      line=dict(color=color, width=3))
        fig.add_annotation(
            x=1.05, y=y_value,
            yshift=-30, xref="paper",
            text=f"${volume}", font=dict(color="white")
        )
## shapes span paper coordinates on x and data coordinates on y
fig.update_shapes(dict(xref='paper', yref='y'))
fig.show()

How to solve warning message in Gekko due to m.connection?

I am using m.Connection to estimate the initial conditions of variables, but I am getting 12 warning messages, and the APM model file shows the same warnings. I am not sure how to resolve these messages.
I am following this explanation "If pos1 or pos2 is not None, the associated var must be a GEKKO variable and the position is the (0-indexed) time-discretized index of the variable" to write m.Connection(var1,var2,pos1=None,pos2=None,node1='end',node2='end').
https://gekko.readthedocs.io/en/latest/quick_start.html#connections
Thanks in advance.
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
import math as math
import pandas as pd
tm1 = [0, 0.0667,0.5,1,4, 22.61667]
mca1 = [5.68, 3.48, 3.24, 3.36, 2.96, 1.96]
tm2 = [0, 0.08333,0.5,1,4.25 , 22.8167]
mca2 = [5.68, 4.20, 4.04, 4.00, 3.76, 2.88]
tm3 = [0,0.08333,0.5,1,4.33 , 22.9500]
mca3 = [5.68, 4.64, 4.52, 4.56, 4.24, 3.72]
tm4 = [0,0.08333,0.5,1,4.0833 , 23.0833]
mca4 =[18.90,15.4,14.3,15.10,13.50, 10.90]
tm5 = [0,0.08333,0.5,1,4.5, 23.2167]
mca5 =[18.90, 15.5, 16.30, 16, 14.70, 13.00]
tm6 = [0,0.08333,0.5,1,4.6667, 23.3333 ]
mca6 = [18.90, 15.8, 11.70,15.5,12, 9.5 ]
df1=pd.DataFrame({'time':tm1,'x1':mca1})
df2=pd.DataFrame({'time':tm2,'x2':mca2})
df3=pd.DataFrame({'time':tm3,'x3':mca3})
df4=pd.DataFrame({'time':tm4,'x4':mca4})
df5=pd.DataFrame({'time':tm5,'x5':mca5})
df6=pd.DataFrame({'time':tm6,'x6':mca6})
df1.set_index('time',inplace=True)
df2.set_index('time',inplace=True)
df3.set_index('time',inplace=True)
df4.set_index('time',inplace=True)
df5.set_index('time',inplace=True)
df6.set_index('time',inplace=True)
#simulation time points
dfx = pd.DataFrame({'time':np.linspace(0,25,101)})
dfx.set_index('time',inplace=True)
#merge dataframes
dfxx=dfx.join(df1,how='outer')
dfxxx=dfxx.join(df2,how='outer')
dfxxxx=dfxxx.join(df3,how='outer')
dfxxxxx=dfxxxx.join(df4,how='outer')
dfxxxxxx=dfxxxxx.join(df5,how='outer')
df=dfxxxxxx.join(df6,how='outer')
# get True (1) or False (0) for measurement
df['meas1']=(df['x1'].values==df['x1'].values).astype(int)
df['meas2']=(df['x2'].values==df['x2'].values).astype(int)
df['meas3']=(df['x3'].values==df['x3'].values).astype(int)
df['meas4']=(df['x4'].values==df['x4'].values).astype(int)
df['meas5']=(df['x5'].values==df['x5'].values).astype(int)
df['meas6']=(df['x6'].values==df['x6'].values).astype(int)
#replace NaN with zeros
df0=df.fillna(value=0)
m = GEKKO()
m.time = df0.index.values
meas1 = m.Param(df0['meas1'].values)
meas2 = m.Param(df0['meas2'].values)
meas3 = m.Param(df0['meas3'].values)
meas4 = m.Param(df0['meas4'].values)
meas5 = m.Param(df0['meas5'].values)
meas6 = m.Param(df0['meas6'].values)
#adjustable Parameters
kf=m.FV(1.3,lb=0.01,ub=10)
ks=m.FV(1.3,lb=0.01,ub=10)
cnf01=m.FV(1.3,lb=0.01,ub=10)
cns01=m.FV(1.3,lb=0.01,ub=10)
#constraints
cnf02=m.FV(value=cnf01*0.5,lb=cnf01*0.5, ub=cnf01*0.5)
cns02=m.FV(value=cns01*0.5,lb=cns01*0.5, ub=cns01*0.5)
cnf03=m.FV(value=cnf01*0.25,lb=cnf01*0.25, ub=cnf01*0.25)
cns03=m.FV(value=cns01*0.25,lb=cns01*0.25, ub=cns01*0.25)
cnf04=m.FV(value=cnf01,lb=cnf01, ub=cnf01)
cns04=m.FV(value=cns01,lb=cns01, ub=cns01)
cnf05=m.FV(value=cnf01*0.5,lb=cnf01*0.5, ub=cnf01*0.5)
cns05=m.FV(value=cns01*0.5,lb=cns01*0.5, ub=cns01*0.5)
cnf06=m.FV(value=cnf01*0.25,lb=cnf01*0.25, ub=cnf01*0.25)
cns06=m.FV(value=cns01*0.25,lb=cns01*0.25, ub=cns01*0.25)
#Variables
c1 = m.Var(value=mca1[0])
c2 = m.Var(value=mca2[0])
c3 = m.Var(value=mca3[0])
c4 = m.Var(value=mca4[0])
c5 = m.Var(value=mca5[0])
c6 = m.Var(value=mca6[0])
cm1 = m.Param(df0['x1'].values)
cm2 = m.Param(df0['x2'].values)
cm3 = m.Param(df0['x3'].values)
cm4 = m.Param(df0['x4'].values)
cm5 = m.Param(df0['x5'].values)
cm6 = m.Param(df0['x6'].values)
m.Minimize((meas1*(c1-cm1)**2)+(meas2*(c2-cm2)**2)\
           +(meas3*(c3-cm3)**2)+(meas4*(c4-cm4)**2)\
           +(meas5*(c5-cm5)**2)+(meas6*(c6-cm6)**2))
cnf1=m.Var(value=cnf01,fixed_initial=False)
cns1=m.Var(value=cns01,fixed_initial=False)
cnf2=m.Var(value=cnf02,fixed_initial=False)
cns2=m.Var(value=cns02,fixed_initial=False)
cnf3=m.Var(value=cnf03,fixed_initial=False)
cns3=m.Var(value=cns03,fixed_initial=False)
cnf4=m.Var(value=cnf04,fixed_initial=False)
cns4=m.Var(value=cns04,fixed_initial=False)
cnf5=m.Var(value=cnf05,fixed_initial=False)
cns5=m.Var(value=cns05,fixed_initial=False)
cnf6=m.Var(value=cnf06,fixed_initial=False)
cns6=m.Var(value=cns06,fixed_initial=False)
#Equations
t = m.Param(value=m.time)
m.Connection(cnf1,cnf01,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cnf2,cnf02,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cnf3,cnf03,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cnf4,cnf04,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cnf5,cnf05,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cnf6,cnf06,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cns1,cns01,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cns2,cns02,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cns3,cns03,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cns4,cns04,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cns5,cns05,pos1=0,pos2=0,node1=1,node2=1)
m.Connection(cns6,cns06,pos1=0,pos2=0,node1=1,node2=1)
m.Equation(cnf1.dt()==-kf*c1*cnf1)
m.Equation(cns1.dt()==-ks*c1*cns1)
m.Equation(c1.dt()==cnf1.dt()+cns1.dt())
m.Equation(cnf2.dt()==-kf*c2*cnf2)
m.Equation(cns2.dt()==-ks*c2*cns2)
m.Equation(c2.dt()==cnf2.dt()+cns2.dt())
m.Equation(cnf3.dt()==-kf*c3*cnf3)
m.Equation(cns3.dt()==-ks*c3*cns3)
m.Equation(c3.dt()==cnf3.dt()+cns3.dt())
m.Equation(cnf4.dt()==-kf*c4*cnf4)
m.Equation(cns4.dt()==-ks*c4*cns4)
m.Equation(c4.dt()==cnf4.dt()+cns4.dt())
m.Equation(cnf5.dt()==-kf*c5*cnf5)
m.Equation(cns5.dt()==-ks*c5*cns5)
m.Equation(c5.dt()==cnf5.dt()+cns5.dt())
m.Equation(cnf6.dt()==-kf*c6*cnf6)
m.Equation(cns6.dt()==-ks*c6*cns6)
m.Equation(c6.dt()==cnf6.dt()+cns6.dt())
if True:
    kf.STATUS=1
    ks.STATUS=1
    cnf01.STATUS=1
    cns01.STATUS=1
    cnf02.STATUS=1
    cns02.STATUS=1
    cnf03.STATUS=1
    cns03.STATUS=1
    cnf04.STATUS=1
    cns04.STATUS=1
    cnf05.STATUS=1
    cns05.STATUS=1
    cnf06.STATUS=1
    cns06.STATUS=1
#Options
m.options.SOLVER = 1  # APOPT solver
m.options.IMODE = 5   # dynamic simultaneous estimation (MHE)
m.options.EV_TYPE = 2 # squared error
m.options.NODES = 3   # collocation nodes (2-5)
m.solve(disp=True)
m.open_folder()
print('Final SSE Objective: ' + str(m.options.objfcnval))
print('Solution')
print('cnf01 = ' + str(cnf01.value[0]))
print('cns01 = ' + str(cns01.value[0]))
print('kf = ' + str(kf.value[0]))
print('ks = ' + str(ks.value[0]))
print('cns02 = '+ str(cns02.value[0]))
print('cnf02 = '+ str(cnf02.value[0]))
print('cns03 = '+ str(cns03.value[0]))
print('cnf03 = '+ str(cnf03.value[0]))
print('cns04 = '+ str(cns04.value[0]))
print('cnf04 = '+ str(cnf04.value[0]))
print('cns05 = '+ str(cns05.value[0]))
print('cnf05 = '+ str(cnf05.value[0]))
print('cns06 = '+ str(cns06.value[0]))
print('cnf06 = '+ str(cnf06.value[0]))
plt.figure(1,figsize=(8,5))
plt.plot(m.time,c1.value,'r',label='Predicted c1')
plt.plot(m.time,c2.value,'y',label='Predicted c2')
plt.plot(m.time,c3.value,'c',label='Predicted c3')
plt.plot(m.time,c4.value,'g',label='Predicted c4')
plt.plot(m.time,c5.value,'b',label='Predicted c5')
plt.plot(m.time,c6.value,'m',label='Predicted c6')
plt.plot(tm1,mca1,'rx',label='Meas c1')
plt.plot(tm2,mca2,'yx',label='Meas c2')
plt.plot(tm3,mca3,'cx',label='Meas c3')
plt.plot(tm4,mca4,'go',label='Meas c4')
plt.plot(tm5,mca5,'bo',label='Meas c5')
plt.plot(tm6,mca6,'mo',label='Meas c6')
plt.xlabel('time (h)')
plt.ylabel('Concentration (mgCl2/L)')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2)
The underlying node structure uses a 1-index rather than the 0-index that is common in Python. Using pos1=1 and pos2=1 resolves the warnings.
m.Connection(cnf1,cnf01,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf2,cnf02,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf3,cnf03,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf4,cnf04,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf5,cnf05,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf6,cnf06,pos1=1,pos2=1,node1=1,node2=1)
Another issue is that Gekko variables generally shouldn't be used to initialize other values. I recommend setting x0=1.3 and using that float to initialize the variables. Also, change m.Var() to m.SV() to avoid reclassification of m.Var() as an m.FV() during the connection; m.SV() is a promoted type of variable that is at the same level of precedence as m.FV(). Here is a complete script, although the results don't look optimal.
from gekko import GEKKO
import numpy as np
import matplotlib.pyplot as plt
import math as math
import pandas as pd
tm1 = [0, 0.0667,0.5,1,4, 22.61667]
mca1 = [5.68, 3.48, 3.24, 3.36, 2.96, 1.96]
tm2 = [0, 0.08333,0.5,1,4.25 , 22.8167]
mca2 = [5.68, 4.20, 4.04, 4.00, 3.76, 2.88]
tm3 = [0,0.08333,0.5,1,4.33 , 22.9500]
mca3 = [5.68, 4.64, 4.52, 4.56, 4.24, 3.72]
tm4 = [0,0.08333,0.5,1,4.0833 , 23.0833]
mca4 =[18.90,15.4,14.3,15.10,13.50, 10.90]
tm5 = [0,0.08333,0.5,1,4.5, 23.2167]
mca5 =[18.90, 15.5, 16.30, 16, 14.70, 13.00]
tm6 = [0,0.08333,0.5,1,4.6667, 23.3333 ]
mca6 = [18.90, 15.8, 11.70,15.5,12, 9.5 ]
df1=pd.DataFrame({'time':tm1,'x1':mca1})
df2=pd.DataFrame({'time':tm2,'x2':mca2})
df3=pd.DataFrame({'time':tm3,'x3':mca3})
df4=pd.DataFrame({'time':tm4,'x4':mca4})
df5=pd.DataFrame({'time':tm5,'x5':mca5})
df6=pd.DataFrame({'time':tm6,'x6':mca6})
df1.set_index('time',inplace=True)
df2.set_index('time',inplace=True)
df3.set_index('time',inplace=True)
df4.set_index('time',inplace=True)
df5.set_index('time',inplace=True)
df6.set_index('time',inplace=True)
#simulation time points
dfx = pd.DataFrame({'time':np.linspace(0,25,101)})
dfx.set_index('time',inplace=True)
#merge dataframes
dfxx=dfx.join(df1,how='outer')
dfxxx=dfxx.join(df2,how='outer')
dfxxxx=dfxxx.join(df3,how='outer')
dfxxxxx=dfxxxx.join(df4,how='outer')
dfxxxxxx=dfxxxxx.join(df5,how='outer')
df=dfxxxxxx.join(df6,how='outer')
# get True (1) or False (0) for measurement
df['meas1']=(df['x1'].values==df['x1'].values).astype(int)
df['meas2']=(df['x2'].values==df['x2'].values).astype(int)
df['meas3']=(df['x3'].values==df['x3'].values).astype(int)
df['meas4']=(df['x4'].values==df['x4'].values).astype(int)
df['meas5']=(df['x5'].values==df['x5'].values).astype(int)
df['meas6']=(df['x6'].values==df['x6'].values).astype(int)
#replace NaN with zeros
df0=df.fillna(value=0)
m = GEKKO()
m.time = df0.index.values
meas1 = m.Param(df0['meas1'].values)
meas2 = m.Param(df0['meas2'].values)
meas3 = m.Param(df0['meas3'].values)
meas4 = m.Param(df0['meas4'].values)
meas5 = m.Param(df0['meas5'].values)
meas6 = m.Param(df0['meas6'].values)
#adjustable Parameters
kf=m.FV(1.3,lb=0.01,ub=10)
ks=m.FV(1.3,lb=0.01,ub=10)
x0 = 1.3
cnf01=m.FV(x0,lb=0.01,ub=10)
cns01=m.FV(x0,lb=0.01,ub=10)
#constraints
cnf02=m.FV(value=x0*0.5,lb=x0*0.5, ub=x0*0.5)
cns02=m.FV(value=x0*0.5,lb=x0*0.5, ub=x0*0.5)
cnf03=m.FV(value=x0*0.25,lb=x0*0.25, ub=x0*0.25)
cns03=m.FV(value=x0*0.25,lb=x0*0.25, ub=x0*0.25)
cnf04=m.FV(value=x0,lb=x0, ub=x0)
cns04=m.FV(value=x0,lb=x0, ub=x0)
cnf05=m.FV(value=x0*0.5,lb=x0*0.5, ub=x0*0.5)
cns05=m.FV(value=x0*0.5,lb=x0*0.5, ub=x0*0.5)
cnf06=m.FV(value=x0*0.25,lb=x0*0.25, ub=x0*0.25)
cns06=m.FV(value=x0*0.25,lb=x0*0.25, ub=x0*0.25)
#Variables
c1 = m.SV(value=mca1[0])
c2 = m.SV(value=mca2[0])
c3 = m.SV(value=mca3[0])
c4 = m.SV(value=mca4[0])
c5 = m.SV(value=mca5[0])
c6 = m.SV(value=mca6[0])
cm1 = m.Param(df0['x1'].values)
cm2 = m.Param(df0['x2'].values)
cm3 = m.Param(df0['x3'].values)
cm4 = m.Param(df0['x4'].values)
cm5 = m.Param(df0['x5'].values)
cm6 = m.Param(df0['x6'].values)
m.Minimize((meas1*(c1-cm1)**2)+(meas2*(c2-cm2)**2)\
           +(meas3*(c3-cm3)**2)+(meas4*(c4-cm4)**2)\
           +(meas5*(c5-cm5)**2)+(meas6*(c6-cm6)**2))
cnf1=m.SV(value=x0,fixed_initial=False)
cns1=m.SV(value=x0,fixed_initial=False)
cnf2=m.SV(value=x0,fixed_initial=False)
cns2=m.SV(value=x0,fixed_initial=False)
cnf3=m.SV(value=x0,fixed_initial=False)
cns3=m.SV(value=x0,fixed_initial=False)
cnf4=m.SV(value=x0,fixed_initial=False)
cns4=m.SV(value=x0,fixed_initial=False)
cnf5=m.SV(value=x0,fixed_initial=False)
cns5=m.SV(value=x0,fixed_initial=False)
cnf6=m.SV(value=x0,fixed_initial=False)
cns6=m.SV(value=x0,fixed_initial=False)
#Equations
t = m.Param(value=m.time)
m.Connection(cnf1,cnf01,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf2,cnf02,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf3,cnf03,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf4,cnf04,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf5,cnf05,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cnf6,cnf06,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cns1,cns01,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cns2,cns02,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cns3,cns03,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cns4,cns04,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cns5,cns05,pos1=1,pos2=1,node1=1,node2=1)
m.Connection(cns6,cns06,pos1=1,pos2=1,node1=1,node2=1)
m.Equation(cnf1.dt()==-kf*c1*cnf1)
m.Equation(cns1.dt()==-ks*c1*cns1)
m.Equation(c1.dt()==cnf1.dt()+cns1.dt())
m.Equation(cnf2.dt()==-kf*c2*cnf2)
m.Equation(cns2.dt()==-ks*c2*cns2)
m.Equation(c2.dt()==cnf2.dt()+cns2.dt())
m.Equation(cnf3.dt()==-kf*c3*cnf3)
m.Equation(cns3.dt()==-ks*c3*cns3)
m.Equation(c3.dt()==cnf3.dt()+cns3.dt())
m.Equation(cnf4.dt()==-kf*c4*cnf4)
m.Equation(cns4.dt()==-ks*c4*cns4)
m.Equation(c4.dt()==cnf4.dt()+cns4.dt())
m.Equation(cnf5.dt()==-kf*c5*cnf5)
m.Equation(cns5.dt()==-ks*c5*cns5)
m.Equation(c5.dt()==cnf5.dt()+cns5.dt())
m.Equation(cnf6.dt()==-kf*c6*cnf6)
m.Equation(cns6.dt()==-ks*c6*cns6)
m.Equation(c6.dt()==cnf6.dt()+cns6.dt())
#Options
m.options.SOLVER = 1 # APOPT solver
m.options.IMODE = 5 # Dynamic Simultaneous - estimation = MHE
m.options.EV_TYPE = 2 # Squared error
m.options.NODES = 3 # Collocation nodes (2,5)
if True:
    kf.STATUS=1
    ks.STATUS=1
    cnf01.STATUS=1
    cns01.STATUS=1
    cnf02.STATUS=1
    cns02.STATUS=1
    cnf03.STATUS=1
    cns03.STATUS=1
    cnf04.STATUS=1
    cns04.STATUS=1
    cnf05.STATUS=1
    cns05.STATUS=1
    cnf06.STATUS=1
    cns06.STATUS=1
m.options.TIME_SHIFT = 0
try:
    m.solve(disp=True)
except:
    print("don't stop when not finding cnf01...cnf06")
#m.open_folder()
print('Final SSE Objective: ' + str(m.options.objfcnval))
print('Solution')
print('cnf01 = ' + str(cnf1.value[0]))
print('cns01 = ' + str(cns1.value[0]))
print('kf = ' + str(kf.value[0]))
print('ks = ' + str(ks.value[0]))
print('cns02 = '+ str(cns2.value[0]))
print('cnf02 = '+ str(cnf2.value[0]))
print('cns03 = '+ str(cns3.value[0]))
print('cnf03 = '+ str(cnf3.value[0]))
print('cns04 = '+ str(cns4.value[0]))
print('cnf04 = '+ str(cnf4.value[0]))
print('cns05 = '+ str(cns5.value[0]))
print('cnf05 = '+ str(cnf5.value[0]))
print('cns06 = '+ str(cns6.value[0]))
print('cnf06 = '+ str(cnf6.value[0]))
plt.figure(1,figsize=(8,5))
plt.plot(m.time,c1.value,'r',label='Predicted c1')
plt.plot(m.time,c2.value,'y',label='Predicted c2')
plt.plot(m.time,c3.value,'c',label='Predicted c3')
plt.plot(m.time,c4.value,'g',label='Predicted c4')
plt.plot(m.time,c5.value,'b',label='Predicted c5')
plt.plot(m.time,c6.value,'m',label='Predicted c6')
plt.plot(tm1,mca1,'rx',label='Meas c1')
plt.plot(tm2,mca2,'yx',label='Meas c2')
plt.plot(tm3,mca3,'cx',label='Meas c3')
plt.plot(tm4,mca4,'go',label='Meas c4')
plt.plot(tm5,mca5,'bo',label='Meas c5')
plt.plot(tm6,mca6,'mo',label='Meas c6')
plt.xlabel('time (h)')
plt.ylabel('Concentration (mgCl2/L)')
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2)
plt.show()

heroku app not running even after deployment

After typing heroku logs --tail --app "app_name" I see this error:
no such table
My database works fine locally and is error-free, yet the deployed app still reports that the table does not exist.
import pandas as pd
import base64
import datetime
import io
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import sqlalchemy as sa
from datetime import date, timedelta
import sqlite3
import flask
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
con = sqlite3.connect('C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db', check_same_thread=False)
df = pd.read_sql_query('SELECT * FROM ABCC1;', con)
dataaa = df
### SQL Engine
disk_engine = sa.create_engine("sqlite:///C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db")
connection = disk_engine.connect()
metadata = sa.MetaData()
SQL_table = sa.Table(
    "ABCC1",
    metadata,
    sa.Column("Site", sa.VARCHAR),
    sa.Column("Last_Greased_Date:YYYY-MM-DD", sa.TEXT),
    sa.Column("Department", sa.VARCHAR),
    sa.Column("Equipment_ID", sa.VARCHAR),
    sa.Column("Equipment_Name", sa.VARCHAR),
    sa.Column("HAC_Code", sa.VARCHAR),
    sa.Column("Frequency_Schedule_Days", sa.INTEGER),
    sa.Column("NEXT_Date:YYYY-MM-DD", sa.TEXT),
    sa.Column("Grease_Grade", sa.VARCHAR),
    sa.Column("Point", sa.INTEGER),
    sa.Column("Stroke", sa.INTEGER),
    sa.Column("Grease_Gun_No(gm_per_stroke)", sa.VARCHAR),
    sa.Column("Quantity_Grease_In_Grams(in_one_stroke)", sa.FLOAT),
    sa.Column("Total_Quantity_Grease_Used(in_gms)", sa.FLOAT),
    sa.Column("Name_Of_Technicians", sa.TEXT),
    sa.Column("Remarks", sa.VARCHAR),
)
disk_engine = sa.create_engine(
    "sqlite:///C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db",
    connect_args={"check_same_thread": False})
connection = disk_engine.connect()
metadata = sa.MetaData()
SQL_TABLE = sa.Table(
    "Scheduler",
    metadata,
    sa.Column("Department", sa.VARCHAR),
    sa.Column("EqName", sa.VARCHAR),
    sa.Column("EqId", sa.VARCHAR),
    sa.Column("GreaseGrade", sa.VARCHAR),
    sa.Column("Point", sa.INTEGER),
    sa.Column("Stroke", sa.INTEGER),
    sa.Column("gmperstroke", sa.FLOAT),
    sa.Column("TotalGreaseused(ingms.)", sa.FLOAT),
    sa.Column('GreaseNippleStatus', sa.TEXT),
    sa.Column("Schedule.freqDays", sa.INTEGER),
    sa.Column("AttendeBy", sa.VARCHAR),
    sa.Column("Remark/anyabnormalitiesfound", sa.VARCHAR),
)
dss = pd.read_sql_query('SELECT * FROM Scheduler;', con)
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, )
server = flask.Flask("C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Database")
def serve_layout():
    layout = html.Div([
        html.H4("Next Greasing Date"),
        html.H4("Upload"),
        dcc.Upload(
            id="upload-data",
            children=html.Div(
                ["Drag and drop or click to select a file to upload."]
            ),
            multiple=True,
            style={
                "width": "100%",
                "height": "60px",
                "lineHeight": "60px",
                "borderWidth": "1px",
                "borderStyle": "dashed",
                "borderRadius": "5px",
                "textAlign": "center",
                "margin": "10px",
            }
        ),
        html.Div(id='output-of-upload'),
        html.P([
            html.Label('Choose a Department:', style={'fontSize': 18}),
            dcc.Dropdown(
                id='dept_input',
                options=[{'label': i, 'value': i} for i in dataa['Department'].unique()],
                style={'height': '30px', 'width': '300px'}
            )], className="three columns"),
        html.P([
            html.Label('Choose Date:Day-Month-Year', style={'fontSize': 18}),
            dcc.DatePickerSingle(
                id="single",
                month_format='MMMM Y',
                placeholder='DD-MM-YYYY',
                with_portal=True,
                clearable=True,
                display_format="DD-MM-YYYY",
            )], ),
        html.P([
            html.Label('Next Greasing Dates', style={'fontSize': 18}),
            html.Div([
                dash_table.DataTable(
                    id='next_greasing_dates', sort_action="native", sort_mode="multi",
                    columns=[{"name": i, "id": i} for i in ['Site', 'Last_Greased_Date:YYYY-MM-DD', 'Department',
                                                            'Equipment_ID', 'Equipment_Name',
                                                            'HAC_Code', 'Frequency_Schedule_Days',
                                                            'NEXT_Date:YYYY-MM-DD', 'Grease_Grade', 'Point', 'Stroke',
                                                            'Grease_Gun(gm_per_stroke)',
                                                            'Quantity_Grease_In_Grams(in_one_stroke)',
                                                            'Total_Quantity_Grease_Used(in_gms)',
                                                            'Name_Of_Technicians', 'Remarks']],
                    export_format='csv',
                    export_columns="all",
                    export_headers='display',
                    merge_duplicate_headers=True,
                    style_cell={'textAlign': 'left'},
                    style_cell_conditional=[
                        {'if': {'column_id': 'Equipment_ID'},
                         'textAlign': 'center'},
                        {'if': {'column_id': 'HAC_CODE'},
                         'textAlign': 'center'},
                        {'if': {'column_id': 'Last_Greased_Date:YYYY-MM-DD'},
                         'textAlign': 'center'},
                        {'if': {'column_id': 'Frequency_Schedule_Days'},
                         'textAlign': 'center'},
                        {'if': {'column_id': 'NEXT_Date:YYYY-MM-DD'},
                         'textAlign': 'center'},
                        {'if': {'column_id': 'Point'},
                         'textAlign': 'center'},
                        {'if': {'column_id': 'Quantity_Grease_In_Grams(in_one_stroke)'},
                         'textAlign': 'center'},
                        {'if': {'column_id': 'Stroke'},
                         'textAlign': 'center'},
                        {'if': {'column_id': 'Date:Year-Month-Date'},
                         'width': '40%'},
                        {'if': {'column_id': 'NEXT_Date:YYYY-MM-DD'},
                         'width': '40%'}],
                    style_header={
                        'backgroundColor': 'rgb(230, 230, 230)',
                        'fontWeight': 'bold'},
                    style_table={'overflowX': 'scroll'}, )
            ])
        ]),
    ])
    return layout
def parse_contents(contents, filename, date):
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            dg = pd.read_csv(io.StringIO(decoded.decode('utf-8'))).to_sql('ABCC1', con, if_exists='append', index=False)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            dg = pd.read_excel(io.BytesIO(decoded)).to_sql('ABCC1', con, if_exists='append', index=False)
    except Exception as e:
        print(e)
        return html.Div([
            'There was an error processing this file.'
        ])
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date))
    ])
# adding one column in dataa by setting its value to 0
dataaa['freq'] = 0
dataaa['grade'] = 0
dataaa['qty'] = 0
dataaa['stroke'] = 0
dataaa['point'] = 0
dataaa['man'] = 0
dataaa['depty'] = 0
# converting date object to date time format
dataaa['Last_Greased_Date:YYYY-MM-DD'] = pd.to_datetime(dataaa['Last_Greased_Date:YYYY-MM-DD'])
dataaa['Last_Greased_Date:YYYY-MM-DD'].dt.strftime("%Y-%m-%d")
dataa = dataaa
dataa['date'] = pd.to_datetime(dataa['Last_Greased_Date:YYYY-MM-DD'])
# checking eq id and eq name in scheduler dataset and then filling the respective new columns
for i in range(0, len(dss)):
    m = dss['EqName'][i]
    n = dss['EqId'][i]
    if (dataa['Equipment_ID'] == n).any():
        dataa['freq'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['Schedule.freqDays'][i]
        dataa['depty'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['Department'][i]
        dataa['grade'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['GreaseGrade'][i]
        dataa['point'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['Point'][i]
        dataa['qty'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['TotalGreaseused(ingms.)'][i]
        dataa['stroke'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['Stroke'][i]
        dataa['man'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['AttendeBy'][i]
    else:
        dataa['freq'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['Schedule.freqDays'][i]
        dataa['depty'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['Department'][i]
        dataa['grade'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['GreaseGrade'][i]
        dataa['point'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['Point'][i]
        dataa['qty'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['TotalGreaseused(ingms.)'][i]
        dataa['stroke'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['Stroke'][i]
        dataa['man'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['AttendeBy'][i]
# Sorting date in decending order inorder to get last greasing date of every equipment
dataa['Last_Greased_Date:YYYY-MM-DD'] = dataa['Last_Greased_Date:YYYY-MM-DD'].astype(str)
d = dataa.sort_values("Last_Greased_Date:YYYY-MM-DD", ascending=False)
d.reset_index(inplace=True)
# droping the duplicates of eqid and eqname and keeping them first inorder to get the last graesing dates of equipments
di = d.drop_duplicates(["Equipment_Name", "HAC_Code"], keep='first')
di = d.drop_duplicates(["Equipment_ID", "Equipment_Name"], keep='first')
di.drop('index', axis=1, inplace=True)
di.reset_index(inplace=True)
app.layout = serve_layout
@app.callback(Output('next_greasing_dates', 'data'),
              [Input('single', 'date'),
               Input('dept_input', 'value')])
def ngrease_table(datee, dept):
    data = di[di['Department'] == dept]
    data1 = data[['Site', 'Last_Greased_Date:YYYY-MM-DD', 'Department', 'Equipment_ID', 'Equipment_Name',
                  'HAC_Code', 'Frequency_Schedule_Days', 'NEXT_Date:YYYY-MM-DD',
                  'Grease_Grade', 'Point', 'Stroke', 'Grease_Gun_No(gm_per_stroke)',
                  'Quantity_Grease_In_Grams(in_one_stroke)', 'Total_Quantity_Grease_Used(in_gms)',
                  'Name_Of_Technicians', 'Remarks', 'freq', 'date', "grade", 'point', "qty", "stroke", "man"]]
    data1.reset_index(inplace=True)
    data1.drop('index', axis=1, inplace=True)
    # splitting the end date into year, month and day
    yl = int(datee.split('-')[0])
    ml = int(datee.split('-')[1])
    dl = int(datee.split('-')[2])
    df2 = pd.DataFrame()
    for i in range(0, len(data1)):
        # splitting the last greased date into year, month and day
        y = int(data1['Last_Greased_Date:YYYY-MM-DD'][i].split('-')[0])
        m = int(data1['Last_Greased_Date:YYYY-MM-DD'][i].split('-')[1])
        d = int(data1['Last_Greased_Date:YYYY-MM-DD'][i].split('-')[2])
        if data1['freq'][i] != 0:  # freq of 0 would give an error
            # subtract the last date from the end date to get the number of days in
            # between, then divide by freq to find how many greasings fit; e.g. a
            # 30-day gap with a 10-day frequency gives 3 greasing dates
            p = (date(year=yl, month=ml, day=dl) - date(year=y, month=m, day=d)) // data1['freq'][i]
            # build a new frame with p.days rows; NEXT_Date starts at 0
            # and is filled in below
            df1 = pd.DataFrame({'Site': [data1['Site'][i]] * p.days,
                                'Last_Greased_Date:YYYY-MM-DD': [data1['Last_Greased_Date:YYYY-MM-DD'][i]] * p.days,
                                'Department': [data1['Department'][i]] * p.days,
                                'Equipment_ID': [data1['Equipment_ID'][i]] * p.days,
                                'Equipment_Name': [data1['Equipment_Name'][i]] * p.days,
                                'HAC_Code': [data1['HAC_Code'][i]] * p.days,
                                'Frequency_Schedule_Days': [data1['freq'][i]] * p.days,
                                'NEXT_Date:YYYY-MM-DD': [0] * p.days,
                                "Grease_Grade": [data1["grade"][i]] * p.days,
                                "Point": [data1["point"][i]] * p.days,
                                "Stroke": [data1["stroke"][i]] * p.days,
                                "Quantity_Grease_In_Grams(in_one_stroke)": [data1["qty"][i]] * p.days, })
            for j in range(0, len(df1)):
                # add freq multiples to the last date and save as NEXT_Date
                df1['NEXT_Date:YYYY-MM-DD'][j] = data1.date[i] + (timedelta(days=int(data1['freq'][i] * (j + 1))))
            df2 = pd.concat([df1, df2])
    # df2 holds every date from the last date onward, so keep only those
    # from the selected start date to the end date
    df3 = df2[df2['NEXT_Date:YYYY-MM-DD'] >= datetime.datetime.strptime(datee, '%Y-%m-%d')]
    df3.sort_values(by='NEXT_Date:YYYY-MM-DD', inplace=True)
    # drop the time-of-day part of the date
    df3['NEXT_Date:YYYY-MM-DD'] = df3['NEXT_Date:YYYY-MM-DD'].apply(lambda x: str(x.date()))
    df3.reset_index(inplace=True)
    df3.drop('index', axis=1, inplace=True)
    con = sa.create_engine('sqlite:///C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db')
    df3.to_sql('Next_Datee', con, if_exists='append', index=False)
    return df3.to_dict('records')
@app.callback(Output('output-of-upload', 'children'),
              [Input('upload-data', 'contents')],
              [State('upload-data', 'filename'),
               State('upload-data', 'last_modified')])
def update_output(list_of_contents, list_of_names, list_of_dates):
    if list_of_contents is not None:
        children = [
            parse_contents(contents, filename, date) for contents, filename, date in
            zip(list_of_contents, list_of_names, list_of_dates)]
        return children
if __name__ == "__main__":
    app.run_server()
Above is my code.
My requirements.txt:
Click==7.0
dash==1.9.0
dash-core-components==1.8.0
dash-html-components==1.0.2
dash-renderer==1.2.4
dash-table==4.6.0
Flask==1.1.1
Flask-Compress==1.4.0
Flask-SeaSurf==0.2.2
future==0.18.2
gunicorn==20.0.4
itsdangerous==1.1.0
Jinja2==2.11.1
MarkupSafe==1.1.1
numpy==1.18.1
pandas==1.0.1
plotly==4.5.0
python-dateutil==2.8.1
pytz==2019.3
retrying==1.3.3
six==1.14.0
SQLAlchemy==1.3.13
ua-parser==0.9.0
Werkzeug==1.0.0
This is my .gitignore file:
venv
*.pyc
.DS_Store
.env
This is my Procfile:
web: gunicorn app:server
You hardcoded the path to your SQL database:
con = sqlite3.connect('C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db', check_same_thread=False)
disk_engine = sa.create_engine("sqlite:///C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db")
This path does not exist on Linux, which is what Heroku dynos run. You can build a relative path instead:
import os
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../relative/path/to/file/you/want.db')
With .. you can go up one folder level. os.path.dirname(__file__) returns the path of the folder the Python file is currently in.
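Applied to this app, a minimal sketch could look like the following, under the assumption that Grease.db is committed next to the application file in the repo:
import os
import sqlite3
import sqlalchemy as sa

# assumption: Grease.db lives next to this file in the deployed repo
basedir = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(basedir, 'Grease.db')

con = sqlite3.connect(db_path, check_same_thread=False)
disk_engine = sa.create_engine('sqlite:///' + db_path)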

How to process streaming FX data and calculate live-stream ATR()?

I am trying out algorithmic trading with Python, for backtesting.
First I downloaded some tick data, then resampled it into 10-second OHLC bars, and then computed the ATR indicator with a defined function:
df = pd.read_csv('gbpusd jan17.csv',
                 names=['instrument', 'time', 'bid', 'ask'],
                 index_col=1,
                 parse_dates=True,
                 nrows=1000)
df = df['ask'].resample('10s').ohlc()
n = list(range(0, len(df.index)))  # I changed the index because my indicator
df.index = n                       # doesn't work on a datetime index
def ATR(df, n):  # Average True Range
    i = 0
    TR_l = [0]
    while i < df.index[-1]:
        TR = (max(df.get_value(i + 1, 'high'), df.get_value(i, 'close'))
              - min(df.get_value(i + 1, 'low'), df.get_value(i, 'close')))
        TR_l.append(TR)
        i = i + 1
    TR_s = pd.Series(TR_l)
    ATR = pd.Series(TR_s.rolling(window=12, min_periods=12, center=False).mean(),
                    name='ATR_' + str(n))
    df = df.join(ATR)
    return df
Outputs: head()
                        instrument      bid      ask
time
2017-01-02 00:00:01.105    GBP/USD  1.23399  1.23551
2017-01-02 00:00:01.561    GBP/USD  1.23399  1.23551
2017-01-02 00:00:05.122    GBP/USD  1.23399  1.23551
2017-01-02 00:00:05.525    GBP/USD  1.23365  1.23577
2017-01-02 00:00:06.139    GBP/USD  1.23365  1.23577

   open     high      low    close               ATR_8
1.23562  1.23562  1.23562  1.23562  0.000120  0.596718
1.23562  1.23575  1.23548  1.23548  0.000121  0.619445
1.23548  1.23548  1.23541  1.23541  0.000122  0.645532
1.23541  1.23541  1.23541  1.23541  0.000117  0.674178
1.23541  1.23548  1.23541  1.23548  0.000123  0.687229
But the problem starts when I connect to the Oanda API to get streaming rates: the while loop for the ATR calculation doesn't seem to work. At first I thought it failed because there weren't enough rows of data at the beginning, so I made the ATR calculation start only after a certain number of ticks, but it still doesn't work.
Can anyone help with the while loop? How should I change it for streaming data?
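One way to make the calculation stream-friendly (a hedged sketch of my own, assuming the resampled bars keep arriving as a growing DataFrame with high, low and close columns): compute the true range vectorized rather than walking positions in a while loop, so it no longer depends on a 0..N integer index and can simply be re-run whenever a new bar closes.
import pandas as pd

def atr(bars, n=12):
    # vectorized True Range over an OHLC frame; works on any index,
    # including the original datetime index from resample()
    prev_close = bars['close'].shift(1)
    tr = pd.concat([bars['high'] - bars['low'],
                    (bars['high'] - prev_close).abs(),
                    (bars['low'] - prev_close).abs()],
                   axis=1).max(axis=1)
    return tr.rolling(window=n, min_periods=n).mean().rename(f'ATR_{n}')

# On each streamed bar, re-run atr(df), or atr(df.tail(n + 1)) for just
# the newest value, instead of iterating positions with a while loop.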

Retrieving results from dictionary of a range

I have an ENTIREMAP which maps names to toy prices. What I want to do is create a function getResults like the one below:
#################
def getResults(name, price):
    # where name could be 'Laura' and price is 0.02;
    # then from the ENTIREMAP find the key 'Laura' and, if 0.02 is in the
    # range of the prices in the map, i.e. 0.02 is between (0.0, 0.05),
    # return ('PEN', 'BLUE')
    prices = [d[name] for d in ENTIRELIST if name in d]
    if prices:
        print prices[0]
GIRLTOYPRICES = {(0.0,0.05):('PEN', 'BLUE'),
(0.05,0.08):('GLASSES', 'DESIGNER'),
(0.08,0.12):('TOP', 'STRIPY'),
}
BOYTOYPRICES = {(0.0,0.10):('BOOK', 'HARRY POTTER'),
(0.10,0.15):('BLANKET', 'SOFT'),
(0.15,0.40):('GBA', 'GAMES'),
}
GIRLS = ['Laura', 'Samantha']
BOYS = ['Mike','Fred']
GIRLLIST = [{girl: GIRLTOYPRICES} for girl in GIRLS]
BOYLIST = [{boy: BOYTOYPRICES} for boy in BOYS]
ENTIRELIST = GIRLLIST + BOYLIST
print ENTIRELIST
[{'Laura': {(0.0, 0.05): ('PEN', 'BLUE'), (0.08, 0.12): ('TOP', 'STRIPY'), (0.05, 0.08): ('GLASSES', 'DESIGNER')}}, {'Samantha': {(0.0, 0.05): ('PEN', 'BLUE'), (0.08, 0.12): ('TOP', 'STRIPY'), (0.05, 0.08): ('GLASSES', 'DESIGNER')}}, {'Mike': {(0.0, 0.1): ('BOOK', 'HARRY POTTER'), (0.15, 0.4): ('GBA', 'GAMES'), (0.1, 0.15): ('BLANKET', 'SOFT')}}, {'Fred': {(0.0, 0.1): ('BOOK', 'HARRY POTTER'), (0.15, 0.4): ('GBA', 'GAMES'), (0.1, 0.15): ('BLANKET', 'SOFT')}}]
Any help would be appreciated.
Kind of a weird data structure, but:
def getResults(name, price):
    for person in ENTIRELIST:
        person_name, toys = person.items()[0]
        if person_name != name:  # inverted to reduce nesting
            continue
        for (price_min, price_max), toy in toys.items():
            if price_min <= price < price_max:
                return toy
This is simpler (and more effective):
GIRLMAP = {girl: GIRLTOYPRICES for girl in GIRLS}
BOYMAP = {boy: BOYTOYPRICES for boy in BOYS}
ENTIREMAP = dict(GIRLMAP, **BOYMAP)

def getResults(name, price):
    for (price_min, price_max), toy in ENTIREMAP[name].items():
        if price_min <= price < price_max:
            return toy
