heroku app not running even after deployment - python

After running heroku logs --tail --app "app_name"
I see the following error:
no such table
My database is working fine and is error-free locally, yet the app still reports that there is no such table.
import pandas as pd
import base64
import datetime
import io
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import sqlalchemy as sa
from datetime import date, timedelta
import sqlite3
import flask
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
con = sqlite3.connect('C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db', check_same_thread=False)
df = pd.read_sql_query('SELECT * FROM ABCC1;', con)
dataaa = df
### SQL Engine
disk_engine = sa.create_engine("sqlite:///C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db")
connection = disk_engine.connect()
metadata = sa.MetaData()
SQL_table = sa.Table(
"ABCC1",
metadata,
sa.Column("Site", sa.VARCHAR),
sa.Column("Last_Greased_Date:YYYY-MM-DD", sa.TEXT),
sa.Column("Department", sa.VARCHAR),
sa.Column("Equipment_ID", sa.VARCHAR),
sa.Column("Equipment_Name", sa.VARCHAR),
sa.Column("HAC_Code", sa.VARCHAR),
sa.Column("Frequency_Schedule_Days", sa.INTEGER),
sa.Column("NEXT_Date:YYYY-MM-DD", sa.TEXT),
sa.Column("Grease_Grade", sa.VARCHAR),
sa.Column("Point", sa.INTEGER),
sa.Column("Stroke", sa.INTEGER),
sa.Column("Grease_Gun_No(gm_per_stroke)", sa.VARCHAR),
sa.Column("Quantity_Grease_In_Grams(in_one_stroke)", sa.FLOAT),
sa.Column("Total_Quantity_Grease_Used(in_gms)", sa.FLOAT),
sa.Column("Name_Of_Technicians", sa.TEXT),
sa.Column("Remarks", sa.VARCHAR),
)
disk_engine = sa.create_engine(
"sqlite:///C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db",
connect_args={"check_same_thread": False})
connection = disk_engine.connect()
metadata = sa.MetaData()
SQL_TABLE = sa.Table(
"Scheduler",
metadata,
sa.Column("Department", sa.VARCHAR),
sa.Column("EqName", sa.VARCHAR),
sa.Column("EqId", sa.VARCHAR),
sa.Column("GreaseGrade", sa.VARCHAR),
sa.Column("Point", sa.INTEGER),
sa.Column("Stroke", sa.INTEGER),
sa.Column("gmperstroke", sa.FLOAT),
sa.Column("TotalGreaseused(ingms.)", sa.FLOAT),
sa.Column('GreaseNippleStatus', sa.TEXT),
sa.Column("Schedule.freqDays", sa.INTEGER),
sa.Column("AttendeBy", sa.VARCHAR),
sa.Column("Remark/anyabnormalitiesfound", sa.VARCHAR),
)
dss = pd.read_sql_query('SELECT * FROM Scheduler;', con)
app = dash.Dash(__name__, external_stylesheets=external_stylesheets, )
server = flask.Flask("C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Database")
def serve_layout():
    layout = html.Div([
html.H4("Next Greasing Date"),
html.H4("Upload"),
dcc.Upload(
id="upload-data",
children=html.Div(
["Drag and drop or click to select a file to upload."]
),
multiple=True,
style={
"width": "100%",
"height": "60px",
"lineHeight": "60px",
"borderWidth": "1px",
"borderStyle": "dashed",
"borderRadius": "5px",
"textAlign": "center",
"margin": "10px",
}
),
html.Div(id='output-of-upload'),
html.P([
html.Label('Choose a Department:', style={'fontSize': 18}),
dcc.Dropdown(
id='dept_input',
options=[{'label': i, 'value': i} for i in dataa['Department'].unique()],
style={'height': '30px', 'width': '300px'}
)], className="three columns"),
html.P([
html.Label('Choose Date:Day-Month-Year', style={'fontSize': 18}),
dcc.DatePickerSingle(
id="single",
month_format='MMMM Y',
placeholder='DD-MM-YYYY',
with_portal=True,
clearable=True,
display_format="DD-MM-YYYY",
)], ),
html.P([
html.Label('Next Greasing Dates', style={'fontSize': 18}),
html.Div([
dash_table.DataTable(
id='next_greasing_dates', sort_action="native", sort_mode="multi",
columns=[{"name": i, "id": i} for i in ['Site', 'Last_Greased_Date:YYYY-MM-DD', 'Department',
'Equipment_ID', 'Equipment_Name',
'HAC_Code', 'Frequency_Schedule_Days',
'NEXT_Date:YYYY-MM-DD', 'Grease_Grade', 'Point', 'Stroke',
'Grease_Gun(gm_per_stroke)',
'Quantity_Grease_In_Grams(in_one_stroke)',
'Total_Quantity_Grease_Used(in_gms)',
'Name_Of_Technicians', 'Remarks']],
export_format='csv',
export_columns="all",
export_headers='display',
merge_duplicate_headers=True,
style_cell={'textAlign': 'left'},
style_cell_conditional=[
{'if': {'column_id': 'Equipment_ID'},
'textAlign': 'center'},
{'if': {'column_id': 'HAC_CODE'},
'textAlign': 'center'},
{'if': {'column_id': 'Last_Greased_Date:YYYY-MM-DD'},
'textAlign': 'center'},
{'if': {'column_id': 'Frequency_Schedule_Days'},
'textAlign': 'center'},
{'if': {'column_id': 'NEXT_Date:YYYY-MM-DD'},
'textAlign': 'center'},
{'if': {'column_id': 'Point'},
'textAlign': 'center'},
{'if': {'column_id': 'Quantity_Grease_In_Grams(in_one_stroke)'},
'textAlign': 'center'},
{'if': {'column_id': 'Stroke'},
'textAlign': 'center'},
{'if': {'column_id': 'Date:Year-Month-Date'},
'width': '40%'},
{'if': {'column_id': 'NEXT_Date:YYYY-MM-DD'},
'width': '40%'}],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'},
style_table={'overflowX': 'scroll'}, )
])
]),
]
)
    return layout
def parse_contents(contents, filename, date):
    content_type, content_string = contents.split(',')
    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            dg = pd.read_csv(io.StringIO(decoded.decode('utf-8'))).to_sql('ABCC1', con, if_exists='append', index=False)
        elif 'xls' in filename:
            # Assume that the user uploaded an Excel file
            dg = pd.read_excel(io.BytesIO(decoded)).to_sql('ABCC1', con, if_exists='append', index=False)
    except Exception as e:
        print(e)
        return html.Div([
            'There was an error processing this file.'
        ])
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date))
    ])
# adding one column in dataa by setting its value to 0
dataaa['freq'] = 0
dataaa['grade'] = 0
dataaa['qty'] = 0
dataaa['stroke'] = 0
dataaa['point'] = 0
dataaa['man'] = 0
dataaa['depty'] = 0
# converting date object to date time format
dataaa['Last_Greased_Date:YYYY-MM-DD'] = pd.to_datetime(dataaa['Last_Greased_Date:YYYY-MM-DD'])
dataaa['Last_Greased_Date:YYYY-MM-DD'].dt.strftime("%Y-%m-%d")
dataa = dataaa
dataa['date'] = pd.to_datetime(dataa['Last_Greased_Date:YYYY-MM-DD'])
# checking eq id and eq name in scheduler dataset and then filling the respective new columns
for i in range(0, len(dss)):
    m = dss['EqName'][i]
    n = dss['EqId'][i]
    if (dataa['Equipment_ID'] == n).any():
        dataa['freq'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['Schedule.freqDays'][i]
        dataa['depty'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['Department'][i]
        dataa['grade'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['GreaseGrade'][i]
        dataa['point'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['Point'][i]
        dataa['qty'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['TotalGreaseused(ingms.)'][i]
        dataa['stroke'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['Stroke'][i]
        dataa['man'][(dataa['Equipment_Name'] == m) & (dataa['Equipment_ID'] == n)] = dss['AttendeBy'][i]
    else:
        dataa['freq'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['Schedule.freqDays'][i]
        dataa['depty'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['Department'][i]
        dataa['grade'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['GreaseGrade'][i]
        dataa['point'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['Point'][i]
        dataa['qty'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['TotalGreaseused(ingms.)'][i]
        dataa['stroke'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['Stroke'][i]
        dataa['man'][(dataa['Equipment_Name'] == m) & (dataa['HAC_Code'] == n)] = dss['AttendeBy'][i]
# Sorting dates in descending order in order to get the last greasing date of every equipment
dataa['Last_Greased_Date:YYYY-MM-DD'] = dataa['Last_Greased_Date:YYYY-MM-DD'].astype(str)
d = dataa.sort_values("Last_Greased_Date:YYYY-MM-DD", ascending=False)
d.reset_index(inplace=True)
# dropping the duplicates of eqid and eqname, keeping the first, in order to get the last greasing date of each equipment
di = d.drop_duplicates(["Equipment_Name", "HAC_Code"], keep='first')
di = d.drop_duplicates(["Equipment_ID", "Equipment_Name"], keep='first')
di.drop('index', axis=1, inplace=True)
di.reset_index(inplace=True)
app.layout = serve_layout
@app.callback(Output('next_greasing_dates', 'data'),
              [Input('single', 'date'),
               Input('dept_input', 'value')])
def ngrease_table(datee, dept):
    data = di[di['Department'] == dept]
    data1 = data[['Site', 'Last_Greased_Date:YYYY-MM-DD', 'Department', 'Equipment_ID', 'Equipment_Name',
                  'HAC_Code', 'Frequency_Schedule_Days', 'NEXT_Date:YYYY-MM-DD',
                  'Grease_Grade', 'Point', 'Stroke', 'Grease_Gun_No(gm_per_stroke)',
                  'Quantity_Grease_In_Grams(in_one_stroke)', 'Total_Quantity_Grease_Used(in_gms)',
                  'Name_Of_Technicians', 'Remarks', 'freq', 'date', "grade", 'point', "qty", "stroke", "man"]]
    data1.reset_index(inplace=True)
    data1.drop('index', axis=1, inplace=True)
    # splitting the end date into year, month and day
    yl = int(datee.split('-')[0])
    ml = int(datee.split('-')[1])
    dl = int(datee.split('-')[2])
    df2 = pd.DataFrame()
    for i in range(0, len(data1)):
        # splitting the last greased date into year, month and day
        y = int(data1['Last_Greased_Date:YYYY-MM-DD'][i].split('-')[0])
        m = int(data1['Last_Greased_Date:YYYY-MM-DD'][i].split('-')[1])
        d = int(data1['Last_Greased_Date:YYYY-MM-DD'][i].split('-')[2])
        if data1['freq'][i] != 0:  # skip freq = 0 because it would cause an error
            # subtracting the last date from the end date gives the number of days in between;
            # dividing by freq gives how many greasing cycles fit between the two dates,
            # e.g. a 30-day gap with a 10-day frequency means 3 greasing dates
            p = (date(year=yl, month=ml, day=dl) - date(year=y, month=m, day=d)) // data1['freq'][i]
            # building a new data frame with p.days rows; NEXT_Date starts at 0 and is filled below
            df1 = pd.DataFrame({'Site': [data1['Site'][i]] * p.days,
                                'Last_Greased_Date:YYYY-MM-DD': [data1['Last_Greased_Date:YYYY-MM-DD'][i]] * p.days,
                                'Department': [data1['Department'][i]] * p.days,
                                'Equipment_ID': [data1['Equipment_ID'][i]] * p.days,
                                'Equipment_Name': [data1['Equipment_Name'][i]] * p.days,
                                'HAC_Code': [data1['HAC_Code'][i]] * p.days,
                                'Frequency_Schedule_Days': [data1['freq'][i]] * p.days,
                                'NEXT_Date:YYYY-MM-DD': [0] * p.days,
                                "Grease_Grade": [data1["grade"][i]] * p.days,
                                "Point": [data1["point"][i]] * p.days,
                                "Stroke": [data1["stroke"][i]] * p.days,
                                "Quantity_Grease_In_Grams(in_one_stroke)": [data1["qty"][i]] * p.days, })
            for j in range(0, len(df1)):
                # adding freq days to the last date and saving it in NEXT_Date
                df1['NEXT_Date:YYYY-MM-DD'][j] = data1.date[i] + (timedelta(days=int(data1['freq'][i] * (j + 1))))
            df2 = pd.concat([df1, df2])
    # df2 holds every date from the last greased date up to the end date,
    # so filter it to only show dates from the selected start date onwards
    df3 = df2[df2['NEXT_Date:YYYY-MM-DD'] >= datetime.datetime.strptime(datee, '%Y-%m-%d')]
    df3.sort_values(by='NEXT_Date:YYYY-MM-DD', inplace=True)
    # strip the time component from the date
    df3['NEXT_Date:YYYY-MM-DD'] = df3['NEXT_Date:YYYY-MM-DD'].apply(lambda x: str(x.date()))
    df3.reset_index(inplace=True)
    df3.drop('index', axis=1, inplace=True)
    con = sa.create_engine('sqlite:///C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db')
    df3.to_sql('Next_Datee', con, if_exists='append', index=False)
    return df3.to_dict('records')
@app.callback(Output('output-of-upload', 'children'),
              [Input('upload-data', 'contents')],
              [State('upload-data', 'filename'),
               State('upload-data', 'last_modified')])
def update_output(list_of_contents, list_of_names, list_of_dates):
    if list_of_contents is not None:
        children = [
            parse_contents(contents, filename, date) for contents, filename, date in
            zip(list_of_contents, list_of_names, list_of_dates)]
        return children
if __name__ == "__main__":
    app.run_server()
Above is my code.
This is my requirements.txt:
Click==7.0
dash==1.9.0
dash-core-components==1.8.0
dash-html-components==1.0.2
dash-renderer==1.2.4
dash-table==4.6.0
Flask==1.1.1
Flask-Compress==1.4.0
Flask-SeaSurf==0.2.2
future==0.18.2
gunicorn==20.0.4
itsdangerous==1.1.0
Jinja2==2.11.1
MarkupSafe==1.1.1
numpy==1.18.1
pandas==1.0.1
plotly==4.5.0
python-dateutil==2.8.1
pytz==2019.3
retrying==1.3.3
six==1.14.0
SQLAlchemy==1.3.13
ua-parser==0.9.0
Werkzeug==1.0.0
This is my .gitignore file:
venv
*.pyc
.DS_Store
.env
This is my Procfile
web: gunicorn app:server

You hardcoded the path to your SQLite database:
con = sqlite3.connect('C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db', check_same_thread=False)
disk_engine = sa.create_engine("sqlite:///C:\\Users\\Admin\\Downloads\\Python Scripts\\Next_Gresing_Date\\Grease.db")
This path is not available on Linux.
You can build a relative path like this:
import os
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '../relative/path/to/file/you/want.db')
With .. you can go one folder up in the directory structure. os.path.dirname(__file__) returns the path of the folder the Python file is currently in.
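Applied to this app, a minimal sketch could look like the following (it assumes Grease.db is committed to the repo next to the app's Python file, which is an assumption about your project layout):
import os
import sqlite3
import sqlalchemy as sa
# build the database path relative to this file instead of a Windows-only absolute path
dirname = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(dirname, 'Grease.db')  # assumes Grease.db sits next to this script
con = sqlite3.connect(db_path, check_same_thread=False)
disk_engine = sa.create_engine("sqlite:///" + db_path, connect_args={"check_same_thread": False})
Also note that the Heroku dyno filesystem is ephemeral, so anything written to a bundled SQLite file is lost on restart or redeploy; that is a separate concern from the path problem.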

Related

Date picker component controlling the date of SQL query

I am implementing a date picker in my Dash web app. When a date is picked I want it to be passed to the SQL code so it returns values from the selected date. Although it sounds pretty simple, I have been stuck on it for two days and have no idea how to make it work. In the code below I am trying to return date_value from the function, assign it to the variable datev, and then insert it into the SQL query inside the class Values. The result is the error pasted just below.
ERROR
File "C:\Users\patryk.suwala\Documents\pythonProject\pythonProject6\data.py", line 28, in
dates, datev = update_output()
File "C:\Users\patryk.suwala\Documents\pythonProject\pythonProject6\venv\lib\site-packages\dash_callback.py", line 143, in add_context
output_spec = kwargs.pop("outputs_list")
KeyError: 'outputs_list'
app.layout = html.Div([
dcc.DatePickerSingle(id='date-picker-single', date=date(1997, 5, 10)),
html.Div(id='output-container-date-picker-single')
])
@app.callback(
    Output('output-container-date-picker-single', 'children'),
    Input('my-date-picker-single', 'date'))
def update_output(date_value):
    if date_value is not None:
        date_object = date.fromisoformat(date_value)
        date_string = date_object.strftime('%B %d, %Y')
        return date_string, date_value  # Include date_value in function return
dates, datev = update_output()  # Assign date_value to the variable

class Values:
    SLGordersAc = f"""SELECT COUNT(slg.id) AS slg_orders_accepted
        FROM slg_failure slg
        WHERE slg.slg_declined_reason_id = 0
        AND slg.created_date = {datev}; """  # Insert variable into SQL query
    cursor.execute(SLGordersAc)
    resultSLGoA = cursor.fetchall()
    [resultSLGoA] = resultSLGoA[0]
    SLGo = dftab3.at[3, 'Value']
    SLGo_PR = SLGo / OO
    dftab3.loc[4, :] = 'SLG Orders %', round(SLGo_PR, 2)
    dftab3.loc[5, :] = 'SLG Orders Accepted', resultSLGoA
    SLGoA = dftab3.at[5, 'Value']
    SLGoA_PR = SLGoA / OO
    dftab3.loc[6, :] = 'SLG Orders Accepted %', round(SLGoA_PR, 2)
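The KeyError shown above typically comes from calling the @app.callback-decorated function directly (dates, datev = update_output()): Dash supplies arguments such as outputs_list only when it invokes the callback itself. A minimal sketch of the usual pattern, running the query inside the callback with the picked date as a bind parameter (the sqlite3 connection and database file name are assumptions; the table and column names are taken from the query above):
import sqlite3
from dash.dependencies import Input, Output
con = sqlite3.connect('orders.db', check_same_thread=False)  # hypothetical database file
@app.callback(
    Output('output-container-date-picker-single', 'children'),
    Input('date-picker-single', 'date'))  # must match the DatePickerSingle id in the layout
def update_output(date_value):
    if date_value is None:
        return 'No date selected'
    # parameterised query: the picked date is passed as a bind parameter, not via an f-string
    query = """SELECT COUNT(slg.id) AS slg_orders_accepted
               FROM slg_failure slg
               WHERE slg.slg_declined_reason_id = 0
                 AND slg.created_date = ?"""
    (slg_orders_accepted,) = con.execute(query, (date_value,)).fetchone()
    return f'SLG orders accepted on {date_value}: {slg_orders_accepted}'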

Trying to add liquidity on uniswap v3 via web3py

I'm trying this code on Ropsten, but it keeps failing:
contract_instance = w3.eth.contract(address="0xC36442b4a4522E871399CD717aBDD847Ab11FE88", abi=liq_ABI)
tx_hash = contract_instance.functions.mint(
(
'0x31F42841c2db5173425b5223809CF3A38FEde360',
'0xc778417E063141139Fce010982780140Aa0cD5Ab',
3000,
49548,
50549,
w3.toWei(0.001,'ether'),
w3.toWei(0.17,'ether'),
w3.toWei(0,'ether'),
w3.toWei(0,'ether'),
wallet_address,
round(time.time()) + 60*20,
)
).buildTransaction({
'from': wallet_address,
'chainId': 3,
'gas': 300000,
'gasPrice': w3.toWei(500, 'gwei'),
'nonce': w3.eth.getTransactionCount(wallet_address),
'value': Web3.toWei(0, 'ether')
})
print(tx_hash)
signed_tx = w3.eth.account.signTransaction(tx_hash, private_key=wallet_key)
tx = w3.eth.sendRawTransaction(signed_tx.rawTransaction)
Failed transaction: https://ropsten.etherscan.io/tx/0xc2f3d6ffff164df331dd4b46fc65dadc5dba8f135f6e13ef1cd383a73a2d0c4b
The Web3-Ethereum-Defi Python library has a function called add_liquidity for Uniswap v3 that is probably what you are looking for.
Here is some example code:
fee = 3000
pool1 = deploy_pool(
web3,
deployer,
deployment=uniswap_v3,
token0=weth,
token1=usdc,
fee=fee,
)
pool2 = deploy_pool(
web3,
deployer,
deployment=uniswap_v3,
token0=usdc,
token1=dai,
fee=fee,
)
# add same liquidity amount to both pools as in SDK tests
min_tick, max_tick = get_default_tick_range(fee)
add_liquidity(
web3,
deployer,
deployment=uniswap_v3,
pool=pool1,
amount0=100_000,
amount1=100_000,
lower_tick=min_tick,
upper_tick=max_tick,
)
add_liquidity(
web3,
deployer,
deployment=uniswap_v3,
pool=pool2,
amount0=120_000,
amount1=100_000,
lower_tick=min_tick,
upper_tick=max_tick,
)
price_helper = UniswapV3PriceHelper(uniswap_v3)
# test get_amount_out, based on: https://github.com/Uniswap/v3-sdk/blob/1a74d5f0a31040fec4aeb1f83bba01d7c03f4870/src/entities/trade.test.ts#L394
for slippage, expected_amount_out in [
(0, 7004),
(5 * 100, 6670),
(200 * 100, 2334),
]:
amount_out = price_helper.get_amount_out(
10_000,
[
weth.address,
usdc.address,
dai.address,
],
[fee, fee],
slippage=slippage,
)
assert amount_out == expected_amount_out
# test get_amount_in, based on: https://github.com/Uniswap/v3-sdk/blob/1a74d5f0a31040fec4aeb1f83bba01d7c03f4870/src/entities/trade.test.ts#L361
for slippage, expected_amount_in in [
(0, 15488),
(5 * 100, 16262),
(200 * 100, 46464),
]:
amount_in = price_helper.get_amount_in(
10_000,
[
weth.address,
usdc.address,
dai.address,
],
[fee, fee],
slippage=slippage,
)
assert amount_in == expected_amount_in
See the full example code.
More information about Uniswap v3 and Python.

Altair/HoloVis Panel - Multiple Select not working

I am using Altair to generate my plots (as I need the linked bar-chart selection) and Panel to create my dashboard. I have two dropdowns, where the values in the second are conditional on the value in the first.
When I use a Single Select dropdown the dashboard works as expected. However, when I try to use any Multiple Select widget I get no data rendered on my chart.
import panel as pn
import altair as alt
import pandas as pd
from vega_datasets import data
import datetime as dt
from altair import datum
alt.renderers.enable('default')
pn.extension('vega')
data = pd.read_excel('randomtestdata.xlsx')
df = pd.DataFrame(data, columns=['Parent Location','Location','Alert Definition','Alert Type','Initiated Date'])
df = df[(df['Parent Location'] == 'Zone 1') | (df['Parent Location'] == 'Zone 2' )| (df['Parent Location'] == 'Zone 3' )]
df.rename(columns={'Parent Location': 'ParentLocation'},
inplace=True, errors='raise')
source = df
title = '##Dashboard'
subtitle = 'This is a test dashboard. Use widgets below to show desired chart.'
_locations = {
'Zone 1': source.loc[source['ParentLocation'] == 'Zone 1']['Location'].unique().tolist(),
'Zone 2' : source.loc[source['ParentLocation'] == 'Zone 2']['Location'].unique().tolist(),
'Zone 3': source.loc[source['ParentLocation'] == 'Zone 3']['Location'].unique().tolist()
}
zone = pn.widgets.Select(
name = 'Select a Zone',
value ='Zone 1',
options =['Zone 1', 'Zone 2', 'Zone 3']
)
#The following does not work
location = pn.widgets.MultiSelect(
name = 'Select a Location',
value =[True],
options =_locations[zone.value]
)
# The following does work:
# location = pn.widgets.Select(
# name = 'Select a Location',
# value = _locations[zone.value][0],
# options =_locations[zone.value]
# )
date_range_slider = pn.widgets.DateRangeSlider(
name='Date range to consider',
start=dt.datetime(2021, 1, 1), end=dt.datetime(2022, 1, 1),
value=(dt.datetime(2021, 1, 1), dt.datetime(2022, 1, 1))
)
@pn.depends(zone.param.value, location.param.value, date_range_slider.param.value, watch=True)
def get_plot(zone, location, date_range):  # start function
    df = source
    df['Initiated Date'] = pd.to_datetime(df['Initiated Date'])  # format date as datetime
    start_date = date_range_slider.value[0]
    end_date = date_range_slider.value[1]
    mask = (df['Initiated Date'] > start_date) & (df['Initiated Date'] <= end_date)
    df = df.loc[mask]
    selection2 = alt.selection_single(fields=['Alert Type'])
    chart = alt.Chart(df).mark_bar(
        color="#0c1944",
        opacity=0.8).encode(
        x=alt.X('Alert Type:O', scale=alt.Scale(domain=source['Alert Type'].unique())),
        y='count(Alert Type)').transform_filter(
        (datum.Location == location)
    ).add_selection(selection2)
    chart2 = alt.Chart(df).mark_bar(
        color="#0c1944",
        opacity=0.8).encode(
        x='Alert Definition',
        y='count(Alert Definition)').transform_filter(
        (datum.Location == location)
    ).transform_filter(selection2)
    return (chart | chart2)
@pn.depends(zone.param.value, watch=True)
def _update_locations(zone):
    locations = _locations[zone]
    location.options = locations
    location.value = locations[0]
    return
pn.Row(
pn.Column(title, subtitle, zone, location, date_range_slider,
get_plot )
)
Random test data:
https://github.com/KWSpittles/testdata
The reason this is not working is that you are filtering your Altair charts using
.transform_filter(
datum.Location == location
)
which only allows filtering on a single value. When you pass a list of multiple values you need to use indexof instead, like this:
.transform_filter(
f'indexof({location}, datum.Location) != -1'
)
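As a concrete sketch of that suggestion applied inside get_plot (df, source, selection2 and location are the names already used in the question; json.dumps is only there to serialise the selected list into a literal the Vega expression parser accepts):
import json
# location is the list of values coming from the MultiSelect widget
location_filter = f'indexof({json.dumps(list(location))}, datum.Location) != -1'
chart = alt.Chart(df).mark_bar(
    color="#0c1944",
    opacity=0.8).encode(
    x=alt.X('Alert Type:O', scale=alt.Scale(domain=source['Alert Type'].unique())),
    y='count(Alert Type)').transform_filter(location_filter).add_selection(selection2)
The same filter string replaces (datum.Location == location) in chart2 as well.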

drop a DataFrame column in python

I desperately need help here. I am trying to get the dimensions of a DataFrame. I always get 31 columns instead of 30: Value should be 30, found 31. I tried reset_index(drop=True) but I still get the same error. Any help is appreciated. Stay safe.
def read_data(dataset_id):
data = None
# Begin CODE
if dataset_id == 'breast_cancer':
disease = 'breast_cancer'
datafile = 'wdbc.data'
bc_columns = ['ptid', 'diagnosis', 'mean_radius', 'mean_texture',
'mean_perimeter', 'mean_area',
'mean_smoothness', 'mean_compactness', 'mean_concavity',
'mean_concave_pts', 'mean_symmetry ',
'mean_fractal_dim', 'std_err_radius', 'std_err_texture',
'std_err_perimeter', 'std_err_area',
'std_err_smoothness', 'std_err_compactness',
'std_err_concavity', 'std_err_concave_pts',
'std_err_symmetry ', 'std_err_fractal_dim', 'worst_radius',
'worst_texture', 'worst_perimeter',
'worst_area', 'worst_smoothness', 'worst_compactness',
'worst_concavity', 'worst_concave_pts',
'worst_symmetry ', 'worst_fractal_dim']
data = pd.read_csv(datafile, skipinitialspace=True, names=bc_columns)
data.drop(labels=['ptid'], axis=1, inplace=True)
bc_diag_class = get_class_list_dict(data['diagnosis'])
elif dataset_id == 'hyperthyroidism':
disease = 'hyperthyroidism'
datafile1 = 'allhyper.data' # tab delimited, no header
datafile2 = 'allhyper.test' # comma delimited, no header
ht_columns = ['age', 'Gender', 'on thyroxine', 'query on thyroxine', 'on antithyroid medication', 'sick',
              'pregnant', 'thyroid surgery', 'I131 treatment', 'query hypothyroid', 'query hyperthyroid',
              'lithium', 'goitre', 'tumor', 'hypopituitary', 'psych',
              'TSH measured', 'TSH', 'T3 measured',
              'T3', 'TT4 measured', 'TT4', 'T4U measured', 'T4U', 'FTI measured', 'FTI', 'TBG measured', 'TBG',
              'referral source', 'diag_class']
data1 = pd.read_csv(datafile1, sep='\t', skipinitialspace=True,
names=ht_columns)
data2 = pd.read_csv(datafile2, skipinitialspace=True, names=ht_columns)
data = data1.append(data2, ignore_index=True)
data = data.replace(to_replace='?', value=float('nan'))
data[['diag_class', 'ptid']] = data['diag_class'].str.split(pat='.\|',
expand=True)
diag_class = data['diag_class']
data.drop(labels=['diag_class', 'ptid'], axis=1, inplace=True)
data.insert(0, 'diag_class', diag_class)
data[['age', 'TSH', 'T3', 'TT4', 'T4U', 'FTI', 'TBG']] \
= data[['age', 'TSH', 'T3', 'TT4', 'T4U', 'FTI',
'TBG']].apply(pd.to_numeric)
elif dataset_id == 'cervical_cancer':
disease = 'cervical_cancer'
datafile = 'risk_factors_cervical_cancer.csv'
cc_columns = ('Age', 'Num_sex_partners', 'First_sex_intercourse',
'Num_pregnancies',
'Smokes', 'Smokes_years', 'Smokes_packs_year',
'Hormonal_Contraceps',
'Hormonal_Contraceps_years', 'IUD', 'IUD_years', 'STD',
'STD_number',
'STD_condylomatosis', 'STDscervical_condylomatosis',
'STD_vaginal_condylomatosis',
'STD_vulvo_perin_condylomatosis', 'STD_syphilis',
'STD_pelvic_inflam_disease',
'STD_genital_herpes', 'STD_molluscum_contagiosum',
'STD_AIDS', 'STD_HIV', 'STD_HepB',
'STD_HPV', 'STD_Num_diagnosis',
'STD_Time_since_first_diag', 'STDs_Time_since_last_diag',
'Dx_Cancer', 'Dx_CIN', 'Dx_HPV', 'Dx', 'Hinselmann', 'Schiller',
'Citology', 'Biopsy')
data = pd.read_csv(datafile, skipinitialspace=True)
data.columns = cc_columns
data = data.replace(to_replace='?', value=float('nan'))
biopsy_class = data['Biopsy']
data.drop(labels=['Dx_Cancer', 'Dx_CIN', 'Dx_HPV', 'Dx', 'Hinselmann',
'Schiller', 'Citology', 'Biopsy'],
axis=1, inplace=True)
data.insert(0, 'Biopsy', biopsy_class)
data[['Num_sex_partners', 'First_sex_intercourse', 'Num_pregnancies',
'Smokes_years', 'Smokes_packs_year',
'Hormonal_Contraceps_years', 'IUD_years',
'STD_number', 'STD_Time_since_first_diag',
'STDs_Time_since_last_diag']] \
= data[['Num_sex_partners', 'First_sex_intercourse',
'Num_pregnancies', 'Smokes_years', 'Smokes_packs_year',
'Hormonal_Contraceps_years', 'IUD_years',
'STD_number', 'STD_Time_since_first_diag',
'STDs_Time_since_last_diag']].apply(pd.to_numeric)
elif dataset_id == 'liver_cancer':
disease = 'liver_cancer'
datafile = 'Indian Liver Patient Dataset (ILPD).csv'  # comma delimited, no header
ld_columns = ['Age', 'Gender', 'TB', 'DB', 'Alkphos', 'Sgpt', 'Sgot',
'TP', 'ALB', 'A/G Ratio', 'Selector']
data = pd.read_csv(datafile, skipinitialspace=True, names=ld_columns)
data.loc[data['Gender'] == 'Male', 'Gender'] = 'M'
data.loc[data['Gender'] == 'Female', 'Gender'] = 'F'
selector_class = data['Selector']
data.drop(labels=['Selector'], axis=1, inplace=True)
data.insert(0, 'Selector', selector_class)
data.reset_index(drop=True, inplace=True)
# End CODE
print(data.head(20))
return data
def dimensions(dataset_id, dataset):
    dim = None
    # dim = dataset.shape
    num_inst = len(dataset)
    num_feat = len(dataset.iloc[0].reset_index())
    dim = (num_inst, num_feat)
    return dim
If you want to drop a column from a DataFrame, you can do it like this.
If you want to drop a single column:
df.drop(['column_name'], axis=1)
If you want to drop multiple columns:
df.drop(['Column1', 'Column2'], axis=1)
If you want to drop based on some other condition instead of the column name, you can comment below and I'll update the answer accordingly. Hope it helps!
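A small runnable sketch with a made-up DataFrame; note that drop returns a new DataFrame unless you reassign the result or pass inplace=True, and that shape reports (rows, columns) directly without counting the index:
import pandas as pd
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
df = df.drop(['C'], axis=1)            # drop a single column and reassign the result
df.drop(['B'], axis=1, inplace=True)   # or drop in place
print(df.shape)  # (2, 1)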

How to process streaming FX data and calculate live-stream ATR()?

I am trying out algorithmic trading with Python, for backtesting.
First I downloaded some tick data, then resampled it into 10-second OHLC data, and then applied an ATR indicator defined as a function:
df = pd.read_csv( 'gbpusd jan17.csv',
names = ['instrument', 'time', 'bid', 'ask'],
index_col = 1,
parse_dates = True,
nrows = 1000
)
df = df['ask'].resample('10s').ohlc()
n = list( range( 0, len( df.index ) ) ) # I changed index because my indicator
# doesn't work on datetime index
df.index = n
def ATR(df, n):  # ________________________ Average True Range
    i = 0
    TR_l = [0]
    while i < df.index[-1]:
        # true range of the next bar relative to the current close
        TR = max(df.get_value(i + 1, 'high'), df.get_value(i, 'close')) \
             - min(df.get_value(i + 1, 'low'), df.get_value(i, 'close'))
        TR_l.append(TR)
        i = i + 1
    TR_s = pd.Series(TR_l)
    ATR = pd.Series(TR_s.rolling(window=12,
                                 min_periods=12,
                                 center=False).mean(),
                    name='ATR_' + str(n))
    df = df.join(ATR)
    return df
Outputs: head()
instrument bid ask
time
2017-01-02 00:00:01.105 GBP/USD 1.23399 1.23551
2017-01-02 00:00:01.561 GBP/USD 1.23399 1.23551
2017-01-02 00:00:05.122 GBP/USD 1.23399 1.23551
2017-01-02 00:00:05.525 GBP/USD 1.23365 1.23577
2017-01-02 00:00:06.139 GBP/USD 1.23365 1.23577
open high low close ATR_8
\
1.23562 1.23562 1.23562 1.23562 0.000120 0.596718
1.23562 1.23575 1.23548 1.23548 0.000121 0.619445
1.23548 1.23548 1.23541 1.23541 0.000122 0.645532
1.23541 1.23541 1.23541 1.23541 0.000117 0.674178
1.23541 1.23548 1.23541 1.23548 0.000123 0.687229
But the problem starts when I connect to the Oanda API to get streaming rates: the while loop for the ATR calculation doesn't seem to work. At first I thought it was failing because there were not enough rows of data at the beginning, so I made the ATR calculation start only after a certain number of ticks, but it still does not work.
Can anyone help with the while loop, and how I should change it for streaming data?
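One way to sidestep the index bookkeeping entirely is to compute the true range vectorised and take a rolling mean; this is only a sketch, assuming the resampled frame has the high/low/close columns shown above:
import pandas as pd
def atr(df, n=12):
    # true range per bar: max of (high - low), |high - prev close|, |low - prev close|
    prev_close = df['close'].shift(1)
    tr = pd.concat([df['high'] - df['low'],
                    (df['high'] - prev_close).abs(),
                    (df['low'] - prev_close).abs()],
                   axis=1).max(axis=1)
    # n-bar rolling mean of the true range
    return tr.rolling(window=n, min_periods=n).mean().rename('ATR_' + str(n))
# usage: recompute on the frame each time a new 10-second bar closes
# df['ATR_12'] = atr(df, 12)
Because it works on whatever index the frame has, it needs no integer re-index, and it simply returns NaN until n bars have accumulated, which avoids the not-enough-rows problem at the start of a stream.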
