Flask API error converting JSON string to pandas dataframe - python

I am building my first API in Flask. It should receive a JSON string from Postman, and according to the log in the terminal the POST request is working: I can see my JSON string printed by print(json_) in the code.
However, the next line is a problem:
query = pd.read_json(json_, orient='index')
This line should convert the JSON into a pandas DataFrame so I can convert it to a NumPy array and load it into my machine learning model.
Outside of Flask my logic works well, but here the code breaks. I put in several print statements to trace the breaking point, and it seems to be this query line.
Any suggestions are very much appreciated. Thank you in advance!
Vlad
The complete code for the API looks like this:
from flask import Flask, request, jsonify
import joblib
import traceback
import sys
import pandas as pd
import numpy as np

app = Flask(__name__)

@app.route('/predict', methods=['POST'])
def predict():
    try:
        json_ = request.json
        print(json_)
        query = pd.read_json(json_, orient='index')
        print('query', query)
        res = np.array(query).reshape(1, -1)
        print('results', res)
        prediction = rf.predict(res)
        print(prediction)
        return jsonify({'prediction': list(prediction)})
    except:
        return jsonify({'trace': traceback.format_exc()})

if __name__ == '__main__':
    try:
        port = int(sys.argv[1])
    except:
        port = 12345
    rf = joblib.load('random_forest_model_diabetes_refined_31_5_2021.pkl')  # Load ML model
    print('Model loaded')
    app.run(debug=True, port=port)

When I replaced my query line
query = pd.read_json(json_, orient='index')
with:
query = pd.json_normalize(json_)
It works. I am puzzled.
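A likely explanation (a side note, not part of the original post): request.json gives you an already-parsed Python dict, not a JSON string. pd.read_json expects a JSON string, path, or file-like object, so handing it a dict breaks, while pd.json_normalize works directly on dicts and lists of dicts. A minimal sketch, assuming the client posts a flat JSON object of feature values:

json_ = request.json                  # already a Python dict, e.g. {"feature_1": 5.1, "feature_2": 3.5}

# works: json_normalize flattens a dict (or list of dicts) into a one-row DataFrame
query = pd.json_normalize(json_)

# also works, and is equivalent for a flat dict of scalars
query = pd.DataFrame([json_])

# fails: read_json wants a JSON *string* (or path / file-like object), not a dict
# query = pd.read_json(json_, orient='index')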

Related

I want to fetch data from a website and put in MySQL workbench, but it's not working

First time programmer here, please don't be harsh on me.
I want to fetch data from the URLs and put it inside a MySQL Workbench database. The script says that it's working, but the data never shows up. What is wrong in the script?
# GET ALL WorldRecords from https://api.isuresults.eu/records
import requests
import pandas as pd
from pandas.io.json import json_normalize
from helper_db import make_db_connection

engine = make_db_connection

def get_isu_worldrecord_db(engine):
    URL = "https://api.isuresults.eu/records/?type=WR"
    df_final = pd.DataFrame()
    for i in range(1, 20):
        params = {'page': i}
        api = requests.get(url=URL, params=params)
        data = api.json()
        df = json_normalize(data, 'results')
        df_final = df_final.append(df, ignore_index=True, sort=False)
    df_final = df_final.drop(['laps'], axis=1)
    df_final.to_sql("Tester", con=engine, if_exists="replace", chunksize=1000)
    return
You define this function, but you never actually call it.
Add one more line at the end:
get_isu_worldrecord_db(engine)
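For completeness, a sketch of how the end of the script might look with that call added (one assumption on my part: if make_db_connection is a function, it probably also needs to be called with parentheses so that engine is a real engine object rather than the function itself):

engine = make_db_connection()      # assumption: the helper returns a usable engine/connection
get_isu_worldrecord_db(engine)     # actually run the function so the "Tester" table gets written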

Multiple pickle models in a single Flask framework and predicting the output in a web API

I have made a Flask web app that reads a single pickle model and predicts output in an HTML file. But now I want to load multiple pickle files that predict and display results in my web API.
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle

app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))

@app.route('/')
def home():
    return render_template('index.html')

@app.route('/predict', methods=['POST'])
def predict():
    int_features = [int(x) for x in request.form.values()]
    final_features = [np.array(int_features)]
    prediction = model.predict(final_features)
    output = round(prediction[0], 2)
    return render_template('index.html', prediction_text='Water prediction should be {} litres'.format(output))

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)
You have already done all the hard work!
You can extend the existing code to multiple pickle files as shown below:
list_of_model_pickles = ['model1.pkl', 'model2.pkl', 'model3.pkl', 'model4.pkl']  # add any model pickle file here
prediction_text_all_pickles = 'Water prediction should be:\n'

for model_file in list_of_model_pickles:
    f_pickle = open(model_file, 'rb')
    model = pickle.load(f_pickle)
    prediction = model.predict(final_features)
    output = round(prediction[0], 2)
    f_pickle.close()
    prediction_text_all_pickles += f' {output} litres according to model in file {model_file}. \n'

# At this point (when the entire loop finishes processing all your pickle files)
# your prediction_text_all_pickles is correctly populated with the required information.
return render_template('index.html', prediction_text=prediction_text_all_pickles)
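One possible variation (an assumption on my part, not something the answer requires): load each pickle once at module level so the files are not re-read on every request, then loop over the preloaded models inside the view. The file names below are placeholders.

MODEL_FILES = ['model1.pkl', 'model2.pkl', 'model3.pkl', 'model4.pkl']  # placeholder file names

models = {}
for model_file in MODEL_FILES:
    with open(model_file, 'rb') as f_pickle:      # context manager closes the file for us
        models[model_file] = pickle.load(f_pickle)

# inside the /predict view, after final_features is built:
# prediction_text = 'Water prediction should be:\n'
# for model_file, model in models.items():
#     output = round(model.predict(final_features)[0], 2)
#     prediction_text += f' {output} litres according to model in file {model_file}.\n'
# return render_template('index.html', prediction_text=prediction_text)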

How to make a Flask app parse JSON by URL input? ie: website/RedMango opens a page of a RedMango JSON object

To clarify the question: using a Flask URL converter, how would one parse the JSON at this website by entering a URL such as https://ds-med-cabinet.herokuapp.com/strainjson/Purple-Kush, so that when the user visits /Purple-Kush only that JSON object is displayed? This website is for building an API for educational purposes only, so I would appreciate any ideas about parsing the entire JSON by URL input, most likely using a Flask URL converter or any other practical method. Thank you very much for your time and consideration. Here is the code I have tried, as a mock-up of the Flask URL converter documentation:
# Directory.py

# Imports
from os import path
import pandas as pd
from flask import Blueprint, render_template
from werkzeug.routing import BaseConverter

# Make Blueprint for __init__.py
Directory = Blueprint("Directory", __name__)

# Import Leafly csv
file_name = path.join(path.dirname(__file__), "Leafly.csv")
df = pd.read_csv(file_name)
strains = df['Strain']

# Custom converter
class ListConverter(BaseConverter):
    def to_python(self, value):
        return value.split('+')
    def to_url(self, values):
        return '+'.join(BaseConverter.to_url(value)
                        for value in values)

# Flask Url-converter
@Directory.route('/<strain>')
def strain_url(strain):
    """Show the json object for the given strain."""
    strain = []
    for strain in strains:
        strain
    return render_template('json.html', strain=strain)


# __init__.py

# Imports
from flask import Flask
from web_app.routes.Directory import Directory, ListConverter
from web_app.routes.GET_PUT_API import GET_PUT_API

# Create Flask app
def create_app():
    app = Flask(__name__)
    app.register_blueprint(Directory)
    app.register_blueprint(GET_PUT_API)
    app.url_map.converters['list'] = ListConverter
    return app

if __name__ == "__main__":
    my_app = create_app()
    my_app.run(debug=True)
The strains variable in the for loop is a list of every strain from the CSV version of the data, and the json.html being rendered is the HTML file of JSON objects that are rendered at that website. With this code, /whateveristypedintheurl just renders all of the data at the website shared (because the HTML file is already full of JSON objects and nothing is getting parsed). Thanks again for checking this out.
P.S. If you want to replicate this by creating a Flask app, you can find the CSV here as cannabis.csv (I renamed it to Leafly.csv), and you can convert the DataFrame to JSON using the following code:
# dftojson.py

# Imports
from os import path
import csv
import json

file_path = r'C:\Users\johnj\OneDrive\Documents\Lambda\BuildWeek3\data-science\cannabis.csv'
csvfile = open(file_path, encoding="utf8")
jsonfile = open('cannabis.json', 'w')

fieldnames = ("Strain", "Type", "Rating", "Effects", "Flavor", "Description")
reader = csv.DictReader(csvfile, fieldnames)
for row in reader:
    json.dump(row, jsonfile)
    jsonfile.write('\n')
I copied and pasted the json from cannabis.json into a new json.html file (or just change the file extension) and then added the route like so:
# Directory.py

# Strain JSON Page
@Directory.route("/strainjson")
def df():
    return render_template("json.html")
This is what I came up with; I built it off of this article: Medium article on using CSV with Flask. For example, locally in your URL you can type '5Th-Element' and that should display it in JSON format. Adding jsonify on the return will help with API issues.
import csv
from os import path
from flask import Flask, render_template, Blueprint, jsonify, json

# Make Blueprint for __init__.py
ParseURL = Blueprint("ParseURL", __name__)

# Import Leafly csv
file_name = path.join(path.dirname(__file__), "Leafly.csv")

# route to display single dictionary list item as JSON object
@APP.route('/<strain>')
def strain_url(strain):
    '''
    Parameters: name of strain from database as a string.
    For loops the cannabis.csv file, creating a dictionary.
    Returning only the strain that was given as a parameter.
    '''
    with open('cannabis.csv') as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        dict_strain = {}
        for row in data:
            if row[0] == strain:
                dict_strain = {
                    "strain": row[0],
                    "type": row[1],
                    "rating": row[2],
                    "effects": row[3],
                    "flavor": row[4],
                    "description": row[5]
                }
                break
    return jsonify(dict_strain)
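Since the question's Directory.py already loads Leafly.csv into a pandas DataFrame, a pandas-based variant of the same lookup could look like this (a sketch; it assumes jsonify is imported in that module and that the CSV has the columns listed in the question: Strain, Type, Rating, Effects, Flavor, Description):

@Directory.route('/strainjson/<strain>')
def strain_json(strain):
    """Return the single strain that matches the URL segment as JSON."""
    match = df[df['Strain'] == strain]
    if match.empty:
        return jsonify(error='No strain named {}'.format(strain)), 404
    # to_dict(orient='records') gives a list of row dicts; return the first match
    return jsonify(match.to_dict(orient='records')[0])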
If I understand your concern well, there are 2 parts:
Create a custom URL converter (refer to the tutorial and doc) to get 2 separate lists of columns and terms, following these patterns:
(venv) C:\Python37\myapps\flask\cannabis>flask routes
Endpoint            Methods  Rule
------------------  -------  -----------------------------
api.index           GET      /api/
api.index           GET      /api/<list:cols>
api.index           GET      /api/<list:cols>/<list:terms>
With pandas reading the cannabis.csv file (download), you filter the data depending on what you get as a list of columns combined with an optional list of terms.
For the moment part 1 is SOLVED, and I'm a little bit stuck with the pandas part.
I've done my best, and this is how I set up a working Flask app demo:
cannabis
.. cannabis
.... api
...... __init__.py
...... views.py
.... errors
...... __init__.py
...... views.py
.... __init__.py
.... cannabis.py
.... utils.py
.. tests
.. venv
.. cannabis.csv
.. .flaskenv
/.flaskenv
FLASK_APP=cannabis:create_app()
FLASK_ENV=development
FLASK_DEBUG=0
/cannabis/__init__.py
from .cannabis import create_app
/cannabis/cannabis.py
from flask import Flask
from .utils import ListConverter

def create_app():
    """Create a Flask application using the app factory pattern."""
    app = Flask(__name__)
    app.url_map.converters['list'] = ListConverter

    """Register blueprints."""
    from .errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from .api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api')

    return app
/cannabis/utils.py
from werkzeug.routing import BaseConverter

class ListConverter(BaseConverter):
    def to_python(self, values):
        return values.split('+')

    def to_url(self, values):
        return '+'.join(value for value in values)
For the to_url() function, I think you don't need BaseConverter.to_url(value), otherwise you'll get stuck with this error:
[..]
File "C:\Python37\myapps\flask\cannabis\cannabis\utils.py", line 11, in <genexpr>
    return '+'.join(BaseConverter.to_url(term) for term in terms)
TypeError: to_url() missing 1 required positional argument: 'value'
/cannabis/api/__init__.py
from .views import bp
/cannabis/api/views.py
from flask import Blueprint, jsonify

bp = Blueprint('api', __name__)

@bp.route('/', defaults={'cols': [], 'terms': []})
@bp.route('/<list:cols>', defaults={'terms': []})
@bp.route('/<list:cols>/<list:terms>')
def index(cols, terms):
    cols_a = []
    for col in cols:
        if col: cols_a.append(col)

    terms_a = []
    for term in terms:
        if term: terms_a.append(term)

    # we need a list of cols so we can filter data for the list of terms within those cols
    if not cols_a and terms_a:
        return jsonify(message="Please choose at least one column")

    kws = []
    kws.append(cols_a)
    kws.append(terms_a)

    # for the moment just make sure we get the 2 lists/arrays
    # then filter data with pandas using the combination of the 2 lists, convert to json and return the result
    # .. WORKING ..
    return jsonify(message="api index page", kws=kws)
/cannabis/errors/__init__.py
from .views import bp
/cannabis/errors/views.py
from flask import Blueprint, jsonify
from werkzeug.exceptions import HTTPException

bp = Blueprint('errors', __name__)

@bp.app_errorhandler(HTTPException)
def handle_exception(e):
    return jsonify(code=e.code,
                   name=e.name,
                   description=e.description)
Now you can run the Flask app and make some tests with different URLs:
No columns or terms given (the default): return non-filtered data
http://localhost:5000/api/
{"kws":[[],[]],"message":"api index page"}
1 column is given: return data with only the given column
http://localhost:5000/api/Strain
{"kws":[["Strain"],[]],"message":"api index page"}
N columns are given: return data with only the given N columns
http://localhost:5000/api/Strain+Rating
{"kws":[["Strain","Rating"],[]],"message":"api index page"}
Notice this URL with extra optional '+' characters; it still works.
http://localhost:5000/api/Strain++++Rating++Flavor
{"kws":[["Strain","Rating","Flavor"],[]],"message":"api index page"}
N columns and M terms are given: return data with only the given N columns and the rows that contain the M terms (CHECK how to do this with pandas)
http://localhost:5000/api/Strain++Rating++Flavor/Purple-Kush++5+++Blackberry+++
{"kws":[["Strain","Rating","Flavor"],["Purple-Kush","5","Blackberry"]],"message":"api index page"}
Notice this URL when no column is given:
http://localhost:5000/api/+/Purple-Kush
{"message":"Please choose at least one column"}
Other URLs return non-filtered data:
http://localhost:5000/api/+++/+++
{"kws":[[],[]],"message":"api index page"}
[..]
I'll update my code once I finish implementing the pandas part.
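For what it's worth, a possible sketch of that pending pandas step (assumptions: cannabis.csv loads cleanly with pd.read_csv and cols_a only contains existing column names); the last lines would replace the placeholder return in index():

# at module level in views.py (assumption: cannabis.csv sits next to the app)
import pandas as pd
df_all = pd.read_csv('cannabis.csv')

# inside index(), replacing the placeholder return:
data = df_all[cols_a] if cols_a else df_all       # keep only the requested columns
if terms_a:
    # keep rows where any kept column contains any of the requested terms
    pattern = '|'.join(terms_a)
    mask = data.astype(str).apply(
        lambda col: col.str.contains(pattern, case=False, na=False)
    ).any(axis=1)
    data = data[mask]
return jsonify(message="api index page", data=data.to_dict(orient='records'))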
Other resources:
flask jsonify vs python json dumps: json.dumps vs flask.jsonify

Python Flask - Uploading file to Pandas

I'm pretty new to Pandas and Flask, trying to leverage them to output a summarised version of a CSV containing survey feedback that I can email to users periodically.
As a standalone function it works, so long as I give it a specified input file (e.g. 'users/sample.csv') and outfile, but when running as part of the application with a file uploaded through the HTML form, it fails with:
TypeError: csuppfb() takes at least 2 arguments (0 given)
Essentially I want to pass the uploaded file to the function and have Pandas do its thing, but it doesn't get that far. Below is the code:
import re, os
import beatbox
import pandas as pd
import numpy as np
import argparse
from jinja2 import Environment, FileSystemLoader
from weasyprint import HTML
from os.path import isfile, join
from flask import Flask, request, redirect, url_for, render_template, json as fjson, send_from_directory
from werkzeug import secure_filename
from mapping import Autotagging, Manualtagging
from defs import *

UPLOAD_FOLDER = './uploads'
PIVOT_FOLDER = './pivot'
ALLOWED_EXTENSIONS = set(['csv'])

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['PIVOT_FOLDER'] = PIVOT_FOLDER

@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            filename = randomword(6) + '_' + secure_filename(file.filename)
            file.save(os.path.join(app.config['PIVOT_FOLDER'], filename))
            return redirect(url_for('csuppfb', df=filename))
    return render_template('mappingtest.html')

@app.route('/csuppfb', methods=['POST', 'GET'])
def csuppfb(df, infile, index_list=["Case Owner", "Case Number", "Support Survey - Service rating"], value_list=["Age (Hours)"]):
    """
    Creating a pivot table from the raw dataframe and returning it as a dataframe
    """
    table = pd.pivot_table(df, index=index_list, values=value_list,
                           aggfunc=[np.sum, np.mean], fill_value=0)
    return table

def get_summary_stats(df, product):
    """
    Get a stats summary
    """
    results.append(df[df["Support Survey - Service rating"] == product]["Closed"].mean())
    results.append(df[df["Support Survey - Service rating"] == product]["Age (Hours)"].mean())
    return results

def dataform(df):
    """
    Take the dataframe and output it in html to output a pdf report or display on a web page
    """
    df = pd.read_csv(filename)
    csuppreport = pivot_table(df, filename)
    agent_df = []
    for agent in csuppreport.index.get_level_values(0).unique():
        agent_df.append([agent, csuppreport.xs(agent, level=0).to_html()])
    env = Environment(loader=FileSystemLoader('.'))
    template = env.get_template("csupp.html")
    template_vars = {"title": "CSUPP FB REPORT",
                     "Excellent": get_summary_stats(df, "Excellent"),
                     "Good": get_summary_stats(df, "Good"),
                     "csupp_pivot_table": csuppreport.to_html(),
                     "agent_detail": agent_df}
    html_out = template.render(template_vars)
    HTML(string=html_out).write_pdf(args.outfile.name, stylesheets=["style.css"])
    return render_template('csupp.html')
What's the best way to have the file I've uploaded be used as the dataframe argument in
def csuppfb(df,infile...
?
Any advice would be very much appreciated. I've a feeling it's something glaringly obvious I'm missing.
You need to use the args object from request, which contains all the URL params:
http://flask.pocoo.org/docs/0.10/quickstart/#the-request-object
See this basic example:
@app.route('/csuppfb', methods=['POST', 'GET'])
def csuppfb():
    if request.args['df']:
        df = request.args['df']
        # Do your panda stuff here. Instead of returning the filename :D
        return str(df)
    else:
        return 'nofile'
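Building on that, a sketch of the "panda stuff" itself (assumptions: the df query parameter set by url_for('csuppfb', df=filename) names a CSV that the /feedback view saved into app.config['PIVOT_FOLDER'], and the index/value columns are the ones from the question's code):

@app.route('/csuppfb', methods=['POST', 'GET'])
def csuppfb():
    filename = request.args.get('df')
    if not filename:
        return 'nofile'
    filepath = os.path.join(app.config['PIVOT_FOLDER'], filename)
    df = pd.read_csv(filepath)                     # load the uploaded CSV into a DataFrame
    table = pd.pivot_table(
        df,
        index=["Case Owner", "Case Number", "Support Survey - Service rating"],
        values=["Age (Hours)"],
        aggfunc=[np.sum, np.mean],
        fill_value=0,
    )
    return table.to_html()                         # or feed it into the Jinja/WeasyPrint report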

Unable to parse Yahoo Finance API JSON Data? Code included (Python Flask)

A bit new to JSON... Does anyone know how to properly iterate through the response and grab the symbol and change, for example? I've tried wrapping everything in json.loads and using strings, but I keep getting errors regarding tuples. FYI, I'm using ticker inside the string, but I changed it to YHOO for this question for the convenience of anyone trying to run the same code.
from flask import Flask
from flask.ext.compress import Compress
from flask import render_template
from httplib2 import Http
import json

http = Http()
app = Flask(__name__)
Compress(app)
app.config['DEBUG'] = True
app.config['TESTING'] = True

@app.route('/<ticker>', methods=['GET'])
def check(ticker):
    yahoo_api = http.request("http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20IN%20(%22YHOO%22)&format=json&env=http://datatables.org/alltables.env")
    return yahoo_api[1]

if __name__ == '__main__':
    app.run()
yahoo_api[1] is a string; use json.loads to parse the JSON.
import json
from httplib2 import Http
yahoo_api = Http().request('http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20IN%20(%22YHOO%22)&format=json&env=http://datatables.org/alltables.env')
yahoo_json = json.loads(yahoo_api[1])
change = yahoo_json['query']['results']['quote']['Change']
symbol = yahoo_json['query']['results']['quote']['symbol']
Another way is to use requests; you don't need to worry about the JSON parsing, it is easy to use.
import requests
r = requests.get('http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20IN%20%28%22YHOO%22%29&format=json&env=http://datatables.org/alltables.env')
change = r.json()['query']['results']['quote']['Change']
symbol = r.json()['query']['results']['quote']['symbol']
I'd think you might have forgotten to take the second part of the tuple (the content), although that seems unlikely as you do do this in the return statement. Or maybe you forgot the UTF-8 decode?
import json
import pprint
from httplib2 import Http
http = Http()
url = "http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20IN%20(%22YHOO%22)&format=json&env=http://datatables.org/alltables.env"
yahoo_api = http.request(url)
result = json.loads(yahoo_api[1].decode('utf-8'))
pprint.pprint(result)
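Tying that back to the Flask route from the question, the handler could decode the content and pick out the fields before returning (a sketch; note the YQL endpoint used here may no longer be online, so treat the URL as illustrative):

from flask import jsonify

@app.route('/<ticker>', methods=['GET'])
def check(ticker):
    url = ("http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20"
           "yahoo.finance.quotes%20where%20symbol%20IN%20(%22" + ticker + "%22)"
           "&format=json&env=http://datatables.org/alltables.env")
    yahoo_api = http.request(url)                       # (response, content) tuple
    data = json.loads(yahoo_api[1].decode('utf-8'))
    quote = data['query']['results']['quote']
    return jsonify(symbol=quote['symbol'], change=quote['Change'])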
