I take an image from the user and I want to pass it to TensorFlow to determine what is in it:
@app.route('/uploader', methods=['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        f = request.files['file']
        f.save(f.filename)
        i = Image.open(f)
        image_handler(i)
def image_handler(image):
    create_graph()
    print("Model loaded")
    node_lookup = NodeLookup()
    print("Node lookup loaded")
    print("Img: ")
    print(image)
    predictions = dict(run_inference_on_image(image))
    print(predictions)
    return jsonify(predictions=predictions)
image_handler() uses the functions and the NodeLookup class from the TensorFlow ImageNet example found here:
https://github.com/tensorflow/models/blob/master/tutorials/image/imagenet/classify_image.py
The problem: the app loads fine and the user can select an image, but when they hit submit I get the following error while computing the predictions:
Expected binary or unicode string, got <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=200x200 at 0x243126ABA90>
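For reference, run_inference_on_image() in classify_image.py expects a path to an image file (it reads the bytes itself via tf.gfile), not a PIL Image object, which is why the PIL object triggers this error. A minimal sketch of the upload handler passing the saved file's path instead, otherwise unchanged from the code above:

@app.route('/uploader', methods=['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        f = request.files['file']
        f.save(f.filename)                # write the upload to disk
        return image_handler(f.filename)  # pass the path, not a PIL Image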
I am trying to make a website that can make predictions on images using TensorFlow, Flask, and Python.
This is my code:
from flask import Flask, render_template
import os
import numpy as np
import pandas as pd

app = Flask(__name__)

@app.route('/')
def index():
    return render_template('index.html')

import tensorflow as tf
import tensorflow_hub as hub

model = tf.keras.models.load_model(MODEL_PATH)

IMG_SIZE = 224
BATCH_SIZE = 32

custom_path = "http://t1.gstatic.com/licensed-image?q=tbn:ANd9GcQd6lM4HtInRF3cxw6h3MgUZIIiJCdMgFvXKrhaJrbw61tN3aYpMIVBi0dx0KPv1sdCrLk0sBhPeNVt8m0"
custom_data = create_data_batches(custom_path, test_data=True)
custom_preds = model.predict(custom_data)

# Get custom image prediction labels
custom_pred_labels = [get_pred_label(custom_preds[i]) for i in range(len(custom_preds))]
print(custom_pred_labels)

@app.route('/my-link/')
def my_link():
    return f"The predictions are: {custom_pred_labels}"

if __name__ == '__main__':
    app.run(host="localhost", port=3000, debug=True)
The process_image function:
def process_image(image_path, img_size=IMG_SIZE):
    """
    Takes an image file path and turns the image into a Tensor.
    """
    image = tf.io.read_file(image_path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize(image, size=[img_size, img_size])
    return image
The relevant part of the create_data_batches function:
def create_data_batches(X, y=None, batch_size=BATCH_SIZE, valid_data=False, test_data=False):
    """
    Creates batches of data out of image (X) and label (y) pairs.
    Shuffles the data if it's training data but doesn't shuffle it if it's validation data.
    Also accepts test data as input (no labels).
    """
    if test_data:
        print("Creating test data batches...")
        data = tf.data.Dataset.from_tensor_slices((tf.constant(X)))  # only filepaths (no labels)
        data_batch = data.map(process_image).batch(BATCH_SIZE)
        return data_batch
The get_image_label function:
def get_image_label(image_path, label):
    """
    Takes an image file path and the associated label, processes the image and returns a tuple of (image, label).
    """
    image = process_image(image_path)
    return image, label
The get_pred_label function:
def get_pred_label(prediction_probabilities):
    """
    Turns an array of prediction probabilities into a label.
    """
    return unique_breeds[np.argmax(prediction_probabilities)]
Now when I run this, I get the following error:
ValueError: Unbatching a tensor is only supported for rank >= 1
I tried turning the path into a list, as one of the solutions I found suggested:
custom_path = ["http://t1.gstatic.com/licensed-image?q=tbn:ANd9GcQd6lM4HtInRF3cxw6h3MgUZIIiJCdMgFvXKrhaJrbw61tN3aYpMIVBi0dx0KPv1sdCrLk0sBhPeNVt8m0"]
But when I run that, I get this error:
UNIMPLEMENTED: File system scheme 'http' not implemented (file: 'http://t1.gstatic.com/licensed-image?q=tbn:ANd9GcQd6lM4HtInRF3cxw6h3MgUZIIiJCdMgFvXKrhaJrbw61tN3aYpMIVBi0dx0KPv1sdCrLk0sBhPeNVt8m0')
Any help would be appreciated.
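For reference, both errors point at the input rather than the model: tf.data.Dataset.from_tensor_slices needs at least a rank-1 tensor (hence the list), and tf.io.read_file only understands local (or GCS-style) file paths, not http URLs. A minimal sketch of one workaround, assuming tf.keras.utils.get_file is acceptable for downloading the image first:

import tensorflow as tf

# Download the remote image to a local file first (cached under ~/.keras/datasets),
# then pass the *local* path, wrapped in a list so the dataset gets a rank-1 tensor.
url = "http://t1.gstatic.com/licensed-image?q=tbn:ANd9GcQd6lM4HtInRF3cxw6h3MgUZIIiJCdMgFvXKrhaJrbw61tN3aYpMIVBi0dx0KPv1sdCrLk0sBhPeNVt8m0"
local_path = tf.keras.utils.get_file("custom_image.jpg", origin=url)

custom_data = create_data_batches([local_path], test_data=True)
custom_preds = model.predict(custom_data)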
I'm currently using Flask to combine the backend and frontend for image classification. I'm also using a .h5 file to predict the output. The output is completely wrong; it should be the prediction probability. Here is the code:
def upload():
    if request.method == 'POST':
        # Get the file from the POST request
        f = request.files['file']

        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, 'uploads', secure_filename(f.filename))
        f.save(file_path)

        MODEL_ARCHITECTURE = 'model_adam_01.json'
        MODEL_WEIGHTS = 'model_50_eopchs_adam_01.h5'

        json_file = open(MODEL_ARCHITECTURE)
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)

        model.load_weights(MODEL_WEIGHTS)
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

        prediction = model_predict(file_path, model)

        print("I think that is ")
        print(prediction)
        # print('I think that is {}.'.format(predicted_class.lower()))

        return str(prediction)
Following is the model_predict function, to which I pass the image path and the model:
def model_predict(img_path, model):
    '''
    Args:
    -- img_path : a URL path where a given image is stored.
    -- model : a given Keras CNN model.
    '''
    IMG = image.load_img(img_path)
    print(type(IMG))
    IMG_ = np.asarray(IMG)
    print(type(IMG_))
    print(IMG_.shape)
    IMG_ = prepare(IMG_)
    print(IMG_.shape)
    # print(model)
    prediction = model.predict(IMG_)
    print(prediction.shape)
    return str(prediction)
Following is the output that I am getting:
I think that is
[[0.]]
Why does this problem occur? I am using Keras 2.3.1 and TensorFlow 1.15.2.
When making predictions you have to apply the same preprocessing steps that you applied to your training data before training the model. I think that is the problem rather than the code itself.
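For example, if the network was trained on images resized to a fixed size and rescaled to [0, 1], the prediction path needs the same steps. A minimal sketch of such a model_predict; the target size and the 1/255 rescaling are assumptions here and must match whatever the training pipeline actually used:

import numpy as np
from keras.preprocessing import image

def model_predict(img_path, model, target_size=(224, 224)):
    # Load at the same size the model was trained on (224x224 is an assumption).
    img = image.load_img(img_path, target_size=target_size)
    x = image.img_to_array(img)
    x = x / 255.0                  # same rescaling as during training (assumed)
    x = np.expand_dims(x, axis=0)  # add the batch dimension
    return model.predict(x)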
Does TensorFlow maintain its own internal global state, which is broken by loading the model in one function and trying to use it in another?
Using a singleton for storing the model:
class Singleton(object):
    _instances = {}

    def __new__(class_, *args, **kwargs):
        if class_ not in class_._instances:
            class_._instances[class_] = super(Singleton, class_).__new__(class_, *args, **kwargs)
        return class_._instances[class_]

class Context(Singleton):
    pass
When I do:
@app.route('/file', methods=['GET', 'POST'])
def upload_file():
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            # filename = secure_filename(file.filename)
            filename = file.filename
            filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(filepath)

            context = Context()
            if context.loaded:
                img = cv2.imread(filepath)
                img = cv2.resize(img, (96, 96))
                img = img.astype("float") / 255.0
                img = img_to_array(img)
                img = np.expand_dims(img, axis=0)
                classes = context.model.predict(img)

def api_run():
    context = Context()
    context.model = load_model('model.h5')
    context.loaded = True
I'm getting this error: ValueError: Tensor Tensor("dense_1/Softmax:0", shape=(?, 2), dtype=float32) is not an element of this graph.
However, if I move context.model = load_model('model.h5') inside the upload_file function, then everything works. Why is that happening? How can I store the model for later use?
Yes, Tensorflow in graph mode has its own internal global state.
You don't want to reload your model at every prediction, that's really inefficient.
The right strategy is to load the model at the start of your web app and then reference the global state.
Use a global variable for the model and graph and do something like this:
loaded_model = None
graph = None

def load_model(export_path):
    # global variables
    global loaded_model
    global graph

    # call the Keras loader explicitly so this function does not shadow it
    loaded_model = tf.keras.models.load_model(export_path)
    graph = tf.get_default_graph()
then, in your prediction function you do:
@app.route('/', methods=["POST"])
def predict():
    if request.method == "POST":
        data = request.data
        with graph.as_default():
            probas = loaded_model.predict(data)
A complete short example of how to do this can be found here.
Alternatively, if you use TensorFlow 2.0, which defaults to eager mode, there is no graph and therefore no such problem.
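For the record, a minimal sketch of the same idea in TF 2.x; the model path and the JSON payload shape are assumptions, not part of the original code:

import numpy as np
import tensorflow as tf
from flask import Flask, request, jsonify

app = Flask(__name__)
loaded_model = tf.keras.models.load_model('model.h5')  # load once, at startup

@app.route('/', methods=["POST"])
def predict():
    # hypothetical JSON payload: {"instances": [[...], [...]]}
    data = np.array(request.get_json()["instances"])
    probas = loaded_model.predict(data)
    return jsonify(probas=probas.tolist())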
I had a similar issue. Everything was solved when I added
from tensorflow.python.keras import backend as K
and then, before loading the model, called
K.clear_session()
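Putting the ordering together, a minimal sketch (the model filename is a placeholder):

from tensorflow.python.keras import backend as K
from tensorflow.keras.models import load_model

K.clear_session()               # reset the global Keras/TF graph state first
model = load_model('model.h5')  # then load the model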
Hello, I am making a small page to show my results. I am working on a sentiment analysis project. First, I have the following labels:
senti=["furious","angry","angry0","Indiferent","happy","enthusiastic","Euphoric"]
I show one of these labels depending on the result of a predict function built with Keras, and at the moment this all works well. Now I wish to show an image depending on the label above. I tried creating an array with the paths of the images as follows, but I am not sure how to write the image function:
images=['home/image0.jpg','home/image1.jpg','home/image2.jpg','home/image3.jpg','home/image4.jpg','home/image5.jpg','home/image6.jpg']
def image():
This is the function that performs the prediction; at the moment it just returns one of the labels above. I would like to also display a distinct image, so I need to modify the following function:
def predict(text):
    seqs = tok.texts_to_sequences([text])
    print(text)
    word_index = tok.word_index
    print('Found %s unique tokens.' % len(word_index))
    sequence_pred = sequence.pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
    print(sequence_pred)
    prediction = model.predict(sequence_pred)
    print(prediction)
    return senti[np.argmax(prediction[0])]
#app.route("/", methods=['GET', 'POST'])
def index():
print(request.method)
if request.method == 'POST':
q=request.form['querytext']
prediction=predict(q)
return render_template("result.html",prediction=prediction,text=q)
return render_template("main.html")
Since I am a beginner at Flask, I would appreciate any support or suggestions to overcome this situation. Thanks for the help.
After some very useful feedback I tried:
senti=["furious","angry","angry0","Indiferent","happy","enthusiastic","Euphoric"]
def predict(text):
seqs = tok.texts_to_sequences([text])
print(text)
word_index = tok.word_index
print('Found %s unique tokens.' % len(word_index))
sequence_pred = sequence.pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
print(sequence_pred)
prediction = model.predict(sequence_pred)
print(prediction)
return senti[np.argmax(prediction[0])]
#app.route("/", methods=['GET', 'POST'])
def index():
senti=["furious","angry","angry0","Indiferent","happy","enthusiastic","Euphoric"]
images=['smile.jpg','smile.jpg','smile.jpg','smile.jpg','smile.jpg','smile.jpg','smile.jpg']
lookup_keys = dict(zip(senti, images))
print(request.method)
if request.method == 'POST':
q=request.form['querytext']
prediction=predict(q)
image_path = lookup_keys[prediction] # get the path
return render_template("result.html",
prediction=prediction,
text=q,
image_url=image_path)
return render_template("main.html")
I am not getting any error, but the image is not displayed and I am not sure what is wrong. At the moment I am just testing with one image, smile.jpg, located at the same level as my app.py file:
$ ls
app.py smile.jpg
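One likely reason the image does not render (an assumption, since the template isn't shown): by default Flask only serves files placed in the static/ folder, so a bare filename like smile.jpg sitting next to app.py is not reachable from the browser. A minimal sketch, assuming the image is moved to ./static/smile.jpg and result.html renders the URL in an <img> tag:

from flask import url_for

# inside the POST branch of index(), after computing `prediction`:
image_path = lookup_keys[prediction]  # e.g. 'smile.jpg', now stored in ./static/
return render_template("result.html",
                       prediction=prediction,
                       text=q,
                       image_url=url_for('static', filename=image_path))

The template would then display it with something like <img src="{{ image_url }}">, assuming that is how result.html uses image_url.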
Just create a dictionary mapping your labels to the image values, and use that to look up the image for a particular sentiment:
>>> senti=["furious","angry","angry0","Indiferent","happy","enthusiastic","Euphoric"]
>>> images=['home/image0.jpg','home/image1.jpg','home/image2.jpg','home/image3.jpg','home/image4.jpg','home/image5.jpg','home/image6.jpg']
>>> dict(zip(senti, images))
{'enthusiastic': 'home/image5.jpg', 'Indiferent': 'home/image3.jpg', 'furious': 'home/image0.jpg', 'Euphoric': 'home/image6.jpg', 'angry': 'home/image1.jpg', 'happy': 'home/image4.jpg', 'angry0': 'home/image2.jpg'}
>>> lookup_values = dict(zip(senti, images))
>>> lookup_values['angry']
'home/image1.jpg'
You can use this in your view method to get the right image path and then send it to the template:
#app.route("/", methods=['GET', 'POST'])
def index():
senti=["furious","angry","angry0","Indiferent","happy","enthusiastic","Euphoric"]
images=['home/image0.jpg','home/image1.jpg','home/image2.jpg','home/image3.jpg','home/image4.jpg','home/image5.jpg','home/image6.jpg']
lookup_keys = dict(zip(senti, images))
print(request.method)
if request.method == 'POST':
q=request.form['querytext']
prediction=predict(q)
image_path = lookup_keys[prediction] # get the path
return render_template("result.html",
prediction=prediction,
text=q,
image_url=image_path)
return render_template("main.html")
Suppose I have an upload field for an image (like a profile picture) for a record. My question is: how will I be able to change the format of that picture?
I would also love to use the PIL or PythonMagick API, but how would I do that in web2py?
Let's suppose you have a profile table and an image table.
Then, you have a controller to edit a profile image.
By "change the format of the picture", I suppose you want to resize the image, or create a thumbnail...
Here is an example using PIL:
def edit_image():
    """
    Edit a profile image, creates a thumb...
    """
    thumb = ""
    profile = db.profile(request.vars.profile_id)
    image = db(db.image.id == profile.image).select().first()
    if image:
        form = SQLFORM(db.image, image, deletable=True, showid=False)
        thumb = image.thumb
    else:
        form = SQLFORM(db.image)
    if form.accepts(request.vars, session):
        response.flash = T('form accepted')
        # resize the original image to a better size and create a thumbnail
        __makeThumbnail(db.image, form.vars.id, (800, 800), (260, 260))
        redirect(URL('images'))
    elif form.errors:
        response.flash = T('form has errors')
    return dict(form=form, thumb=thumb)
Here is the code of __makeThumbnail
def __makeThumbnail(dbtable, ImageID, image_size=(600, 600), thumbnail_size=(260, 260)):
    try:
        thisImage = db(dbtable.id == ImageID).select()[0]
        from PIL import Image
    except:
        return
    full_path = path.join(request.folder, 'static', 'images', thisImage.file)
    im = Image.open(full_path)
    im.thumbnail(image_size, Image.ANTIALIAS)
    im.save(full_path)
    thumbName = 'thumb.%s' % (thisImage.file)
    full_path = path.join(request.folder, 'static', 'images', 'thumbs', thumbName)
    try:
        im.thumbnail(thumbnail_size, Image.ANTIALIAS)
    except:
        pass
    im.save(full_path)
    thisImage.update_record(thumb=thumbName)
    return
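If by "change the format" you literally mean converting, say, a PNG upload to JPEG, PIL can do that too. A minimal sketch along the same lines as the helper above; the function name and the .jpg target are my own placeholders, not part of the original code:

def __convertToJpeg(dbtable, ImageID):
    """Convert a stored image file to JPEG (sketch only)."""
    from os import path
    from PIL import Image
    thisImage = db(dbtable.id == ImageID).select().first()
    if not thisImage:
        return
    full_path = path.join(request.folder, 'static', 'images', thisImage.file)
    im = Image.open(full_path).convert('RGB')        # drop alpha so JPEG can encode it
    new_path = path.splitext(full_path)[0] + '.jpg'  # keep the name, swap the extension
    im.save(new_path, 'JPEG')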