I have a Django project that receives an image, processes it, and returns a response. I am writing a script to test my API, but the bytes the client sends are not the same as the bytes the server receives.
Client code:
# client.py
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import cv2
img = cv2.imread(image_file)
data = {'image': img.tobytes(), 'shape': img.shape}
data = urlencode(data).encode("utf-8")
req = Request(service_url, data)
response = urlopen(req)
print(response.read().decode('utf-8'))
Views code:
# service/app/views.py
import ast
import numpy as np
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def process_image(request):
    if request.method == 'POST':
        # Converts string to tuple
        shape = ast.literal_eval(request.POST.get('shape'))
        img_bytes = request.POST.get('image')
        # Reconstruct the image
        img = np.fromstring(img_bytes, dtype=np.uint8).reshape(shape)
        # Process image
        return JsonResponse({'result': 'Hello'})
When I run the client code I get ValueError: total size of new array must be unchanged. I did the following checks with an 8x8 RGB image:
# client.py
>> print(img.shape)
(8, 8, 3)
>> print(img.dtype)
uint8
>> print(len(img.tobytes()))
192
# service/app/views.py
>> print(shape)
(8, 8, 3)
>> print(len(img_bytes))
187
The shape field is fine, but the image field has a different size. As the image is small, I printed the bytes on the client and on the server, and they did not match. I think this is an encoding problem.
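A quick standalone sketch of why that diagnosis is plausible (assuming the server decodes the form body as UTF-8 with replacement characters, which is not confirmed in the original post): arbitrary image bytes are not valid UTF-8, so a decode/encode round trip changes their length.

# Sketch: arbitrary bytes do not survive a text decode/encode round trip.
raw = bytes(range(256))  # stand-in for img.tobytes()
decoded = raw.decode('utf-8', errors='replace')
print(len(raw), len(decoded.encode('utf-8')))  # lengths differ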
I want to send the image as bytes because I think it is a compact way to send this kind of data. If anyone knows a better approach to sending images via HTTP, let me know.
Thanks!
Inspired by John Morris's comment, I found the answer to my question in the Numpy Array to base64 and back to Numpy Array post. If anyone has the same doubt, here is the solution:
Client code:
# client.py
import base64
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import cv2
img = cv2.imread(image_file)
img_b64 = base64.b64encode(img)
data = {'image': img_b64, 'shape': img.shape}
data = urlencode(data).encode("utf-8")
req = Request(service_url, data)
response = urlopen(req)
print(response.read().decode('utf-8'))
Views code:
# service/app/views.py
import ast
import base64
import numpy as np
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def process_image(request):
    if request.method == 'POST':
        shape = ast.literal_eval(request.POST.get('shape'))
        buffer = base64.b64decode(request.POST.get('image'))
        # Reconstruct the image
        img = np.frombuffer(buffer, dtype=np.uint8).reshape(shape)
        # Process image
        return JsonResponse({'result': 'Hello'})
Thank you all!
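On the "better approach" point above: one common alternative (a sketch, not from the original thread) is to compress the image with cv2.imencode and upload it as a multipart/form-data file, which avoids both the base64 size overhead and the separate shape field, since PNG stores the dimensions itself.

# Hedged sketch using the `requests` library; `image_file` and
# `service_url` are the same placeholders as in the question.
import cv2
import requests

img = cv2.imread(image_file)
ok, png = cv2.imencode('.png', img)  # PNG keeps shape/dtype internally
resp = requests.post(service_url, files={'image': ('img.png', png.tobytes())})
# Server side (sketch): np.frombuffer(request.FILES['image'].read(), np.uint8)
# then cv2.imdecode(..., cv2.IMREAD_COLOR)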
Related
I am trying to convert an image to JSON and POST it to a REST API served by MLflow. Below you can see my code. I get an error like "cannot reshape array of size 535500 into shape (1,4096)". Can you please help me? Thank you in advance.
import json
import cv2
import requests
import base64
import numpy as np
from PIL import Image
data = np.asarray(Image.open('Dataset/test2/dog_PNG50348.png').convert('LA'))
data = data.reshape((1, 64*64))
columns = [f"col_{c}" for c in range(0, data[0].shape[0])]
dct = {"columns": columns, "data": [data[0].tolist()]}
print(json.dumps(dct, indent=2) + "\n")
#print(data)
headers = {'Content-Type': 'application/json'}
request_uri = 'http://127.0.0.1:5000/invocations'
if __name__ == '__main__':
    try:
        response = requests.post(request_uri, data=json.dumps(dct, indent=2) + "\n", headers=headers)
        print(response.content)
        print('done!!!')
    except Exception as ex:
        raise ex
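For context on the numbers (an editor's note, not part of the original post): 64*64 = 4096 values, while this PNG yields 535500 values (e.g. 510x525 pixels times 2 'LA' channels), so the reshape cannot succeed. A hedged sketch of one possible fix, assuming the served model really expects flattened 64x64 single-channel input:

# Hypothetical fix: force the image to 64x64 grayscale before flattening.
img = Image.open('Dataset/test2/dog_PNG50348.png').convert('L').resize((64, 64))
data = np.asarray(img).reshape((1, 64 * 64))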
I have been trying to follow an easy tutorial on how to get Sentinel-2 images for a series of polygons I have. For some reason, no matter what I do, I keep running into the same error (detailed below).
from sentinelsat import SentinelAPI, read_geojson, geojson_to_wkt
import geopandas as gpd
import folium
import rasterio as rio
from rasterio.plot import show
from rasterio.mask import mask
import matplotlib.pyplot as plt
from pyproj import Proj, transform
import pandas as pd
import os
from datetime import date
import sentinelhub
user = 'xxxxx'
password = 'xxxxx'
url = 'https://scihub.copernicus.eu/dhus'
api = SentinelAPI(user, password, url)
validation = gpd.read_file('EarthData/tutakoke_permafrost_validation/Tutakoke_permafrost_validation.shp')
plateau_transects = gpd.read_file('EarthData/tutakoke_permafrost_plateau_transects/Tutakoke_Permafrost_Plateau_Transects.shp')
validation = validation.set_crs(epsg=32604, inplace=True, allow_override=True)
validation['imdate']='01-01-2019'
validation['imdate'] = pd.to_datetime(validation['imdate'])
validation['geometry2'] = validation.geometry.buffer(2, cap_style=3)
footprint=validation['geometry2'][1]
products = api.query(footprint,
                     date=('20200109', '20200510'),
                     platformname='Sentinel-2',
                     processinglevel='Level-2A',
                     cloudcoverpercentage=(0, 20))
The error I keep getting is:
SentinelAPIError: HTTP status 200 OK: API response not valid. JSON decoding failed.
Ah, it was that my footprint was not in the correct lat/lon format!
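A sketch of what that fix can look like (assuming the buffered geometry is in UTM zone 4N per the EPSG:32604 above, and that the query footprint must be WKT in WGS84):

import geopandas as gpd

# Reproject the buffered geometry to lat/lon (EPSG:4326) before querying.
geom_wgs84 = gpd.GeoSeries(validation['geometry2'], crs='EPSG:32604').to_crs(epsg=4326)
footprint = geom_wgs84[1].wkt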
I successfully trained a model on my own dataset using the Keras YOLOv3 project (GitHub project link), and I got good predictions.
I would like to deploy this model on the web using Flask so it can work with a stream or with IP cameras.
I saw many tutorials explaining how to do that, but in reality I did not find what I am looking for.
How can I get started?
You can use flask-restful to design a simple REST API.
You can use OpenCV's VideoCapture to grab the video stream and extract frames.
import numpy as np
import cv2
# Open a sample video available in sample-videos
vcap = cv2.VideoCapture('URL')
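For example, a minimal read loop over the capture (a sketch; the frame handling is up to you):

while True:
    ret, frame = vcap.read()  # `frame` is a BGR numpy array
    if not ret:
        break  # stream ended or dropped
    # encode `frame` and POST it, as in the client below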
The client will take an image/frame, encode it using base64, add other details like height and width, and make a request.
import time
import base64
import zlib

import numpy as np
import requests

t1 = time.time()
for _ in range(1000):  # 1000 continuous requests
    frame = np.random.randint(0, 256, (416, 416, 3), dtype=np.uint8)  # dummy RGB image
    # replace `frame` with your image
    # compress (optional)
    data = frame  # zlib.compress(frame)
    data = base64.b64encode(data)
    data_send = data
    # decode side, for reference:
    # data2 = base64.b64decode(data)
    # data2 = zlib.decompress(data2)
    # fdata = np.frombuffer(data2, dtype=np.uint8)
    # make a post request
    r = requests.post("http://127.0.0.1:5000/predict",
                      json={'imgb64': data_send.decode(), 'w': 416, 'h': 416})
    # print the response here
t2 = time.time()
print(t2 - t1)
Your server will load the darknet model, and when it receives a post request it will simply return the model output.
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
import json
import numpy as np
import base64
# compression
import zlib

# load keras model
# model = load_model('model.h5')

app = Flask(__name__)
api = Api(app)

parser = reqparse.RequestParser()
parser.add_argument('imgb64', location='json', help='type error')
parser.add_argument('w', type=int, location='json', help='type error')
parser.add_argument('h', type=int, location='json', help='type error')

class Predict(Resource):
    def post(self):
        request.get_json(force=True)
        data = parser.parse_args()
        if data['imgb64'] == "":
            return {
                'data': '',
                'message': 'No file found',
                'status': 'error'
            }
        img = data['imgb64']
        w = data['w']
        h = data['h']
        # decode the base64 payload back into a numpy array
        data2 = img.encode()
        data2 = base64.b64decode(data2)
        # data2 = zlib.decompress(data2)
        fdata = np.frombuffer(data2, dtype=np.uint8).reshape(w, h, -1)
        # do model inference here
        if img:
            return json.dumps({
                'mean': np.mean(fdata),
                'channel': fdata.shape[-1],
                'message': 'darknet processed',
                'status': 'success'
            })
        return {
            'data': '',
            'message': 'Something went wrong',
            'status': 'error'
        }

api.add_resource(Predict, '/predict')

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5000, threaded=True)
In the # do model inference here part, just use your detect/predict function.
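For instance, a hypothetical hook for that spot (assuming a Keras model already loaded as model and a network that takes a normalized float32 batch; adjust preprocessing to your network):

# Hypothetical inference hook for the "# do model inference here" spot.
batch = fdata.astype('float32')[None, ...] / 255.0  # add batch dim, normalize
preds = model.predict(batch)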
If you want to use native darknet, see https://github.com/zabir-nabil/tf-model-server4-yolov3
If you want to use gRPC instead of REST, see https://github.com/zabir-nabil/simple-gRPC
This code is from here
I have the following code for a Telegram bot which I am building:
import pandas as pd
from pandas import datetime
from pandas import DataFrame as df
import matplotlib
from pandas_datareader import data as web
import matplotlib.pyplot as plt
import datetime
import requests
from bottle import (
    run, post, response, request as bottle_request
)

BOT_URL = 'https://api.telegram.org/bot128secretns/'

def get_chat_id(data):
    """
    Method to extract chat id from telegram request.
    """
    chat_id = data['message']['chat']['id']
    return chat_id

def get_message(data):
    """
    Method to extract message id from telegram request.
    """
    message_text = data['message']['text']
    return message_text

def send_message(prepared_data):
    """
    Prepared data should be json which includes at least `chat_id` and `text`
    """
    message_url = BOT_URL + 'sendMessage'
    requests.post(message_url, json=prepared_data)

def get_ticker(text):
    stock = f'^GSPC'
    start = datetime.date(2000, 1, 1)
    end = datetime.date.today()
    data = web.DataReader(stock, 'yahoo', start, end)
    plot = data.plot(y='Open')
    return plot

def prepare_data_for_answer(data):
    answer = get_ticker(get_message(data))
    json_data = {
        "chat_id": get_chat_id(data),
        "text": answer,
    }
    return json_data

@post('/')
def main():
    data = bottle_request.json
    answer_data = prepare_data_for_answer(data)
    send_message(answer_data)  # <--- function for sending answer
    return response  # status 200 OK by default

if __name__ == '__main__':
    run(host='localhost', port=8080, debug=True)
When I run this code I get the following error:
TypeError: Object of type AxesSubplot is not JSON serializable
What this code is supposed to do is take a ticker symbol from the Telegram app and send back its chart.
I know this is because JSON does not handle images.
What can I do to resolve this?
Sorry, I'm a bit late to the party. Here is a possible solution below, though I didn't test it. Hope it works or at least gives you a way to go about solving the issue :)
import datetime
from io import BytesIO

import requests
from pandas_datareader import data as web
from bottle import (
    run, post, response, request as bottle_request
)

BOT_URL = 'https://api.telegram.org/bot128secretns/'

def get_chat_id(data):
    """
    Method to extract chat id from telegram request.
    """
    chat_id = data['message']['chat']['id']
    return chat_id

def get_message(data):
    """
    Method to extract message id from telegram request.
    """
    message_text = data['message']['text']
    return message_text

def send_photo(prepared_data):
    """
    Prepared data should be a dict which includes at least `chat_id` and `plot_file`
    """
    data = {'chat_id': prepared_data['chat_id']}
    files = {'photo': prepared_data['plot_file']}
    # Note: the fields must go in `data` (multipart form fields), not `json`;
    # requests ignores `json` when `files` is present.
    requests.post(BOT_URL + 'sendPhoto', data=data, files=files)

def get_ticker(text):
    stock = f'^GSPC'
    start = datetime.date(2000, 1, 1)
    end = datetime.date.today()
    data = web.DataReader(stock, 'yahoo', start, end)
    plot = data.plot(y='Open')
    return plot

def prepare_data_for_answer(data):
    plot = get_ticker(get_message(data))
    # Write the plot Figure to a file-like bytes object:
    plot_file = BytesIO()
    fig = plot.get_figure()
    fig.savefig(plot_file, format='png')
    plot_file.seek(0)
    prepared_data = {
        "chat_id": get_chat_id(data),
        "plot_file": plot_file,
    }
    return prepared_data

@post('/')
def main():
    data = bottle_request.json
    answer_data = prepare_data_for_answer(data)
    send_photo(answer_data)  # <--- function for sending answer
    return response  # status 200 OK by default

if __name__ == '__main__':
    run(host='localhost', port=8080, debug=True)
The idea is not to send a message using the sendMessage Telegram API endpoint, but to send a photo file using the sendPhoto endpoint. Here, the savefig call in the prepare_data_for_answer function converts the AxesSubplot instance returned by get_ticker into a file-like BytesIO object, which we then send as a photo to Telegram with the send_photo function (previously named send_message).
You may use bob-telegram-tools
from bob_telegram_tools.bot import TelegramBot
import matplotlib.pyplot as plt

token = '<your_token>'
user_id = int('<your_chat_id>')
bot = TelegramBot(token, user_id)

plt.plot([1, 2, 3, 4])
plt.ylabel('some numbers')
bot.send_plot(plt)
# This method deletes the generated image
bot.clean_tmp_dir()
You cannot send a matplotlib figure directly. You need to convert it to bytes and then send it as a multipart message.
data.plot returns a matplotlib.axes.Axes object. You can convert the figure to bytes like this:
from io import BytesIO

img = BytesIO()
plot.get_figure().savefig(img, format='png')
img.seek(0)
yukuku/telebot has some good code on how to send the image as a message. Check this line here.
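For completeness, a hedged sketch of that multipart send against the Bot API's sendPhoto endpoint, with img the BytesIO from the snippet above and BOT_URL/chat_id as in the question (names assumed):

import requests

requests.post(BOT_URL + 'sendPhoto',
              data={'chat_id': chat_id},
              files={'photo': ('chart.png', img, 'image/png')})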
I have read that you can pass a bytes-like object to reportlab.lib.utils.ImageReader(). If I read in a file path it works fine, but I want to use a bytes-like object instead, so I can keep the plot in memory rather than constantly saving updated plots to disk.
This is where I found the code to convert the image into a string
https://www.programcreek.com/2013/09/convert-image-to-string-in-python/
This is an example of how to use BytesIO as input for ImageReader()
How to draw image from raw bytes using ReportLab?
This function makes a plot and saves it to memory with BytesIO(). string is the value I'm going to pass along later.
#imports
import PyPDF2
from io import BytesIO
from reportlab.lib import utils
from reportlab.lib.pagesizes import landscape, letter
from reportlab.platypus import (Image, SimpleDocTemplate,
Paragraph, Spacer)
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch, mm
import datetime
import os
import csv
import io
import base64
import urllib
from django.contrib import admin
from django.forms import model_to_dict
from django.http import HttpResponse
from django.urls import path
from django.views.decorators.csrf import csrf_protect
from django.utils.decorators import method_decorator
from reporting import models, functions, functions2
import matplotlib
matplotlib.use('agg')
from matplotlib import pyplot as plt
import numpy as np
def make_plot(data):
    items = [tuple(item) for item in data.items()]
    keys = [item[0] for item in items]
    vals = [item[1] for item in items]
    fig, ax = plt.subplots()
    ind = np.arange(len(keys))  # the x locations for the groups
    width = 0.35  # the width of the bars
    rects1 = ax.bar(ind - width/2, vals, width)
    ax.set_ylabel('Count')
    ax.set_xticks(ind)
    ax.set_xticklabels(keys)
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    string = base64.b64encode(buf.read())
    return 'data:image/png;base64,' + urllib.parse.quote(string), string
This is the minimum code showing how the information moves to where the error occurs.
class ProgressReportAdmin(ReadOnlyAdmin):
    current_extra_context = None

    @csrf_protect_m
    def changelist_view(self, request, extra_context=None):
        plot = make_plot(data)
        self.current_extra_context = plot[1]

    def export(self, request):
        image = self.current_extra_context
        pdf = functions.LandscapeMaker(image, fname, rotate=True)
        pdf.save()
This is where the error occurs, in the scaleImage function
class LandscapeMaker(object):
    def __init__(self, image_path, filename, rotate=False):
        self.pdf_file = os.path.join('.', 'media', filename)
        self.logo_path = image_path
        self.story = [Spacer(0, 1*inch)]

    def save(self):
        fileObj = BytesIO()
        self.doc = SimpleDocTemplate(fileObj, pagesize=letter,
                                     leftMargin=1*inch)
        self.doc.build(self.story,
                       onFirstPage=self.create_pdf)

    def create_pdf(self, canvas, doc):
        logo = self.scaleImage(self.logo_path)

    def scaleImage(self, img_path, maxSize=None):
        # Error1 occurs on
        img = utils.ImageReader(img_path)
        img.fp.close()
        # Error2
        # image = BytesIO(img_path)
        # img = utils.ImageReader(image)
        # img.fp.close()
For Error1 I receive:
raise IOError('Cannot open resource "%s"' % name)
img = utils.ImageReader(img_path)
OSError: Cannot open resource "b'iVBORw0KGgoAAA..." etc.
For Error2 I receive:
OSError: cannot identify image file <_io.BytesIO object at 0x7f8e4057bc50>
cannot identify image file <_io.BytesIO object at 0x7f8e4057bc50>
fileName=<_io.BytesIO object at 0x7f8e4057bc50> identity=[ImageReader@0x7f8e43fd15c0]
I think you have to pass buf to ImageReader somehow.
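Concretely, a sketch of that (assuming image is the base64-encoded string that make_plot returns as its second value): decode it back to raw PNG bytes and hand ImageReader a file-like object.

import base64
from io import BytesIO
from reportlab.lib import utils

# ImageReader accepts file-like objects, not base64 strings.
img = utils.ImageReader(BytesIO(base64.b64decode(image)))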
I'm using this function to save and draw the figures I generate with matplotlib and it works perfectly for me.
From the io docs: seek(offset, whence=SEEK_SET) changes the stream position to the given offset; behaviour depends on the whence parameter, whose default is SEEK_SET (the start of the stream). After savefig, the position is at the end of the buffer, so a plain read() returns nothing until you seek(0); getvalue() is the exception, since it returns the whole buffer regardless of position.
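A tiny illustration of the position issue (a sketch, assuming fig is the figure being saved):

buf = BytesIO()
fig.savefig(buf, format='png')
buf.read()   # b'' -- the position is at the end after writing
buf.seek(0)  # rewind so the next read (e.g. by ImageReader) sees the data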
def save_and_draw(canvas, fig, x_img, y_img, width_img, height_img):
    # `canvas` is the reportlab canvas (stored as self.c when used inside a class)
    imgdata = BytesIO()
    fig.savefig(imgdata, format='png')
    imgdata.seek(0)
    imgdata = ImageReader(imgdata)
    canvas.drawImage(imgdata, x_img, y_img, width_img, height_img)
    plt.close(fig)