I am trying to convert an image to JSON and POST it to an MLflow model's REST API. My code is below. I get the error "cannot reshape array of size 535500 into shape (1,4096)". Can you please help me? Thanks in advance.
import json
import cv2
import requests
import base64
import numpy as np
from PIL import Image
data = np.asarray(Image.open('Dataset/test2/dog_PNG50348.png').convert('LA'))
data = data.reshape((1, 64*64))
columns = [f"col_{c}" for c in range(0, data[0].shape[0])]
dct = {"columns": columns, "data": [data[0].tolist()]}
print(json.dumps(dct, indent=2) + "\n")
#print(data)
headers = {'Content-Type': 'application/json'}
request_uri = 'http://127.0.0.1:5000/invocations'
if __name__ == '__main__':
    try:
        response = requests.post(request_uri, data=json.dumps(dct, indent=2) + "\n", headers=headers)
        print(response.content)
        print('done!!!')
    except Exception as ex:
        raise ex
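The error itself points at the cause: the decoded image has 535,500 values, so it is neither 64x64 nor single-channel (convert('LA') yields two channels, luminance plus alpha). A minimal sketch of one possible fix, assuming the served model really expects a flat 64*64 grayscale vector:
from PIL import Image
import numpy as np

# Convert to a single 'L' channel and resize to 64x64 so the flattened
# array has exactly 64*64 = 4096 values (the model's input shape is an assumption)
img = Image.open('Dataset/test2/dog_PNG50348.png').convert('L').resize((64, 64))
data = np.asarray(img).reshape((1, 64 * 64))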
I have been trying to follow a simple tutorial on getting Sentinel-2 images for a series of polygons I have. For some reason, no matter what I do, I keep running into the same error (detailed below).
from sentinelsat import SentinelAPI, read_geojson, geojson_to_wkt
import geopandas as gpd
import folium
import rasterio as rio
from rasterio.plot import show
from rasterio.mask import mask
import matplotlib.pyplot as plt
from pyproj import Proj, transform
import pandas as pd
import os
from datetime import date
import sentinelhub
user = 'xxxxx'
password = 'xxxxx'
url = 'https://scihub.copernicus.eu/dhus'
api = SentinelAPI(user, password, url)
validation = gpd.read_file('EarthData/tutakoke_permafrost_validation/Tutakoke_permafrost_validation.shp')
plateau_transects = gpd.read_file('EarthData/tutakoke_permafrost_plateau_transects/Tutakoke_Permafrost_Plateau_Transects.shp')
validation = validation.set_crs(epsg=32604, inplace=True, allow_override=True)
validation['imdate']='01-01-2019'
validation['imdate'] = pd.to_datetime(validation['imdate'])
validation['geometry2'] = validation.geometry.buffer(2, cap_style=3)
footprint=validation['geometry2'][1]
products = api.query(footprint,
                     date=('20200109', '20200510'),
                     platformname='Sentinel-2',
                     processinglevel='Level-2A',
                     cloudcoverpercentage=(0, 20))
The error I keep getting is:
SentinelAPIError: HTTP status 200 OK: API response not valid. JSON decoding failed.
Ah, it turned out that my footprint was not in the expected lat/lon format!
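A minimal sketch of the fix, assuming the shapefile really is in UTM (EPSG:32604): make the buffered column the active geometry and reproject it to WGS84 before building the footprint, since SciHub expects the WKT in lon/lat.
# Reproject the buffered geometry to lat/lon (EPSG:4326) and use its WKT
validation_ll = validation.set_geometry('geometry2').to_crs(epsg=4326)
footprint = validation_ll.geometry[1].wkt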
I have successfully trained a model on my own dataset using Keras YOLOv3 (GitHub project link)
and I am getting good predictions.
I would like to deploy this model on the web using Flask so that it works with a video stream or with IP cameras.
I have seen many tutorials that explain how to do this, but I have not found exactly what I am looking for.
How can I get started?
You can use flask-restful to design a simple REST API.
You can use OpenCV's VideoCapture to grab the video stream and read frames.
import numpy as np
import cv2
# Open the video stream; replace 'URL' with your stream URL or camera index
vcap = cv2.VideoCapture('URL')
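A minimal read loop (sketch) to pull frames from the capture until the stream ends:
while True:
    ret, frame = vcap.read()  # ret is False once no frame is available
    if not ret:
        break
    # hand `frame` to the client code below to encode and POST
vcap.release()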
The client takes an image/frame, encodes it with base64, adds other details such as height and width, and makes a request.
import numpy as np
import base64
import zlib
import requests
import time
t1 = time.time()
for _ in range(1000):  # 1000 consecutive requests
    frame = np.random.randint(0, 256, (416, 416, 3), dtype=np.uint8)  # dummy RGB image
    # replace frame with your image
    # optionally compress: data = zlib.compress(frame.tobytes())
    data = frame.tobytes()
    data_send = base64.b64encode(data)
    # to reverse on the other side:
    # data2 = base64.b64decode(data_send)
    # data2 = zlib.decompress(data2)  # only if compressed
    # fdata = np.frombuffer(data2, dtype=np.uint8)
    # make a POST request
    r = requests.post("http://127.0.0.1:5000/predict",
                      json={'imgb64': data_send.decode(), 'w': 416, 'h': 416})
    # print the response here
t2 = time.time()
print(t2 - t1)
Your server will load the darknet model, and when it receives a POST request it will simply return the model output.
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
import numpy as np
import base64
# compression
import zlib

# load your keras model here, e.g.
# model = load_model('model.h5')

app = Flask(__name__)
api = Api(app)

parser = reqparse.RequestParser()
parser.add_argument('imgb64', location='json', help='type error')
parser.add_argument('w', type=int, location='json', help='type error')
parser.add_argument('h', type=int, location='json', help='type error')

class Predict(Resource):
    def post(self):
        request.get_json(force=True)
        data = parser.parse_args()
        if data['imgb64'] == "":
            return {
                'data': '',
                'message': 'No file found',
                'status': 'error'
            }
        img = data['imgb64']
        w = data['w']
        h = data['h']
        data2 = base64.b64decode(img.encode())
        # data2 = zlib.decompress(data2)  # only if the client compressed it
        # numpy image arrays are (height, width, channels)
        fdata = np.frombuffer(data2, dtype=np.uint8).reshape(h, w, -1)
        # do model inference here
        if img:
            return {
                'mean': float(np.mean(fdata)),  # cast numpy types so they serialize
                'channel': int(fdata.shape[-1]),
                'message': 'darknet processed',
                'status': 'success'
            }
        return {
            'data': '',
            'message': 'Something went wrong',
            'status': 'error'
        }

api.add_resource(Predict, '/predict')

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5000, threaded=True)
In the # do model inference here part, just use your detect/predict function.
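For example, a hypothetical Keras inference step (the model file, input size, and normalization here are assumptions, not part of the original answer):
# assumes `model = load_model('model.h5')` ran at startup
x = fdata.astype('float32')[None, ...] / 255.0  # add batch dimension, normalize
preds = model.predict(x)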
If you want to use native darknet, see https://github.com/zabir-nabil/tf-model-server4-yolov3
If you want to use gRPC instead of REST, see https://github.com/zabir-nabil/simple-gRPC
import requests
import cv2

frame = cv2.imread('C:\\Users\\aaa\\Downloads\\abc.jpg')
url = 'https://app.nanonets.com/api/v2/ObjectDetection/Model/4729a79f-ab19-4c1b-8fe9'
data = {'file': open('C:\\Users\\aaa\\Downloads\\abc.jpg', 'rb')}
response = requests.post(url, auth=requests.auth.HTTPBasicAuth('S5zsN-yFZJxRH9tMwsaHUCxJg3dZaDWj', ''), files=data)
print(type(response))
print(response)
I uploaded an image for object detection and got a response like this:
{
  "message": "Success",
  "result": [
    {
      "message": "Success",
      "input": "abc.jpg",
      "prediction": [
        {"label": "car", "xmin": 411, "ymin": 332, "xmax": 585, "ymax": 462, "score": 0.99097943},
        {"label": "car", "xmin": 496, "ymin": 170, "xmax": 592, "ymax": 248, "score": 0.96399206},
        {"label": "car", "xmin": 223, "ymin": 147, "xmax": 294, "ymax": 202, "score": 0.9383388},
        {"label": "car", "xmin": 164, "ymin": 130, "xmax": 230, "ymax": 175, "score": 0.8968652},
        {"label": "car", "xmin": 448, "ymin": 489, "xmax": 623, "ymax": 540, "score": 0.8311123}
      ],
      "page": 0,
      "request_file_id": "5a8549f1-fb2c-487a-83b5-234608b3168b",
      "filepath": "uploadedfiles/4729a79f-ab19-fac0fb807e6d/PredictionImages/53207.jpeg"
    }
  ]
}
I want to draw a box on the image using the returned coordinates.
import requests
import json
from PIL import Image, ImageDraw

url = 'https://app.nanonets.com/api/v2/ObjectDetection/Model/4729a79f-ab19-4c1b-8fe9'
# use a raw string (or double backslashes) so '\U' in the path is not treated as an escape
data = {'file': open(r'C:\Users\aaa\Downloads\abc.jpg', 'rb')}
response = requests.post(url, auth=requests.auth.HTTPBasicAuth('S5zsN-yFZJxRH9tMwsaHUCxJg3dZaDWj', ''), files=data)
predictions = json.loads(response.text)['result'][0]['prediction']

img = Image.open(r'C:\Users\aaa\Downloads\abc.jpg')
img1 = ImageDraw.Draw(img)
for prediction in predictions:
    shape = [(prediction['xmin'], prediction['ymin']), (prediction['xmax'], prediction['ymax'])]
    # outline only, so the detected object stays visible
    img1.rectangle(shape, outline='red')
img.show()
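If you want to keep the annotated image rather than just display it, save it to a file (the output path here is only an example):
img.save(r'C:\Users\aaa\Downloads\abc_boxed.jpg')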
I have a Django project that receives an image, processes it, and returns a response. I am writing a script to test my API, but the bytes the client sends are not the same as the bytes the server receives.
Client code:
# client.py
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import cv2
img = cv2.imread(image_file)
data = {'image': img.tobytes(), 'shape': img.shape}
data = urlencode(data).encode("utf-8")
req = Request(service_url, data)
response = urlopen(req)
print(response.read().decode('utf-8'))
Views code:
# service/app/views.py
import ast
import numpy as np
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def process_image(request):
    if request.method == 'POST':
        # Converts string to tuple
        shape = ast.literal_eval(request.POST.get('shape'))
        img_bytes = request.POST.get('image')
        # Reconstruct the image
        img = np.fromstring(img_bytes, dtype=np.uint8).reshape(shape)
        # Process image
        return JsonResponse({'result': 'Hello'})
When I run the client code I get ValueError: total size of new array must be unchanged. I did the following checks with an 8x8 RGB image:
# client.py
>> print(img.shape)
(8, 8, 3)
>> print(img.dtype)
uint8
>> print(len(img.tobytes()))
192
# service/app/views.py
>> print(shape)
(8, 8, 3)
>> print(len(img_bytes))
187
The shape field is OK, but the image field has a different size. Since the image is small, I printed the bytes on the client and on the server, and they do not match. I think this is an encoding problem.
I want to send the image as bytes because I think it is a compact way to send this kind of data. If anyone knows a better approach to sending an image via HTTP, let me know.
Thanks!
Inspired by John Morris's comment, I found the answer to my question in the "Numpy Array to base64 and back to Numpy Array" post. If anyone has the same question, here is the solution:
Client code:
# client.py
import base64
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import cv2
img = cv2.imread(image_file)
img_b64 = base64.b64encode(img)
data = {'image': img_b64, 'shape': img.shape}
data = urlencode(data).encode("utf-8")
req = Request(service_url, data)
response = urlopen(req)
print(response.read().decode('utf-8'))
Views code:
# service/app/views.py
import ast
import base64
import numpy as np
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def process_image(request):
    if request.method == 'POST':
        shape = ast.literal_eval(request.POST.get('shape'))
        buffer = base64.b64decode(request.POST.get('image'))
        # Reconstruct the image
        img = np.frombuffer(buffer, dtype=np.uint8).reshape(shape)
        # Process image
        return JsonResponse({'result': 'Hello'})
Thank you all!
I am using requests to get an image from a remote URL. Since the images will always be 16x16, I want to convert them to base64 so that I can embed them later in an HTML img tag.
import requests
import base64
response = requests.get(url).content
print(response)
b = base64.b64encode(response)
src = "data:image/png;base64," + b
The output for response is:
response = b'GIF89a\x80\x00\x80\x00\xc4\x1f\x00\xff\xff\xff\x00\x00\x00\xff\x00\x00\xff\x88\x88"""\xffff\...
The HTML part is:
<img src="{{src}}"/>
But the image is not displayed.
How can I properly base-64 encode the response?
I think it's just
import base64
import requests
response = requests.get(url)
uri = ("data:" +
response.headers['Content-Type'] + ";" +
"base64," + base64.b64encode(response.content))
Assuming the Content-Type header is set. Note that in Python 3, b64encode returns bytes, so the concatenation needs a .decode(), as in the next answer.
This worked for me:
import base64
import requests
response = requests.get(url)
uri = ("data:" +
response.headers['Content-Type'] + ";" +
"base64," + base64.b64encode(response.content).decode("utf-8"))
You may use the base64 package.
import requests
import base64
response = requests.get(url).content
print(response)
b64response = base64.b64encode(response)
print(b64response)
Here's my code to send/receive images over HTTP requests, encoded with base64.
Send Request:
import base64
from io import BytesIO

import cv2
from PIL import Image

# Read image (image_path is your input file)
image_data = cv2.imread(image_path)
# Convert numpy array (BGR) to PIL image (RGB)
pil_detection_img = Image.fromarray(cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB))
# Convert PIL image to bytes via an in-memory buffer
buffered_detection = BytesIO()
# Save buffered bytes as PNG
pil_detection_img.save(buffered_detection, format='PNG')
# Base64-encode the bytes (result: bytes)
base64_detection = base64.b64encode(buffered_detection.getvalue())
# Decode these bytes to text (result: utf-8 string)
base64_detection = base64_detection.decode('utf-8')

data = {
    "cam_id": "10415",
    "detection_image": base64_detection,
}
Receive request:
content = request.json
# the key must match what the client sent ('detection_image' above)
encoded_image = content['detection_image']
decoded_image = base64.b64decode(encoded_image)
# write the decoded PNG bytes to disk; a context manager closes the file
with open('image_name.png', 'wb') as out_image:
    out_image.write(decoded_image)
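If you need the image back as a numpy array rather than a file, a sketch using cv2.imdecode (this assumes the client sent PNG-encoded bytes, as above):
import cv2
import numpy as np

# decode the in-memory PNG bytes straight into a BGR numpy array
img_array = cv2.imdecode(np.frombuffer(decoded_image, dtype=np.uint8), cv2.IMREAD_COLOR)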