I want to get the size of the image from a buffer.
import io
from PIL import Image
class Scraper:
    """Load a page with Playwright and record the size of every image response.

    Fixes over the scraped original: ``asnyc`` -> ``async def``, the missing
    ``self`` parameters, and the un-awaited ``browser.new_page()`` (a coroutine
    in Playwright's async API).
    """

    async def _save_image(self, res):
        """Response handler: read the response body and extract the image size.

        Returns the (width, height) tuple so callers can use it directly.
        """
        buffer = await res.body()
        img = Image.open(io.BytesIO(buffer))
        img_w, img_h = img.size
        return img_w, img_h

    async def scrape(self):
        """Open the target page headlessly and hook image-size extraction."""
        playwright = await async_playwright().start()
        browser = await playwright.chromium.launch(headless=True, devtools=False)
        # new_page() is a coroutine on the async API — it must be awaited.
        page = await browser.new_page()
        # page.on accepts a coroutine callback; Playwright schedules it itself.
        page.on('response', self._save_image)
        await page.goto('https://www.example.com')
# NOTE(review): `key` and `id` are not defined anywhere in this snippet, and
# `asyncio` is not imported here — presumably both come from surrounding code
# not shown; the visible Scraper class also defines no __init__ taking two
# arguments. Confirm against the full source.
scraper = Scraper(key, id)
asyncio.run(scraper.scrape())
img = Image.open(io.BytesIO(buffer))
The code above is not inside an async function. I want to know the size of the images from the buffer asynchronously. How do I do this?
Related
So, I want the bot to send a welcome card with the user's profile picture & text saying " Welcome {user.name} ", but the text part isn't working. I have got no error in the console.
Here's my code:
from PIL import Image, ImageFilter, ImageFont, ImageDraw
from io import BytesIO
@client.event  # NOTE(review): the scrape showed '#client.event'; as a comment the
# handler is never registered with discord.py, which matches "no error in the
# console" — confirm the real source uses the @ decorator.
async def on_member_join(member):
    """Build and send a welcome card with the member's avatar and name.

    NOTE(review): only the display name is drawn; the question says the text
    should read "Welcome {user.name}" — confirm the intended string.
    """
    wc = Image.open("wc.jpg")
    asset = member.avatar_url_as(size=128)
    data = BytesIO(await asset.read())
    pfp = Image.open(data)
    draw = ImageDraw.Draw(wc)
    font = ImageFont.truetype("Littlecandy.ttf", 24)
    pfp = pfp.resize((474,382))
    # Third positional argument of draw.text is the fill color.
    draw.text((549,284), f"{member.display_name}", (171, 5, 171), font=font)
    wc.paste(pfp, (727,209))
    wc.save("wcm.jpg")
    await client.get_channel(850634788633182218).send(file = discord.File('wcm.jpg'))
So, I had not found the answer, but when I removed the RGB color tuple (171, 5, 171) and tested it again, it worked.
Here's my changed code:
from PIL import Image, ImageFilter, ImageFont, ImageDraw
from io import BytesIO
@client.event  # NOTE(review): the scrape showed '#client.comamnd()'; a member-join
# handler is registered with @client.event (it is a gateway event, not a
# command), which matches the author's report that this version worked.
async def on_member_join(member):
    """Send the reworked welcome card (avatar + member tag) to the channel."""
    wc2 = Image.open("wc2.jpg")
    asset = member.avatar_url_as(size=64)
    data = BytesIO(await asset.read())
    pfp = Image.open(data)
    draw = ImageDraw.Draw(wc2)
    font = ImageFont.truetype("BalsamiqSans-BoldItalic.ttf", 45)
    text = f"{member}"
    pfp = pfp.resize((211,181))
    wc2.paste(pfp, (30,28))
    draw.text((26,235),text,font=font,fill='orange')
    wc2.save("wcm.jpg")
    await client.get_channel(850634788633182218).send(file = discord.File('wcm.jpg'))
I currently am working on a program that pastes two images together with PIL, but PIL is weird, so I had to do a bunch of extra stuff so that I could use links. Anyways, now I can't use what PIL outputted because:
AttributeError: 'Image' object has no attribute 'getvalue'
here's the important chunk of my code:
async with aiohttp.ClientSession() as session:
async with session.get(avurl) as second_image:
image_bytes = await second_image.read()
with Image.open(BytesIO(image_bytes)).convert("RGB") as first_image:
output_buffer = BytesIO()
first_image.save(output_buffer, "png")
output_buffer.seek(0)
async with aiohttp.ClientSession() as session:
async with session.get("https://i.imgur.com/dNS0WJO.png") as second_image:
image_bytes = await second_image.read()
with Image.open(BytesIO(image_bytes)) as second_image:
output_buffer = BytesIO()
second_image.save(output_buffer, "png")
output_buffer.seek(0)
first_image.paste(second_image, (0, 0))
buf = io.BytesIO()
first_image.save(buf, "png")
first_image = first_image.getvalue()
Could anyone tell me what line of code I'm missing to fix this, or what I did incorrectly?
Image objects indeed do not have a getvalue method, it's BytesIO instances that do.
Here you should be calling buf.getvalue instead of first_image.getvalue.
# The buggy lines quoted from the question: the last line should read
# `first_image = buf.getvalue()` — getvalue() belongs to the BytesIO buffer,
# not to the PIL Image object, as explained above.
buf = io.BytesIO()
first_image.save(buf, "png")
first_image = first_image.getvalue()
Your code looks a bit like this: https://stackoverflow.com/a/33117447/7051394 ; but if you look at the last three lines of that snippet, imgByteArr is still a BytesIO, so imgByteArr.getvalue() is valid.
Here's a simpler and possibly faster reformulation of your code:
async with aiohttp.ClientSession() as session:
image1data = await session.get(avurl).read()
image2data = await session.get("https://i.imgur.com/dNS0WJO.png").read()
image1 = Image.open(BytesIO(image1data)).convert("RGB")
image2 = Image.open(BytesIO(image2data))
image1.paste(image2, (0, 0))
buf = BytesIO()
image1.save(buf, "png")
composite_image_data = buf.getvalue()
You'll end up with composite_image_data containing PNG data for the composited image.
My project uses socket.io to send/receive data.
I added aiohttp to help display the results on the browser.
import asyncio
from aiohttp import web
# The async_mode string must be exactly 'aiohttp' — the scraped original had
# stray markdown backticks inside the string literal, which python-socketio
# would reject as an unknown mode. (Requires `import socketio` earlier.)
sio = socketio.AsyncServer(async_mode='aiohttp')
app = web.Application()
sio.attach(app)
I followed
https://us-pycon-2019-tutorial.readthedocs.io/aiohttp_file_uploading.html
to upload an image but I cannot upload a video.
def gen1():
    """Read the source image from disk and return it as JPEG-encoded bytes.

    Raises FileNotFoundError if OpenCV cannot read/decode the file.
    """
    # NOTE(review): cv2.imread returns None when it cannot decode the file,
    # and plain OpenCV builds do not decode SVG — confirm this path actually
    # points at a raster image.
    image = cv2.imread("/home/duong/Pictures/Chess_Board.svg")
    if image is None:
        # Without this guard, cvtColor raises an opaque error on None input.
        raise FileNotFoundError("could not read/decode the source image")
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_pil = PIL.Image.fromarray(image)
    fp = io.BytesIO()
    image_pil.save(fp, format="JPEG")
    return fp.getvalue()
async def send1():
    """Serve the single JPEG frame produced by gen1() as an HTTP response."""
    print("11")
    body = gen1()
    return web.Response(body=body, content_type='image/jpeg')
How to display video via aiohttp on browsers?
To stream a video in aiohttp you may open a StreamResponse in response to the fetching of an `img` HTML node:
@routes.get('/video')  # NOTE(review): the scrape showed '#routes.get'; as a
# comment the route is never registered — the @ decorator is required.
async def video_feed(request):
    """Stream camera frames to the browser as an MJPEG multipart response."""
    response = web.StreamResponse()
    # multipart/x-mixed-replace makes the browser replace the <img> content
    # with each new part delimited by the 'frame' boundary.
    response.content_type = 'multipart/x-mixed-replace; boundary=frame'
    await response.prepare(request)
    for frame in frames('/dev/video0'):
        await response.write(frame)
    return response
and send your frames in the form of bytes:
def frames(path):
    """Yield MJPEG multipart chunks read from the camera at *path*.

    Raises RuntimeError if the camera cannot be opened. Fixes over the
    original: the read() success flag is checked (a failed read yields None,
    which would crash cv2.resize), and the camera is released when the
    generator is closed or the stream ends.
    """
    camera = cv2.VideoCapture(path)
    if not camera.isOpened():
        raise RuntimeError('Cannot open camera')
    try:
        while True:
            ok, img = camera.read()
            if not ok:
                break  # camera disconnected or stream ended
            img = cv2.resize(img, (480, 320))
            frame = cv2.imencode('.jpg', img)[1].tobytes()
            yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
    finally:
        camera.release()
This may be however network demanding as the bitrate required to send each frame individually is high. For real-time streaming with further compression you may want to use WebRTC implementations like aiortc.
I'm making a GET request to a DICOM server, which returns a Multipart/Related response with type=image/jpeg. I tried using the aiohttp library's multipart feature to parse it, but it didn't work. The file saved is corrupted.
Here is my code.
import asyncio
import aiohttp
# NOTE(review): the triple-quoted block below is commented-out earlier code and
# has no runtime effect. Inside it, `await response` awaits the ClientResponse
# object itself — presumably `await response.read()` was intended — and fetch()
# never returns a value, so asyncio.gather would collect Nones.
'''
async def fetch(url,session,header):
async with session.get(url,headers=header) as response:
await response
async def multiHit(urls,header):
tasks = []
async with aiohttp.ClientSession() as session:
for i,url in enumerate(urls):
tasks.append(fetch(url,session,header))
result = await asyncio.gather(*tasks)
return result
loop = asyncio.get_event_loop()
res = loop.run_until_complete(multiHit(["FRAME URL"],{"Accept":"multipart/related;type=image/jpeg"}))
print(res)
'''
async def xyz(loop):
    """Fetch a multipart/related DICOM frame and write each part to m.jpeg.

    The *loop* parameter is kept for interface compatibility but no longer
    forwarded — passing `loop=` to ClientSession is deprecated. Fixes over
    the original: the ClientSession itself is closed via `async with` (only
    the request context was closed before, leaking the session), and the
    unused `import base64` is gone.

    NOTE(review): every part is written to the same file, so only the last
    part survives — confirm that is intended.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url="FRAME URL",
                               headers={"Accept": "multipart/related;type=image/jpeg"}) as response:
            reader = aiohttp.MultipartReader.from_response(response)
            while True:
                part = await reader.next()
                if part is None:
                    break
                # decode=False keeps the raw payload; part.decode applies the
                # part's content/transfer encoding before writing.
                filedata = await part.read(decode=False)
                with open('m.jpeg', 'wb') as outFile:
                    outFile.write(part.decode(filedata))
    return 1

loop = asyncio.get_event_loop()
res = loop.run_until_complete(xyz(loop))
How do I parse the multipart/related response and save the images?
I figured out that I was parsing the multi-part response properly, but I had to use another library (library name: imagecodecs, method name: jpegsof3_decode) to decompress each individual part into an image. This gives a NumPy array of the image. Here is the updated code:
# Parsing loop from the working solution: iterate the multipart parts and
# decompress each payload.
reader = aiohttp.MultipartReader.from_response(response)
while True:
part = await reader.next()
if part is None:
break
data = await part.read()
# jpegsof3_decode comes from the third-party `imagecodecs` package and
# returns the decompressed frame as a NumPy array.
imageDecompressed = jpegsof3_decode(data)
Further, the NumPy array can be converted into an image using the cv2 library:
success, encoded_image = cv2.imencode('.png',imageDecompressed)
Byte version of the converted image can be obtained this way
imageInBytes = encoded_image.tobytes()
I'm using opencv to capture a video from my webcam. Every 5 seconds, I'm processing a single frame / an image which can take some seconds. So far everything works. But whenever a frame is processed the entire video is freezing for a couple of seconds (Until the process is finished). I'm trying to get rid of it by using Threading. Here is what I did so far:
Inside the while loop which is capturing the video:
# Capture loop from the question: grab a frame and, once `period` has elapsed,
# send it off for face detection.
while True:
ret, image = cap.read()
if next_time <= datetime.now():
content_type = 'image/jpeg'
headers = {'content-type': content_type}
_, img_encoded = cv2.imencode('.jpg', image)
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(self.async_faces(img_encoded, headers))
# NOTE(review): run_until_complete blocks this thread until async_faces
# finishes, so the video still freezes — this defeats the intended
# asynchrony described in the question.
loop.run_until_complete(future)
next_time += period
...
cv2.imshow('img', image)
Here are the methods:
async def async_faces(self, img, headers):
    """Run the blocking face_detection call in a worker thread.

    Args are forwarded unchanged to self.face_detection. Returns None, like
    the original. Improvements: the single-task gather plus discard loop is
    replaced by awaiting run_in_executor directly, and get_running_loop()
    replaces the deprecated-in-coroutines get_event_loop().
    """
    loop = asyncio.get_running_loop()
    with ThreadPoolExecutor(max_workers=10) as executor:
        # Awaiting the executor future keeps the event loop free while the
        # HTTP POST inside face_detection runs on the worker thread.
        await loop.run_in_executor(executor, self.face_detection, img, headers)
# Blocking worker: POST one JPEG-encoded frame to the recognition API at
# self.url. Intended to run inside the ThreadPoolExecutor used by async_faces
# so the event-loop thread stays free.
def face_detection(self, img, headers):
try:
# NOTE(review): ndarray.tostring() is deprecated in NumPy in favor of
# tobytes() — confirm the installed version still provides it.
response = requests.post(self.url, data=img.tostring(), headers=headers)
...
except Exception as e:
...
...
But unfortunately it's not working.
EDIT 1
In the following I add what the whole thing is supposed to do.
Originally, the function looked like:
import requests
import cv2
from datetime import datetime, timedelta
# Main webcam loop: show a fullscreen live feed and, every self.time_period
# seconds, POST the current frame to self.url for emotion analysis, then
# overlay the returned emotions at the returned face locations.
def face_recognition(self):
# Start camera
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
emotional_states = []
font = cv2.FONT_HERSHEY_SIMPLEX
period = timedelta(seconds=self.time_period)
next_time = datetime.now() + period
cv2.namedWindow('img', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('img', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
while True:
ret, image = cap.read()
if next_time <= datetime.now():
# Prepare headers for http request
content_type = 'image/jpeg'
headers = {'content-type': content_type}
_, img_encoded = cv2.imencode('.jpg', image)
# NOTE(review): requests.post blocks this loop until the API responds —
# this is the freeze described in the question.
try:
# Send http request with image and receive response
response = requests.post(self.url, data=img_encoded.tostring(), headers=headers)
emotional_states = response.json().get("emotions")
face_locations = response.json().get("locations")
except Exception as e:
# On any failure, clear the overlays and keep the feed running.
emotional_states = []
face_locations = []
print(e)
next_time += period
for i in range(0, len(emotional_states)):
emotion = emotional_states[i]
face_location = face_locations[i]
cv2.putText(image, emotion, (int(face_location[0]), int(face_location[1])),
font, 0.8, (0, 255, 0), 2, cv2.LINE_AA)
cv2.imshow('img', image)
k = cv2.waitKey(1) & 0xff
# Esc (27) quits; 'a' resizes the window.
if k == 27:
cv2.destroyAllWindows()
cap.release()
break
if k == ord('a'):
cv2.resizeWindow('img', 700,700)
I use the above method to film myself. This film is shown live on my screen. Further, every 5 seconds one frame is sent to an API where the image is processed in such a way that the emotion of the person in the image is returned. This emotion is displayed on my screen, next to myself. The problem is that the live video freezes for a couple of seconds until the emotion is returned from the API.
My OS is Ubuntu.
EDIT 2
The API is running locally. I created a Flask App and the following method is receiving the request:
from flask import Flask, request, Response
import numpy as np
import cv2
import json
@app.route('/api', methods=['POST'])  # NOTE(review): the scrape showed
# '#app.route'; as a comment the route is never registered — the @ decorator
# is required for Flask to serve this endpoint.
def facial_emotion_recognition():
    """Decode the POSTed JPEG frame, analyze it, and return emotions as JSON."""
    # np.fromstring is deprecated for binary input; frombuffer is the
    # supported zero-copy replacement.
    nparr = np.frombuffer(request.data, np.uint8)
    # Decode image
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    # Analyse the image
    emotional_state, face_locations = emotionDetection.analyze_facial_emotions(img)
    json_dump = json.dumps({'emotions': emotional_state, 'locations': face_locations}, cls=NumpyEncoder)
    return Response(json_dump, mimetype='application/json')