I have a rule set: if a pixel's colour is in the list of colours for this cell, delete (clear) that cell, like so:
image = PILImage.open('revamp' + img_url)
img = image
random_name = random_filename(path="revamp/media/colorfest/")
img.save(random_name, "PNG")
pixels = image.load()
for pos in area_id:
    pixel = pixels[pos[0], pos[1]]
    pixel = [pixel[0], pixel[1], pixel[2]]
    if pixel in colors:
        img.putpixel([pos[0], pos[1]], (0, 0, 0, 0))
img.save(random_name, "PNG")
response['status'] = 'ok'
Here's what I get: blacked-out cells instead of transparent ones.
Original image
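A likely cause of the black cells is that the source image is in RGB (or palette) mode, so the alpha component of (0, 0, 0, 0) is discarded and the pixel is simply written as black. A minimal sketch of one possible fix, reusing the names from the snippet above (img_url, area_id, colors, random_name) and converting to RGBA first so the zero alpha is kept:

from PIL import Image as PILImage

# sketch: open in RGBA so a zero alpha actually yields transparency
image = PILImage.open('revamp' + img_url).convert('RGBA')
pixels = image.load()
for pos in area_id:
    r, g, b, a = pixels[pos[0], pos[1]]
    if [r, g, b] in colors:
        pixels[pos[0], pos[1]] = (0, 0, 0, 0)   # fully transparent
image.save(random_name, "PNG")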
I have a 7-minute video. From this video I extract RGB data from a set ROI. I'm using MediaPipe FaceMesh to track the face and set the ROI.
However, this evaluation takes several minutes. What can I do to speed this up? Or what am I doing wrong?
Could it be that, because of the FaceMesh initialisation, it has to re-identify the face in each frame, and this is the reason for the long duration? How else should I solve this?
import cv2
import mediapipe as mp
import numpy as np

# setup assumed from the usage below (not shown in the original snippet)
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
b_plot, g_plot, r_plot, t_plot = [], [], [], []
frame_count = 0
time_count = 0

cap = cv2.VideoCapture("Video.mp4")
red, image = cap.read()
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = cap.get(cv2.CAP_PROP_FPS)
# print total number of frames
print("total number of Frames: ", total)

face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)

while red:
    red, image = cap.read()
    if image is None:
        continue
    height, width, _ = image.shape
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
    image.flags.writeable = False
    processed_img = face_mesh.process(image)

    # Draw the face mesh annotations on the image.
    image.flags.writeable = True
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)  # convert the RGB image back to BGR

    if processed_img.multi_face_landmarks:
        for face_landmarks in processed_img.multi_face_landmarks:
            # collect the 468 landmark positions in pixel coordinates
            landmark_points = []
            for i in range(0, 468):
                x = int(face_landmarks.landmark[i].x * width)
                y = int(face_landmarks.landmark[i].y * height)
                landmark_points.append([x, y])

            forehead = np.array((
                landmark_points[9], landmark_points[107], landmark_points[66], landmark_points[105],
                landmark_points[104], landmark_points[103],
                landmark_points[67], landmark_points[109], landmark_points[10],
                landmark_points[338], landmark_points[297], landmark_points[332],
                landmark_points[333], landmark_points[334], landmark_points[296],
                landmark_points[336]))
            left_cheek = np.array((landmark_points[266], landmark_points[426], landmark_points[436],
                                   landmark_points[416], landmark_points[376],
                                   landmark_points[352], landmark_points[347], landmark_points[330]))
            right_cheek = np.array((landmark_points[36], landmark_points[206], landmark_points[216],
                                    landmark_points[192], landmark_points[147],
                                    landmark_points[123], landmark_points[117], landmark_points[118],
                                    landmark_points[101]))
            forehead_New = np.array((landmark_points[109], landmark_points[10], landmark_points[338],
                                     landmark_points[337], landmark_points[336], landmark_points[285],
                                     landmark_points[417], landmark_points[168], landmark_points[193],
                                     landmark_points[55], landmark_points[107], landmark_points[108]))
            rightCheek_New = np.array((landmark_points[355], landmark_points[329], landmark_points[348],
                                       landmark_points[347], landmark_points[346], landmark_points[345],
                                       landmark_points[352], landmark_points[280], landmark_points[266],
                                       landmark_points[371]))
            leftCheek_New = np.array((landmark_points[116], landmark_points[117], landmark_points[118],
                                      landmark_points[119], landmark_points[100], landmark_points[126],
                                      landmark_points[142], landmark_points[36], landmark_points[50],
                                      landmark_points[123]))

            # outline the ROIs on the frame
            cv2.polylines(image, [forehead_New], True, (0, 255, 255), 2)
            cv2.polylines(image, [leftCheek_New], True, (0, 255, 255), 2)
            cv2.polylines(image, [rightCheek_New], True, (0, 255, 255), 2)

            # build a binary mask of the three ROIs and keep only those pixels
            mask = np.zeros((height, width), dtype=np.uint8)
            cv2.fillPoly(mask, [forehead_New, leftCheek_New, rightCheek_New], (255))
            crop_img = cv2.bitwise_and(image, image, mask=mask)
            b, g, r = cv2.split(crop_img)

            # gather the non-black ROI pixels and drop the yellow outline pixels
            indices_list = np.where(np.any(crop_img != [0, 0, 0], axis=-1))
            roi_pixel_img = crop_img[indices_list]
            outline = (roi_pixel_img == [0, 255, 255]).all(axis=-1)
            roi_pixel_img = roi_pixel_img[~outline]

            # per-frame channel means (crop_img is BGR here)
            b_plot.append(roi_pixel_img[:, 0].mean())
            g_plot.append(roi_pixel_img[:, 1].mean())  # -//- ... green channel
            r_plot.append(roi_pixel_img[:, 2].mean())  # -//- ... red channel

            frame_count += 1
            print("Frame_progress:", frame_count, "of: ", total)

            t_plot.append(round(time_count))
            time_count += (1000 / fps)

            # Draw the face mesh on the image
            mp_drawing.draw_landmarks(
                image=image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=drawing_spec,
                connection_drawing_spec=drawing_spec)

# stack the per-frame channel means into one array
mean_rgb = np.vstack((r_plot, g_plot, b_plot)).T
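FaceMesh with static_image_mode=False (the default) already tracks the face between frames rather than re-detecting it from scratch, so the detector itself is probably not the main cost. Much of the per-frame work here is the pixel-level masking and filtering; cv2.mean with the ROI mask computes essentially the same per-channel averages in a single call. A minimal sketch of that replacement, assuming the mask built with cv2.fillPoly above and that the means are taken before the yellow outlines are drawn, so they no longer have to be filtered out:

# inside the per-frame loop, after cv2.fillPoly(mask, ...):
mean_b, mean_g, mean_r, _ = cv2.mean(image, mask=mask)   # image is BGR at this point
b_plot.append(mean_b)
g_plot.append(mean_g)
r_plot.append(mean_r)
# draw the polylines / landmark mesh afterwards (or only on a display copy),
# so the outline colour never enters the averages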
Here is an example of the output I want to generate. I'm able to create an image with one colour, but I have no idea how to use two colours, or how to colour only certain parts of the image.
I solved it this way: I created two images with two different colours, and then pasted them into another image.
from PIL import Image

width = 400
height = 300

# First IMG
img = Image.new(mode="RGB", size=(width, height), color=(209, 123, 193))

# Second IMG
img2 = Image.new(mode="RGB", size=(width, height + 400), color=(255, 255, 255))

# IMG + IMG2 stacked into one image
img3 = Image.new('RGB', (img.width, img.height + img2.height))
img3.paste(img, (0, 0))
img3.paste(img2, (0, img.height))
I got my result.
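An alternative to pasting two separate images is to start from a single canvas and colour only the regions you need with ImageDraw; a minimal sketch with assumed sizes, reusing the colours from above:

from PIL import Image, ImageDraw

width, height = 400, 700
img = Image.new("RGB", (width, height), (255, 255, 255))   # white background
draw = ImageDraw.Draw(img)
draw.rectangle((0, 0, width, 300), fill=(209, 123, 193))   # colour only the top band
img.save("two_colors.png")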
I have this image and I want to detect the text in it.
Can you help me with the Python code?
This is how far I have got:
from PIL import Image

img = Image.open("temp_temp.png")

# resize to make the text clearer
m = 6.5
img = img.resize((int(img.size[0] * m), int(img.size[1] * m))).convert('RGBA')
# img.show()

pixdata = img.load()
limit = 75
for y in range(img.size[1]):
    print("a", y)  # progress output
    for x in range(img.size[0]):
        if pixdata[x, y][0] < limit:
            # map dark colours to grey
            pixdata[x, y] = (170, 170, 170, 255)
        else:
            # map light colours to white
            pixdata[x, y] = (255, 255, 255, 255)

img = img.convert('L')
img.show()
img.save("temp_temp1.png")

# Now correcting the Radial Distortion
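The preprocessing above only cleans up the bitmap; the actual text detection still needs an OCR engine. A minimal sketch using pytesseract (an assumption, since the question does not name an OCR library; the Tesseract binary must be installed separately):

import pytesseract
from PIL import Image

img = Image.open("temp_temp1.png")        # the preprocessed image saved above
text = pytesseract.image_to_string(img)   # run Tesseract OCR on it
print(text)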
I'm trying to create a French flag without using most of Python's built-in functions, but for some reason my loop won't work... can anyone tell me why?
from PIL import Image

def french():
    flag = Image.new('RGB', (300, 200))
    pixels = flag.getdata()
    pixs = list(pixels)
    r = 0
    w = 100
    b = 200
    while True:
        for x in range(r, (r+100)):
            pixs[x] = (255, 0, 0)
        flag.putdata(pixs)
        r=+300
        for x in range(w, (w+100)):
            pixs[x] = (255, 255, 255)
        flag.putdata(pixs)
        w=+300
        for x in range(b, (b+100)):
            pixs[x] = (0, 0, 255)
        flag.putdata(pixs)
        b=+300
        return (r>60000 or w>60000 or b>60000)
    flag.save('frenchflag.png')

french()
Instead of
# on first pass through the loop, exit the function and return False
return (r>60000 or w>60000 or b>60000)
I think you mean
# when we are at the end of the pixel buffer, leave the loop
if r >= 60000:
    break
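For reference, here is a sketch of the original loop with only that change applied; it also uses += in place of =+ (which merely assigns the value +300 instead of incrementing) and calls putdata once at the end:

from PIL import Image

def french():
    flag = Image.new('RGB', (300, 200))
    pixs = list(flag.getdata())
    r, w, b = 0, 100, 200            # start of the red, white and blue segment of the current row
    while True:
        for x in range(r, r + 100):
            pixs[x] = (255, 0, 0)
        r += 300
        for x in range(w, w + 100):
            pixs[x] = (255, 255, 255)
        w += 300
        for x in range(b, b + 100):
            pixs[x] = (0, 0, 255)
        b += 300
        if r >= 60000:               # past the last of the 300*200 pixels: leave the loop
            break
    flag.putdata(pixs)
    flag.save('frenchflag.png')

french()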
Pixel-poking is a horribly inefficient way to create an image; use PIL.ImageDraw instead:
from PIL import Image, ImageDraw

RED   = (255,   0,   0)
WHITE = (255, 255, 255)
BLUE  = (  0,   0, 255)

def french(fname, width=300, height=200):
    flag = Image.new("RGB", (width, height))
    p, q = width // 3, 2 * width // 3   # bar edge coordinates
    draw = ImageDraw.Draw(flag)
    draw.rectangle((0, 0, p, height), RED)
    draw.rectangle((p, 0, q, height), WHITE)
    draw.rectangle((q, 0, width, height), BLUE)
    flag.save(fname)

french("frenchflag.png")
which produces
I have 2 images:
a PNG (99x97) with a white, rotated frame; the rest is fully transparent
a JPG, which is my generated thumbnail (80x80)
Now I want to put the thumbnail into the frame so it looks like some kind of painting. What should I do?
EDIT:
I forgot to add that the picture must be under the frame.
Frame image
I have a script, but it shows only the frame. There is no picture in it :/
import Image, ImageDraw
img_size = (99,97)
im = Image.open('logo.jpg')
picture = im.crop((0,0,80,80))
frame = Image.open('thumb-frame.png')
picture = picture.convert('RGBA')
background = Image.new('RGBA', img_size, (255, 255, 255, 0))
background.paste(picture, (10,9))
background.paste(frame, (0,0))
background.save('logocopy.png', 'PNG')
EDIT:
Problem solved. I had to pass the frame itself as the alpha mask to .paste():
import Image
im = Image.open('logo.jpg')
picture = im.crop((0,0,80,80))
picture = picture.convert('RGBA')
frame = Image.open('thumb-frame.png')
background = Image.new('RGBA', frame.size, (255, 255, 255, 0))
background.paste(picture, (10,9))
background.paste(frame, (0,0), frame)
background.save('logocopy.png', 'PNG')
Here you go. This takes the original picture and pastes the transparent frame image on top of it. Both pictures should be 100x100, but you can add any resizing you need.
from PIL import Image
frame = Image.open('frame.png')
img = Image.open('image.jpg')
img_dest = img.copy().convert('RGBA')
img_dest.paste(frame, (0, 0, 100, 100), frame)
img_dest = img_dest.convert('RGB') # Optional, to remove transparency info
img_dest.save('output.png')
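If the two images are not the same size, the frame can be scaled to match the thumbnail before pasting; a small sketch of that, assuming the same file names as above:

from PIL import Image

frame = Image.open('frame.png')
img = Image.open('image.jpg')
frame = frame.resize(img.size)              # scale the frame to the thumbnail size
img_dest = img.copy().convert('RGBA')
img_dest.paste(frame, (0, 0), frame)        # the frame doubles as its own alpha mask
img_dest.convert('RGB').save('output.png')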