Draw circle with PIL (old ones don't work) - python

I am trying to draw a circle with PIL, but I get an attribute error.
My current code draws a square.
The part of the code that draws:
youtube = Image.open(f"cache/thumb{videoid}.png")
image1 = changeImageSize(1280, 720, youtube)
image2 = image1.convert("RGBA")
background = image2.filter(filter=ImageFilter.BoxBlur(30))
enhancer = ImageEnhance.Brightness(background)
background = enhancer.enhance(0.6)
Xcenter = youtube.width / 2
Ycenter = youtube.height / 2
x1 = Xcenter - 250
y1 = Ycenter - 250
x2 = Xcenter + 250
y2 = Ycenter + 250
logo = youtube.crop((x1, y1, x2, y2))
logo.thumbnail((520, 520), Image.ANTIALIAS)  # note: ANTIALIAS is Image.LANCZOS in newer Pillow
logo = ImageOps.expand(logo, border=15, fill="pink")
background.paste(logo, (50, 100))
draw = ImageDraw.Draw(background)
My whole code:
Code

You can use either ImageDraw.arc() or ImageDraw.ellipse() to draw a circle; the AttributeError most likely comes from calling a method that ImageDraw does not have.
from PIL import Image, ImageDraw
# Image size
W, H = 100, 100
# Bounding box points
X0 = int(W / 4)
X1 = int(X0 * 3)
Y0 = int(H / 4)
Y1 = int(Y0 * 3)
# Bounding box
bbox = [X0, Y0, X1, Y1]
# Set up
im = Image.new("RGB", (W, H))
draw = ImageDraw.Draw(im)
# Draw a circle
draw.arc(bbox, 0, 360)
# Show the image
im.show()
Or:
# Draw a circle
draw.ellipse(bbox)
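If you want a filled disc rather than just an outline, ellipse() also accepts fill, outline and width arguments; a minimal sketch (the colors here are arbitrary examples):
# Draw a filled circle with a 3-pixel outline
draw.ellipse(bbox, fill="pink", outline="white", width=3)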

Related

How to calculate the positions of new points after rotating the image around an arbitrary axis

Having a dataset of aligned objects, I would like to augment it by applying random rotations with the axis at the center of the object. Below is the rotation representation (left: original, right: image rotated around the point (xc, yc)).
For the rotation, I have used the following logic:
import cv2
import random
import numpy as np
image_source = cv2.imread('sample.png')
height, width = image_source.shape[:2]
random_angle = random.uniform(90, 90)
yolo_annotation_sample = get_annotation() # this function retrieves yolo annotation
label_id, xc, yc, object_width, object_height = yolo_annotation_sample # e.g. 4, 0.0189, 0.25, 0.0146, 0.00146
center_x = width * xc
center_y = height * yc
left = center_x - (width * object_width) / 2
top = center_y - (height * object_height) / 2
right = left + width * object_width
bottom = top + height * object_height
M = cv2.getRotationMatrix2D((cx, cy), random_angle, 1.0)
image_rotated = cv2.warpAffine(image_source, M, (width, height))
# logic for calculating new point position (doesn't work)
x1_y1 = np.asarray([[left, top]])
x1_y1_new = np.dot(x1_y1, M)
x2_y2 = np.asarray([[right, top]])
x2_y2_new = np.dot(x2_y2, M)
x3_y3 = np.asarray([[right, bottom]])
x3_y3_new = np.dot(x3_y3, M)
x4_y4 = np.asarray([[left, bottom]])
x4_y4_new = np.dot(x4_y4, M)
Does anyone know how to recalculate the point(s) after rotating around the arbitrary point as shown above?
Use cv2.transform(points, M), with the points passed as an array of shape (4, 1, 2). Full code:
import cv2
import random
import numpy as np
image_source = cv2.imread('sample.png')
height, width = image_source.shape[:2]
random_angle = 40 #random.uniform(90, 90)
yolo_annotation_sample = (4, 0.6189, 0.25, 0.246, 0.0846) # this function retrieves yolo annotation
label_id, xc, yc, object_width, object_height = yolo_annotation_sample
center_x = width * xc
center_y = height * yc
left = center_x - (width * object_width) / 2
top = center_y - (height * object_height) / 2
right = left + width * object_width
bottom = top + height * object_height
cx, cy = width / 2, height / 2
M = cv2.getRotationMatrix2D((cx, cy), random_angle, 1.0)
image_rotated = cv2.warpAffine(image_source, M, (width, height))
# recalculate the point positions after rotation
bbox_points = [[left, top], [right, top], [right, bottom], [left, bottom]]
bbox_points = np.array(bbox_points).reshape((-1,1,2))
rotated_points = cv2.transform(bbox_points, M) # what you need
cv2.polylines(image_source,[bbox_points.astype(int)],True,(255,100,0), 10)
cv2.polylines(image_rotated,[rotated_points.astype(int)],True,(255,100,0), 10)
cv2.imshow("orig", image_source)
cv2.imshow("rotated", image_rotated)
cv2.waitKey()
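Under the hood, cv2.transform applies the 2x3 affine matrix to every point; for a single point this is the same as appending a 1 and multiplying, as in this minimal sketch:
# manual equivalent for one point: append 1, then multiply by the 2x3 matrix M
p = np.array([left, top, 1.0])
p_rotated = M @ p  # the rotated (x, y)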
Look at https://en.wikipedia.org/wiki/Transformation_matrix
Once I tried to calculate it myself:
import math
import numpy as np

class rotm:
    '''set up rotation matrix'''
    def __init__(self, axis, angle, unit="radians"):
        # originally scipy.zeros((4,4), scipy.float128); numpy works the same way
        self.m = np.zeros((4, 4), np.float64)
        if unit == "radians":
            angler = angle
        else:
            angler = math.radians(angle)
        if axis == 'x':
            self.m[0][0] = 1.0
            self.m[1][1] = math.cos(angler)
            self.m[2][2] = self.m[1][1]
            self.m[3][3] = 1.0
            self.m[1][2] = -math.sin(angler)
            self.m[2][1] = -self.m[1][2]
        elif axis == 'y':
            self.m[0][0] = math.cos(angler)
            self.m[1][1] = 1.0
            self.m[2][2] = self.m[0][0]
            self.m[3][3] = 1.0
            self.m[0][2] = math.sin(angler)
            self.m[2][0] = -self.m[0][2]
        elif axis == 'z':
            self.m[0][0] = math.cos(angler)
            self.m[1][1] = self.m[0][0]
            self.m[2][2] = 1.0
            self.m[3][3] = 1.0
            self.m[0][1] = -math.sin(angler)
            self.m[1][0] = -self.m[0][1]

    def fPrint(self):
        '''auxiliary function: print transformation matrix'''
        print(self.m)
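For example, a small usage sketch of the class above:
# print the 4x4 matrix for a 45-degree rotation about the z axis
rotm('z', 45, unit="degrees").fPrint()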

Trying to resize images for MatchTemplate and getting: error: (-215:Assertion failed)

I'm learning Python by building a bot.
My code grabs all the images from a specified directory and draws rectangles around any it finds in a real-time display. The display is created with ImageGrab in a while loop, and then OpenCV and matchTemplate perform the actual object detection.
This all works.
I'm trying to speed things up by reducing the size of, and grayscaling, both the images used for detection and the ImageGrab capture I'm matching templates against.
This is my code so far:
import glob
from time import time
import numpy as np
import cv2 as cv
import pygetwindow
from PIL import ImageGrab

image_list = []
# Get list of all files in current directory
directory = glob.glob(r"C:\*.png")
# Add images to array
for img in directory:
    ship_img = cv.imread(img, cv.IMREAD_REDUCED_COLOR_2)
    image_list.append(ship_img)
# debug the loop rate
loop_time = time()
# Video Loop
while True:
    # Grab PSS Window and find size
    window = pygetwindow.getWindowsWithTitle('Window')[0]
    x1 = window.left
    y1 = window.top
    height = window.height
    width = window.width
    x2 = x1 + width
    y2 = y1 + height
    # Actual Video Loop, cropped down to the specific window,
    # resized to 1/2 size, and converted to BGR for OpenCV
    haystack_img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
    (width, height) = (haystack_img.width // 2, haystack_img.height // 2)
    haystack_img_resized = haystack_img.resize((width, height))
    haystack_img_np = np.array(haystack_img_resized)
    haystack = cv.cvtColor(haystack_img_np, cv.COLOR_BGR2RGB)
    # debug the loop rate
    print('FPS {}'.format(1 / (time() - loop_time)))
    loop_time = time()
    # Object Detection
    for ships in image_list:
        needle_img = ships
        result = cv.matchTemplate(haystack, needle_img, cv.TM_CCOEFF_NORMED)
        # Get the best match position
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
        # Define top left and bottom right
        (H, W) = ships.shape[:2]
        top_left = max_loc
        bottom_right = (top_left[0] + W, top_left[1] + H)
        # Highlight Detection
        threshold = 0.9
        if max_val >= threshold:
            cv.rectangle(haystack, top_left, bottom_right, 255, 2)
    cv.imshow("Screen", haystack)
    if cv.waitKey(1) == ord('q'):
        cv.destroyAllWindows()
        break
I've managed to capture, crop and reduce my template window by 50% with this code section:
# Grab Window and find size
window = pygetwindow.getWindowsWithTitle('Window')[0]
x1 = window.left
y1 = window.top
height = window.height
width = window.width
x2 = x1 + width
y2 = y1 + height
# Actual Video Loop, cropped down to the specific window,
# resized to 1/2 size, and converted to BGR for OpenCV
haystack_img = ImageGrab.grab(bbox=(x1, y1, x2, y2))
(width, height) = (haystack_img.width // 2, haystack_img.height // 2)
haystack_img_resized = haystack_img.resize((width, height))
haystack_img_np = np.array(haystack_img_resized)
haystack = cv.cvtColor(haystack_img_np, cv.COLOR_BGR2RGB)
But when I try to change
haystack = cv.cvtColor(haystack_img_np, cv.COLOR_BGR2RGB)
to
haystack = cv.cvtColor(haystack_img_np, cv.IMREAD_GRAYSCALE)
and
ship_img = cv.imread(img, cv.IMREAD_REDUCED_COLOR_2)
to
ship_img = cv.imread(img, cv.IMREAD_REDUCED_GRAYSCALE_2)
I get the error:
cv2.error: OpenCV(4.5.1) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-wvn_it83\opencv\modules\imgproc\src\templmatch.cpp:1163: error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 in function 'cv::matchTemplate'
My dtypes are both the same and I haven't changed the shape of my array so I'm not sure why this would be causing an issue with matching.
The documentation says IMREAD_REDUCED_GRAYSCALE_2 downscales an image by 1/2 and I'm resizing my template by 1/2. I'm not sure where I'm going wrong.
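A likely cause, for what it's worth: cv.IMREAD_GRAYSCALE is a flag for imread(), not a cvtColor() conversion code, so cvtColor(haystack_img_np, cv.IMREAD_GRAYSCALE) does not actually produce a single-channel image, and matchTemplate then sees a haystack whose type differs from the grayscale needles. A minimal sketch of a conversion that would match the IMREAD_REDUCED_GRAYSCALE_2 templates (assuming the rest of the loop is unchanged):
# ImageGrab returns RGB, so convert to single-channel grayscale for matching
haystack = cv.cvtColor(haystack_img_np, cv.COLOR_RGB2GRAY)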

Zoom Into Image With OpenCV

I have the following picture as an example: 529 x 550 px (100%).
As a target I would like to have the image zoomed to about 150%, but it should still be 529 x 550 px.
I was able to write the code using PIL, but I want to have it with cv2. Can someone help me, please?
from PIL import Image
import cv2 as cv

def zoom_at(img, x, y, zoom):
    w, h = img.size
    zoom2 = zoom * 2
    img = img.crop((x - w / zoom2, y - h / zoom2,
                    x + w / zoom2, y + h / zoom2))
    return img.resize((w, h), Image.LANCZOS)

img = Image.open("image.png")
img = zoom_at(img, 264.5, 275, 1.5)
img.save('image_zoomed.png')
@Ofer Sadan:
import cv2 as cv

def zoom(img, zoom_factor=1.5):
    return cv.resize(img, None, fx=zoom_factor, fy=zoom_factor)

img = cv.imread('original.png')
# Original: 529 x 550
height, width = img.shape[:2]
zoomed = zoom(img, 1.5)
# Zoomed: 794 x 825
cropped = zoomed[0:550, 0:529]  # Wrong area
# Now I want to crop the middle of the new image as a variable.
cv.imwrite('zoomed.png', zoomed)
cv.imwrite('cropped.png', cropped)
There you go:
cv:
import cv2 as cv

def zoom_at(img, zoom=1, angle=0, coord=None):
    cy, cx = [i / 2 for i in img.shape[:-1]] if coord is None else coord[::-1]
    rot_mat = cv.getRotationMatrix2D((cx, cy), angle, zoom)
    result = cv.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv.INTER_LINEAR)
    return result
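A small usage sketch, reusing the file name and coordinates from the question:
# zoom 1.5x around the point (264.5, 275) and save the result
img = cv.imread('original.png')
cv.imwrite('zoomed_at.png', zoom_at(img, zoom=1.5, coord=(264.5, 275)))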
Layman's manual:
import cv2 as cv

def zoom_at(img, zoom, coord=None):
    """
    Simple image zooming without boundary checking.
    Centered at "coord", if given, else the image center.

    img: numpy.ndarray of shape (h, w, :)
    zoom: float
    coord: (float, float)
    """
    # Translate to zoomed coordinates
    h, w, _ = [zoom * i for i in img.shape]
    if coord is None:
        cx, cy = w / 2, h / 2
    else:
        cx, cy = [zoom * c for c in coord]
    img = cv.resize(img, (0, 0), fx=zoom, fy=zoom)
    img = img[int(round(cy - h / zoom * .5)):int(round(cy + h / zoom * .5)),
              int(round(cx - w / zoom * .5)):int(round(cx + w / zoom * .5)),
              :]
    return img
img = cv.imread('x3Lkg.png')
cv.imwrite('x3Lkg_zoomed.png', zoom_at(img, 1.5, coord=(264.5, 275)) )
I have a little snippet I used a while ago that I can't currently test, so let me know if it actually works or not.
import cv2 as cv

def zoom(img, zoom_factor=2):
    return cv.resize(img, None, fx=zoom_factor, fy=zoom_factor)
And you can crop before the zoom or after it as you wish:
img = cv.imread(img_path)
cropped = img[200:300, 150:250]
zoomed = zoom(img, 3)
zoomed_and_cropped = zoom(cropped, 3)
For anyone who does not want to do the math manually, this works for me.
import cv2

def zoom_center(img, zoom_factor=1.5):
    y_size = img.shape[0]
    x_size = img.shape[1]
    # define new boundaries
    x1 = int(0.5 * x_size * (1 - 1 / zoom_factor))
    x2 = int(x_size - 0.5 * x_size * (1 - 1 / zoom_factor))
    y1 = int(0.5 * y_size * (1 - 1 / zoom_factor))
    y2 = int(y_size - 0.5 * y_size * (1 - 1 / zoom_factor))
    # first crop image then scale
    img_cropped = img[y1:y2, x1:x2]
    return cv2.resize(img_cropped, None, fx=zoom_factor, fy=zoom_factor)

# read original
img = cv2.imread('original.png')
# call our function
img_zoomed_and_cropped = zoom_center(img)
# write zoomed and cropped version
cv2.imwrite('zoomed_and_cropped.png', img_zoomed_and_cropped)
Notice that I first cropped and then rescaled. It is more efficient and you will notice it when dealing with a live video feed.
For putting a specific point in the input image at a specific point in the output image, with a precise scale factor, you would want to use cv.warpAffine.
This function requires you to build a transformation matrix. That is easy.
import numpy as np
import cv2 as cv

def translate(tx=0, ty=0):
    T = np.eye(3)
    T[0:2, 2] = [tx, ty]
    return T

def scale(s=1, sx=1, sy=1):
    T = np.diag([s * sx, s * sy, 1])
    return T

def rotate(degrees):
    T = np.eye(3)
    # just involves some sin() and cos()
    T[0:2] = cv.getRotationMatrix2D(center=(0, 0), angle=-degrees, scale=1.0)
    return T

im = cv.imread("x3Lkg.png")
(ih, iw) = im.shape[:2]  # input height, input width

# parameters
scale_factor = 10
angle_degrees = 15
(ow, oh) = (529, 550)  # output size
(icx, icy) = (459, 352)  # zoom onto that pixel in input
(ocx, ocy) = ((ow - 1) / 2, (oh - 1) / 2)  # put there in output (it's the exact center)

# the transformation, read from right to left
H = translate(+ocx, +ocy) @ rotate(degrees=angle_degrees) @ scale(scale_factor) @ translate(-icx, -icy)

# assume that H is affine, not a full homography
assert np.allclose(H[2], [0, 0, 1])
M = H[0:2]

# produce the picture
# use INTER_LINEAR, INTER_CUBIC, INTER_LANCZOS4 for smooth interpolation
# use INTER_AREA for scale factors much below 1
out = cv.warpAffine(im, dsize=(ow, oh), M=M, flags=cv.INTER_NEAREST)
# imshow(out)

Additive color with Tkinter

I'm trying to reproduce additive color with Tkinter.
My function:
def synthese(red, green, blue):
    win2 = Tk()
    win2.title("ADDITIVE COLOR")
    win2.geometry("500x500")
    win2.resizable(0, 0)
    hred = "#%02x%02x%02x" % (red, 0, 0)  # RGB to hexadecimal
    hgreen = "#%02x%02x%02x" % (0, green, 0)
    hblue = "#%02x%02x%02x" % (0, 0, blue)
    r = 50
    Width = 450
    Height = 450
    win3 = Canvas(win2, width=Width, height=Height, bg='white')
    win3.pack(padx=5, pady=5)
    win3.create_oval(10, 150, 300, 440, outline=hred, fill=hred)
    win3.create_oval(150, 150, 440, 440, outline=hblue, fill=hblue)
    win3.create_oval(75, 10, 375, 300, outline=hgreen, fill=hgreen)
    win2.mainloop()
What I get:
And what I would like:
Is it possible to merge the colors, or do I need to find the collision zones?
You can use ImageChops to add images.
So you can do something like this:
from Tkinter import Tk, Canvas, Label  # Python 2; on Python 3 use: from tkinter import Tk, Canvas, Label
import ImageDraw, ImageChops, Image, ImageTk  # old-style PIL; with Pillow use: from PIL import ...
image1 = Image.new("RGBA", (500, 500), color=0)
image2 = Image.new("RGBA", (500, 500), color=0)
image3 = Image.new("RGBA", (500, 500), color=0)
draw1 = ImageDraw.Draw(image1)
draw2 = ImageDraw.Draw(image2)
draw3 = ImageDraw.Draw(image3)
draw1.ellipse([10, 150, 300, 440], (128,0,0))
draw2.ellipse([150, 150, 440, 440], (0,0,128))
draw3.ellipse([75, 10, 375, 300], (0,128,0))
out = ImageChops.add(image1,image2,0.5)
out = ImageChops.add(out,image3,0.5)
win2 = Tk()
photo = ImageTk.PhotoImage(out)
label = Label(win2, image=photo)
label.pack()
win2.mainloop()
output:
Here's a way to draw additive RGB circles using Numpy. It converts the Numpy data to a Tkinter PhotoImage object using PIL (Pillow), and displays the results in a Tkinter Label. I use a black background because we're doing additive color mixing.
import numpy as np
from PIL import Image, ImageTk
import tkinter as tk

width, height = 400, 360

# Make RGB colors
red, grn, blu = np.eye(3, dtype=np.uint8) * 255

class GUI:
    def __init__(self, width, height):
        self.root = root = tk.Tk()
        root.title('Circles')
        root.geometry('%dx%d' % (width, height))
        self.img_label = tk.Label(self.root)
        self.img_label.pack(fill='both', expand=True)

gui = GUI(width, height)

# Increase the scale for smoother circles
scale = 4
width *= scale
height *= scale
screen = np.zeros((height, width, 3), dtype=np.uint8)

def show(fname=None):
    img = Image.fromarray(screen, 'RGB')
    img = img.resize((width // scale, height // scale), resample=Image.BILINEAR)
    gui.photo = ImageTk.PhotoImage(image=img)
    gui.img_label.config(image=gui.photo)
    gui.root.update()
    if fname is not None:
        img.save(fname)

def disc(radius):
    diameter = 2 * radius
    yy, xx = np.mgrid[:diameter, :diameter] - radius
    c = xx * xx + yy * yy < radius * radius
    return c.reshape(diameter, diameter, 1)

def get_region(cx, cy, radius):
    ylo = cy - radius
    yhi = cy + radius
    xlo = cx - radius
    xhi = cx + radius
    return screen[ylo:yhi, xlo:xhi]

radius = 120 * scale
circle = disc(radius)

cx = width // 2
cy = 130 * scale
region = get_region(cx, cy, radius)
region |= circle * red
show()

cy += 97 * scale
cx -= 56 * scale
region = get_region(cx, cy, radius)
region |= circle * grn
show()

cx += 112 * scale
region = get_region(cx, cy, radius)
region |= circle * blu
show('rgb.png')

gui.root.mainloop()
output
Using PIL you can create three grayscale layers, draw a circle on each, and merge them into an RGB image; that gives the expected circles, but on a black background.
If you use inverted layers you get a white background instead, but then the circles come out in the wrong colors.
With PIL you can even display the result or save it to a file.
from PIL import Image, ImageDraw

def synthese(red=255, green=255, blue=255):
    background = 0  # black
    # layers in greyscale
    layer_R = Image.new('L', (450, 450), background)
    layer_G = Image.new('L', (450, 450), background)
    layer_B = Image.new('L', (450, 450), background)
    # draw circle on red layer
    draw_R = ImageDraw.Draw(layer_R)
    draw_R.ellipse((10, 150, 300, 440), red)
    # draw circle on green layer
    draw_G = ImageDraw.Draw(layer_G)
    draw_G.ellipse((150, 150, 440, 440), green)
    # draw circle on blue layer
    draw_B = ImageDraw.Draw(layer_B)
    draw_B.ellipse((75, 10, 375, 300), blue)
    #layer_R.show()
    #layer_G.show()
    #layer_B.show()
    #layer_R.save('layer_r.png')
    #layer_G.save('layer_g.png')
    #layer_B.save('layer_b.png')
    # create RGB image using greyscale layers
    image_RGB = Image.merge('RGB', (layer_R, layer_G, layer_B))
    # show it
    image_RGB.show()
    #image_RGB.save('rgb.png')

synthese(255, 255, 255)

How to rotate an image by 180 degrees when the image angle is not fixed in Python?

From the given image I want to bring the flat surface to 180 degrees, i.e., to the bottom. I have tried the following code:
import math
import cv2
import numpy as np
import imutils

def get_line(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 50)
    for rho, theta in lines[0]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        # cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
        # print(x1, x2, y1, y2)
        cx = int((x1 + x2) / 2)
        cy = int((y1 + y2) / 2)
        # print("abc", cx, cy)
        deg = math.degrees(math.atan2(cx, cy))
        print(deg)
        # cv2.circle(img, (cx, cy), 5, (0, 255, 0), -1)
    cv2.imshow('frame', img)
    cv2.waitKey(0)
    return cx, cy

def rotation():
    img = cv2.imread("pr43.jpg")
    cx, cy = get_line(img)
    height, width, _ = img.shape
    # print(width, height)
    xpercent = int(abs((cx / width) * 100))
    ypercent = int(abs((cy / height) * 100))
    # print("XPercent", xpercent)
    # print("YPercent", ypercent)
    # print("xpercent, ypercent", xpercent, ypercent)
    if xpercent > 50:
        print("point in right plane and vertical")
        # Todo: rotate clockwise by 90 degrees
        r = imutils.rotate_bound(img, 90)
        cv2.imshow("rotated", r)
        cv2.waitKey(0)
    elif xpercent > 0 and 0 < ypercent < 50:
        print("point in upper left plane and vertical")
        # Todo: rotate anti-clockwise by 90 degrees
        r = imutils.rotate_bound(img, -90)
        cv2.imshow("rotated", r)
        cv2.waitKey(0)
    elif xpercent <= 0:
        print("point in upper left plane and horizontal")
        # Todo: rotate by 180 degrees
        r = imutils.rotate_bound(img, 180)
        cv2.imshow("rotated", r)
    elif xpercent < 50 and ypercent > 50:
        print("No rotation needed")
        r = img
    cv2.imwrite("Output_Image.jpg", r)
Below is the output of my code.
With this code I'm getting about 50% correct output, but the surface does not end up at 180 degrees. I can't keep the image angle static, since the angle will vary, but I still have to bring the flat surface to the bottom.
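One possible refinement of the code above (a sketch, not a tested solution): instead of bucketing into 90-degree steps, rotate by the angle of the detected Hough line itself, so the line ends up horizontal; whether a further 180-degree flip is needed depends on which side of the line the object lies. The helper below is illustrative only:
# rotate so the detected line (Hough normal angle theta, in radians) becomes horizontal
def rotate_line_horizontal(img, theta):
    line_deg = math.degrees(theta) - 90  # the line lies 90 degrees from its normal
    return imutils.rotate_bound(img, -line_deg)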
