I am pretty new to Python and want to do the following: I want to divide the following image into 8 pie segments:
I want it to look something like this (I made this in PowerPoint):
The background should be black, and the edge of the figure should have a unique color, as well as each pie segment.
EDIT: I have written code that divides the whole image into 8 segments:
from PIL import Image, ImageDraw

im = Image.open('C:/Users/20191881/Documents/OGO Beeldanalyse/Python/asymmetrie/rotation.png')
fill = 255
draw = ImageDraw.Draw(im)

# The two diagonals, then the vertical and horizontal center lines
draw.line((0, 0) + im.size, fill)
draw.line((0, im.size[1], im.size[0], 0), fill)
draw.line((0.5*im.size[0], 0, 0.5*im.size[0], im.size[1]), fill)
draw.line((0, 0.5*im.size[1], im.size[0], 0.5*im.size[1]), fill)
del draw

im.show()
The output gives:
The only thing left to do is to find a way to give each black segment inside the border a unique color, and also give each of the white edge segments a unique color.
Your code divides the image into eight parts, that's correct, but they are not eight angularly equal pie segments around the image center like the ones in your sketch.
Here would be my solution, only using Pillow and the math module:
import math
from PIL import Image, ImageDraw

def segment_color(i_color, n_colors):
    # Generate a distinct bluish color for segment i_color of n_colors
    r = int((192 - 64) / (n_colors - 1) * i_color + 64)
    g = int((224 - 128) / (n_colors - 1) * i_color + 128)
    b = 255
    return (r, g, b)

# Load image; generate ImageDraw
im = Image.open('path_to/vgdrD.png').convert('RGB')
draw = ImageDraw.Draw(im)

# Number of pie segments (must be an even number)
n = 8

# Replace (all-white) edge with defined edge color
edge_color = (255, 128, 0)
pixels = im.load()
for y in range(im.height):
    for x in range(im.width):
        if pixels[x, y] == (255, 255, 255):
            pixels[x, y] = edge_color

# Draw lines with defined line color
line_color = (0, 255, 0)
d = min(im.width, im.height) - 10
center = (int(im.width/2), int(im.height/2))
for i in range(int(n/2)):
    angle = 360 / n * i
    x1 = math.cos(angle/180*math.pi) * d/2 + center[0]
    y1 = math.sin(angle/180*math.pi) * d/2 + center[1]
    x2 = math.cos((180+angle)/180*math.pi) * d/2 + center[0]
    y2 = math.sin((180+angle)/180*math.pi) * d/2 + center[1]
    draw.line([(x1, y1), (x2, y2)], line_color)

# Fill pie segments with defined segment colors; each seed point lies
# halfway between two dividing lines, close to the center
for i in range(n):
    angle = 360 / n * i + 360 / n / 2
    x = int(math.cos(angle/180*math.pi) * 20 + center[0])
    y = int(math.sin(angle/180*math.pi) * 20 + center[1])
    ImageDraw.floodfill(im, (x, y), segment_color(i, n))

im.save(str(n) + '_pie.png')
For n = 8 pie segments, the following result is produced:
The first step is to replace all white pixels in the original image with the desired edge color. Of course, the assumption here is that there are no other white pixels in the image. Also, this might be better done using NumPy and vectorized code, but I wanted to keep the solution Pillow-only.
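For reference, a minimal sketch of that NumPy variant, assuming the same all-white edge (path and edge color as in the code above):

import numpy as np
from PIL import Image

im = Image.open('path_to/vgdrD.png').convert('RGB')
arr = np.array(im)

# Boolean mask of all pure white pixels, replaced in a single step
white = np.all(arr == 255, axis=2)
arr[white] = (255, 128, 0)

im = Image.fromarray(arr)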
The next step is to draw the (green) lines. Here, I calculate the proper coordinates of the lines' start and end points using sin and cos.
The last step is to flood fill the pie segments' areas, cf. ImageDraw.floodfill. For that, I calculate the seed points the same way as before, but add an angular shift to hit a point well inside each pie segment.
As you can see, n is variable in my solution (n must be even):
Of course, there are limitations regarding the angular resolution, mostly due to the small image.
Hope that helps!
EDIT: Here's a modified version to also allow for individually colored edges.
import math
from PIL import Image, ImageDraw

def segment_color(i_color, n_colors):
    r = int((192 - 64) / (n_colors - 1) * i_color + 64)
    g = int((224 - 128) / (n_colors - 1) * i_color + 128)
    b = 255
    return (r, g, b)

def edge_color(i_color, n_colors):
    r = 255
    g = 255 - int((224 - 32) / (n_colors - 1) * i_color + 32)
    b = 255 - int((192 - 16) / (n_colors - 1) * i_color + 16)
    return (r, g, b)

# Load image; generate ImageDraw
im = Image.open('images/vgdrD.png').convert('RGB')
draw = ImageDraw.Draw(im)
center = (int(im.width/2), int(im.height/2))

# Number of pie segments (must be an even number)
n = 8

# Replace (all-white) edge with defined edge colors, segment by segment
max_len = im.width + im.height
im_pix = im.load()
for i in range(n):
    # Binary mask covering the i-th angular sector
    mask = Image.new('L', im.size, 0)
    mask_draw = ImageDraw.Draw(mask)
    angle = 360 / n * i
    x1 = math.cos(angle/180*math.pi) * max_len + center[0]
    y1 = math.sin(angle/180*math.pi) * max_len + center[1]
    angle = 360 / n * (i+1)
    x2 = math.cos(angle/180*math.pi) * max_len + center[0]
    y2 = math.sin(angle/180*math.pi) * max_len + center[1]
    mask_draw.polygon([center, (x1, y1), (x2, y2)], 255)
    mask_pix = mask.load()
    for y in range(im.height):
        for x in range(im.width):
            if im_pix[x, y] == (255, 255, 255) and mask_pix[x, y] == 255:
                im_pix[x, y] = edge_color(i, n)

# Draw lines with defined line color
line_color = (0, 255, 0)
d = min(im.width, im.height) - 10
for i in range(int(n/2)):
    angle = 360 / n * i
    x1 = math.cos(angle/180*math.pi) * d/2 + center[0]
    y1 = math.sin(angle/180*math.pi) * d/2 + center[1]
    x2 = math.cos((180+angle)/180*math.pi) * d/2 + center[0]
    y2 = math.sin((180+angle)/180*math.pi) * d/2 + center[1]
    draw.line([(x1, y1), (x2, y2)], line_color)

# Fill pie segments with defined segment colors
for i in range(n):
    angle = 360 / n * i + 360 / n / 2
    x = int(math.cos(angle/180*math.pi) * 20 + center[0])
    y = int(math.sin(angle/180*math.pi) * 20 + center[1])
    ImageDraw.floodfill(im, (x, y), segment_color(i, n))

im.save(str(n) + '_pie.png')
A binary mask is created for each pie segment, and only the white pixels within that mask are replaced with the segment's edge color.
Using NumPy still seems favorable, but I was curious to do it in Pillow only.
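Again for reference, a minimal sketch of what that masked replacement could look like vectorized with NumPy, reusing the edge_color helper from above and assuming the same input image:

import math
import numpy as np
from PIL import Image, ImageDraw

im = Image.open('images/vgdrD.png').convert('RGB')
arr = np.array(im)
center = (im.width // 2, im.height // 2)
max_len = im.width + im.height
n = 8

# Mask of all pure white pixels, computed once
white = np.all(arr == 255, axis=2)
for i in range(n):
    # Same triangular sector mask as above, drawn with Pillow
    mask = Image.new('L', im.size, 0)
    pts = [(math.cos(a/180*math.pi) * max_len + center[0],
            math.sin(a/180*math.pi) * max_len + center[1])
           for a in (360 / n * i, 360 / n * (i + 1))]
    ImageDraw.Draw(mask).polygon([center] + pts, 255)
    # Replace the white pixels inside this sector in one vectorized step
    arr[white & (np.array(mask) == 255)] = edge_color(i, n)

im = Image.fromarray(arr)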
How do I set the Mandelbrot Set background to cyan? I don't understand the code.
Here's the code:
# Python code for Mandelbrot Fractal

# Import necessary libraries
from PIL import Image
from numpy import array
import colorsys

# setting the width of the output image as 1024
WIDTH = 1024

# a function to return a tuple of colors
# as integer value of rgb
def rgb_conv(i):
    color = 255 * array(colorsys.hsv_to_rgb(i / 255.0, 1.0, 0.5))
    return tuple(color.astype(int))

# function defining a mandelbrot
def mandelbrot(x, y):
    c0 = complex(x, y)
    c = 0
    for i in range(1, 1000):
        if abs(c) > 2:
            return rgb_conv(i)
        c = c * c + c0
    return (0, 0, 0)

# creating the new image in RGB mode
img = Image.new('RGB', (WIDTH, int(WIDTH / 2)))
pixels = img.load()

for x in range(img.size[0]):
    # displaying the progress as percentage
    print("%.2f %%" % (x / WIDTH * 100.0))
    for y in range(img.size[1]):
        pixels[x, y] = mandelbrot((x - (0.75 * WIDTH)) / (WIDTH / 4),
                                  (y - (WIDTH / 4)) / (WIDTH / 4))

# to display the created fractal after
# completing the given number of iterations
img.show()
I would like to set the background color to cyan.
Try changing the mandelbrot function to:
def mandelbrot(x, y):
    c0 = complex(x, y)
    c = 0
    for i in range(1, 1000):
        if abs(c) > 2:
            return (0, 0, 0)
        c = c * c + c0
    return (0, 255, 255)
The final return statement is the background color: it is returned for the points that never escape, i.e. the large black area of the original image.
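If by "background" you instead mean the area outside the set (the points that escape), swapping the other return statement should do it; a sketch:

def mandelbrot(x, y):
    c0 = complex(x, y)
    c = 0
    for i in range(1, 1000):
        if abs(c) > 2:
            return (0, 255, 255)  # escaping (outside) points in cyan
        c = c * c + c0
    return (0, 0, 0)              # the set itself stays black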
What I'm trying to do is generate equally sized diagonal lines in PIL. My approach is to first draw equally sized horizontal lines in a square image and then rotate it 45 degrees. But after rotating, the lines aren't big enough to cover the image: there shouldn't be any black, and the lines should still be equal. It should also work with more colors.
code:
from PIL import Image, ImageDraw
import random

im = Image.new('RGB', (1000, 1000), (255, 255, 255))
draw = ImageDraw.Draw(im)

colors = [(255, 0, 255), (0, 0, 255)]
random.shuffle(colors)
length = len(colors)
amount = 1000 / length

x1, y1 = 0, 0
x2, y2 = 1000, 0
for color in colors:
    shape = [(x1, y1 + amount // 2), (x2, y2 + amount // 2)]
    draw.line(shape, fill=color, width=int(amount))
    y1 += amount
    y2 += amount

im.save("pre_diagonal.png")
colorimage = Image.open('pre_diagonal.png')
out = colorimage.rotate(45)
You can do it by first generating an image of vertical lines like I showed you in my answer to your other question, rotating that by 45°, and then cropping it. To avoid having areas of black, you need to generate an initial image that is large enough for the cropping.
In this case, that's simply a square image whose sides have the length of the hypotenuse (diagonal) of the final target image's size, i.e. round(hypot(width, height)).
Graphically, here's what I mean:
At any rate, here's the code that does it:
from math import hypot
import random

from PIL import Image, ImageDraw

IMG_WIDTH, IMG_HEIGHT = 1000, 1000
# Side of the square working image: the diagonal of the target image.
DIAG = round(hypot(IMG_WIDTH, IMG_HEIGHT))

img = Image.new('RGB', (DIAG, DIAG), (255, 255, 255))
draw = ImageDraw.Draw(img)

colors = [(255, 0, 255), (0, 0, 255)]
random.shuffle(colors)
length = len(colors)  # Number of lines.
line_width = DIAG / length  # Width of each.

difx = line_width / 2
x1, y1 = difx, 0
x2, y2 = difx, DIAG
for color in colors:
    endpoints = (x1, y1), (x2, y2)
    draw.line(endpoints, fill=color, width=round(line_width))
    x1 += line_width
    x2 += line_width

img = img.rotate(-45, resample=Image.Resampling.BICUBIC)

# Crop the rotated image back down to the target size, centered.
difx, dify = (DIAG - IMG_WIDTH) // 2, (DIAG - IMG_HEIGHT) // 2
img = img.crop((difx, dify, difx + IMG_WIDTH, dify + IMG_HEIGHT))
img.save('diagonal.png')
#img.show()
Here's the resulting image:
I'm using a RealSense D455 camera and trying to detect objects and calculate their width. I found some code that does this for the height, but when I try to change it, the calculations are wrong. For height it's usually pretty accurate, only showing small errors. But with the changed code it reports an object that's ~40 cm wide as 1 to 1.5 meters.
if score > 0.8 and class_ == 1:  # 1 for human
    left = box[1] * W
    top = box[0] * H
    right = box[3] * W
    bottom = box[2] * H
    width = right - left
    height = bottom - top
    bbox = (int(left), int(top), int(width), int(height))
    heightB = bbox[1] + bbox[3]
    p1 = (int(bbox[0]), int(bbox[1]))
    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
    # draw box
    cv2.rectangle(color_image, p1, p2, (255, 0, 0), 2, 1)

    # x,y,z of bounding box
    obj_points = verts[int(bbox[1]):int(bbox[1] + bbox[3]),
                       int(bbox[0]):int(bbox[0] + bbox[2])].reshape(-1, 3)
    print(obj_points.shape)
    zs = obj_points[:, 2]
    z = np.median(zs)
    ys = obj_points[:, 0]
    # take only y for close z to prevent including background
    ys = np.delete(ys, np.where((zs < z - 1) | (zs > z + 1)))
    my = np.amin(ys, initial=1)
    My = np.amax(ys, initial=-1)

    height = (My - my)  # add next to rectangle print of height using cv library
    height = float("{:.2f}".format(height))
    print("[INFO] object height is: ", height, "[m]")
    height_txt = str(height) + "[m]"

    # Write some Text
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (p1[0], p1[1] + 20)
    fontScale = 1
    fontColor = (255, 255, 255)
    lineType = 2
    cv2.putText(color_image, height_txt,
                bottomLeftCornerOfText,
                font,
                fontScale,
                fontColor,
                lineType)

    # Show images
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('RealSense', color_image)
    cv2.waitKey(1)
The obj_points array splits the dimensions into their own columns, so zs = obj_points[:, 2] is for z and ys = obj_points[:, 1] is for y. I thought just changing ys = obj_points[:, 1] to ys = obj_points[:, 0] would calculate the width, but as mentioned it does not work.
ys = np.delete(ys, np.where((zs < z - 1) | (zs > z + 1)))
This is just to take out the outliers, so as to not take background values into account.
This is the part that calculates the height; since the camera will be horizontal, the height difference will be the width.
my = np.amin(ys, initial=1)
My = np.amax(ys, initial=-1)
height = (My - my) # add next to rectangle print of height using cv library
Since the camera is horizontal, I can just take the extent along Y. But this does not seem to work when I try the same for X.
If necessary, this is the link to the original GitHub repo: https://github.com/IntelRealSense/librealsense/tree/master/wrappers/tensorflow - I'm using Example2.
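For what it's worth, a minimal sketch of how the same z-based outlier filtering could be applied to either axis, assuming (as in the Intel example) that verts holds (x, y, z) triples in meters, so obj_points[:, 0] is x and obj_points[:, 1] is y:

import numpy as np

def estimate_extent(obj_points, axis):
    """Extent of the object in meters along the given axis
    (0 = x/width, 1 = y/height), ignoring points far from the
    median depth (likely background)."""
    zs = obj_points[:, 2]
    z = np.median(zs)
    vals = obj_points[:, axis]
    # Keep only points within 1 m of the median depth
    vals = vals[(zs > z - 1) & (zs < z + 1)]
    if vals.size == 0:
        return 0.0
    return float(np.amax(vals) - np.amin(vals))

# width  = estimate_extent(obj_points, axis=0)
# height = estimate_extent(obj_points, axis=1)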
Okay, so I have these annotation functions:
def rotate_bound(image, angle):
    # grab the dimensions of the image and then determine the center
    (h, w) = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)

    # grab the rotation matrix (applying the negative of the angle to
    # rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])

    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))

    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY

    # perform the actual rotation and return the image
    return cv2.warpAffine(image, M, (nW, nH), borderValue=(255, 255, 255))

def blur_image(image, radius):
    pil_img = Image.fromarray(image)
    pil_img = pil_img.filter(ImageFilter.GaussianBlur(radius=radius))
    opencv_image = np.array(pil_img)
    return opencv_image

def enhance_brightness(image, value):
    for x in range(image.shape[0]):
        for y in range(image.shape[1]):
            if image[x][y][0] < 245 and image[x][y][1] < 245 and image[x][y][2] < 245:
                if 0 <= image[x][y][0] + value <= 255:
                    image[x][y][0] += value
                if 0 <= image[x][y][1] + value <= 255:
                    image[x][y][1] += value
                if 0 <= image[x][y][2] + value <= 255:
                    image[x][y][2] += value
    return image
When I try to use them, I get errors I can't fix.
Both functions rotate_bound and enhance_brightness give the same error:
'JpegImageFile' object has no attribute 'shape'.
And the blur function returns another error:
a bytes-like object is required, not 'JpegImageFile'
I would be very glad if someone could give me a hand.
You're importing the image using Pillow, and Pillow image objects have no shape attribute; that belongs to NumPy arrays, which is what OpenCV works with. Instead, try
img = cv2.imread("test.jpg")
This should fix all the issues.
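Alternatively, if you want to keep loading the image with Pillow, converting it to a NumPy array gives it the shape attribute the functions expect; a sketch:

import numpy as np
import cv2
from PIL import Image

pil_img = Image.open("test.jpg")
img = np.array(pil_img)                     # NumPy array, so .shape now exists
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # Pillow uses RGB, OpenCV expects BGR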
I'm creating images using Python, using
myImage = Image.new('RGB', (250, 250), 'rgb(155,89,182)')
and this actually creates the image. But is there a way to create an image with a background of the color I'm choosing, but with a gradient? I want to pick blue as my color; then I want deep blue at the edges and lighter blue in the center of the image. Is that possible using simple PIL and Python?
Thank you in advance.
The code depends on how you want the gradient to look.
You could make it a rectangular gradient which would look like this:
Or you could make it a round gradient like this:
This would be the code for the round gradient:
import math
from PIL import Image

imgsize = (250, 250)  # The size of the image
image = Image.new('RGB', imgsize)  # Create the image
innerColor = [80, 80, 255]  # Color at the center
outerColor = [0, 0, 80]  # Color at the corners

for y in range(imgsize[1]):
    for x in range(imgsize[0]):
        # Find the distance to the center
        distanceToCenter = math.sqrt((x - imgsize[0]/2) ** 2 + (y - imgsize[1]/2) ** 2)

        # Make it on a scale from 0 to 1 (1 = corner of the image)
        distanceToCenter = float(distanceToCenter) / (math.sqrt(2) * imgsize[0]/2)

        # Calculate r, g, and b values by linear interpolation
        r = outerColor[0] * distanceToCenter + innerColor[0] * (1 - distanceToCenter)
        g = outerColor[1] * distanceToCenter + innerColor[1] * (1 - distanceToCenter)
        b = outerColor[2] * distanceToCenter + innerColor[2] * (1 - distanceToCenter)

        # Place the pixel
        image.putpixel((x, y), (int(r), int(g), int(b)))

image.save('circlegradient.jpg')
For each pixel, it sets the red, green, and blue values somewhere in between innerColor and outerColor depending on the distance from the pixel to the center.
This would be the code for the rectangular gradient:
from PIL import Image

imgsize = (250, 250)  # The size of the image
image = Image.new('RGB', imgsize)  # Create the image
innerColor = [80, 80, 255]  # Color at the center
outerColor = [0, 0, 80]  # Color at the edge

for y in range(imgsize[1]):
    for x in range(imgsize[0]):
        # Find the distance to the closest edge
        distanceToEdge = min(abs(x - imgsize[0]), x, abs(y - imgsize[1]), y)

        # Make it on a scale from 0 to 1 (1 = center of the image)
        distanceToEdge = float(distanceToEdge) / (imgsize[0]/2)

        # Calculate r, g, and b values by linear interpolation
        r = innerColor[0] * distanceToEdge + outerColor[0] * (1 - distanceToEdge)
        g = innerColor[1] * distanceToEdge + outerColor[1] * (1 - distanceToEdge)
        b = innerColor[2] * distanceToEdge + outerColor[2] * (1 - distanceToEdge)

        # Place the pixel
        image.putpixel((x, y), (int(r), int(g), int(b)))

image.save('rectgradient.jpg')
This works the same way, except it measures the distance to the closest edge, not the center.
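Side by side, the only real difference between the two variants is the distance metric; a sketch for comparison:

import math

def dist_to_center(x, y, w, h):
    # Round gradient: Euclidean distance from the pixel to the image center
    return math.hypot(x - w / 2, y - h / 2)

def dist_to_edge(x, y, w, h):
    # Rectangular gradient: distance from the pixel to the nearest border
    return min(x, y, w - x, h - y)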
Make a gradient around the line ax + by + c = 0:
import numpy as np
import PIL.Image

# Line coefficients: ax + by + c = 0
a = -0.5
b = -1
c = 250
width = 55

imgsize = (180, 320)  # The size of the image
image = PIL.Image.new('RGB', imgsize, color="white")  # Create the image
innerColor = [255, 0, 0]  # Color at the center
outerColor = [0, 0, 0]  # Color at the edge

for y in range(imgsize[1]):
    for x in range(imgsize[0]):
        # Signed distance from the pixel to the line
        dist = (a*x + b*y + c) / np.sqrt(a*a + b*b)
        color_coef = abs(dist) / width
        if abs(dist) < width:
            red = outerColor[0] * color_coef + innerColor[0] * (1 - color_coef)
            green = outerColor[1] * color_coef + innerColor[1] * (1 - color_coef)
            blue = outerColor[2] * color_coef + innerColor[2] * (1 - color_coef)
            image.putpixel((x, y), (int(red), int(green), int(blue)))

image.save('linegradient.jpg')
image sample
How would I do this if I'm given the gradient data in x and y, so that the gradient is not central?
This is all the data they give me:
Spotlight_Size: 90.81163
RefractionDepthBias: 0
GradientPOSX: 50
GradientPOSY: 99.68244
GradientSIZE: 121.87289
Spotlight_Intensity: 105
Spotlight_PoSX: 50.192413
Spotlight_PosY: 52.344917
FallOffColor_Fill_Percent: 40
FallOffColor_Postion: 50
and 3 colors:
A_COLOR: 100600ff
B_COLOR: f7d32aff
FallOff_COLOR: f7d32aff
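The exact meaning of those parameters isn't documented here, but if GradientPOSX/GradientPOSY are the gradient center in percent of the image size and GradientSIZE its radius in percent (an assumption), the round-gradient code above only needs its center moved off-center. A hypothetical sketch using A_COLOR and B_COLOR as the inner and outer RGB colors:

import math
from PIL import Image

imgsize = (250, 250)  # assumed output size

# Assumed interpretation: positions and size given in percent of the image
cx = 50.0 / 100 * imgsize[0]           # GradientPOSX
cy = 99.68244 / 100 * imgsize[1]       # GradientPOSY
radius = 121.87289 / 100 * imgsize[0]  # GradientSIZE

innerColor = (0x10, 0x06, 0x00)  # A_COLOR 100600ff, alpha ignored
outerColor = (0xf7, 0xd3, 0x2a)  # B_COLOR f7d32aff, alpha ignored

image = Image.new('RGB', imgsize)
for y in range(imgsize[1]):
    for x in range(imgsize[0]):
        # Same interpolation as above, measured from the off-center point
        t = min(math.hypot(x - cx, y - cy) / radius, 1.0)
        pixel = tuple(int(o * t + i * (1 - t))
                      for i, o in zip(innerColor, outerColor))
        image.putpixel((x, y), pixel)
image.save('offcenter_gradient.png')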