How do I set the Mandelbrot Set background to cyan? I don't understand the code.
Here's the code:
# Python code for Mandelbrot fractal
# Import necessary libraries
from PIL import Image
from numpy import array  # numpy.complex has been removed in newer NumPy; use the built-in complex instead
import colorsys

# setting the width of the output image as 1024
WIDTH = 1024

# a function to return a tuple of colors
# as integer values of RGB
def rgb_conv(i):
    color = 255 * array(colorsys.hsv_to_rgb(i / 255.0, 1.0, 0.5))
    return tuple(color.astype(int))

# function defining a mandelbrot
def mandelbrot(x, y):
    c0 = complex(x, y)  # built-in complex
    c = 0
    for i in range(1, 1000):
        if abs(c) > 2:
            return rgb_conv(i)
        c = c * c + c0
    return (0, 0, 0)

# creating the new image in RGB mode
img = Image.new('RGB', (WIDTH, int(WIDTH / 2)))
pixels = img.load()

for x in range(img.size[0]):
    # displaying the progress as a percentage
    print("%.2f %%" % (x / WIDTH * 100.0))
    for y in range(img.size[1]):
        pixels[x, y] = mandelbrot((x - (0.75 * WIDTH)) / (WIDTH / 4),
                                  (y - (WIDTH / 4)) / (WIDTH / 4))

# to display the created fractal after
# completing the given number of iterations
img.show()
I would like to set the background color to cyan.
Thanks.
Neo
Try changing the mandelbrot function to:
def mandelbrot(x, y):
    c0 = complex(x, y)
    c = 0
    for i in range(1, 1000):
        if abs(c) > 2:
            return (0, 0, 0)
        c = c * c + c0
    return (0, 255, 255)
The final return statement gives the background color: it is the value returned for points that never escape the iteration.
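If you would rather keep the original colour gradient for the escaping points and only turn the black region cyan, a minimal variant of the same idea might be:

def mandelbrot(x, y):
    c0 = complex(x, y)
    c = 0
    for i in range(1, 1000):
        if abs(c) > 2:
            return rgb_conv(i)  # keep the original gradient for points that escape
        c = c * c + c0
    return (0, 255, 255)        # points that never escape are drawn cyan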
I'm using a RealSense D455 camera and trying to detect objects and calculate their width. I found some code that does this for the height, but when I try to change it the calculations are wrong. For height it is usually pretty accurate, only showing small increases when wrong, but with the changed code an object that is ~40 cm comes out as 1-1.5 meters.
if score > 0.8 and class_ == 1:  # 1 for human
    left = box[1] * W
    top = box[0] * H
    right = box[3] * W
    bottom = box[2] * H
    width = right - left
    height = bottom - top
    bbox = (int(left), int(top), int(width), int(height))
    heightB = bbox[1] + bbox[3]
    p1 = (int(bbox[0]), int(bbox[1]))
    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
    # draw box
    cv2.rectangle(color_image, p1, p2, (255, 0, 0), 2, 1)
    # x, y, z of bounding box
    obj_points = verts[int(bbox[1]):int(bbox[1] + bbox[3]), int(bbox[0]):int(bbox[0] + bbox[2])].reshape(-1, 3)
    print(obj_points.shape)
    zs = obj_points[:, 2]
    z = np.median(zs)
    ys = obj_points[:, 0]
    ys = np.delete(ys, np.where(
        (zs < z - 1) | (zs > z + 1)))  # take only y for close z to prevent including background
    my = np.amin(ys, initial=1)
    My = np.amax(ys, initial=-1)
    height = (My - my)  # add next to rectangle print of height using cv library
    height = float("{:.2f}".format(height))
    print("[INFO] object height is: ", height, "[m]")
    height_txt = str(height) + "[m]"
    # Write some text
    font = cv2.FONT_HERSHEY_SIMPLEX
    bottomLeftCornerOfText = (p1[0], p1[1] + 20)
    fontScale = 1
    fontColor = (255, 255, 255)
    lineType = 2
    cv2.putText(color_image, height_txt,
                bottomLeftCornerOfText,
                font,
                fontScale,
                fontColor,
                lineType)
    # Show images
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    cv2.imshow('RealSense', color_image)
    cv2.waitKey(1)
The object points are split into one array per dimension, so zs = obj_points[:, 2] is z and ys = obj_points[:, 1] is y. I thought just changing ys = obj_points[:, 1] to ys = obj_points[:, 0] would calculate the width, but as mentioned above it does not work.
ys = np.delete(ys, np.where((zs < z - 1) | (zs > z + 1)))
This is just to remove outliers so that background values are not taken into account.
This is the part that calculates the height; since the camera will be horizontal, the height difference will be the width.
my = np.amin(ys, initial=1)
My = np.amax(ys, initial=-1)
height = (My - my) # add next to rectangle print of height using cv library
Since the camera is horizontal I can just take the length along Y. But this does not seem to work when I try the same for X.
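For reference, this is a minimal sketch of the width calculation I am attempting, assuming the columns of obj_points are ordered (x, y, z):

xs = obj_points[:, 0]
zs = obj_points[:, 2]
z = np.median(zs)
# keep only x values whose depth is close to the median depth, to exclude background points
xs = np.delete(xs, np.where((zs < z - 1) | (zs > z + 1)))
width = float("{:.2f}".format(np.amax(xs) - np.amin(xs)))
print("[INFO] object width is: ", width, "[m]")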
If it's necessary this is the link to the original GitHub repo: https://github.com/IntelRealSense/librealsense/tree/master/wrappers/tensorflow I'm using Example2.
I want to write code that corrects lens distortion and de-fishes a fisheye image.
I found pseudocode for it here and I have tried to stick to it:
http://www.tannerhelland.com/4743/simple-algorithm-correcting-lens-distortion/
from PIL import Image
import numpy as np

im = Image.open('myimage.png')
img = Image.new("RGB", (512, 512), 'green')
im = im.convert("RGB")
pix_val = im.load()
pix_valNew = img.load()
width, height = im.size
strength = 1.5
zoom = 1.0
halfWidth = width/2
halfHeight = height/2
theta = -1
if strength == 0:
    strength = 0.00001
correctionRadius = ((width**2 + height**2)/strength)**0.5
for x in range(512):
    for y in range(512):
        newX = x - halfWidth
        newY = y - halfHeight
        distance = (newX**2 + newY**2)**0.5
        r = distance/correctionRadius
        if r == 0:
            theta = 1
        else:
            theta = np.arctan(r)/r
        sourceX = (int)(halfWidth + theta * newX * zoom)
        sourceY = (int)(halfHeight + theta * newY * zoom)
        pix_valNew[x,y] = pix_val[sourceX,sourceY]
img.show()
I keep getting an image that is completely white, and I am not able to troubleshoot it because I am completely new to this.
512x512 is the resolution of the image I want to "de-fish".
The logic, as far as I understand it, is to find the location of a particular pixel in the fisheye image and map it to its corresponding location in the normal image.
Someone asked for the pseudocode; I did put the link, but I am pasting it here as well:
input:
    strength as floating point >= 0. 0 = no change, high numbers equal stronger correction.
    zoom as floating point >= 1. (1 = no change in zoom)

algorithm:
    set halfWidth = imageWidth / 2
    set halfHeight = imageHeight / 2

    if strength = 0 then strength = 0.00001

    set correctionRadius = squareroot(imageWidth ^ 2 + imageHeight ^ 2) / strength

    for each pixel (x, y) in destinationImage
        set newX = x - halfWidth
        set newY = y - halfHeight

        set distance = squareroot(newX ^ 2 + newY ^ 2)
        set r = distance / correctionRadius

        if r = 0 then
            set theta = 1
        else
            set theta = arctangent(r) / r

        set sourceX = halfWidth + theta * newX * zoom
        set sourceY = halfHeight + theta * newY * zoom

        set color of pixel (x, y) to color of source image pixel at (sourceX, sourceY)
Any form of help will be very much appreciated.
It appears that under some combinations of inputs, illegal indices for the source image are being calculated. A simple fix is to replace
pix_valNew[x,y] = pix_val[sourceX,sourceY]
with:
try:
    pix_valNew[x, y] = pix_val[sourceX, sourceY]
except IndexError:
    print('IndexError', x, y, sourceX, sourceY)
    pix_valNew[x, y] = (0, 0, 0)
Also, I just noticed that this line of your code:
correctionRadius = ((width**2 + height**2)/strength)**0.5
should be:
correctionRadius = ((width**2 + height**2)**0.5)/strength
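Putting both fixes together, a minimal corrected version of the loop might look like this (still assuming a 512x512 source image named myimage.png):

from PIL import Image
import numpy as np

im = Image.open('myimage.png').convert("RGB")
img = Image.new("RGB", (512, 512), 'green')
pix_val = im.load()
pix_valNew = img.load()
width, height = im.size
strength = 1.5
zoom = 1.0
halfWidth = width / 2
halfHeight = height / 2
if strength == 0:
    strength = 0.00001
# square root first, then divide by strength
correctionRadius = ((width**2 + height**2)**0.5) / strength
for x in range(512):
    for y in range(512):
        newX = x - halfWidth
        newY = y - halfHeight
        distance = (newX**2 + newY**2)**0.5
        r = distance / correctionRadius
        theta = 1 if r == 0 else np.arctan(r) / r
        sourceX = int(halfWidth + theta * newX * zoom)
        sourceY = int(halfHeight + theta * newY * zoom)
        # guard against indices outside the source image
        if 0 <= sourceX < width and 0 <= sourceY < height:
            pix_valNew[x, y] = pix_val[sourceX, sourceY]
        else:
            pix_valNew[x, y] = (0, 0, 0)
img.show()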
I am pretty new to Python and want to divide the following image into 8 pie segments:
I want it to look something like this (I made this in PowerPoint):
The background should be black and the edge of the figure should have an unique color as well as each pie segment.
EDIT: I have written code that divides the whole image into 8 segments:
from PIL import Image, ImageDraw

im = Image.open('C:/Users/20191881/Documents/OGO Beeldanalyse/Python/asymmetrie/rotation.png')
fill = 255
draw = ImageDraw.Draw(im)
draw.line((0, 0) + im.size, fill)
draw.line((0, im.size[1], im.size[0], 0), fill)
draw.line((0.5*im.size[0], 0, 0.5*im.size[0], im.size[1]), fill)
draw.line((0, 0.5*im.size[1], im.size[0], 0.5*im.size[1]), fill)
del draw
im.show()
The output gives:
The only thing left to do is to find a way to give each black segment inside the border a unique color and also give each of the white edge segments a unique color.
Your code divides the image into eight parts, that's correct, but with respect to the image center you don't get eight angularly equal pie segments like the ones in your sketch.
Here is my solution, using only Pillow and the math module:
import math
from PIL import Image, ImageDraw

def segment_color(i_color, n_colors):
    r = int((192 - 64) / (n_colors - 1) * i_color + 64)
    g = int((224 - 128) / (n_colors - 1) * i_color + 128)
    b = 255
    return (r, g, b)

# Load image; generate ImageDraw
im = Image.open('path_to/vgdrD.png').convert('RGB')
draw = ImageDraw.Draw(im)

# Number of pie segments (must be an even number)
n = 8

# Replace (all-white) edge with defined edge color
edge_color = (255, 128, 0)
pixels = im.load()
for y in range(im.height):
    for x in range(im.width):
        if pixels[x, y] == (255, 255, 255):
            pixels[x, y] = edge_color

# Draw lines with defined line color
line_color = (0, 255, 0)
d = min(im.width, im.height) - 10
center = (int(im.width/2), int(im.height/2))
for i in range(int(n/2)):
    angle = 360 / n * i
    x1 = math.cos(angle/180*math.pi) * d/2 + center[0]
    y1 = math.sin(angle/180*math.pi) * d/2 + center[1]
    x2 = math.cos((180+angle)/180*math.pi) * d/2 + center[0]
    y2 = math.sin((180+angle)/180*math.pi) * d/2 + center[1]
    draw.line([(x1, y1), (x2, y2)], line_color)

# Fill pie segments with defined segment colors
for i in range(n):
    angle = 360 / n * i + 360 / n / 2
    x = math.cos(angle/180*math.pi) * 20 + center[0]
    y = math.sin(angle/180*math.pi) * 20 + center[1]
    ImageDraw.floodfill(im, (int(x), int(y)), segment_color(i, n))

im.save(str(n) + '_pie.png')
For n = 8 pie segments, the following result is produced:
The first step is to replace all white pixels in the original image with the desired edge color. Of course, the assumption here is that there are no other (white) pixels in the image. Also, this might be better done using NumPy and vectorized code, but I wanted to keep the solution Pillow-only.
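For illustration, a vectorized NumPy sketch of that replacement step (assuming im and edge_color as defined above) could look like this:

import numpy as np

arr = np.array(im)
white = (arr == 255).all(axis=2)  # mask of pixels that are pure white
arr[white] = edge_color           # replace them with the defined edge color
im = Image.fromarray(arr)
draw = ImageDraw.Draw(im)         # re-create the draw object for the new image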
The next step is to draw the (green) lines. Here, I calculate the proper start and end coordinates of the lines using sin and cos.
The last step is to flood fill the pie segments' area, cf. ImageDraw.floodfill. To do so, I calculate the seed points the same way as before, but add an angular shift to hit a point exactly within each pie segment.
As you can see, n is variable in my solution (n must be even). Of course, there are limitations regarding the angular resolution, mostly due to the small image.
Hope that helps!
EDIT: Here's a modified version to also allow for individually colored edges.
import math
from PIL import Image, ImageDraw

def segment_color(i_color, n_colors):
    r = int((192 - 64) / (n_colors - 1) * i_color + 64)
    g = int((224 - 128) / (n_colors - 1) * i_color + 128)
    b = 255
    return (r, g, b)

def edge_color(i_color, n_colors):
    r = 255
    g = 255 - int((224 - 32) / (n_colors - 1) * i_color + 32)
    b = 255 - int((192 - 16) / (n_colors - 1) * i_color + 16)
    return (r, g, b)

# Load image; generate ImageDraw
im = Image.open('images/vgdrD.png').convert('RGB')
draw = ImageDraw.Draw(im)
center = (int(im.width/2), int(im.height/2))

# Number of pie segments (must be an even number)
n = 8

# Replace (all-white) edge with defined edge colors
max_len = im.width + im.height
im_pix = im.load()
for i in range(n):
    mask = Image.new('L', im.size, 0)
    mask_draw = ImageDraw.Draw(mask)
    angle = 360 / n * i
    x1 = math.cos(angle/180*math.pi) * max_len + center[0]
    y1 = math.sin(angle/180*math.pi) * max_len + center[1]
    angle = 360 / n * (i+1)
    x2 = math.cos(angle/180*math.pi) * max_len + center[0]
    y2 = math.sin(angle/180*math.pi) * max_len + center[1]
    mask_draw.polygon([center, (x1, y1), (x2, y2)], 255)
    mask_pix = mask.load()
    for y in range(im.height):
        for x in range(im.width):
            if (im_pix[x, y] == (255, 255, 255)) and (mask_pix[x, y] == 255):
                im_pix[x, y] = edge_color(i, n)

# Draw lines with defined line color
line_color = (0, 255, 0)
d = min(im.width, im.height) - 10
for i in range(int(n/2)):
    angle = 360 / n * i
    x1 = math.cos(angle/180*math.pi) * d/2 + center[0]
    y1 = math.sin(angle/180*math.pi) * d/2 + center[1]
    x2 = math.cos((180+angle)/180*math.pi) * d/2 + center[0]
    y2 = math.sin((180+angle)/180*math.pi) * d/2 + center[1]
    draw.line([(x1, y1), (x2, y2)], line_color)

# Fill pie segments with defined segment colors
for i in range(n):
    angle = 360 / n * i + 360 / n / 2
    x = math.cos(angle/180*math.pi) * 20 + center[0]
    y = math.sin(angle/180*math.pi) * 20 + center[1]
    ImageDraw.floodfill(im, (int(x), int(y)), segment_color(i, n))

im.save(str(n) + '_pie.png')
A binary mask is created for each pie segment, and only the white pixels within that mask are replaced with the segment's edge color.
Using NumPy still seems favorable, but I was curious to do that in Pillow only.
fontScale = 1
fontThickness = 1

# make sure font thickness is an integer; if not, the OpenCV functions that use it may crash
fontThickness = int(fontThickness)

upperLeftTextOriginX = int(imageWidth * 0.05)
upperLeftTextOriginY = int(imageHeight * 0.05)

textSize, baseline = cv2.getTextSize(resultText, fontFace, fontScale, fontThickness)
textSizeWidth, textSizeHeight = textSize

# calculate the lower left origin of the text area based on the text area center, width, and height
lowerLeftTextOriginX = upperLeftTextOriginX
lowerLeftTextOriginY = upperLeftTextOriginY + textSizeHeight

# write the text on the image
cv2.putText(openCVImage, resultText, (lowerLeftTextOriginX, lowerLeftTextOriginY), fontFace, fontScale, Color,
            fontThickness)
It seems fontScale does not scale the text according to the image width and height, because the text is almost the same size for differently sized images. How can I resize the text according to the image size so that all of the text fits in the image?
Here is a solution that will fit the text inside your rectangle. If your rectangles have variable width, you can get the font scale by looping through potential scales and measuring how much width (in pixels) your text would take. Once you drop below your rectangle width, you can retrieve the scale and use it in the actual putText call:
def get_optimal_font_scale(text, width):
    for scale in reversed(range(0, 60, 1)):
        textSize = cv.getTextSize(text, fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=scale/10, thickness=1)
        new_width = textSize[0][0]
        if (new_width <= width):
            print(new_width)
            return scale/10
    return 1
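A hypothetical usage, with label, rect_w, x and y standing in for your own text, rectangle width and text origin (and assuming cv is your import alias for cv2), might be:

font_scale = get_optimal_font_scale(label, rect_w)
cv.putText(img, label, (x, y), cv.FONT_HERSHEY_DUPLEX, font_scale, (255, 255, 255), 1)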
This worked for me!
scale = 1  # this value can be in (0, 1] to change the size of the text relative to the image
fontScale = min(imageWidth,imageHeight)/(25/scale)
Just keep in mind that the font face can affect the 25 constant.
Approach
One way to approach this is to scale the font size proportionally to the size of the image. In my experience, more natural results are obtained when applying this not only to fontScale, but also to thickness. For example:
import math
import cv2
FONT_SCALE = 2e-3 # Adjust for larger font size in all images
THICKNESS_SCALE = 1e-3 # Adjust for larger thickness in all images
img = cv2.imread("...")
height, width, _ = img.shape
font_scale = min(width, height) * FONT_SCALE
thickness = math.ceil(min(width, height) * THICKNESS_SCALE)
Example
Let's take this free-to-use stock photo as an example. We create two versions of the base image by rescaling to a width of 2000px and 600px (keeping the aspect ratio constant). With the approach above, text looks appropriately sized to the image size in both cases (here shown in an illustrative use case where we label bounding boxes):
2000px
600px
Full code to reproduce (but note: input images have to be preprocessed):
import math

import cv2

FONT_SCALE = 2e-3  # Adjust for larger font size in all images
THICKNESS_SCALE = 1e-3  # Adjust for larger thickness in all images
TEXT_Y_OFFSET_SCALE = 1e-2  # Adjust for larger Y-offset of text and bounding box

img_width_to_bboxes = {
    2000: [
        {"xywh": [120, 400, 1200, 510], "label": "car"},
        {"xywh": [1080, 420, 790, 340], "label": "car"},
    ],
    600: [
        {"xywh": [35, 120, 360, 155], "label": "car"},
        {"xywh": [325, 130, 235, 95], "label": "car"},
    ],
}

def add_bbox_and_text() -> None:
    for img_width, bboxes in img_width_to_bboxes.items():
        # Base image from https://www.pexels.com/photo/black-suv-beside-grey-auv-crossing-the-pedestrian-line-during-daytime-125514/
        # Two rescaled versions of the base image created with width of 600px and 2000px
        img = cv2.imread(f"pexels-kaique-rocha-125514_{img_width}.jpg")
        height, width, _ = img.shape
        for bbox in bboxes:
            x, y, w, h = bbox["xywh"]
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(
                img,
                bbox["label"],
                (x, y - int(height * TEXT_Y_OFFSET_SCALE)),
                fontFace=cv2.FONT_HERSHEY_TRIPLEX,
                fontScale=min(width, height) * FONT_SCALE,
                thickness=math.ceil(min(width, height) * THICKNESS_SCALE),
                color=(0, 255, 0),
            )
        cv2.imwrite(f"pexels-kaique-rocha-125514_{img_width}_with_text.jpg", img)

if __name__ == "__main__":
    add_bbox_and_text()
If you take fontScale = 1 for images with size approximately 1000 x 1000, then this code should scale your font correctly.
fontScale = (imageWidth * imageHeight) / (1000 * 1000) # Would work best for almost square images
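For instance, assuming a roughly square BGR image img, usage could look like:

imageHeight, imageWidth = img.shape[:2]
fontScale = (imageWidth * imageHeight) / (1000 * 1000)
cv2.putText(img, "some text", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255, 0), 2)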
If you are still having problems, leave a comment.
I implemented a function to find the best-fitting, centered location for text.
Take a look and see if this code helps you.
def findFontLocate(s_txt, font_face, font_thick, cv_bgd):
    best_scale = 1.0
    bgd_w = cv_bgd.shape[1]
    bgd_h = cv_bgd.shape[0]
    txt_rect_w = 0
    txt_rect_h = 0
    baseline = 0
    for scale in np.arange(1.0, 6.0, 0.2):
        (ret_w, ret_h), tmp_bsl = cv2.getTextSize(
            s_txt, font_face, scale, font_thick)
        tmp_w = ret_w + 2 * font_thick
        tmp_h = ret_h + 2 * font_thick + tmp_bsl
        if tmp_w >= bgd_w or tmp_h >= bgd_h:
            break
        else:
            baseline = tmp_bsl
            txt_rect_w = tmp_w
            txt_rect_h = tmp_h
            best_scale = scale
    lt_x, lt_y = round(bgd_w/2 - txt_rect_w/2), round(bgd_h/2 - txt_rect_h/2)
    rb_x, rb_y = round(bgd_w/2 + txt_rect_w/2), round(bgd_h/2 + txt_rect_h/2) - baseline
    return (lt_x, lt_y, rb_x, rb_y), best_scale, baseline
Note that the function accepts four arguments: s_txt (the string to render), font_face, font_thick, and cv_bgd (the background image as an ndarray).
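To get the values used below, first unpack the function's return value, for example:

(lt_x, lt_y, rb_x, rb_y), best_scale, baseline = findFontLocate(s_txt, font_face, font_thick, cv_bgd)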
When you call putText(), write code like the following:
cv2.putText(
    cv_bgd, s_txt, (lt_x, rb_y), font_face,
    best_scale, (0, 0, 0), font_thick, cv2.LINE_AA)
You can use the get_optimal_font_scale function below to adjust the font size according to the image size:
def get_optimal_font_scale(text, width):
    for scale in reversed(range(0, 60, 1)):
        textSize = cv2.getTextSize(text, fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=scale/10, thickness=1)
        new_width = textSize[0][0]
        if (new_width <= width):
            return scale/10
    return 1
fontScale = 3*(img.shape[1]//6)
font_size = get_optimal_font_scale(text, fontScale)
cv2.putText(img, text, org, font, font_size, color, thickness, cv2.LINE_AA)
You can change fontScale for your image. It works for me.
double calc_scale_rectbox(const char *txt, int box_width, int box_height,
                          cv::Size &textSize, int &baseline)
{
    if (!txt) return 1.0;
    double scale = 2.0;
    double w_aprx = 0;
    double h_aprx = 0;
    do
    {
        textSize = cv::getTextSize(txt, FONT_HERSHEY_DUPLEX, scale, 2,
                                   &baseline);
        w_aprx = textSize.width * 100 / box_width;
        h_aprx = textSize.height * 100 / box_height;
        scale -= 0.1;
    } while (w_aprx > 50 || h_aprx > 50);
    return scale;
}
......
cv::Size textSize;
int baseline = 0;
double scale = calc_scale_rectbox(win_caption.c_str(), width,
                                  height, textSize, baseline);
cv::putText(img, win_caption, Point(width / 2 - textSize.width / 2,
                                    (height + textSize.height - baseline + 2) / 2),
            FONT_HERSHEY_DUPLEX, scale, CV_RGB(255, 255, 255), 2);
A simple utility function:
import math

def optimal_font_dims(img, font_scale=2e-3, thickness_scale=5e-3):
    h, w, _ = img.shape
    font_scale = min(w, h) * font_scale
    thickness = math.ceil(min(w, h) * thickness_scale)
    return font_scale, thickness
Usage:
font_scale, thickness = optimal_font_dims(image)
cv2.putText(image, "LABEL", (x, y), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255,0,0), thickness)