I am trying to turn an object detector for images into an object detector for videos, but I am getting multiple bounding boxes and I don't know why.
The first frame of the video has the correct number of bounding boxes, namely 1, but as the loop runs, the draw_boxes function outputs frames with multiple, overlapping bounding boxes.
If you can help I would appreciate it. Thanks.
Here is an example frame:
And here is the code:
for i in tqdm(range(nb_frames)):
    _, frame = video_reader.read()
    cv2.imwrite("framey.jpg", frame)
    filename = "framey.jpg"
    image, image_w, image_h = load_image_pixels(filename, (input_w, input_h))
    yhat = model.predict(image)
    for i in range(len(yhat)):
        # decode the output of the network
        boxes += decode_netout(yhat[i][0], anchors[i], class_threshold, input_h, input_w)
    # correct the sizes of the bounding boxes for the shape of the image
    correct_yolo_boxes(boxes, image_h, image_w, input_h, input_w)
    # suppress non-maximal boxes
    do_nms(boxes, 0.5)
    # get the details of the detected objects
    v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
    # draw what we found
    imagex = draw_boxes(filename, v_boxes, v_labels, v_scores)
    video_writer.write(imagex)

video_reader.release()
video_writer.release()
And here is the function that is spitting out the above image:
def draw_boxes(filename, v_boxes, v_labels, v_scores):
    # load the image
    data = pyplot.imread(filename)
    # plot the image
    pyplot.imshow(data)
    # get the context for drawing boxes
    ax = pyplot.gca()
    # plot each box
    for i in range(len(v_boxes)):
        box = v_boxes[i]
        # get coordinates
        y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
        # calculate width and height of the box
        width, height = x2 - x1, y2 - y1
        # create the shape
        rect = Rectangle((x1, y1), width, height, fill=False, color='white')
        # draw the box
        ax.add_patch(rect)
        # draw text and score in top left corner
        label = "%s (%.3f)" % (v_labels[i], v_scores[i])
        pyplot.text(x1, y1, label, color='white')
    # save the plot
    pyplot.savefig('detected.jpg')
    filename = "detected.jpg"
    image = load_img(filename)
    image_array = img_to_array(image)
    image_array = (image_array*255).astype(np.uint8)
    return image_array
So, the error was in the 'draw_boxes' function: because it draws with pyplot and the figure is never cleared, the rectangles from every previous frame stay on the same axes, so each saved frame contains all the boxes drawn so far.
I rewrote 'draw_boxes' to draw directly on the image array with OpenCV instead, and it worked.
def draw_bounding_boxes(image, v_boxes, v_labels, v_scores):
    for i in range(len(v_boxes)):
        box = v_boxes[i]
        y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
        width, height = x2 - x1, y2 - y1
        label = "%s (%.3f)" % (v_labels[i], v_scores[i])
        region = np.array([[x1 - 3, y1],
                           [x1 - 3, y1 - height - 26],
                           [x1 + width + 13, y1 - height - 26],
                           [x1 + width + 13, y1]], dtype='int32')
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 5)
        cv2.fillPoly(image, [region], (255, 0, 0))
        cv2.putText(image,
                    label,
                    (x1 + 13, y1 - 13),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1e-3 * image.shape[0],
                    (0, 0, 0),
                    2)
    return image
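For completeness, here is a minimal sketch of how the main loop could then call it, drawing straight on the frame that video_reader already returns instead of round-tripping through a saved figure. The per-frame reset of boxes and the loop variable k are my additions and were not shown in the original snippet; the helper functions are the ones from the question.

for i in tqdm(range(nb_frames)):
    _, frame = video_reader.read()
    cv2.imwrite("framey.jpg", frame)
    image, image_w, image_h = load_image_pixels("framey.jpg", (input_w, input_h))
    yhat = model.predict(image)
    boxes = list()  # start with an empty list on every frame
    for k in range(len(yhat)):
        boxes += decode_netout(yhat[k][0], anchors[k], class_threshold, input_h, input_w)
    correct_yolo_boxes(boxes, image_h, image_w, input_h, input_w)
    do_nms(boxes, 0.5)
    v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
    # draw directly on the frame array and write it to the output video
    video_writer.write(draw_bounding_boxes(frame, v_boxes, v_labels, v_scores))

video_reader.release()
video_writer.release()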
What I'm trying to do is generate equal diagonal lines in PIL. My approach is to first fill a square image with equal horizontal lines and then rotate it 45 degrees. But when I rotate it, the lines aren't long enough: there shouldn't be any black left in the result, and the lines should still be equal. It also needs to work with more colors.
code:
import random
from PIL import Image, ImageDraw

im = Image.new('RGB', (1000, 1000), (255, 255, 255))
draw = ImageDraw.Draw(im)
colors = [(255, 0, 255), (0, 0, 255)]
random.shuffle(colors)
length = len(colors)
amount = 1000 / length
x1 = 0
y1 = 0
x2 = 1000
y2 = 0
for color in colors:
    shape = [(x1, y1 + amount // 2), (x2, y2 + amount // 2)]
    draw.line(shape, fill=color, width=int(amount))
    y1 += amount
    y2 += amount
im.save("pre_diagonal.png")
colorimage = Image.open('pre_diagonal.png')
out = colorimage.rotate(45)
You can do it by first generating an image of vertical lines like I showed you in my answer to your other question, rotating that by 45°, and then cropping it. To avoid having areas of black, you need to generate an initial image that is large enough for the cropping.
In this case that's simply a square image whose sides are the length of the hypotenuse (diagonal) of the final target image's size, i.e. DIAG = hypot(IMG_WIDTH, IMG_HEIGHT) = sqrt(IMG_WIDTH² + IMG_HEIGHT²).
Graphically, here's what I mean:
At any rate, here's the code that does it:
from math import hypot
from PIL import Image, ImageDraw
import random
IMG_WIDTH, IMG_HEIGHT = 1000, 1000
DIAG = round(hypot(IMG_WIDTH, IMG_HEIGHT))
img = Image.new('RGB', (DIAG, DIAG), (255, 255, 255))
draw = ImageDraw.Draw(img)
colors = [(255,0,255), (0,0,255)]
random.shuffle(colors)
length = len(colors) # Number of lines.
line_width = DIAG / length # Width of each.
difx = line_width / 2
x1, y1 = difx, 0
x2, y2 = difx, DIAG
for color in colors:
    endpoints = (x1, y1), (x2, y2)
    draw.line(endpoints, fill=color, width=round(line_width))
    x1 += line_width
    x2 += line_width
img = img.rotate(-45, resample=Image.Resampling.BICUBIC)
difx, dify = (DIAG-IMG_WIDTH) // 2, (DIAG-IMG_HEIGHT) // 2
img = img.crop((difx, dify, difx+IMG_WIDTH, dify+IMG_HEIGHT))
img.save('diagonal.png')
#img.show()
Here's the resulting image:
I have an image on which I am drawing a rectangle over a specified area. The image is:
Reading this image and passing it through the YOLO algorithm gives me coordinates for a rectangle around this gesture.
The x1, y1, x2, y2 values are:
print(x1 , y1 , x2 , y2)
tensor(52.6865) tensor(38.8428) tensor(143.1934) tensor(162.9857)
Using these, I add a rectangle over the image:
box_w = x2 - x1
box_h = y2 - y1
color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
# Create a Rectangle patch
bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
# Add the bbox to the plot
ax.add_patch(bbox)
It results in the following image:
Now, I want to blacken everything around this rectangle. For this purpose I am saving the above image, reading it back, and then using OpenCV to blacken the rest with the following code.
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
# read image
img = cv2.imread(output_path)
#creating black mask
mask = np.zeros_like(img)
mask = cv2.rectangle(mask, (x1, y1), (x2,y2), (255,255,255), -1)
# apply mask to image
result = cv2.bitwise_and(img, mask)
# save results
cv2.imwrite(output_path, result)
I am getting the following image as a result:
There are 2 issues:
cv2.rectangle only takes integer values as coordinates.
Maybe the x and y axes have different directions in YOLO and OpenCV. I am just guessing, because rounding the coordinates to integers should not shift the mask this far from the rectangle.
This is being done in a Jupyter notebook on Windows 10.
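On the first point, here is a minimal sketch of converting the detector's tensors to plain ints and applying the mask to the image at the size the detector actually saw (image_path and output_path are placeholders, not names from the question). If the figure saved through matplotlib has different pixel dimensions than that image, the same coordinates will land somewhere else, which could explain the offset.

import cv2
import numpy as np

# x1, y1, x2, y2 are the 0-dim tensors printed above
x1, y1, x2, y2 = (int(round(float(v))) for v in (x1, y1, x2, y2))

img = cv2.imread(image_path)               # the image the detector was run on
mask = np.zeros_like(img)
cv2.rectangle(mask, (x1, y1), (x2, y2), (255, 255, 255), -1)
result = cv2.bitwise_and(img, mask)
cv2.imwrite(output_path, result)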
I've successfully trained a Mask_RCNN and, for illustration purposes, let's focus on this sample image the network generates:
It's all very good, no problem. What I'd like to achieve, however, is to have the following variables with their values per instance:
mask: (as an image which shows the detected object only, like a binary map)
box: (as a list)
mask_border_positions (x,y) : (as a list)
mask_center_position (x,y) : (as a tuple)
I also have the function that visualizes the above image, from the official repository:
def display_instances(image, boxes, masks, class_ids, class_names,
                      scores=None, title="",
                      figsize=(16, 16), ax=None,
                      show_mask=True, show_bbox=True,
                      colors=None, captions=None):
    """
    boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    masks: [height, width, num_instances]
    class_ids: [num_instances]
    class_names: list of class names of the dataset
    scores: (optional) confidence scores for each box
    title: (optional) Figure title
    show_mask, show_bbox: To show masks and bounding boxes or not
    figsize: (optional) the size of the image
    colors: (optional) An array or colors to use with each object
    captions: (optional) A list of strings to use as captions for each object
    """
    # Number of instances
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]

    # If no axis is passed, create one and automatically call show()
    auto_show = False
    if not ax:
        _, ax = plt.subplots(1, figsize=figsize)
        auto_show = True

    # Generate random colors
    colors = colors or random_colors(N)

    # Show area outside image boundaries.
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(title)

    masked_image = image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[i]

        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        if show_bbox:
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                                  alpha=0.7, linestyle="dashed",
                                  edgecolor=color, facecolor='none')
            ax.add_patch(p)

        # Label
        if not captions:
            class_id = class_ids[i]
            score = scores[i] if scores is not None else None
            label = class_names[class_id]
            x = random.randint(x1, (x1 + x2) // 2)
            caption = "{} {:.3f}".format(label, score) if score else label
        else:
            caption = captions[i]
        ax.text(x1, y1 + 8, caption,
                color='w', size=11, backgroundcolor="none")

        # Mask
        mask = masks[:, :, i]
        if show_mask:
            masked_image = apply_mask(masked_image, mask, color)

        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        padded_mask = np.zeros(
            (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        contours = find_contours(padded_mask, 0.5)
        for verts in contours:
            # Subtract the padding and flip (y, x) to (x, y)
            verts = np.fliplr(verts) - 1
            p = Polygon(verts, facecolor="none", edgecolor=color)
            ax.add_patch(p)
    ax.imshow(masked_image.astype(np.uint8))
    if auto_show:
        plt.show()
These code snippets below are then called in the main as follows:
file_names = glob(os.path.join(IMAGE_DIR, "*.jpg"))
masks_prediction = np.zeros((510, 510, len(file_names)))
for i in range(len(file_names)):
    print(i)
    image = skimage.io.imread(file_names[i])
    predictions = model.detect([image], verbose=1)
    p = predictions[0]
    masks = p['masks']
    merged_mask = np.zeros((masks.shape[0], masks.shape[1]))
    for j in range(masks.shape[2]):
        merged_mask[masks[:,:,j]==True] = True
    masks_prediction[:,:,i] = merged_mask
print(masks_prediction.shape)
and:
file_names = glob(os.path.join(IMAGE_DIR, "*.jpg"))
class_names = ['BG', 'car', 'traffic_light', 'person']
test_image = skimage.io.imread(file_names[random.randint(0,len(file_names)-1)])
predictions = model.detect([test_image], verbose=1) # We are replicating the same image to fill up the batch_size
p = predictions[0]
visualize.display_instances(test_image, p['rois'], p['masks'], p['class_ids'],
                            class_names, p['scores'])
I know it's probably a trivial question and these values already exist in the code somewhere, but since I am a beginner, I could not extract the mask outlines or their centers. If there is a way to get this information per instance, that would be great.
Thanks in advance.
The following does it right:
masks = p['masks']
class_ids = p['class_ids']
rois = p['rois']
scores = p['scores']
bounding_box = rois[enumerator]  # 'enumerator' is the index of the instance of interest
as for the outline coordinates:
def getBoundaryPositions(im):
    class_ids = p['class_ids']  # for usage convenience
    im = im.astype(np.uint8)

    # Find contours (OpenCV 3.x returns three values here; OpenCV 4.x returns two):
    (im, contours, hierarchy) = cv2.findContours(im, cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_NONE)
    cnts = contours[0]
    outline_posesXY = np.array([x[0] for x in cnts])

    # Calculate image moments of the detected contour
    M = cv2.moments(contours[0])

    # collect pose points (for now only position, because we don't have pose) of the center
    positionXY = []
    positionXY.append(round(M['m10'] / M['m00']))
    positionXY.append(round(M['m01'] / M['m00']))

    return (im, positionXY, outline_posesXY)
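A short usage sketch tying the two snippets together, looping over the instances returned by model.detect. The loop variable and the print call are illustrative additions, not part of the original answer.

masks = p['masks']
rois = p['rois']
for i in range(masks.shape[-1]):       # one iteration per detected instance
    instance_mask = masks[:, :, i]     # binary map of this instance only
    bounding_box = rois[i]             # [y1, x1, y2, x2]
    _, center_xy, border_xy = getBoundaryPositions(instance_mask)
    print(bounding_box, center_xy, border_xy.shape)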
Context: I am performing object localisation and want to implement an Inhibition of Return mechanism (i.e. drawing a black cross on the image where the red bounding box is after a trigger action).
Problem: I do not know how to accurately scale the bounding box (red) in relation to the original input (init_input). If this scaling is understood, then the black cross should be accurately placed in the middle of the red bounding box.
My current code for this function is as follows:
def IoR(b, init_input, prev_coord):
    """
    Inhibition-of-Return mechanism.

    Marks the region of the image covered by
    the bounding box with a black cross.

    :param b:
        The current bounding box represented as [x1, y1, x2, y2].
    :param init_input:
        The initial input volume of the current episode.
    :param prev_coord:
        The previous state's bounding box coordinates (x1, y1, x2, y2)
    """
    x1, y1, x2, y2 = prev_coord

    width = 12
    x_mid = (b[2] + b[0]) // 2
    y_mid = (b[3] + b[1]) // 2

    # Define vertical rectangle coordinates
    ver_x1 = int(((x_mid) * IMG_SIZE / (x2 - x1)) - width)
    ver_x2 = int(((x_mid) * IMG_SIZE / (x2 - x1)) + width)
    ver_y1 = int((b[1]) * IMG_SIZE / (y2 - y1))
    ver_y2 = int((b[3]) * IMG_SIZE / (y2 - y1))

    # Define horizontal rectangle coordinates
    hor_x1 = int((b[0]) * IMG_SIZE / (x2 - x1))
    hor_x2 = int((b[2]) * IMG_SIZE / (x2 - x1))
    hor_y1 = int(((y_mid) * IMG_SIZE / (y2 - y1)) - width)
    hor_y2 = int(((y_mid) * IMG_SIZE / (y2 - y1)) + width)

    # Draw vertical rectangle
    cv2.rectangle(init_input, (ver_x1, ver_y1), (ver_x2, ver_y2), (0, 0, 0), -1)

    # Draw horizontal rectangle
    cv2.rectangle(init_input, (hor_x1, hor_y1), (hor_x2, hor_y2), (0, 0, 0), -1)
The desired effect can be seen below:
Note: I believe the complexity in this problem arises from the image being resized (to 224, 224, 3) each time I take an action (and consequently move on to the next state). Therefore, the "anchor" used to determine the scaling must be extracted from the previous state's scaling, which is shown in the following code:
def next_state(init_input, b_prime, g):
    """
    Returns the observable region of the next state.

    Formats the next state's observable region, defined
    by b_prime, to be of dimension (224, 224, 3). Adding 16
    additional pixels of context around the original bounding box.
    The ground truth box must be reformatted according to the
    new observable region.

    IMG_SIZE = 224

    :param init_input:
        The initial input volume of the current episode.
    :param b_prime:
        The subsequent state's bounding box.
    :param g: (init_g)
        The initial ground truth box of the target object.
    """
    # Determine the pixel coordinates of the observable region for the following state
    context_pixels = 16
    x1 = max(b_prime[0] - context_pixels, 0)
    y1 = max(b_prime[1] - context_pixels, 0)
    x2 = min(b_prime[2] + context_pixels, IMG_SIZE)
    y2 = min(b_prime[3] + context_pixels, IMG_SIZE)

    # Determine observable region
    observable_region = cv2.resize(init_input[y1:y2, x1:x2], (224, 224), interpolation=cv2.INTER_AREA)

    # Resize ground truth box
    g[0] = int((g[0] - x1) * IMG_SIZE / (x2 - x1))  # x1
    g[1] = int((g[1] - y1) * IMG_SIZE / (y2 - y1))  # y1
    g[2] = int((g[2] - x1) * IMG_SIZE / (x2 - x1))  # x2
    g[3] = int((g[3] - y1) * IMG_SIZE / (y2 - y1))  # y2

    return observable_region, g, (b_prime[0], b_prime[1], b_prime[2], b_prime[3])
Explanation:
There is a state t in which the agent is predicting the location of the target object. The target object has a ground truth box (yellow in image, dotted in sketch), and the agent's current "localising box" is the red bounding box. Say, at state t the agent decides it is best to move right. Consequently, the bounding box is moved to the right, and then the next state, t' is determined by adding an additional 16 pixels of context around the red bounding box, cropping the original image with respect to this boundary, and then upscaling the cropped image back to 224, 224 in dimensions.
Say the agent is now confident that its prediction is accurate, so it chooses the trigger action. This basically means, end the current target object's localisation episode and place a black cross on where the agent predicted the object was (i.e. in the middle of the red bounding box). Now, since the current state is zoomed in after being cropped following the previous action, the bounding box must be re-scaled with respect to the normal/original/initial image and then the black cross can be drawn accurately onto the image.
In the context of my problem, the first rescaling between states is working perfectly well (the second code in this post). However, scaling back to normal and drawing the black cross is what I cannot seem to get my head around.
Here is an image which hopefully helps the explanation:
Here is the output of my current solution (please click the image to zoom in):
I think it's better to save the coordinates globally instead of using a bunch of upscaling/downscaling. Those conversions give me a headache, and there might be a loss of precision due to rounding.
That is, every time you detect something, convert it to global (original image) coordinates first. I have written a small demo here, imitating your detection and trigger behavior.
Initial detection:
Zoomed in, another detection:
Zoomed in, another detection:
Zoomed in, another detection:
Zoomed back to original scale, with the detection box in the correct location
Code:
import cv2
import matplotlib.pyplot as plt

IMG_SIZE = 224

im = cv2.cvtColor(cv2.imread('lena.jpg'), cv2.COLOR_BGR2GRAY)
im = cv2.resize(im, (IMG_SIZE, IMG_SIZE))

# Your detector results
detected_region = [
    [(10, 20), (80, 100)],
    [(50, 0), (220, 190)],
    [(100, 143), (180, 200)],
    [(110, 45), (180, 150)]
]

# Global states
x_scale = 1.0
y_scale = 1.0
x_shift = 0
y_shift = 0

x1, y1 = 0, 0
x2, y2 = IMG_SIZE-1, IMG_SIZE-1

for region in detected_region:
    # Detection
    x_scale = IMG_SIZE / (x2-x1)
    y_scale = IMG_SIZE / (y2-y1)
    x_shift = x1
    y_shift = y1

    cur_im = cv2.resize(im[y1:y2, x1:x2], (IMG_SIZE, IMG_SIZE))

    # Assuming the detector return these results
    cv2.rectangle(cur_im, region[0], region[1], (255))
    plt.imshow(cur_im)
    plt.show()

    # Zooming in, using part of your code
    context_pixels = 16
    x1 = max(region[0][0] - context_pixels, 0) / x_scale + x_shift
    y1 = max(region[0][1] - context_pixels, 0) / y_scale + y_shift
    x2 = min(region[1][0] + context_pixels, IMG_SIZE) / x_scale + x_shift
    y2 = min(region[1][1] + context_pixels, IMG_SIZE) / y_scale + y_shift

    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

# Assuming the detector confirm its choice here
print('Confirmed detection: ', x1, y1, x2, y2)

# This time no padding
x1 = detected_region[-1][0][0] / x_scale + x_shift
y1 = detected_region[-1][0][1] / y_scale + y_shift
x2 = detected_region[-1][1][0] / x_scale + x_shift
y2 = detected_region[-1][1][1] / y_scale + y_shift
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

cv2.rectangle(im, (x1, y1), (x2, y2), (255, 0, 0))
plt.imshow(im)
plt.show()
This also prevents resizing on a resized image which might create more artifacts and worsen the detector's performance.
Imagine a point (x, y) in a 500x500 image. Let it be (100, 200).
After scaling the image to a different size, say 250x250, the correct way to scale the point is to just take the current coordinate and do new_coord = old_coord * NEW_SIZE / OLD_SIZE.
Thus, (100, 200) will be transformed to (50, 100).
If you replace your scaling based on x2 - x1 with this simpler rescaling formula, it should fix your problem.
Update: NEW_SIZE and OLD_SIZE may be different for the two coordinates depending on the shapes of the original and final images, if they are rectangular and not square.
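As a minimal sketch of that formula (the function and variable names are illustrative), with a separate factor per axis as the update notes:

def rescale_point(x, y, old_size, new_size):
    """Map a point from an old_size=(W, H) image to a new_size=(W, H) image."""
    return (x * new_size[0] / old_size[0],
            y * new_size[1] / old_size[1])

print(rescale_point(100, 200, (500, 500), (250, 250)))  # -> (50.0, 100.0)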
I have a transparent-background image with some non-transparent text.
I want to find the bounding box of each individual word in the text.
Here is the code that creates a transparent image, draws some text ("Hello World", for example), and after that applies an affine transform and thumbnails it.
from PIL import Image, ImageFont, ImageDraw, ImageOps
import numpy as np
fontcolor = (255,255,255)
fontsize = 180
# padding rate for setting the image size of font
fimg_padding = 1.1
# check code bbox padding rate
bbox_gap = fontsize * 0.05
# Rotation +- N degrees
# Choose a font type for output---
font = ImageFont.truetype('Fonts/Bebas.TTF', fontsize)
# the text is "Hello World"
code = "Hello world"
# Get the related info of font---
code_w, code_h = font.getsize(code)
# Setting the image size of font---
img_size = int((code_w) * fimg_padding)
# Create a RGBA image with transparent background
img = Image.new("RGBA", (img_size,img_size),(255,255,255,0))
d = ImageDraw.Draw(img)
# draw white text
code_x = (img_size-code_w)/2
code_y = (img_size-code_h)/2
d.text( ( code_x, code_y ), code, fontcolor, font=font)
# img.save('initial.png')
# Transform the image---
img = img_transform(img)
# crop image to the size equal to the bounding box of whole text
alpha = img.split()[-1]
img = img.crop(alpha.getbbox())
# resize the image
img.thumbnail((512,512), Image.ANTIALIAS)
# img.save('myimage.png')
# what I want is to find all the bounding box of each individual word
boxes=find_all_bbx(img)
Here is the code for the affine transform (provided here for those who want to experiment):
def find_coeffs(pa, pb):
    matrix = []
    for p1, p2 in zip(pa, pb):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1]*p1[0], -p2[1]*p1[1]])

    A = np.matrix(matrix, dtype=np.float)
    B = np.array(pb).reshape(8)

    res = np.dot(np.linalg.inv(A.T * A) * A.T, B)
    return np.array(res).reshape(8)

def rand_degree(st, en, gap):
    return (np.fix(np.random.random() * (en - st) * gap) + st)

def img_transform(img):
    width, height = img.size
    print img.size
    m = -0.5
    xshift = abs(m) * width
    new_width = width + int(round(xshift))
    img = img.transform((new_width, height), Image.AFFINE,
                        (1, m, -xshift if m > 0 else 0, 0, 1, 0), Image.BICUBIC)

    range_n = width * 0.2
    gap_n = 1

    x1 = rand_degree(0, range_n, gap_n)
    y1 = rand_degree(0, range_n, gap_n)
    x2 = rand_degree(width - range_n, width, gap_n)
    y2 = rand_degree(0, range_n, gap_n)
    x3 = rand_degree(width - range_n, width, gap_n)
    y3 = rand_degree(height - range_n, height, gap_n)
    x4 = rand_degree(0, range_n, gap_n)
    y4 = rand_degree(height - range_n, height, gap_n)

    coeffs = find_coeffs(
        [(x1, y1), (x2, y2), (x3, y3), (x4, y4)],
        [(0, 0), (width, 0), (new_width, height), (xshift, height)])

    img = img.transform((width, height), Image.PERSPECTIVE, coeffs, Image.BICUBIC)
    return img
How to implement find_all_bbx to find the bounding box of each individual word?
For example, one of the boxes can be found around the 'H' (you can download the image to see the partial result).
For what you want to do you need to label the individual words and then compute the bounding box of each object with the same label.
The most straightforward approach here is just taking the min and max positions of the pixels that make up each word.
The labeling is a little bit more difficult. For example, you could use a morphological operation to combine the letters of each word (morphological opening, see the PIL documentation) and then use ImageDraw.floodfill. Or you could try to anticipate the positions of the words from the position where you first draw the text (code_x and code_y) and the chosen font, the size of the letters, and the spacing (this will be trickier, I think).
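Here is a minimal sketch of that labeling idea using scipy.ndimage rather than PIL's morphology tools; the merge_width used to join letters into word blobs is an assumption you would tune to the font and spacing. It labels the connected blobs and takes the min/max positions of each word's original pixels as its box.

import numpy as np
from scipy import ndimage

def find_all_bbx(img, merge_width=25):
    # binary map of the non-transparent pixels, taken from the alpha channel
    alpha = np.array(img.split()[-1]) > 0

    # dilate horizontally so the letters of one word join into a single blob
    blobs = ndimage.binary_dilation(alpha, structure=np.ones((1, merge_width)))
    labels, n = ndimage.label(blobs)

    boxes = []
    for lab in range(1, n + 1):
        # min/max positions of the original (undilated) pixels of this word
        ys, xs = np.nonzero(alpha & (labels == lab))
        boxes.append((xs.min(), ys.min(), xs.max(), ys.max()))  # (x1, y1, x2, y2)
    return boxes

boxes = find_all_bbx(img)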