I have an array of tuples:
a = [(375, 193), (364, 113), (277, 20), (271, 16),
     (52, 106), (133, 266), (289, 296), (372, 282)]
How do I draw lines between these points in OpenCV?
Here is my code that isn't working:
for index, item in enumerate(a):
    print(item[index])
    # cv2.line(image, item[index], item[index + 1], [0, 255, 0], 2)
Using cv2.drawContours, you can draw the whole shape at once:
import cv2
import numpy as np

img = np.zeros([512, 512, 3], np.uint8)
a = np.array([(375, 193), (364, 113), (277, 20), (271, 16),
              (52, 106), (133, 266), (289, 296), (372, 282)], dtype=np.int32)
cv2.drawContours(img, [a], 0, (255, 255, 255), 2)
If you don't want the shape closed and want to continue the approach you started with:
image = np.zeros([512, 512, 3], np.uint8)
pointsInside = [(375, 193), (364, 113), (277, 20), (271, 16),
                (52, 106), (133, 266), (289, 296), (372, 282)]

for index, item in enumerate(pointsInside):
    if index == len(pointsInside) - 1:
        break
    cv2.line(image, item, pointsInside[index + 1], [0, 255, 0], 2)
Regarding your current code: you are indexing into the current point (item[index]) when you should be indexing into the list of points. Look up the next point in the original list, not inside the current tuple.
A more Pythonic way of doing the second version would be:
for point1, point2 in zip(a, a[1:]):
    cv2.line(image, point1, point2, [0, 255, 0], 2)
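If you also want the shape closed, as with drawContours, a small variation wraps around to the first point (this assumes a is the plain list of tuples from the question, not the NumPy array, since + concatenates lists):

for point1, point2 in zip(a, a[1:] + a[:1]):
    cv2.line(image, point1, point2, [0, 255, 0], 2)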
If you just want to draw lines, how about cv2.polylines? cv2.drawContours is better suited when you already have a contours object.
cv2.polylines(image,
              [a],  # expects a list of int32 point arrays, e.g. the np.array above
              isClosed=False,
              color=(0, 255, 0),
              thickness=3,
              lineType=cv2.LINE_AA)
Related
I have an object I need to track. The object moves up and back mostly in a straight line; however, it sometimes moves to the left. I need to track that leftward movement and convert the distance moved into a real measurement.
Image 1:
The object moves up and down, but wanders to the left.
Image 2:
I was thinking of finding an edge I want to track, then drawing a line from that edge to a reference point at a right angle. The line is measured in pixels and a real distance inferred from it.
My question is: how do I track that edge circle (in my case at (1049, 390)) as it moves up and back, while still keeping the measurement line at a right angle?
Note: in my case I've had to crop the image because the image quality is poor, with many grainy lines. The cv.cornerHarris() method picks up those grainy lines as edges/contours, of which there are many.
Image 3:
Am I on the right track or need a different approach?
Below is a modification of some code I found on Stack Overflow. I'm not concerned about the distance part, just tracking the movement of the selected edge.
import cv2 as cv
import numpy as np
from scipy.spatial import distance as dist
image = cv.imread('image_mask.jpg')
object_width_known_a = .1445
object_width_known_b = .0849
object_width_known_x = .0849
object_height = 1
img_shape_x = image.shape[1]
img_shape_y = image.shape[0]
x_centre = round(img_shape_x/2)
y_centre = round(img_shape_y/2)
# Find Corners
def find_centroids(dst):
    ret, dst = cv.threshold(dst, 0.02 * dst.max(), 255, 0)
    dst = np.uint8(dst)
    # find centroids
    ret, labels, stats, centroids = cv.connectedComponentsWithStats(dst)
    # define the criteria to stop and refine the corners
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 1)
    # refine using the global gray image defined below
    corners = cv.cornerSubPix(gray, np.float32(centroids[1:]), (5, 5), (-1, -1), criteria)
    return corners
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# gray = cv.GaussianBlur(gray, (3, 3), 1)  # optional Gaussian blur with a 3 x 3 kernel
gray = np.float32(gray)
dst = cv.cornerHarris(gray, 4, 3, 0.065)
# Get coordinates of the corners.
corners = find_centroids(dst)
for i in range(10, len(corners)):
    print("Pixels found for this object are:", corners[i])
    image[dst > 0.1 * dst.max()] = [0, 0, 255]
    cv.circle(image, (int(corners[i, 0]), int(corners[i, 1])), 5, (0, 255, 0), 1)
for corner in corners:
    image[int(corner[1]), int(corner[0])] = [0, 0, 255]
line_x = cv.line(image, (x_centre, 0), (x_centre, x_centre), (255, 0, 0), 1)
line_y = cv.line(image, (1049, 390), (x_centre, 390), (255, 0, 0), 1)
line_ang = cv.line(image, (1049, 390), (x_centre, x_centre), (0, 0, 255), 1)
d = (dist.euclidean((1049, 390), (x_centre, x_centre)))
y_px_dist = (dist.euclidean((1049, 390), (x_centre, 390)))
print(y_px_dist)
dist_to_edge = d / object_width_known_x
print(dist_to_edge, 'm')
a = (dist.euclidean((657, 540), (720, 540)))
b = (dist.euclidean((650, 400), (687, 400)))
x = (dist.euclidean((1049, 390), (x_centre, 390)))
object_width_a = a
object_width_b = b
object_width_x = x
pixels_per_metric_a = object_width_a / object_width_known_a # y pixels per 500mm
pixels_per_metric_b = object_width_b / object_width_known_b
pixels_per_metric_x = object_width_x / object_width_known_x
print(a, '=', pixels_per_metric_a, 'mm')
print(b, '=', pixels_per_metric_b, 'mm')
print(x, '=', pixels_per_metric_x, 'mm')
a = len(corners)
print("Number of corners found:", a)
# List to store pixel difference.
distance_pixel = []
# List to store mm distance.
distance_mm = []
P1 = corners[0]
P2 = corners[1]
P3 = corners[2]
P4 = corners[3]
P1P2 = cv.norm(P2 - P1)
P1P3 = cv.norm(P3 - P1)
P2P4 = cv.norm(P4 - P2)
P3P4 = cv.norm(P4 - P3)
pixelsPerMetric_width1 = P1P2 / object_width_a
pixelsPerMetric_width2 = P3P4 / object_width_a
pixelsPerMetric_height1 = P1P3 / object_height
pixelsPerMetric_height2 = P2P4 / object_height
# Average of PixelsPerMetric
pixelsPerMetric_avg = pixelsPerMetric_width1 + pixelsPerMetric_width2 + pixelsPerMetric_height1 + pixelsPerMetric_height2
pixelsPerMetric = pixelsPerMetric_avg / 4
# print(pixelsPerMetric)
P1P2_mm = P1P2 / pixelsPerMetric
P1P3_mm = P1P3 / pixelsPerMetric
P2P4_mm = P2P4 / pixelsPerMetric
P3P4_mm = P3P4 / pixelsPerMetric
distance_mm.append(P1P2_mm)
distance_mm.append(P1P3_mm)
distance_mm.append(P2P4_mm)
distance_mm.append(P3P4_mm)
distance_pixel.append(P1P2)
distance_pixel.append(P1P3)
distance_pixel.append(P2P4)
distance_pixel.append(P3P4)
# print(distance_pixel)
# print(distance_mm)
cv.imshow('image', image)
cv.waitKey(0)
EDIT -------------------------------
I've edited the question with another image. I've overlaid the image with the coordinates of the same spot (done manually; the leftmost image is the same as the 2nd from the left). As you can see, the coordinates change in each image as the object moves. I was thinking of adding an ID to the cornerHarris() corner coordinates, but then the question becomes: how do I find the coordinates of that corner after it moves?
Image 4:
Code for the new image:
image = cv.imread('./image_masks/image_mask.jpg')
object_height = 1
img_shape_x = image.shape[1]
img_shape_y = image.shape[0]
x_centre = round(img_shape_x / 2)
y_centre = round(img_shape_y / 2)
gray_img = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
corners = cv.goodFeaturesToTrack(gray_img, 50, 0.06, 10)
# kernel = np.ones((7,7), np.uint8)
# corners = cv.dilate(corners, kernel, iterations=2)
corners = np.int0(corners)
for i in corners:
    x, y = i.ravel()
    cv.circle(image, (x, y), 3, (255, 0, 0), -1)
    print(x, y)
line_x = cv.line(image, (x_centre, 0), (x_centre, x_centre), (255, 0, 0), 1)
line_y2 = cv.line(image, (1028, 490), (x_centre, 390), (0, 255, 255), 1)
d2 = dist.euclidean((1080, 593), (x_centre, x_centre))
line_y5 = cv.line(image, (1050, 540), (x_centre, 390), (0, 255, 255), 1)
d5 = dist.euclidean((1050, 540), (x_centre, x_centre))
line_y3 = cv.line(image, (1071, 476), (x_centre, 390), (0, 255, 255), 1)
d3 = dist.euclidean((1071, 476), (x_centre, x_centre))
line_y6 = cv.line(image, (1071, 451), (x_centre, 390), (0, 255, 255), 1)
d6 = dist.euclidean((1071, 451), (x_centre, x_centre))
line_y4 = cv.line(image, (1047, 405), (x_centre, 390), (0, 255, 255), 1)
d4 = dist.euclidean((1047, 405), (x_centre, x_centre))
line_y = cv.line(image, (x_centre, 0), (x_centre, x_centre), (255, 0, 0), 1)
#print(d2, d5, d3, d6, d4)
cv.imshow('image', image)
cv.waitKey(0)
All the internet object-tracking solutions assume that the object being tracked is very different from everything else in the frame, e.g. a bright red dot on a piece of paper. They rely on this to find the largest contours, a reference point, or a reference object. Measuring distances between objects uses similar techniques. This assumption is rarely mentioned (pyimagesearch.com does mention, near the end of a blog post, that if the contours are too numerous or not distinct, the technique will not work). Real-world problems don't have bright red dots on a piece of paper for a reference.
The centroid tracking method, using indexing, only works (I found) if the centroid is very close to its position in the previous frame, i.e. video. In my situation, using still images that may differ slightly from the previous one, the centroid technique did not work.
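For what it's worth, here is a minimal sketch of that nearest-centroid matching idea (the corner arrays are hypothetical; it only works when the movement between images is small, which is exactly the limitation just described):

import numpy as np
from scipy.spatial import distance as dist

# Hypothetical corner sets from two consecutive images (N x 2 arrays).
prev_corners = np.array([[1049, 390], [657, 540], [720, 540]], dtype=float)
curr_corners = np.array([[1047, 405], [655, 541], [722, 538]], dtype=float)

# Track one corner by taking the closest corner in the new image.
tracked = prev_corners[0]
d = dist.cdist([tracked], curr_corners)[0]
nearest = curr_corners[np.argmin(d)]
print('corner moved from', tracked, 'to', nearest)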
In my case, the solution was to use a combination of masking, thresholding and contours. This limited the number of contours found and framed the problem.
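As a rough illustration of that combination (a sketch only; the ROI rectangle, threshold value and filename are assumptions, not values from my actual setup):

import cv2 as cv
import numpy as np

image = cv.imread('image_mask.jpg')                   # filename assumed
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

# Mask out everything except the region the object moves through,
# so the grainy background cannot produce spurious contours.
mask = np.zeros(gray.shape, np.uint8)
cv.rectangle(mask, (900, 300), (1200, 650), 255, -1)  # ROI is an assumption
gray = cv.bitwise_and(gray, gray, mask=mask)

# Threshold to a clean binary image, then find the remaining contours
# (OpenCV 4.x return signature).
_, binary = cv.threshold(gray, 127, 255, cv.THRESH_BINARY)  # value assumed
contours, _ = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

# With only a few contours left, take the largest and use its centroid
# as the tracked point for the distance measurement.
if contours:
    c = max(contours, key=cv.contourArea)
    M = cv.moments(c)
    if M['m00'] != 0:
        cx, cy = int(M['m10'] / M['m00']), int(M['m01'] / M['m00'])
        print('tracked point:', (cx, cy))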
The lesson (and for anyone else): experimentation is the key. Experiment with as many techniques as possible; the solution will be one technique, or a combination of techniques, specific to your problem. Internet examples often show a perfect scenario and generally do not transfer directly to real-world problems.
Experiment!
I want to create a 2D array like the one below, where every tuple contains three numbers representing a color in the RGB system:
[
(0,0,0) (0,0,1) (0,0,2) (0,0,3) (0,0,4) (0,0,5) (0,0,6) (0,0,7)
(0,0,8) (0,0,9) (0,0,10) (0,0,11) (0,0,12) (0,0,13) (0,0,14) (0,0,15)
(0,0,16) (0,0,17) (0,0,18) (0,0,19) (0,0,20) (0,0,21) (0,0,22) (0,0,23)
.....
(250,250,242) (250,250,243) (250,250,244) .... (250,250,250)
]
and the same for common RGB numbers; this example array is 8x8. I also want to give each tuple a key that refers to the name of its color, for example (0,0,0) => black.
I tried this:
arr2 = np.array([(i, i, i) for i in range(250)],
                dtype=[('Red', 'i2'), ('Green', 'i2'), ('Blue', 'i2')])
print(arr2)
but that did not work!
You could use a list comprehension:
>>> colors = [(r, g, b) for r in range(256) for g in range(256) for b in range(256)]
>>> colors[:10] # First 10 colors
[(0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3), (0, 0, 4), (0, 0, 5), (0, 0, 6), (0, 0, 7), (0, 0, 8), (0, 0, 9)]
>>> colors[-10:] # Last 10 colors
[(255, 255, 246), (255, 255, 247), (255, 255, 248), (255, 255, 249), (255, 255, 250), (255, 255, 251), (255, 255, 252), (255, 255, 253), (255, 255, 254), (255, 255, 255)]
>>> len(colors) # Numbers of colors in RGB
16777216
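If you specifically want a NumPy array rather than a list, here is a sketch of one way to build the same table (note the full table has 256^3 rows and takes roughly 50 MB):

import numpy as np

# All 16,777,216 RGB triples as a (256**3, 3) uint8 array, in the same
# (0,0,0), (0,0,1), ... order as the list above.
channel = np.arange(256, dtype=np.uint8)
r, g, b = np.meshgrid(channel, channel, channel, indexing='ij')
colors = np.stack([r, g, b], axis=-1).reshape(-1, 3)

print(colors[:3])   # [[0 0 0] [0 0 1] [0 0 2]]
print(colors[-1])   # [255 255 255]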
You could try using a dictionary (named color_names here so it doesn't shadow the built-in dict):
color_names = {(0, 0, 0): "black", ... }
You could then look up a color name by its RGB tuple like this:
print(color_names[(0, 0, 0)])
I'm using the cv2.matchShapes() function in OpenCV to find the shape in an image most similar to another shape.
It's giving me some weird results; e.g., when I match the shape of a circular coin against the shape of a sweater, it returns 0.09, a close score, and a better score than when it matches against an actual coin.
This is my code:
for contour in cnts:
    box = bounding_box(contour)
    orig = image.copy()
    cv2.drawContours(orig, [contour, reference_contour], -1, (0, 0, 255), 2)
    cv2.putText(
        orig,
        "SIMILARITY: {0:.4f}".format(
            cv2.matchShapes(contour, reference_contour, 1, 0.0)),
        (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 0, 0), 2)
    cv2.imshow("coin_metric_cnn.py", orig)
    cv2.waitKey(0)
Am I doing something wrong?
I am using the semantic segmentation network SegNet. I am trying to reduce the number of classes and have thus rearranged the network.
Therefore, I am also changing the color coding of the predictions. My problem is that I don't get the intended colors in the output image.
For example:
pascal_palette = np.array([(0, 0, 0),
                           (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
                           (0, 0, 128), (0, 128, 0), (0, 0, 0), (0, 0, 0), (128, 0, 0),
                           (0, 0, 0), (0, 0, 0)], dtype=np.uint8)
The above palette gives perfect results for the three classes, as each color has a value in only one channel.
The output is as below:
However, if I modify the palette and put values in more than one channel, it gives weird output. The output is attached below:
pascal_palette = np.array([(0, 0, 0),
                           (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0),
                           (0, 0, 128), (124, 252, 0), (0, 0, 0), (0, 0, 0), (128, 0, 0),
                           (0, 0, 0), (0, 0, 0)], dtype=np.uint8)
I changed the color code to (124, 252, 0), which should be lawn green. I also checked it on a website with RGB codes.
What am I missing here? Any explanation will be helpful.
Prediction code:
prob = model.predict(net_in)[0]

# Reshape to 2D here since the network outputs a flat array per channel
prob_edge = np.sqrt(prob.shape[0]).astype(int)
prob = prob.reshape((prob_edge, prob_edge, 13))

# Upsample
if args.zoom > 1:
    prob = interp_map(prob, args.zoom, image_size[1], image_size[0])

# Recover the most likely prediction (actual segment class)
prediction = np.argmax(prob, axis=2)

# Apply the color palette to the segmented image
color_image = np.array(pascal_palette)[prediction.ravel()].reshape(
    prediction.shape + (3,))

print('Saving results to: ', args.output_path)
with open(args.output_path, 'wb') as out_file:
    Image.fromarray(np.multiply(color_image, 255)).save(out_file)
PS: I have used the same model for predictions in both cases.
The problem is very probably in np.multiply(color_image, 255).
Since you created the palette with values from 0 to 255 and you're simply gathering values from that palette, you don't need to multiply by 255.
Simply use Image.fromarray(color_image).save(out_file).
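To see why the multiply corrupts the colors: the palette is uint8, so multiplying by 255 wraps around modulo 256. A minimal demonstration:

import numpy as np

c = np.array([124, 252, 0], dtype=np.uint8)  # lawn green
print(c * 255)  # [132   4   0] -- wrapped values, nothing like lawn green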
How do you change the color of just those pixels in an image whose colors are not in a predefined list?
I tried something like this:
from PIL import Image

picture = Image.open("// location")
imshow(picture)

_colors = [[0, 128, 0], [128, 128, 0], [128, 128, 128], [192, 128, 0],
           [128, 64, 0], [0, 192, 0], [128, 64, 128], [0, 0, 0]]
width, height = picture.size

for x in range(0, width - 1):
    for y in range(0, height - 1):
        current_color = picture.getpixel((x, y))
        if current_color != _colors[0] and current_color != _colors[1] and \
           current_color != _colors[2] and current_color != _colors[3] and \
           current_color != _colors[4] and current_color != _colors[5] and \
           current_color != _colors[6] and current_color != _colors[7]:
            picture.putpixel((x, y), (0, 0, 0))

imshow(picture)
I want to make just some pixels black, but somehow this returns an entirely black image.
This line:
if current_color != _colors[0] and current_color != _colors[1] and \
   current_color != _colors[2] and current_color != _colors[3] and \
   current_color != _colors[4] and current_color != _colors[5] and \
   current_color != _colors[6] and current_color != _colors[7]:
always returns True, so you iterate over the whole picture, changing it to black. getpixel returns a tuple:
>>> print(picture.getpixel((1, 1)))
(79, 208, 248)
and you compare it to a list ([0, 128, 0]). They are not the same:
>>> (1, 2, 3) == [1, 2, 3]
False
Change _colors to a list of tuples rather than a list of lists.
Keep the type of the pixel data the same, and shorten that if statement with a not in test:
from PIL import Image

filename = "name.jpg"
picture = Image.open(filename, 'r')
_colors = [(0, 128, 0), (128, 128, 0), (128, 128, 128), (192, 128, 0),
           (128, 64, 0), (0, 192, 0), (128, 64, 128), (0, 0, 0)]
width, height = picture.size

for x in range(0, width):
    for y in range(0, height):
        current_color = picture.getpixel((x, y))
        # blacken pixels whose color is NOT in the list, as the question asks
        if current_color not in _colors:
            picture.putpixel((x, y), (0, 0, 0))

picture.show()
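As a side note, getpixel/putpixel in a double loop is slow on large images. A vectorized NumPy variant of the same test might look like this (a sketch, reusing filename and _colors from the snippet above):

import numpy as np
from PIL import Image

picture = Image.open(filename).convert('RGB')
pixels = np.array(picture)                   # shape (H, W, 3), dtype uint8

# Boolean mask: True where a pixel's color matches any entry in _colors.
palette = np.array(_colors, dtype=np.uint8)  # shape (8, 3)
match = (pixels[:, :, None, :] == palette[None, None, :, :]).all(-1).any(-1)

# Blacken the pixels NOT in the list (use pixels[match] for the inverse).
pixels[~match] = (0, 0, 0)
Image.fromarray(pixels).show()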