I am trying to determine if this object is upward or downward.
Object in question: (shot with phone)
Problem: I cannot determine whether the part is right side up or upside down.
The ones with the back light are cropped using cv2.minAreaRect so this is the true resolution of what the camera sees.
Work area:
Camera resolution: 2592 (H) × 1944 (V)
Camera is 15-18 inches above the tray (can be moved)
2 Trays side by side both 6x9 inches with a
back light
<<< This image has no local lighting and the hump is down. This finds contours, XY, and rotation.
<<< In this image I added local lighting and the hump is down. The idea was to threshold the image and detect the ridge, but since the local lighting is fixed, the orientation affects the light reaching the ridge, leading to inconsistent data.
<<< This was an image taken with no back light and with my phone. Hump is down (reference image).
<<< This is the same but the hump is up and only a back light.
<<< Hump is up back light and local light
<<< Hump is up (reference image)
There is no general solution, but if you apply a Hough Circle Transform to detect the circle position and compare that to the middle of the image, you might have a special-case solution for this scenario:
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.color import rgb2gray, gray2rgb
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
# Read image
img = imread('21.png')
# RGB to Gray
raw = rgb2gray(img)
# Edge detector (default Canny parameters)
edges = canny(raw)
# Candidate radii (in pixels) for the Hough circle transform
hough_radii = np.arange(5, 25, 2)
hough_res = hough_circle(edges, hough_radii)
# Keep only the single most prominent circle
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii,
                                           total_num_peaks=1)
# Check whether shape is up or down: compare the circle centre row with the
# image middle row.  NOTE(review): cy is a length-1 array here; the `if`
# relies on single-element array truthiness.
if(cy > raw.shape[0]//2):
    pos = "shape is down"
else:
    pos = "shape is up"
# Draw them
fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(10, 4))
image = gray2rgb(raw)
for center_y, center_x, radius in zip(cy, cx, radii):
    circy, circx = circle_perimeter(center_y, center_x, radius,
                                    shape=image.shape)
    # NOTE(review): `raw` is a float image in [0, 1]; writing 220 exceeds that
    # range and may display clipped -- confirm the intended overlay colour.
    image[circy, circx] = (220, 20, 20)
# Paint the middle row as the up/down reference line
image[image.shape[0]//2,...] = (255,0,0)
ax[0].imshow(img)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].imshow(image)
ax[1].set_title(pos)
ax[1].axis('off')
plt.show()
Related
I have 20 small images (each 13x12) that I want to put in a target area of a background image. I have already marked my target area with a circle, and I have the coordinates of the circle in two arrays of pixels. Now I want to know how I can randomly place my 20 small images at random positions within those arrays of pixels, which are basically the target area (the drawn circle).
In my code, I was trying for just one image, if it works, I'll pass the folder of my 20 small images.
# Depencies importation
import cv2
import numpy as np
# Saving directory
saving_dir = "../Saved_Images/"

# Read the background image and resize it.
bgimg = cv2.imread("../Images/background.jpg")
bgimg_resized = cv2.resize(bgimg, (2050,2050))
# NOTE(review): the resized copy is never used below -- the centroid and all
# drawing use the original `bgimg`; confirm which one is intended.

# Read the image that will be put in the background image (example of 1)
small_img = cv2.imread("../Images/small.jpg")

# Grayscale -> binary threshold, then image moments give the centroid.
bgimg_gray = cv2.cvtColor(bgimg, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(bgimg_gray,127,255,0)
M = cv2.moments(thresh)
# Centroid (x, y) from the first-order moments.
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])

# Draw the target circle in place on the background image
# (no need to keep the return value of cv2.circle).
cv2.circle(bgimg, (cX, cY), 930, (0,0,255), 9)

# Getting the coordinates of the circle.
# NOTE(review): every uint8 pixel satisfies >= 0, so this selects ALL pixels
# of the image, not just the drawn circle; threshold on the red channel
# instead if only the circle outline is wanted.
combined = bgimg[:,:,0] + bgimg[:,:,1] + bgimg[:,:,2]
rows, cols = np.where(combined >= 0)

# Saving the new image
cv2.imwrite(saving_dir+"bgimg"+".jpg", bgimg)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# BUG FIX: resizeWindow must target the window created above ('image'),
# not a non-existent window named "Test".
cv2.resizeWindow('image', 1000, 1200)
# Showing the images
cv2.imshow("image", bgimg)
# Waiting for any key to stop the program execution
cv2.waitKey(0)
In the expected results, the small images must be placed in the background image randomly.
If you have the center and the radius of your circle, you can easily generate random coordinates by randomly choosing an angle theta from [0, 2*pi], calculating corresponding x and y values by cos(theta) and sin(theta) and scaling these by some random chosen scaling factors from [0, radius]. I prepared some code for you, see below.
I omitted a lot of code from yours (reading, preprocessing, saving) to focus on the relevant parts (see how to create a minimal, complete, and verifiable example). Hopefully, you can integrate the main idea of my solution into your code on your own. If not, I will provide further explanations.
import cv2
import numpy as np
# (Artificial) Background image (instead of reading an actual image...)
bgimg = 128 * np.ones((401, 401, 3), np.uint8)
# Circle parameters (obtained somehow...)
center = (200, 200)
radius = 100
# Draw circle in background image
cv2.circle(bgimg, center, radius, (0, 0, 255), 3)
# Shape of small image (known before-hand...?)
(w, h) = (13, 12)
for k in range(200):
    # (Artificial) Small image (instead of reading an actual image...)
    smallimg = np.uint8(np.add(128 * np.random.rand(w, h, 3), (127, 127, 127)))
    # Select random angle theta from [0, 2*pi]
    theta = 2 * np.pi * np.random.rand()
    # Select random distance factors from center; the half-size margin keeps
    # the pasted patch from crossing the circle boundary
    factX = (radius - w/2) * np.random.rand()
    factY = (radius - h/2) * np.random.rand()
    # Calculate random coordinates for small image from angle and distance factors
    # NOTE(review): x is used below as the ROW index, so x/y here are swapped
    # relative to the usual image convention -- this works because the circle
    # and margins are symmetric, but confirm before generalizing.
    (x, y) = np.uint16(np.add((np.cos(theta) * factX - w/2, np.sin(theta) * factY - h/2), center))
    # Replace (rather than "add") determined area in background image with small image
    bgimg[x:x+smallimg.shape[0], y:y+smallimg.shape[1]] = smallimg
cv2.imshow("bgimg", bgimg)
cv2.waitKey(0)
The exemplary output:
Caveat: I haven't paid attention, if the small images might violate the circle boundary. Therefore, some additional checks or limitations to the scaling factors must be added.
EDIT: I edited my above code. To take the below comment into account, I shift the small image by (width/2, height/2), and limit the radius scale factor accordingly, so that the circle boundary isn't violated, neither top/left nor bottom/right.
Before, it was possible, that the boundary is violated in the bottom/right part (n = 200):
After the edit, this should be prevented (n = 20000):
The touching of the red line in the image is due to the line's thickness. For "safety reasons", one could add another 1 pixel distance.
I am working on the recognition of the center and the image rendering. I'm using cv2.findContours to delimit the edges of the image of interest, and cv2.minEnclosingCircle(cnt) to circumscribe my region of interest. With the code below I can identify the center of each ROI, but I am not able to mark in the output image the circle corresponding to the region I want to measure, and I also want to mark with a small point the exact location where the algorithm identified the center.
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText
# Load the pre-thresholded lesion image as single-channel grayscale.
thresh = cv2.imread('IMD044.png',0)
_, contours,hierarchy = cv2.findContours(thresh,2,1)
# BUG FIX: convert to 3 channels AFTER contour detection so the green
# circles drawn below are actually visible (drawing (0,255,0) on a
# single-channel image shows up as a gray value, not green).
thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB)
print (len(contours))
cnt = contours
for i in range (len(cnt)):
    # Smallest circle that fully encloses contour i.
    (x,y),radius = cv2.minEnclosingCircle(cnt[i])
    center = (int(x),int(y))
    radius = int(radius)
    cv2.circle(thresh,center,radius,(0,255,0),2)
    print ('Circle: ' + str(i) + ' - Center: ' + str(center) + ' - Radius: ' + str(radius))
    # Mark the detected centre and annotate centre/diameter on the plot.
    plt.text(x-15, y+10, '+', fontsize=25, color = 'red')
    plt.text(10, -10, 'Centro: '+str(center), fontsize=11, color = 'red')
    plt.text(340, -10, 'Diametro: '+str((radius*2)/100)+'mm', fontsize=11, color = 'red')
    # BUG FIX: plt.Circle takes a (x, y) centre tuple plus a radius, and the
    # patch must be added to the axes, otherwise it is never drawn.
    plt.gca().add_patch(plt.Circle((x, y), radius, color='red', fill=False))
plt.imshow(thresh, cmap='gray')
plt.show()
I used the Opencv documentation to demarcate the contours and get the regions, but the green circle mark does not appear.
Exit:
Exit expected:
updating the question I was able to add the information, it's only necessary to add the circle and check if the diameter is correct.
You are using a single channel image, but trying to show 3-channel color. Add the following:
...
# Find contours on the single-channel binary image first...
thresh = cv2.imread('IMD016.png',0)
_, contours,hierarchy = cv2.findContours(thresh,2,1)
# ...then convert to 3 channels so coloured drawing is visible.
thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB)
print (len(contours))
...
Also, be extra careful when mixing the OpenCV and PyPlot drawing routines. OpenCV uses BGR by default, while PyPlot uses RGB. Also, cv2 drawing routines don't add extra channels, while the plt does. Just need to keep that in mind
I have a binary black and white images that looks like this
I want to fill in those white circles so they become solid white disks. How can I do this in Python, preferably using skimage?
You can detect circles with skimage's methods hough_circle and hough_circle_peaks and then draw over them to "fill" them.
In the following example most of the code is doing "hierarchy" computation for the best fitting circles to avoid drawing circles which are one inside another:
# skimage version 0.14.0
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import color
from skimage.io import imread
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle
from skimage.util import img_as_ubyte
# --- Tunable parameters for circle detection and the drawing hierarchy ---
INPUT_IMAGE = 'circles.png' # input image name
BEST_COUNT = 6 # how many circles to draw
MIN_RADIUS = 20 # min radius should be bigger than noise
MAX_RADIUS = 60 # max radius of circles to be detected (in pixels)
LARGER_THRESH = 1.2 # circle is considered significantly larger than another one if its radius is at least so much bigger
OVERLAP_THRESH = 0.1 # circles are considered overlapping if this part of the smaller circle is overlapping
def circle_overlap_percent(centers_distance, radius1, radius2):
    '''
    Return the fraction of the SMALLER circle's area covered by the overlap.

    centers_distance -- Euclidean distance between the two circle centres
    radius1, radius2 -- the two radii (order does not matter)

    See Gist for a commented derivation:
    https://gist.github.com/amakukha/5019bfd4694304d85c617df0ca123854
    '''
    R, r = max(radius1, radius2), min(radius1, radius2)
    if centers_distance >= R + r:
        # Disjoint circles: no overlap.
        return 0.0
    elif R >= centers_distance + r:
        # Smaller circle entirely inside the larger one.
        return 1.0
    R2, r2 = R**2, r**2
    # Distance from the LARGER circle's centre to the common chord.
    # BUG FIX: the original computed the small-circle distance here, which
    # made the half-chord length below wrong for R != r, and its obtuse-case
    # branch referenced `a2` before assignment (UnboundLocalError).
    x1 = (centers_distance**2 - r2 + R2) / (2 * centers_distance)
    # Signed distance from the smaller circle's centre to the chord;
    # negative when the overlap covers more than half of the small circle.
    x2 = centers_distance - x1
    # Half-length of the common chord (x1**2 + y**2 == R2 and x2**2 + y**2 == r2).
    y = math.sqrt(R2 - x1**2)
    # Circular-segment areas; atan2 with a signed x handles the obtuse case,
    # so no special branch is needed.
    a1 = R2 * math.atan2(y, x1) - x1 * y
    a2 = r2 * math.atan2(y, x2) - x2 * y
    overlap_area = a1 + a2
    return overlap_area / (math.pi * r2)
def circle_overlap(c1, c2):
    '''Overlap fraction for two circles given as (y, x, radius) triples.'''
    center_dist = math.hypot(c1[0] - c2[0], c1[1] - c2[1])
    return circle_overlap_percent(center_dist, c1[2], c2[2])
def inner_circle(cs, c, thresh):
    '''Return True when circle `c` counts as "inside" one of the `cs` circles.'''
    for existing in cs:
        # A circle substantially larger than `existing` cannot be inside it.
        if c[2] > existing[2] * LARGER_THRESH:
            continue
        # Not significantly larger, and mostly covered -> it is an inner circle.
        if circle_overlap(existing, c) > thresh:
            return True
    return False
# Load picture and detect edges
image = imread(INPUT_IMAGE, 1)
image = img_as_ubyte(image)
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)
# Detect circles of specific radii
hough_radii = np.arange(MIN_RADIUS, MAX_RADIUS, 2)
hough_res = hough_circle(edges, hough_radii)
# Select the most prominent circles (in order from best to worst)
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii)
# Determine BEST_COUNT circles to be drawn
drawn_circles = []
for crcl in zip(cy, cx, radii):
    # Do not draw circles if they are mostly inside better fitting ones
    if not inner_circle(drawn_circles, crcl, OVERLAP_THRESH):
        # A good circle found: exclude smaller circles it covers
        i = 0
        while i<len(drawn_circles):
            if circle_overlap(crcl, drawn_circles[i]) > OVERLAP_THRESH:
                t = drawn_circles.pop(i)
            else:
                i += 1
        # Remember the new circle
        drawn_circles.append(crcl)
    # Stop after have found more circles than needed
    if len(drawn_circles)>BEST_COUNT:
        break
drawn_circles = drawn_circles[:BEST_COUNT]
# Actually draw circles, cycling through six colours
colors = [(250, 0, 0), (0, 250, 0), (0, 0, 250)]
colors += [(200, 200, 0), (0, 200, 200), (200, 0, 200)]
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 4))
image = color.gray2rgb(image)
for center_y, center_x, radius in drawn_circles:
    circy, circx = circle(center_y, center_x, radius, image.shape)
    # NOTE(review): this rebinds `color`, shadowing the skimage.color module
    # imported above; harmless here only because gray2rgb was already called.
    color = colors.pop(0)
    image[circy, circx] = color
    colors.append(color)
ax.imshow(image, cmap=plt.cm.gray)
plt.show()
Result:
Do a morphological closing (explanation) to fill those tiny gaps, to complete the circles. Then fill the resulting binary image.
Code :
from skimage import io
from skimage.morphology import binary_closing, disk
import scipy.ndimage as nd
import matplotlib.pyplot as plt
# Read image, binarize using the green channel.
I = io.imread("FillHoles.png")
bwI =I[:,:,1] > 0
fig=plt.figure(figsize=(24, 8))
# Original image
fig.add_subplot(1,3,1)
plt.imshow(bwI, cmap='gray')
# Dilate -> Erode. You might not want to use a disk in this case,
# more asymmetric structuring elements might work better
strel = disk(4)
I_closed = binary_closing(bwI, strel)
# Closed image
fig.add_subplot(1,3,2)
plt.imshow(I_closed, cmap='gray')
# FIX: scipy.ndimage.morphology is a deprecated namespace (removed in
# SciPy 2.0); call binary_fill_holes directly from scipy.ndimage.
I_closed_filled = nd.binary_fill_holes(I_closed)
# Filled image
fig.add_subplot(1,3,3)
plt.imshow(I_closed_filled, cmap='gray')
Result :
Note how the segmentation trash has melded to your object on the lower right and the small cape on the lower part of the middle object has been closed. You might want to continue with a morphological erosion or opening after this.
EDIT: Long response to comments below
The disk(4) was just the example I used to produce the results seen in the image. You will need to find a suitable value yourself. Too big of a value will lead to small objects being melded into bigger objects near them, like on the right side cluster in the image. It will also close gaps between objects, whether you want it or not. Too small of a value will lead to the algorithm failing to complete the circles, so the filling operation will then fail.
Morphological erosion will erase a structuring element sized zone from the borders of the objects. Morphological opening is the inverse operation of closing, so instead of dilate->erode it will do erode->dilate. The net effect of opening is that all objects and capes smaller than the structuring element will vanish. If you do it after filling then the large objects will stay relatively the same. Ideally it should remove a lot of the segmentation artifacts caused by the morphological closing I used in the code example, which might or might not be pertinent to you based on your application.
I don't know skimage but if you'd use OpenCv, I would do a Hough transform for circles, and then just draw them over.
Hough Transform is robust, if there are some small holes in the circles that is no problem.
Something like:
# NOTE(review): snippet assumes `gray` (grayscale input), `image` and
# `output` (colour copies) are defined by surrounding code.
# cv2.cv.CV_HOUGH_GRADIENT is the OpenCV 2.x constant; on OpenCV 3+ use
# cv2.HOUGH_GRADIENT instead.
circles = cv2.HoughCircles(gray, cv2.cv.CV_HOUGH_GRADIENT, 1.2, 100)
# ensure at least some circles were found
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circles = np.round(circles[0, :]).astype("int")
    # loop over the (x, y) coordinates and radius of the circles
    # you can check size etc here.
    for (x, y, r) in circles:
        # draw the circle in the output image
        # you can fill here.
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
    # show the output image
    cv2.imshow("output", np.hstack([image, output]))
    cv2.waitKey(0)
See more info here: https://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/
I have an image like this:
When I try to use any of the corner detection algorithms I get corners like this:
However, I want the corners of the rectangle.
How can I get rid of those corners that I do not want?
and here is my code
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load the cropped part image and convert it to grayscale for detection.
img = cv2.imread("/home/mkmeral/Desktop/opencv/cropped.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Shi-Tomasi: keep the best 4 corners, quality level 0.01, at least
# 10 px apart; round the returned coordinates to integers.
corners = np.int0(cv2.goodFeaturesToTrack(gray, 4, 0.01, 10))

# Mark each detected corner with a small filled dot.
for corner in corners:
    x, y = corner.ravel()
    cv2.circle(img, (x, y), 3, 255, -1)

# Show and save the annotated image.
plt.subplot(121), plt.imshow(img)
plt.title('2'), plt.xticks([]), plt.yticks([])
plt.suptitle("CORNERS")
cv2.imwrite("/home/mkmeral/Desktop/opencv/corners.png", img)
plt.show()
Here is the whole image, i cropped the image to make it smaller.
This is where i need corners to be:
As your image is of rather poor resolution and resemblance to a rectangle, any solution is going to be somewhat in the beholder's eye.
You can do this with OpenCV but I am just explaining the method using ImageMagick at the commandline. So, threshold the image to black and white at 50% and then do a "Blob Analysis" or "Connected Components Analysis".
convert rect.png -threshold 50% \
-define connected-components:verbose=true \
-connected-components 8 result.png
Output
Objects (id: bounding-box centroid area mean-color):
0: 160x120+0+0 78.0,58.7 18551 srgb(0,0,0)
1: 52x50+97+58 121.8,82.6 649 srgb(255,255,255)
So, if we look at the last line we have an object 52x50 pixels with area 649 pixels and colour white - that is your shape - or burning cigarette as I think of it! Let's draw it in:
convert rect.png -stroke red -fill none -draw "rectangle 97,58 148,107" y.png
Now, if it is a rectangle as you say, it will have a length roughly equal to the diagonal of the enclosing box, so
L = sqrt(52*52 + 50*50) = 72
and its area is 649, so its width is around 9 pixels and it starts at +97+58 from the top-left corner. Or its centroid is at 121.8,82.6. So, all that's needed is a little schoolboy geometry to get your corner points.
I found that by adjusting the arguments passed to cv2.cornerHarris() I could get corners correctly identified.
E.g. given this input image:
We can capture corners with the following (note the arguments passed to cornerHarris():
import cv2
import numpy as np
from matplotlib import pyplot as plt
img_file = 'a.jpg'

# cornerHarris requires a float32 grayscale input.
img = cv2.imread(img_file, 0)
img = np.float32(img)

'''args:
img - Input image, it should be grayscale and float32 type.
blockSize - It is the size of neighbourhood considered for corner detection
ksize - Aperture parameter of Sobel derivative used.
k - Harris detector free parameter in the equation.
'''
corners = cv2.cornerHarris(img, 4, 3, 0.04)
# Dilate the response map so the detected corners are easier to see.
corners2 = cv2.dilate(corners, None, iterations=3)

# Overlay: colour every pixel whose response exceeds 1% of the maximum.
img2 = cv2.imread(img_file)
img2[corners2>0.01*corners2.max()] = [255,0,0]

plt.subplot(2, 1, 2)
plt.imshow(img2, cmap = 'gray')
# BUG FIX: this figure shows Harris corners, not Canny edges -- the old
# title 'Canny Edge Detection' mislabelled the plot.
plt.title('Harris Corner Detection')
plt.xticks([])
plt.yticks([])
plt.show()
Output:
Closed. This question needs to be more focused. It is not currently accepting answers.
Want to improve this question? Update the question so it focuses on one problem only by editing this post.
Closed 7 years ago.
Improve this question
This is an antibiotic assay on a petri dish:
I'm working on a project where I'm automating the reading of antibiotic assays of the petri dish kind. In these tests, bacteria from a patient is spread over the petri dish and allowed to grow into a "bacterial lawn." Once the bacteria cover the entire surface of the petri dish, pills either of different medicine types or the same medicine, but in different concentrations are put onto the dish and after 24 hours the kill zones, if they exist, are measured. Kill zones represent regions where the medicine has killed off the bacteria for some radius from the pill. How big the radius of the kill zone is determines whether that bacteria is susceptible or resistant to the medication and the different concentrations.
I've been using a training set of 20 images in a file to generalize my results. I've been using opencv, skimage, numpy, scipy and matplotlib python libraries to build this program. So far I've been able to accurately identify the petri dish rim edge with a hough circle finder and morphological gradient transform of image. I've also been able to use SURF to identify the positions of the pills in the image.
My question:
The issue is that there isn't enough contrast between the kill zone edge and bacterial lawn to use HoughCircleFinder to find these circular zones. Could anyone help me find any method to accurately identify these kill zones?
import numpy as np
import cv2
from skimage import io,img_as_float
from matplotlib import pyplot as plt
import os
from skimage.util import img_as_ubyte
from skimage.color import rgb2gray
from skimage.filters import sobel
import matplotlib.patches as patches
import matplotlib.cbook as cbook
from skimage.filters.rank import entropy
from skimage.morphology import disk
from skimage import io,exposure
from skimage.segmentation import slic
from skimage import io, segmentation
from skimage.color import label2rgb,rgb2gray
from scipy import signal
import scipy
from skimage.future import graph
from skimage.feature import peak_local_max
def HoughCircleFinder(filtered_image,image):
    '''Find the Petri-dish rim via a Hough circle transform and draw it.

    filtered_image -- preprocessed (e.g. morphological-gradient) image
    image          -- original colour image; circles are drawn on a copy
    Returns the annotated copy of `image`.  (Python 2 / OpenCV 2.x code.)
    '''
    output = image.copy()
    gray = img_as_ubyte(filtered_image)
    # gray = cv2.cvtColor(img_as_ubyte(image), cv2.COLOR_BGR2GRAY)
    # detect circles in the image; radius limited to 25%-60% of the larger
    # image dimension so only the dish rim is a plausible candidate
    circles = cv2.HoughCircles(gray, cv2.cv.CV_HOUGH_GRADIENT, 5, minDist = int(0.45 * max(image.shape)), minRadius = int(0.25 * max(image.shape)), maxRadius = int(0.6 * max(image.shape)))
    # ensure at least some circles were found
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        circles = np.round(circles[0, :]).astype("int")
        # loop over the (x, y) coordinates and radius of the circles
        for (x, y, r) in circles:
            print x,y,r
            # draw the circle in the output image, then draw a rectangle
            # corresponding to the center of the circle
            # filter circles whose centre falls outside the middle half of
            # the view.  NOTE(review): both tests use image.shape[0] (rows);
            # the x test should probably use image.shape[1] -- confirm.
            if not (image.shape[0]*0.25) < x < (image.shape[0]*0.75):
                continue
            elif not (image.shape[0]*0.25) < y < (image.shape[0]*0.75):
                continue
            else:
                # output = CropCircle(output,x,y,r)
                cv2.circle(output, (x, y), r, (0, 255, 0), 4)
                cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
    return output
def MorphologicalGradient(img):
    '''Morphological gradient (dilation minus erosion): outlines objects in `img`.'''
    # Tall, narrow rectangular structuring element (7 rows x 3 columns).
    strel = np.ones((7,3),np.uint8)
    return cv2.morphologyEx(img, cv2.MORPH_GRADIENT, strel)
def SURF(img): #image1
    '''Detect SURF keypoints (pill locations) in `img`.

    Returns (annotated image, list of (x, y) keypoint positions).
    Uses the OpenCV 2.x cv2.SURF API.
    '''
    surf = cv2.SURF(7000)  # Hessian threshold 7000
    keypoints, _descriptors = surf.detectAndCompute(img, None)
    positions = [kp.pt for kp in keypoints]
    surf.hessianThreshold = 50000
    annotated = cv2.drawKeypoints(img, keypoints, None, (255, 0, 0), 4)
    return annotated, positions
# make list of images from file (absolute path to every dish photo)
pic_list = [os.path.join("/Users/sethcommichaux/Desktop/PetriKillZone/PetriDishes/",pic) for pic in os.listdir("/Users/sethcommichaux/Desktop/PetriKillZone/PetriDishes/")]
# for loop for processing images and getting useful data
# (Python 2 print statements; skips the first listed file)
for image in pic_list[1:]:
    print image
    image1 = cv2.imread(image,0) #grayscale image
    image = cv2.imread(image) #color image
    print "number of pixels in image: ",image.size
    print "image shape (if grayscale will be 2 tuple, if color 3 or more): ", image.shape
    # locate and draw the dish rim on the colour image
    image = HoughCircleFinder(MorphologicalGradient(image1),image)
    print 'image data type: ',image.dtype
    plt.figure()
    io.imshow(image)
    plt.show()
    # part that finds pills
    image,features = SURF(image)
    plt.figure()
    io.imshow(image)
    plt.show()
I have used the SURF key points object to identify the locations of the pills, but how to find the kill zones I'm at a loss for. Below is code for getting plots and histograms along x-axis going out from pill.
# Plot the intensity profile of the image row through each pill centre
# (row index = y coordinate of the SURF keypoint).
for row in features:
    # NOTE(review): PeakFind is not defined anywhere in this file --
    # confirm it exists elsewhere before running.
    PeakFind(image[round(row[1])])
    print image[round(row[1])]
    plt.figure()
    plt.plot(range(len(image[round(row[1])])),image[round(row[1])])
    plt.show()
    # plt.figure()
    # x = plt.hist(image[round(row[0])])
print features