I have this image
Using the Hough transform, I am drawing circles on the target. Here is the code and the result:
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math
bgr_img = cv2.imread('16-Bit_ID-00001.jpg') # read as it is
if bgr_img.shape[-1] == 3:  # color image
    b, g, r = cv2.split(bgr_img)  # get b, g, r
    rgb_img = cv2.merge([r, g, b])  # switch it to rgb
    gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
else:
    gray_img = bgr_img

img = cv2.medianBlur(gray_img, 95)  # blur value acts as a filter
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 30,
                           param1=50, param2=50, minRadius=60, maxRadius=0)
circles = np.uint16(np.around(circles))

for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
#sliceno = np.int32((math.pi + np.arctan2(Y, X)) * (N / (2 * math.pi)))
plt.subplot(121),plt.imshow(rgb_img)
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(cimg)
plt.title('Hough Transform'), plt.xticks([]), plt.yticks([])
plt.show()
The result I get is
Now I want to divide the circle made by the Hough transform into 12 equal parts.
Does anyone know how to do it?
I made an attempt; it is far from perfect and not what I wanted to do, but here it is:
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math
bgr_img = cv2.imread('16-Bit_ID-00001.jpg') # read as it is
if bgr_img.shape[-1] == 3:  # color image
    b, g, r = cv2.split(bgr_img)  # get b, g, r
    rgb_img = cv2.merge([r, g, b])  # switch it to rgb
    gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
else:
    gray_img = bgr_img

img = cv2.medianBlur(gray_img, 95)  # blur value acts as a filter
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 30,
                           param1=50, param2=50, minRadius=60, maxRadius=0)
circles = np.uint16(np.around(circles))

angle = 0
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
    # dividing the circle into 12 equal parts
    (x, y), radius = (i[0], i[1]), i[2]
    radius = int(radius)
    angle = angle + 30
    x_2 = int(round(x + radius * math.cos(angle * math.pi / 180.0)))
    y_2 = int(round(y + radius * math.sin(angle * math.pi / 180.0)))
    cv2.line(cimg, (i[0], i[1]), (x_2, y_2), (255, 127, 0), 3, cv2.LINE_AA)
    angle = angle + 30
    x_2 = int(round(x + radius * math.cos(angle * math.pi / 180.0)))
    y_2 = int(round(y + radius * math.sin(angle * math.pi / 180.0)))
    cv2.line(cimg, (i[0], i[1]), (x_2, y_2), (255, 127, 0), 3, cv2.LINE_AA)
plt.subplot(121),plt.imshow(rgb_img)
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(cimg)
plt.title('Hough Transform'), plt.xticks([]), plt.yticks([])
plt.show()
and here is the result
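For reference, here is a minimal sketch, assuming the circles array from the code above, that draws all 12 dividers with a single loop over the sector angles instead of incrementing angle twice per circle:

for i in circles[0, :]:
    cx, cy, r = int(i[0]), int(i[1]), int(i[2])
    for k in range(12):  # one divider every 30 degrees
        theta = math.radians(k * 30)
        x_2 = int(round(cx + r * math.cos(theta)))
        y_2 = int(round(cy + r * math.sin(theta)))
        cv2.line(cimg, (cx, cy), (x_2, y_2), (255, 127, 0), 3, cv2.LINE_AA)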
Related
I have some specific images of two objects (a phone and a TV remote) and I want to calculate the angle between two of their edges that intersect. I used Canny to detect the edges and a Hough line transform for the angle, but the hough_line() function found too many lines whose angles don't match the requirement.
Original image:
This is the requirement:
And this is what I made:
My code:
import cv2
from skimage.transform import hough_line, hough_line_peaks
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def edge_detection(img, blur_ksize=5, threshold1=100, threshold2=200):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_gaussian = cv2.GaussianBlur(gray, (blur_ksize, blur_ksize), 0)
    img_canny = cv2.Canny(img_gaussian, threshold1, threshold2)
    return img_canny
image = edge_detection(cv2.imread('img1.png'))
h, theta, d = hough_line(image)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
ax = axes.ravel()
ax[0].imshow(image)
ax[0].set_title('Input image')
ax[0].set_axis_off()
ax[1].imshow(image, cmap=cm.gray)
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
    y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
    y1 = (dist - image.shape[1] * np.cos(angle)) / np.sin(angle)
    ax[1].plot((0, image.shape[1]), (y0, y1), '-r')
ax[1].set_xlim((0, image.shape[1]))
ax[1].set_ylim((image.shape[0], 0))
ax[1].set_axis_off()
ax[1].set_title('Detected lines')
plt.tight_layout()
plt.show()
angle = []
dist = []
for _, a, d in zip(*hough_line_peaks(h, theta, d)):
    angle.append(a)
    dist.append(d)
angle = [a*180/np.pi for a in angle]
print(angle)
Are there any ways to detect and calculate exactly the one angle I need in OpenCV? Thanks a lot.
Update
I tried different values of blur_ksize, threshold1 and threshold2 in the Canny detection, and it seems I can remove the redundant lines, but now the angles that hough_line_peaks() returns are negative. Can anyone explain this to me? I would also like to put the angle values next to the peaks in the plot, to see which line has which angle.
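A note on the negative angles, with a hedged sketch for the labeling: skimage's hough_line works with angles in the range [-pi/2, pi/2), so negative values are expected and simply describe the orientation of each line's normal. Assuming the fig/axes from the code above, each detected line could be annotated with its angle in degrees like this:

for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
    y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
    y1 = (dist - image.shape[1] * np.cos(angle)) / np.sin(angle)
    ax[1].plot((0, image.shape[1]), (y0, y1), '-r')
    # label the line near its midpoint with its angle in degrees
    ax[1].text(image.shape[1] / 2, (y0 + y1) / 2,
               str(round(angle * 180 / np.pi, 1)), color='yellow')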
Here is a sample solution, but I don't know whether it works for all images. You have to tune the Hough transform parameters.
import cv2
import numpy as np
import matplotlib.pyplot as plt
def edge_detection(img, blur_ksize=5, threshold1=70, threshold2=200):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_gaussian = cv2.GaussianBlur(gray, (blur_ksize, blur_ksize), 0)
    img_canny = cv2.Canny(img_gaussian, threshold1, threshold2)
    return img_canny
img = cv2.imread('stack.png')
image = edge_detection(img)
minLineLength = 300
maxLineGap = 80
# pass these as keyword arguments: the fifth positional parameter of HoughLinesP is the output 'lines' buffer
lines = cv2.HoughLinesP(image, 1, np.pi/180, 50, minLineLength=minLineLength, maxLineGap=maxLineGap)
equations = []
for line in lines:
    x1, y1, x2, y2 = line[0]
    # homogeneous-coordinate line through the two endpoints
    equations.append(np.cross([x1, y1, 1], [x2, y2, 1]))
    cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
font = cv2.FONT_HERSHEY_SIMPLEX
thetas = []
N = len(equations)
for ii in range(1, N):
    a1, b1, c1 = equations[0]
    a2, b2, c2 = equations[ii]
    # intersection point (homogeneous coordinates)
    pt = np.cross([a1, b1, c1], [a2, b2, c2])
    pt = np.int16(pt / pt[-1])
    # angle between two lines
    num = a1 * b2 - b1 * a2
    den = a1 * a2 + b1 * b2
    if den != 0:
        theta = abs(np.arctan(num / den)) * 180 / np.pi
        # show angle and intersection point
        cv2.circle(img, (pt[0], pt[1]), 5, (255, 0, 0), -1)
        cv2.putText(img, str(round(theta, 1)), (pt[0] - 20, pt[1] - 20), font, 0.8, (255, 0, 0), 2)
        thetas.append(theta)
plt.imshow(img)
plt.show()
I have a raster file in the WGS84 projection and I am trying to get the coordinates of random pixels within the raster (GeoTIFF) area, shown at the lower left of the picture. First I calculate the coordinates of each pixel's centroid (in WGS84 again), then I pick 100 of them at random and export them to a CSV.
Problem: I expect the points to fall within the raster area (lower left in the picture), but they are way off of it. Is it a projection error or a coordinate miscalculation? What is wrong in my code?
Here is the code
# Get coordinates for each pixel centroid
geotiff = gdal.Open(path)
gt = geotiff.GetGeoTransform()
column_numbers, row_numbers, band_numbers = geotiff.RasterXSize, geotiff.RasterYSize, geotiff.RasterCount
minx = gt[0]
miny = gt[3] + column_numbers*gt[4] + row_numbers*gt[5]
maxx = gt[0] + column_numbers*gt[1] + row_numbers*gt[2]
maxy = gt[3]
pixelWidth = gt[1]
pixelHeight = -gt[5]
lonPxSz = (maxy - miny) / row_numbers
latPxSz = (maxx - minx) / column_numbers
total = np.array(geotiff.ReadAsArray())
res = []
for i in range(row_numbers):
    for j in range(column_numbers):
        res.append([[i, j]] + [data[i][j] for data in total])
coords = pd.DataFrame(res, columns=['Pair', 'Col1', 'Col2', 'Col3', 'Col4', 'Col5', 'Col6'])
coords[['Lat', 'Lon']] = pd.DataFrame(coords['Pair'].tolist(), index=coords.index)
coords["Lat"] = (coords["Lat"] + 0.5) * 10 * latPxSz + miny
coords["Lon"] = (coords["Lon"] + 0.5) * 10 * lonPxSz + minx
coords = coords.sample(n = 100)
coords[['Lat', 'Lon']].to_csv("coords.csv", sep=";")
If you only want to pick 100 random points on the image:
from osgeo import gdal
import numpy as np
import pandas as pd
import random
path = "image.tif"
geotiff = gdal.Open(path)
gt = geotiff.GetGeoTransform()
column_numbers, row_numbers, band_numbers = geotiff.RasterXSize, geotiff.RasterYSize, geotiff.RasterCount
minx = gt[0]
miny = gt[3] + column_numbers * gt[4] + row_numbers * gt[5]
maxx = gt[0] + column_numbers * gt[1] + row_numbers * gt[2]
maxy = gt[3]
pixelWidth = gt[1]
pixelHeight = -gt[5]
halfPixelWidth = pixelWidth / 2
halfPixelHeight = pixelHeight / 2
rand_point_x = random.sample(range(column_numbers), 100)
rand_point_y = random.sample(range(row_numbers), 100)
rand_points = np.vstack((rand_point_y, rand_point_x)).T
coords = pd.DataFrame(rand_points, columns=['Lat', 'Lon'])
# row 0 is the top of the raster, so latitudes count down from maxy
coords["Lat"] = maxy - (coords["Lat"] * pixelHeight) - halfPixelHeight
coords["Lon"] = minx + (coords["Lon"] * pixelWidth) + halfPixelWidth
coords.to_csv("coords.csv", sep=',')
You may use the coordinates of these random points to retrieve pixel values afterward.
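For instance, here is a minimal sketch, assuming a single-band raster and the variables from the snippet above, of reading the pixel value under each sampled point:

band = geotiff.ReadAsArray()  # for a single-band raster this is a 2-D (rows x cols) array
values = [band[row, col] for row, col in zip(rand_point_y, rand_point_x)]
coords["Value"] = values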
You can try using image processing techniques to get the coordinates of the raster. For example, here is how it can be done using the cv2 (OpenCV) library (purpose of each function commented in code):
import cv2
import numpy as np
def process(img):  # Function to process image for optimal contour detection
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blur = cv2.GaussianBlur(img_gray, (5, 5), 1)
    img_canny = cv2.Canny(img_blur, 350, 150)
    kernel = np.ones((3, 3))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=1)
    return cv2.erode(img_dilate, kernel, iterations=1)

def get_raster(img):  # Function that uses process function to detect contour of raster
    contours, _ = cv2.findContours(process(img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = max(contours, key=cv2.contourArea)
    peri = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.05 * peri, True)
    return cv2.boundingRect(approx)

def get_random(img, num=100):  # Function that uses get_raster to get random points within raster
    x, y, w, h = get_raster(img)
    return np.vstack((np.random.randint(x, x + w, num),
                      np.random.randint(y, y + h, num))).T
img = cv2.imread("map.png") # Read in image
pts = get_random(img) # Get random points witin raster
cv2.drawContours(img, pts[:, None], -1, (0, 255, 0), 2) # Draw points onto image
cv2.imshow("Image", img)
cv2.waitKey(0)
Output:
As you can see, randomly positioned green points have been drawn onto the image within the raster area. If you only need the raster's bounding box, you can just do x, y, w, h = get_raster(img).
For this image, I tried to use Hough circles to find the center of the "black hole".
After playing with the parameters of cv2.HoughCircles for a long time, the following is the best I can get.
raw image:
# reproducible code for stackoverflow
import cv2
import os
import sys
from matplotlib import pyplot as plt
import numpy as np
# read image can turn it gray
img = cv2.imread(FILE)
cimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_gray = dst = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
plt.figure(figsize = (18,18))
plt.imshow(cimg, cmap = "gray")
# removing noises
element = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
closing = cv2.morphologyEx(img_gray, cv2.MORPH_CLOSE, element, iterations = 7)
plt.figure(figsize = (12,12))
plt.imshow(closing, cmap = "gray")
# try to find the circles
circles = cv2.HoughCircles(closing,cv2.HOUGH_GRADIENT,3,50,
param1=50,param2=30,minRadius=20,maxRadius=50)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
plt.figure(figsize = (12,12))
plt.imshow(cimg)
Update:
The one with Canny:
edges = cv2.Canny(closing, 100, 300)
plt.figure(figsize = (12,12))
plt.imshow(edges, cmap = "gray")
circles = cv2.HoughCircles(edges,cv2.HOUGH_GRADIENT,2,50,
param1=50,param2=30,minRadius=20,maxRadius=60)
circles = np.uint16(np.around(circles))
cimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
plt.figure(figsize = (12,12))
plt.imshow(cimg)
Still not the circle that is wanted.
Update:
@crackanddie
Sometimes there is a 6 or a 9 in the identity number.
The circle in the 6 or 9 is not very round.
Is there any way to filter that out?
This is an alternative method if you do not want to implement or fiddle with Hough's parameters. You must be sure there's at least one circle visible in your picture. The idea is to create a segmentation mask based on the CMYK color space and filter the blobs of interest by circularity and area. These are the steps:
Convert the image from BGR to CMYK
Threshold the K channel to get a binary mask
Filter blobs by circularity and area
Approximate the filtered blobs as circles
I'm choosing the CMYK color space because the circle is mostly black. The K (key) channel (in this case, black) should do a good job of representing the blob of interest, albeit with some noise, as usual. Let's see the code:
# Imports:
import cv2
import numpy as np
# image path
path = "D://opencvImages//"
fileName = "dyj3O.jpg"
# load image
bgr = cv2.imread(path + fileName)
Alright, we need to convert the image from BGR to CMYK. OpenCV does not offer the conversion, so we need to do it manually. The formula is very straightforward, and since I'm only interested in the K channel, I just calculate it like this:
# Make float and divide by 255:
bgrFloat = bgr.astype(np.float64) / 255.
# Calculate K as (1 - whatever is biggest out of bgrFloat)
kChannel = 1 - np.max(bgrFloat, axis=2)
# Convert back to uint8:
kChannel = (255 * kChannel).astype(np.uint8)
Gotta keep an eye on the data types, because there are float operations going on. This is the result:
As you see, the hole is almost 100% white, that's cool, we can threshold this image via Otsu like this:
# Compute binary mask of the hole via Otsu:
_, binaryImage = cv2.threshold(kChannel, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
Which gives you this nice binary mask:
Now, here comes the laborious part. Let's find contours on this image. For every contour/blob compute circularity and area. Use this info to filter noise and get the contour of interest, keep in mind that a perfect circle should have circularity close to 1.0. Once you get a contour of interest, approximate a circle to it. This is the process:
# Find the big contours/blobs on the filtered image:
contours, hierarchy = cv2.findContours(binaryImage, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
# Store the detected circles here:
detectedCircles = []
# Look for the potential contours of interest:
for _, c in enumerate(contours):
    # Get the blob's area and perimeter:
    contourArea = cv2.contourArea(c)
    contourPerimeter = cv2.arcLength(c, True)
    # Compute circularity:
    if contourPerimeter > 0:
        circularity = (4 * np.pi * contourArea) / (pow(contourPerimeter, 2))
    else:
        circularity = 0.0
    # Set the min threshold values to identify the
    # blob of interest:
    minCircularity = 0.7
    minArea = 2000
    if circularity >= minCircularity and contourArea >= minArea:
        # Approximate the contour to a circle:
        (x, y), radius = cv2.minEnclosingCircle(c)
        # Compute the center and radius:
        center = (int(x), int(y))
        # Cast radius to int:
        radius = int(radius)
        # Store the center and radius:
        detectedCircles.append([center, radius])
        # Draw the circles:
        cv2.circle(bgr, center, radius, (0, 255, 0), 2)

cv2.imshow("Detected Circles", bgr)
cv2.waitKey(0)
print("Circles Found: " + str(len(detectedCircles)))
Additionally, I have stored the circle (center and radius) in the detectedCircles list. This is the final result:
Circles Found: 1
Here it is:
import numpy as np
import cv2
def threshold_gray_const(image_, rang: tuple):
    return cv2.inRange(image_, rang[0], rang[1])

def binary_or(image_1, image_2):
    return cv2.bitwise_or(image_1, image_2)

def negate_image(image_):
    return cv2.bitwise_not(image_)

def particle_filter(image_, power):
    # Abdrakov's particle filter
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image_, connectivity=8)
    sizes = stats[1:, -1]
    nb_components = nb_components - 1
    min_size = power
    img2 = np.zeros(output.shape, dtype=np.uint8)
    for i in range(0, nb_components):
        if sizes[i] >= min_size:
            img_to_compare = threshold_gray_const(output, (i + 1, i + 1))
            img2 = binary_or(img2, img_to_compare)
    img2 = img2.astype(np.uint8)
    return img2

def reject_borders(image_):
    # Abdrakov's border rejecter
    out_image = image_.copy()
    h, w = image_.shape[:2]
    for row in range(h):
        if out_image[row, 0] == 255:
            cv2.floodFill(out_image, None, (0, row), 0)
        if out_image[row, w - 1] == 255:
            cv2.floodFill(out_image, None, (w - 1, row), 0)
    for col in range(w):
        if out_image[0, col] == 255:
            cv2.floodFill(out_image, None, (col, 0), 0)
        if out_image[h - 1, col] == 255:
            cv2.floodFill(out_image, None, (col, h - 1), 0)
    return out_image
src = cv2.imread("your_image")
img_gray = dst = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
closing = cv2.morphologyEx(img_gray, cv2.MORPH_CLOSE, element, iterations=2)
tv, thresh = cv2.threshold(closing, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
neg = negate_image(thresh)
rej = reject_borders(neg)
filtered = particle_filter(rej, 300)
edges = cv2.Canny(filtered, 100, 200)
circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 3, 50, param1=50, param2=30, minRadius=20, maxRadius=50)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(src, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(src, (i[0], i[1]), 2, (0, 0, 255), 3)
cv2.imshow("closing", closing)
cv2.imshow("edges", edges)
cv2.imshow("out", src)
cv2.waitKey(0)
I changed the cv2.morphologyEx parameters a bit, because they were too strong. After this noise removal I made a binary image using the cv2.THRESH_OTSU flag, negated it, rejected the borders and filtered out small particles. Then I used cv2.Canny to find edges, and passed this 'cannied' image into cv2.HoughCircles. If you have any questions - ask me :)
If you want a "thinking outside the box" solution, check this one out. Remember that it might produce a few false positives in some cases and only works when the circle's contour is complete or closed.
import numpy as np
import cv2
import matplotlib.pyplot as plt
from math import pi
pi_eps = 0.1
rgb = cv2.imread('/path/to/your/image/find_circle.jpg')
gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
th = cv2.adaptiveThreshold(gray,255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,21,5)
contours, hier = cv2.findContours(th.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
out_img = rgb.copy()
for i in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[i])
    ar = min(w, h) / max(w, h)
    # For a circle the aspect ratio is close to 1.0
    # In your use case the circle diameter is between 40px and 100px
    if ar < 0.9 or w < 40 or w > 100:
        continue
    # P = 2 * PI * r
    perimeter = cv2.arcLength(contours[i], True)
    if perimeter == 0:
        continue
    # Second level confirmation could be done using PI = P * P / (4 * A)
    # A = PI * r * r
    area = cv2.contourArea(contours[i])
    if area == 0:
        continue
    # d = (w + h) / 2 average diameter
    # A contour is a circle if (P / d) = PI
    ctr_pi = perimeter / ((w + h) / 2)
    if abs(ctr_pi - pi) < pi_eps * pi:
        cv2.circle(out_img, (int(x + w / 2), int(y + h / 2)), int(max(w, h) / 2), (0, 255, 0), 1)
        print("Center of the circle: ", x + w / 2, y + h / 2)
plt.imshow(out_img)
I'm trying to extract the detected circles in one image using the circular Hough transform. My idea is to get every circle, or separate each one, then compute its color histogram features and feed those features to a classifier such as an SVM, ANN, KNN, etc.
This is my input image:
I'm getting the circles of this way:
import numpy as np
import cv2
import matplotlib.pyplot as plt
print(cv2.__version__)
#read image
file = "lemon.png"
image = cv2.imread(file)
#BGR to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray,
cv2.HOUGH_GRADIENT,
15,
41,
param1=31,
param2=31,
minRadius=0,
maxRadius=33)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(image, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(image, (i[0], i[1]), 2, (0, 0, 255), 3)
print("Number of circles: "+ str(len(circles[0,:])))
plt.imshow(image, cmap='gray', vmin=0, vmax=255)
plt.show()
Output:
The next step is to extract those circles, but I have no idea how to do it.
I would like to see your suggestions; I will appreciate any idea.
Thanks so much.
You can create a binary mask for every circle you detect. Use this mask to extract only the ROIs from the input image. Additionally, you can crop these ROIs and store them in a list to pass them to your classifier.
Here's the code:
import numpy as np
import cv2
# image path
path = "C://opencvImages//"
file = path + "LLfN7.png"
image = cv2.imread(file)
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray,
cv2.HOUGH_GRADIENT,
15,
41,
param1=31,
param2=31,
minRadius=0,
maxRadius=33)
# Here are your circles:
circles = np.uint16(np.around(circles))
# Get input size:
dimensions = image.shape
# height, width
height = image.shape[0]
width = image.shape[1]
# Prepare a list to store each ROI:
lemonROIs = []
The idea is that you process one circle at a time: get the current circle, create a mask, mask the original input, crop the ROI and store it inside the list:
for i in circles[0, :]:
    # Prepare a black canvas:
    canvas = np.zeros((height, width))
    # Draw the outer circle:
    color = (255, 255, 255)
    thickness = -1
    # Cast to Python ints to avoid uint16 wrap-around when cropping:
    centerX = int(i[0])
    centerY = int(i[1])
    radius = int(i[2])
    cv2.circle(canvas, (centerX, centerY), radius, color, thickness)
    # Create a copy of the input and mask input:
    imageCopy = image.copy()
    imageCopy[canvas == 0] = (0, 0, 0)
    # Crop the roi:
    x = centerX - radius
    y = centerY - radius
    h = 2 * radius
    w = 2 * radius
    croppedImg = imageCopy[y:y + h, x:x + w]
    # Store the ROI:
    lemonROIs.append(croppedImg)
For each circle you get a cropped ROI:
You can pass that info to your classifier.
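As a hedged follow-up sketch (an assumption, not part of the answer above): a color histogram per cropped ROI could be computed with cv2.calcHist, masking out the black corners that the circular mask leaves in each crop:

features = []
for roi in lemonROIs:
    # keep only the pixels inside the circular mask (the crop corners are black)
    roiMask = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    roiMask = (roiMask > 0).astype(np.uint8) * 255
    # 8 bins per BGR channel, flattened into a single feature vector
    hist = cv2.calcHist([roi], [0, 1, 2], roiMask, [8, 8, 8],
                        [0, 256, 0, 256, 0, 256])
    features.append(cv2.normalize(hist, hist).flatten())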
I have code that computes the orientation of a figure. Based on this orientation the figure is then rotated until it is straightened out. This all works fine. What I am struggling with is getting the center of the rotated figure to the center of the whole image, so that the center point of the figure matches the center point of the whole image.
Input image:
code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
path = "inputImage.png"
image=cv2.imread(path)
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
thresh=cv2.threshold(gray,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
contours,hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cnt1 = contours[0]
cnt=cv2.convexHull(contours[0])
angle = cv2.minAreaRect(cnt)[-1]
print("Actual angle is:"+str(angle))
rect = cv2.minAreaRect(cnt)
p=np.array(rect[1])
if p[0] < p[1]:
    print("Angle along the longer side:" + str(rect[-1] + 180))
    act_angle = rect[-1] + 180
else:
    print("Angle along the longer side:" + str(rect[-1] + 90))
    act_angle = rect[-1] + 90

# act_angle gives the angle of the minAreaRect with the vertical
if act_angle < 90:
    angle = (90 + angle)
    print("angle less than -45")
# otherwise, just take the inverse of the angle to make
# it positive
else:
    angle = act_angle - 180
    print("angle greater than 90")
# rotate the image to deskew it
(h, w) = image.shape[:2]
print(h,w)
center = (w // 2, h // 2)
print(center)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h),flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
plt.imshow(rotated)
cv2.imwrite("rotated.png", rotated)
With output:
As you can see, the white figure is placed slightly to the left; I want it to be perfectly centered.
Does anyone know how this can be done?
EDIT: I have tried @joe's suggestion and subtracted the centroid coordinates from the center of the image, which I obtained by dividing the width and height of the picture by 2. From this I got an offset that has to be applied to the image array, but I don't know how to apply it. How would this work with the x and y coordinates?
The code:
img = cv2.imread("inputImage")
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray_image,127,255,0)
height, width = gray_image.shape
print(img.shape)
wi=(width/2)
he=(height/2)
print(wi,he)
M = cv2.moments(thresh)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
offsetX = (wi-cX)
offsetY = (he-cY)
print(offsetX,offsetY)
print(cX,cY)
Here is one way in Python/OpenCV.
Get the bounding box for the white region from the contours. Compute the offset for the recentered region. Use numpy slicing to copy that to the center of a black background the size of the input.
Input:
import cv2
import numpy as np
# read image as grayscale
img = cv2.imread('white_shape.png', cv2.IMREAD_GRAYSCALE)
# get shape
hh, ww = img.shape
# get contours (presumably just one around the nonzero pixels)
contours = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    x, y, w, h = cv2.boundingRect(cntr)
# recenter
startx = (ww - w)//2
starty = (hh - h)//2
result = np.zeros_like(img)
result[starty:starty+h,startx:startx+w] = img[y:y+h,x:x+w]
# view result
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save recentered image
cv2.imwrite('white_shape_centered.png',result)
One approach is to obtain the bounding box coordinates of the binary object then crop the ROI using Numpy slicing. From here we calculate the new shifted coordinates then paste the ROI onto a new blank mask.
Code
import cv2
import numpy as np
# Load image as grayscale and obtain bounding box coordinates
image = cv2.imread('1.png', 0)
height, width = image.shape
x,y,w,h = cv2.boundingRect(image)
# Create new blank image and shift ROI to new coordinates
mask = np.zeros(image.shape, dtype=np.uint8)
ROI = image[y:y+h, x:x+w]
x = width//2 - ROI.shape[1]//2
y = height//2 - ROI.shape[0]//2
mask[y:y+h, x:x+w] = ROI
cv2.imshow('ROI', ROI)
cv2.imshow('mask', mask)
cv2.waitKey()
@NawinNarain, from this point onwards, where you found the relative shifts w.r.t. the centroid of the image, it is very straightforward: you want to build an affine matrix with these translations and apply cv2.warpAffine() to your image. That's it.
T = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
We then use warpAffine() to transform the image using the matrix, T
centered_image = cv2.warpAffine(image, T, (orig_width, orig_height))
This will transform your image so that the centroid is at the center. Hope this helps. The complete center image function will look like this:
def center_image(image):
    height, width = image.shape
    print(image.shape)
    wi = (width / 2)
    he = (height / 2)
    print(wi, he)
    ret, thresh = cv2.threshold(image, 95, 255, 0)
    M = cv2.moments(thresh)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    offsetX = (wi - cX)
    offsetY = (he - cY)
    T = np.float32([[1, 0, offsetX], [0, 1, offsetY]])
    centered_image = cv2.warpAffine(image, T, (width, height))
    return centered_image
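For completeness, a hypothetical usage sketch, assuming a grayscale input (the function unpacks a two-value shape):

import cv2
import numpy as np

img = cv2.imread("inputImage.png", cv2.IMREAD_GRAYSCALE)
centered = center_image(img)
cv2.imwrite("centered.png", centered)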