Pixel intensity values between two lines - Python

I have created an algorithm that detects the edges of an extruded collagen casing and draws a centerline between these edges on an image (see image: casing with a centerline).
Here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
img = cv2.imread("C:/Users/5.jpg", cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (1500, 1200))
#ROI
fromCenter = False
r = cv2.selectROI(img, fromCenter)
imCrop = img[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
#Operations on an image
_,thresh = cv2.threshold(imCrop,100,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
kernel = np.ones((5,5),np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
blur = cv2.GaussianBlur(opening,(7,7),0)
edges = cv2.Canny(blur, 0,20)
#Edges localization, packing coords into a list
indices = np.where(edges != [0])
coordinates = list(zip(indices[1], indices[0]))
num = len(coordinates)
#Separating into top and bot edge
bot_cor = coordinates[:int(num/2)]
top_cor = coordinates[-int(num/2):]
#Converting to arrays, sorting
a, b = np.array(top_cor), np.array(bot_cor)
a, b = a[a[:,0].argsort()], b[b[:,0].argsort()]
#Edges approximation by a 5th degree polynomial
min_a_x, max_a_x = np.min(a[:,0]), np.max(a[:,0])
new_a_x = np.linspace(min_a_x, max_a_x, imCrop.shape[1])
a_coefs = np.polyfit(a[:,0],a[:,1], 5)
new_a_y = np.polyval(a_coefs, new_a_x)
min_b_x, max_b_x = np.min(b[:,0]), np.max(b[:,0])
new_b_x = np.linspace(min_b_x, max_b_x, imCrop.shape[1])
b_coefs = np.polyfit(b[:,0],b[:,1], 5)
new_b_y = np.polyval(b_coefs, new_b_x)
#Defining a centerline
midx = [np.average([new_a_x[i], new_b_x[i]], axis = 0) for i in range(imCrop.shape[1])]
midy = [np.average([new_a_y[i], new_b_y[i]], axis = 0) for i in range(imCrop.shape[1])]
plt.figure(figsize=(16,8))
plt.title('Cross section')
plt.xlabel('Length of the casing', fontsize=18)
plt.ylabel('Width of the casing', fontsize=18)
plt.plot(new_a_x, new_a_y,c='black')
plt.plot(new_b_x, new_b_y,c='black')
plt.plot(midx, midy, '-', c='blue')
plt.show()
#Converting coords type to a list (plotting purposes)
coords = list(zip(midx, midy))
points = list(np.int_(coords))
mask = np.zeros((imCrop.shape[:2]), np.uint8)
mask = edges
#Plotting
for point in points:
    cv2.circle(mask, tuple(point), 1, (255,255,255), -1)
for point in points:
    cv2.circle(imCrop, tuple(point), 1, (255,255,255), -1)
cv2.imshow('imCrop', imCrop)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
Now I would like to sum up the intensities of each pixel in the region between the top edge and the centerline (and do the same for the region between the centerline and the bottom edge).
Is there any way to limit the ROI to the region between the detected edges and split it into two regions based on the calculated centerline?
Or is there any way to access the pixels contained between an edge and the centerline based on their coordinates?
(It's my very first post here, sorry in advance for all the mistakes)

I wrote some somewhat naïve code to get masks for the upper and lower parts. My code assumes that the source image will always be like yours: with horizontal stripes.
After applying Canny I get this:
Then I run some loops through the image array to fill the unwanted areas of the image. This is done separately for the upper and lower parts, creating masks. The results are:
Then you can use these masks to sum only the elements you're interested in, using cv.sumElems.
import cv2 as cv
import numpy as np
# open as grayscale image
src = cv.imread("colagen.png",cv.IMREAD_GRAYSCALE)
# apply canny and find contours
threshold = 100
canny_output = cv.Canny(src, threshold, threshold * 2)
# find mask for upper part
mask1 = canny_output.copy()
x, y = canny_output.shape
area = 0
for j in range(y):
    area = 0
    for i in range(x):
        if area == 0:
            if mask1[i][j] > 0:
                area = 1
                continue
            else:
                mask1[i][j] = 255
        elif area == 1:
            if mask1[i][j] > 0:
                area = 2
            else:
                continue
        else:
            mask1[i][j] = 255
mask1 = cv.bitwise_not(mask1)
# find mask for lower part
mask2 = canny_output.copy()
x, y = canny_output.shape
area = 0
for j in range(y):
    area = 0
    for i in range(x):
        if area == 0:
            if mask2[-i][j] > 0:
                area = 1
                continue
            else:
                mask2[-i][j] = 255
        elif area == 1:
            if mask2[-i][j] > 0:
                area = 2
            else:
                continue
        else:
            mask2[-i][j] = 255
mask2 = cv.bitwise_not(mask2)
# apply masks and calculate sum of elements in upper and lower part
sums = [0,0]
(sums[0],_,_,_) = cv.sumElems(cv.bitwise_and(src,mask1))
(sums[1],_,_,_) = cv.sumElems(cv.bitwise_and(src,mask2))
cv.imshow('src',src)
cv.imshow('canny',canny_output)
cv.imshow('mask1',mask1)
cv.imshow('mask2',mask2)
cv.imshow('masked1',cv.bitwise_and(src,mask1))
cv.imshow('masked2',cv.bitwise_and(src,mask2))
cv.waitKey()
Alternatives...
There probably exists some function that fills the areas of the Canny result. I tried cv.fillPoly and cv.floodFill, but didn't manage to make them work easily... Maybe someone else can help you with that.
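One possible alternative is to build the masks from the polynomial curves already fitted in the question instead of from the Canny output. A minimal sketch, assuming the variables new_a_x/new_a_y (top edge), midx/midy (centerline) and imCrop from the question's code are in scope:
import numpy as np
import cv2 as cv
# polygon enclosed by the fitted top edge and the centerline:
# walk along the top curve, then back along the reversed centerline
top = np.column_stack((new_a_x, new_a_y))
mid = np.column_stack((midx, midy))
poly = np.vstack((top, mid[::-1])).astype(np.int32)
upper_mask = np.zeros(imCrop.shape[:2], np.uint8)
cv.fillPoly(upper_mask, [poly], 255)
# sum of intensities between the top edge and the centerline
upper_sum, _, _, _ = cv.sumElems(cv.bitwise_and(imCrop, upper_mask))
print(upper_sum)
The lower region could be built the same way from new_b_x/new_b_y and the centerline.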
Edit
Found another way to get the masks with cleaner code, using numpy's np.add.accumulate, then np.clip, and then a modulo operation:
# first divide canny_output by 255 to get 0's and 1's, then perform
# an accumulate addition for each column. Thus you'll get +1 for every
# line, "painting" areas with 1, 2, 3...
a = np.add.accumulate(canny_output/255,0)
# clip values: anything greater than 2 becomes 2
a = np.clip(a, 0, 2)
# perform a modulo to get areas alternating between 0 and 1; then multiply by 255
a = a%2 * 255
# convert to uint8
mask1 = cv.convertScaleAbs(a)
# to get mask2 (the lower mask) flip the array then do the same as above
a = np.add.accumulate(np.flip(canny_output,0)/255,0)
a = np.clip(a, 0, 2)
a = a%2 * 255
mask2 = cv.convertScaleAbs(np.flip(a,0))
This returns almost the same result. The border of the mask is a little bit different...

Related

Finding an unfilled circle in an image of finite size using Python

I am trying to find a circle of finite radius in an image. I started off using the 'HoughCircles' method from OpenCV, as its parameters seemed very much related to my situation, but it is failing to find the circle. It looks like the image may need more pre-processing for the circle to be found reliably, so I started playing with different thresholds in OpenCV, to no success. Here is an example of an image (note that the overall intensity of the image will vary, but the radius of the circle always remains the same, ~45 pixels).
Here is what I have tried so far
import cv2
import numpy as np

image = cv2.imread('image1.bmp', 0)
img_in = 255-image
mean_val = int(np.mean(img_in))
ret, img_thresh = cv2.threshold(img_in, thresh=mean_val-30, maxval=255, type=cv2.THRESH_TOZERO)
# detect circle on the thresholded image
circles = cv2.HoughCircles(img_thresh, cv2.HOUGH_GRADIENT, 1.0, 100, minRadius=40, maxRadius=50)
If you look at the image, the circle is obvious, its a thin light gray circle in the center of the blob.
Any suggestions?
Edited to show expected result
The expected result should be like this, as you can see, the circle is very obvious for naked eye on the original image and is always of the same radius but not at the same location on the image. But there will be only one circle of this kind on any given image.
As of 8/20/2020, here is the code I am using to get the center and radius:
from numpy import zeros as np_zeros,\
    full as np_full
from cv2 import calcHist as cv2_calcHist,\
    HoughCircles as cv2_HoughCircles,\
    HOUGH_GRADIENT as cv2_HOUGH_GRADIENT

def getCenter(img_in, saturated, minradius, maxradius):
    img_local = img_in[100:380, 100:540, 0]
    res = np_full(3, -1)
    # do some contrast enhancement
    img_local = stretchHistogram(img_local, saturated)
    circles = cv2_HoughCircles(img_local, cv2_HOUGH_GRADIENT, 1, 40, param1=70, param2=20,
                               minRadius=minradius,
                               maxRadius=maxradius)
    if circles is not None:  # found some circles
        circles = sorted(circles[0], key=lambda x: x[2])
        res[0] = circles[0][0]+100
        res[1] = circles[0][1]+100
        res[2] = circles[0][2]
    return res  # x, y, radius
def stretchHistogram(img_in, saturated=0.35, histMin=0.0, binSize=1.0):
    img_local = img_in.copy()
    img_out = img_in.copy()
    min, max = getMinAndMax(img_local, saturated)
    if max > min:
        min = histMin+min * binSize
        max = histMin+max * binSize
        w, h = img_local.shape[::-1]
        #create a new lut
        lut = np_zeros(256)
        max2 = 255
        for i in range(0, 256):
            if i <= min:
                lut[i] = 0
            elif i >= max:
                lut[i] = max2
            else:
                lut[i] = (round)(((float)(i - min) / (max - min)) * max2)
        #update image with new lut values
        for i in range(0, h):
            for j in range(0, w):
                img_out[i, j] = lut[img_local[i, j]]
    return img_out
def getMinAndMax(img_in, saturated):
    img_local = img_in.copy()
    hist = cv2_calcHist([img_local], [0], None, [256], [0, 256])
    w, h = img_local.shape[::-1]
    pixelCount = w * h
    saturated = 0.5
    threshold = (int)(pixelCount * saturated / 200.0)
    found = False
    count = 0
    i = 0
    while not found and i < 255:
        count += hist[i]
        found = count > threshold
        i = i + 1
    hmin = i
    # reset the flag and counter before scanning from the top end of the histogram
    found = False
    i = 255
    count = 0
    while not found and i > 0:
        count += hist[i]
        found = count > threshold
        i = i - 1
    hmax = i
    return hmin, hmax
and calling the above function as
getCenter(img, 5.0, 55, 62)
But it is still very unreliable. I am not sure why it is so hard to arrive at an algorithm that works reliably for something that is so obvious to the naked eye, or why there is so much variation in the result from frame to frame even though there is no change between them.
Any suggestions are greatly appreciated. Here are some more samples to play with
Simple: draw your circles. cv2.HoughCircles returns a list of circles.
Take care with maxRadius = 100.
for i in circles[0,:]:
    # draw the outer circle
    cv2.circle(image,(i[0],i[1]),i[2],(255,255,0),2)
    # draw the center of the circle
    cv2.circle(image,(i[0],i[1]),2,(255,0,255),3)
A full working example (you have to change your thresholds):
import cv2
import numpy as np
image = cv2.imread('0005.bmp', 0)
height, width = image.shape
print(image.shape)
img_in = 255-image
mean_val = int(np.mean(img_in))
blur = cv2.blur(img_in , (3,3))
ret, img_thresh = cv2.threshold(blur, thresh=100, maxval=255, type=cv2.THRESH_TOZERO)
# detect circle
circles = cv2.HoughCircles(img_thresh, cv2.HOUGH_GRADIENT,1,40,param1=70,param2=20,minRadius=60,maxRadius=0)
print(circles)
for i in circles[0,:]:
    # check if center is in middle of picture
    if(i[0] > width/2-30 and i[0] < width/2+30 \
       and i[1] > height/2-30 and i[1] < height/2+30 ):
        # draw the outer circle
        cv2.circle(image,(i[0],i[1]),i[2],(255,255,0),2)
        # draw the center of the circle
        cv2.circle(image,(i[0],i[1]),2,(255,0,255),3)
cv2.imshow("image", image )
while True:
    keyboard = cv2.waitKey(2320)
    if keyboard == 27:
        break
cv2.destroyAllWindows()
result:

Detect multiple rectangles in image

I am trying to detect the count of pipes in this picture. For this, I'm using OpenCV and Python-based detection. Based on existing answers to similar questions, I was able to come up with the following steps:
Open the image
Filter it
Apply Edge Detection
Use Contours
Check for the count
The total count of pipes is ~909 when counted manually, give or take 4.
After applying the filter
import cv2
import matplotlib.pyplot as plt
import numpy as np
img = cv2.imread('images/input-rectpipe-1.jpg')
blur_hor = cv2.filter2D(img[:, :, 0], cv2.CV_32F, kernel=np.ones((11,1,1), np.float32)/11.0, borderType=cv2.BORDER_CONSTANT)
blur_vert = cv2.filter2D(img[:, :, 0], cv2.CV_32F, kernel=np.ones((1,11,1), np.float32)/11.0, borderType=cv2.BORDER_CONSTANT)
mask = ((img[:,:,0]>blur_hor*1.2) | (img[:,:,0]>blur_vert*1.2)).astype(np.uint8)*255
I get this masked image
This looks fairly accurate in terms of the number of visible rectangles it shows. However, when I try to take the count and plot the bounding boxes on top of the picture, it picks a lot of unwanted regions as well. For circles, HoughCircles has a way of defining the max and min radius. Is there something similar for rectangles that can improve accuracy? Also, I'm open to suggestions for alternative approaches to this problem.
ret,thresh = cv2.threshold(mask,127,255,0)
contours,hierarchy = cv2.findContours(thresh, 1, 2)
count = 0
for i in range(len(contours)):
    count = count+1
    x,y,w,h = cv2.boundingRect(contours[i])
    rect = cv2.minAreaRect(contours[i])
    area = cv2.contourArea(contours[i])
    box = cv2.boxPoints(rect)
    ratio = w/h
    M = cv2.moments(contours[i])
    if M["m00"] == 0.0:
        cX = int(M["m10"] / 1 )
        cY = int(M["m01"] / 1 )
    if M["m00"] != 0.0:
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
    if (area > 50 and area < 220 and hierarchy[0][i][2] < 0 and (ratio > .5 and ratio < 2)):
        #cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
        cv2.circle(img, (cX, cY), 1, (255, 255, 255), -1)
        count = count + 1
print(count)
cv2.imshow("m",mask)
cv2.imshow("f",img)
cv2.waitKey(0)
UPDATE
Based on the second answer, I have converted the C++ code to Python and got closer results, but I am still missing out on a few obvious rectangles.
Of course you could filter them by their area. I took your binary image and continued the work as below:
1- Loop over all the contours you found from findContours.
2- In the loop, check whether each contour is an internal contour or not.
3- For those which are internal contours, check their area; if the area is in the acceptable range, check the width/height ratio of each contour, and finally, if that is good too, count that contour as a pipe.
I did the above method on your binary image, and found 794 pipes:
(Some boxes are lost though; you should change the parameters of the edge detector to get more separable boxes in the image.)
And here is the code (it's C++, but easily convertible to Python; a rough Python conversion is sketched after it):
Mat img__1, img__2, img__ = imread("E:/R.jpg", 0);
threshold(img__, img__1, 128, 255, THRESH_BINARY);
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(img__1, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_NONE);
Mat tmp = Mat::zeros(img__1.size(), CV_8U);
int k = 0;
for (size_t i = 0; i < contours.size(); i++)
{
    double area = contourArea(contours[i]);
    Rect rec = boundingRect(contours[i]);
    float ratio = rec.width / float(rec.height);
    if (area > 50 && area < 220 && hierarchy[i][2] < 0 && (ratio > .5 && ratio < 2)) // hierarchy[i][2] < 0 stands for internal contours
    {
        k++;
        drawContours(tmp, contours, i, Scalar(255, 255, 255), -1);
    }
}
cout << "k= " << k << "\n";
imshow("1", img__1);
imshow("2", tmp);
waitKey(0);
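For reference, a rough Python equivalent of the above might look as follows (assuming OpenCV 4.x, where findContours returns contours and hierarchy; the file path is the same placeholder as in the C++ snippet):
import cv2
import numpy as np

img = cv2.imread("E:/R.jpg", 0)
_, binary = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
tmp = np.zeros_like(binary)
k = 0
for i in range(len(contours)):
    area = cv2.contourArea(contours[i])
    x, y, w, h = cv2.boundingRect(contours[i])
    ratio = w / float(h)
    # hierarchy[0][i][2] < 0 stands for internal contours, as in the C++ version
    if 50 < area < 220 and hierarchy[0][i][2] < 0 and 0.5 < ratio < 2:
        k += 1
        cv2.drawContours(tmp, contours, i, 255, -1)
print("k =", k)
cv2.imshow("1", binary)
cv2.imshow("2", tmp)
cv2.waitKey(0)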
There are many methods to solve this problem, but I doubt there will be a single method without some kind of ad-hoc measures. Here is another attempt at this problem.
Instead of using the edge information, I suggest an LBP (local binary pattern)-like filter that compares the surrounding pixels with the center value. If a certain percentage of the surrounding pixels is larger than the center pixel, the center pixel will be labeled 255; if the condition is not met, the center pixel will be labeled 0.
This intensity-based method runs on the assumption that the pipe center is always darker than the pipe edges. Since it is comparing intensity, it should work well as long as some contrast remains.
Through this process, you will obtain an image with binary blobs for every pipe, plus some noise. You will have to remove the noise with some pre-known conditions such as size, shape, fill_ratio, color, etc. The conditions can be found in the given code.
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Morphological function sets
def morph_operation(matinput):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    morph = cv2.erode(matinput,kernel,iterations=1)
    morph = cv2.dilate(morph,kernel,iterations=2)
    morph = cv2.erode(matinput,kernel,iterations=1)
    morph = cv2.dilate(morph,kernel,iterations=1)
    return morph
# Analyze blobs
def analyze_blob(matblobs,display_frame):
    _,blobs,_ = cv2.findContours(matblobs,cv2.RETR_LIST ,cv2.CHAIN_APPROX_SIMPLE)
    valid_blobs = []
    for i,blob in enumerate(blobs):
        rot_rect = cv2.minAreaRect(blob)
        b_rect = cv2.boundingRect(blob)
        (cx,cy),(sw,sh),angle = rot_rect
        rx,ry,rw,rh = b_rect
        box = cv2.boxPoints(rot_rect)
        box = np.int0(box)
        # Draw the segmented Box region
        frame = cv2.drawContours(display_frame,[box],0,(0,0,255),1)
        on_count = cv2.contourArea(blob)
        total_count = sw*sh
        if total_count <= 0:
            continue
        if sh > sw :
            temp = sw
            sw = sh
            sh = temp
        # minimum area
        if sw * sh < 20:
            continue
        # maximum area
        if sw * sh > 100:
            continue
        # ratio of box
        rect_ratio = sw / sh
        if rect_ratio <= 1 or rect_ratio >= 3.5:
            continue
        # ratio of fill
        fill_ratio = on_count / total_count
        if fill_ratio < 0.4 :
            continue
        # remove blob that is too bright
        if display_frame[int(cy),int(cx),0] > 75:
            continue
        valid_blobs.append(blob)
    if valid_blobs:
        print("Number of Blobs : " ,len(valid_blobs))
    cv2.imshow("display_frame_in",display_frame)
    return valid_blobs
def lbp_like_method(matinput,radius,stren,off):
    height, width = np.shape(matinput)
    roi_radius = radius
    peri = roi_radius * 8
    matdst = np.zeros_like(matinput)
    for y in range(height):
        y_ = y - roi_radius
        _y = y + roi_radius
        if y_ < 0 or _y >= height:
            continue
        for x in range(width):
            x_ = x - roi_radius
            _x = x + roi_radius
            if x_ < 0 or _x >= width:
                continue
            r1 = matinput[y_:_y,x_]
            r2 = matinput[y_:_y,_x]
            r3 = matinput[y_,x_:_x]
            r4 = matinput[_y,x_:_x]
            center = matinput[y,x]
            valid_cell_1 = len(r1[r1 > center + off])
            valid_cell_2 = len(r2[r2 > center + off])
            valid_cell_3 = len(r3[r3 > center + off])
            valid_cell_4 = len(r4[r4 > center + off])
            total = valid_cell_1 + valid_cell_2 + valid_cell_3 + valid_cell_4
            if total > stren * peri:
                matdst[y,x] = 255
    return matdst
def main_process():
    img = cv2.imread('image.jpg')
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Blured to remove noise
    blurred = cv2.GaussianBlur(gray,(3,3),-1)
    # Parameter tuning
    winsize = 5
    peri = 0.6
    off = 4
    matlbp = lbp_like_method(gray,winsize,peri,off)
    cv2.imshow("matlbp",matlbp)
    cv2.waitKey(1)
    matmorph = morph_operation(matlbp)
    cv2.imshow("matmorph",matmorph)
    cv2.waitKey(1)
    display_color = cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR)
    valid_blobs = analyze_blob(matmorph,display_color)
    for b in range(len(valid_blobs)):
        cv2.drawContours(display_color,valid_blobs,b,(0,255,255),-1)
    cv2.imshow("display_color",display_color)
    cv2.waitKey(0)

if __name__ == '__main__':
    main_process()
Result from the LBP-like processing
After cleaning with morphological process
Final result, with the red boxes showing all the blob candidates and the yellow segments showing the blobs that pass all the conditions we set. There are some false alarms below and on top of the pipe bundle, but they can be omitted with some boundary conditions (a sketch follows below).
Total pipe found : 943
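As a sketch of what such a boundary condition could look like, the candidates returned by analyze_blob could be filtered by where their centre lies (the y band values below are placeholders that would need tuning to the actual image):
# Hypothetical post-filter: keep only blobs whose centre lies inside a vertical
# band covering the pipe bundle (y_min and y_max are made-up values to tune).
y_min, y_max = 100, 800
filtered_blobs = []
for blob in valid_blobs:
    (cx, cy), (sw, sh), angle = cv2.minAreaRect(blob)
    if y_min <= cy <= y_max:
        filtered_blobs.append(blob)
print("Blobs after boundary filtering:", len(filtered_blobs))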

How to measure the size of the room based off the color

So this code is able to segment a variety of rooms it identifies into different colors, as seen below. The question is, how do I obtain the area of the colored rooms (like the blue rooms)? The rooms are at a 1 m : 150 m ratio.
The first image is the output I need to measure, the second image is the one I used to run the code on, and the third image is the original image for reference. Thanks in advance.
import cv2
import numpy as np
def find_rooms(img, noise_reduction=10, corners_threshold=0.0000001,
               room_close=2, gap_in_wall_threshold=0.000001):
    # :param img: grey scale image of rooms, already eroded and doors removed etc.
    # :param noise_reduction: Amount of noise removed.
    # :param corners_threshold: Corners to be retained, higher value = more of house removed.
    # :param room_close: Maximum line length to add to close off open doors.
    # :param gap_in_wall_threshold: Minimum number of pixels to identify component as room instead of hole in the wall.
    # :return: rooms: list of numpy arrays containing boolean masks for each detected room
    #          colored_house: image with each room given a color.
    assert 0 <= corners_threshold <= 1
    # Remove noise left from door removal
    img[img < 128] = 0
    img[img > 128] = 255
    contours, _ = cv2.findContours(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    mask = np.zeros_like(img)
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > noise_reduction:
            cv2.fillPoly(mask, [contour], 255)
    img = ~mask
    # Detect corners (you can play with the parameters here)
    # Harris corner detection
    dst = cv2.cornerHarris(img, 4, 3, 0.000001)
    dst = cv2.dilate(dst, None)
    corners = dst > corners_threshold * dst.max()
    # Draw lines to close the rooms off by adding a line between corners on the same x or y coordinate
    # This gets some false positives.
    # Can try disallowing drawing through other existing lines, need to test.
    for y, row in enumerate(corners):
        x_same_y = np.argwhere(row)
        for x1, x2 in zip(x_same_y[:-1], x_same_y[1:]):
            if x2[0] - x1[0] < room_close:
                color = 0
                cv2.line(img, (x1[0], y), (x2[0], y), color, 1)
    for x, col in enumerate(corners.T):
        y_same_x = np.argwhere(col)
        for y1, y2 in zip(y_same_x[:-1], y_same_x[1:]):
            if y2[0] - y1[0] < room_close:
                color = 0
                cv2.line(img, (x, y1[0]), (x, y2[0]), color, 1)
    # Mark the outside of the house as black
    contours, _ = cv2.findContours(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
    mask = np.zeros_like(mask)
    cv2.fillPoly(mask, [biggest_contour], 255)
    img[mask == 0] = 0
    # Find the connected components in the house
    ret, labels = cv2.connectedComponents(img)
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    unique = np.unique(labels)
    rooms = []
    for label in unique:
        component = labels == label
        if img[component].sum() == 0 or np.count_nonzero(component) < gap_in_wall_threshold:
            color = 0
        else:
            rooms.append(component)
            color = np.random.randint(0, 255, size=3)
        img[component] = color
    return rooms, img
#Read gray image
img = cv2.imread('output16.png', 0)
rooms, colored_house = find_rooms(img.copy())
cv2.imshow('result', colored_house)
cv2.waitKey()
cv2.destroyAllWindows()
Ok so let's say that you read the segmented picture using OpenCV:
import cv2
import numpy as np
# reading the segmented picture in coloured mode
image = cv2.imread("path/to/segmented/coloured/picture.jpg", cv2.IMREAD_COLOR)
Now, suppose that you know the size in square meters of the entire picture; if, for instance, the picture covers a total of 150 m x 70 m, you have a total size of 150 x 70 = 10500 m². Let's declare this as a variable:
total_size = 10500
You also want to know the total number of pixels in the picture. If, for instance, your picture is 750*350 pixels, you have: 262500 pixels. You can just do that with:
total_number_of_pixels = image.shape[0]*image.shape[1]
Now, as I said in a comment, you also want to know the number of pixels for each unique colour in the segmented picture, which you can do using:
# count all occurrences of unique colours in your picture
unique, counts = np.unique(image.reshape(-1, image.shape[2]), axis=0, return_counts=True)
coloured_pixel_counts = sorted(zip(unique, counts), key=lambda x: x[1])
Now, all you have left to do is just a cross-multiplication, which can be done with something like this:
rooms = []
for colour, pixel_count in coloured_pixel_counts:
rooms.append((colour, (pixel_count/total_number_of_pixels)*total_size))
You should now have a list of all colours and the respective approximated size, in square meters, of the rooms of each colour.
Note, however, that you would probably have to subset this list to the colours that interest you, as some colours do not really seem to be linked to a room in your segmented pictures...
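For example, a small filter that keeps only the rooms whose colour is predominantly blue could look like this (the channel thresholds are rough guesses; OpenCV stores colours as BGR):
# Hypothetical subset: keep only the "blue" rooms from the (colour, size) list above.
blue_rooms = [(colour, size_m2) for colour, size_m2 in rooms
              if colour[0] > 150 and colour[1] < 100 and colour[2] < 100]
for colour, size_m2 in blue_rooms:
    print("room colour (B, G, R):", colour, "approx. size:", round(size_m2, 1), "m2")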
Again, please ask if anything is unclear!
So the measurement will be based on pixels, and you will need to know the maximum and minimum range of the RGB values of the colour you want to "measure". I ran this code on your image to find the percentage of the green-coloured area relative to the whole area of the house, and I got the following result:
The number of filtered pixels is: 331213 Which counts for %5 of the house
import cv2
import numpy as np
import math
img = cv2.imread('22I7X.png')
#Defining wanted color range
filteredColorMin = np.array([36,0,0], np.uint8) #Min range
filteredColorMax = np.array([70, 255,255], np.uint8) #High range
#Find all the pixels in the wanted color range
dst = cv2.inRange(img, filteredColorMin, filteredColorMax)
#count non-zero values from filtered range
numFilteredColor = cv2.countNonZero(dst)
#Getting total number of pixels in image to get the percentage of the filtered pixels from the total pixels
numTotalPixels=img.shape[0] *img.shape[1]
print('The number of filtered pixels is: ' + str(numFilteredColor) + " Which counts for %" + str(math.ceil((numFilteredColor/numTotalPixels)*100)) + " of the house")
cv2.imshow("original image",img)
cv2.waitKey(0)

Writing robust (size invariant) circle detection (Watershed)

Edit: Quick summary so far: I use the watershed algorithm, but I probably have a problem with the threshold. It didn't detect the brighter circles.
New: fast radial symmetry transform approach, which didn't quite work either (Edit 6).
I want to detect circles with different sizes. The use case is to detect coins in an image and to extract them individually -> get the single coins as single image files.
For this I used the Hough Circle Transform of OpenCV:
(https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/hough_circle/hough_circle.html)
import sys
import cv2 as cv
import numpy as np
def main(argv):
    ## [load]
    default_file = "data/newcommon_1euro.jpg"
    filename = argv[0] if len(argv) > 0 else default_file
    # Loads an image
    src = cv.imread(filename, cv.IMREAD_COLOR)
    # Check if image is loaded fine
    if src is None:
        print ('Error opening image!')
        print ('Usage: hough_circle.py [image_name -- default ' + default_file + '] \n')
        return -1
    ## [load]
    ## [convert_to_gray]
    # Convert it to gray
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    ## [convert_to_gray]
    ## [reduce_noise]
    # Reduce the noise to avoid false circle detection
    gray = cv.medianBlur(gray, 5)
    ## [reduce_noise]
    ## [houghcircles]
    rows = gray.shape[0]
    circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 8,
                              param1=100, param2=30,
                              minRadius=0, maxRadius=120)
    ## [houghcircles]
    ## [draw]
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            center = (i[0], i[1])
            # circle center
            cv.circle(src, center, 1, (0, 100, 100), 3)
            # circle outline
            radius = i[2]
            cv.circle(src, center, radius, (255, 0, 255), 3)
    ## [draw]
    ## [display]
    cv.imshow("detected circles", src)
    cv.waitKey(0)
    ## [display]
    return 0

if __name__ == "__main__":
    main(sys.argv[1:])
I tried all parameters (rows, param1, param2, minRadius, and maxRadius) to optimize the results. This worked very well for one specific image, but other images with differently sized coins didn't work.
Examples:
Parameters
circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 16,
                          param1=100, param2=30,
                          minRadius=0, maxRadius=120)
With the same parameters:
Changed to rows/8
I also tried two other approaches of this thread: writing robust (color and size invariant) circle detection with opencv (based on Hough transform or other features)
The approach of fireant leads to this result:
The approach of fraxel didn't work either.
For the first approach: This happens with all different sizes and also the min and max radius.
How could I change the code so that the coin size does not matter, or so that it finds the parameters itself?
Thank you in advance for any help!
Edit:
I tried the watershed algorithm from OpenCV, as suggested by Alexander Reynolds: https://docs.opencv.org/3.4/d3/db4/tutorial_py_watershed.html
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('data/P1190263.jpg')
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray,0,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv.morphologyEx(thresh,cv.MORPH_OPEN,kernel, iterations = 2)
# sure background area
sure_bg = cv.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)
ret, sure_fg = cv.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv.subtract(sure_bg,sure_fg)
# Marker labelling
ret, markers = cv.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv.watershed(img,markers)
img[markers == -1] = [255,0,0]
#Display:
cv.imshow("detected circles", img)
cv.waitKey(0)
It works very well on the test image of the open-cv website:
But it performs very badly on my own images:
I can't really think of a good reason why it's not working on my images.
Edit 2:
As suggested, I looked at the intermediate images. The thresh image does not look good in my opinion. Next, there is no difference between opening and dist_transform. The corresponding sure_fg shows the detected regions.
thresh:
opening:
dist_transform:
sure_bg:
sure_fg:
Edit 3:
I tried all distanceTypes and maskSizes I could find, but the results were quite the same (https://www.tutorialspoint.com/opencv/opencv_distance_transformation.htm)
Edit 4:
Furthermore, I tried changing the (first) threshold function. I used different fixed threshold values instead of the Otsu function. The best one was 160, but it was far from good:
In the tutorial it looks like this:
It seems like the coins are somehow too bright to be detected by this algorithm, but I don't know how to improve it.
Edit 5:
Changing the overall contrast and brightness of the image (with cv.convertScaleAbs) didn't improve the results. Increasing the contrast, however, should increase the "difference" between foreground and background, at least in the normal image. But it even got worse. The corresponding threshold image didn't improve (it didn't get more white pixels).
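For reference, the kind of contrast/brightness adjustment meant here is roughly the following, reusing gray from the watershed snippet above (alpha and beta are example values, not the ones actually used):
# Example only: increase contrast (alpha > 1) and brightness (beta > 0) before thresholding.
adjusted = cv.convertScaleAbs(gray, alpha=1.5, beta=20)
ret, thresh = cv.threshold(adjusted, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)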
Edit 6: I tried another approach, the fast radial symmetry transform (from here https://github.com/ceilab/frst_python)
import cv2
import numpy as np
def gradx(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use hstack to add back in the columns that were dropped as zeros
    return np.hstack((np.zeros((rows, 1)), (img[:, 2:] - img[:, :-2]) / 2.0, np.zeros((rows, 1))))

def grady(img):
    img = img.astype('int')
    rows, cols = img.shape
    # Use vstack to add back the rows that were dropped as zeros
    return np.vstack((np.zeros((1, cols)), (img[2:, :] - img[:-2, :]) / 2.0, np.zeros((1, cols))))
# Performs fast radial symmetry transform
# img: input image, grayscale
# radii: integer value for radius size in pixels (n in the original paper); also used to size gaussian kernel
# alpha: Strictness of symmetry transform (higher=more strict; 2 is good place to start)
# beta: gradient threshold parameter, float in [0,1]
# stdFactor: Standard deviation factor for gaussian kernel
# mode: BRIGHT, DARK, or BOTH
def frst(img, radii, alpha, beta, stdFactor, mode='BOTH'):
    mode = mode.upper()
    assert mode in ['BRIGHT', 'DARK', 'BOTH']
    dark = (mode == 'DARK' or mode == 'BOTH')
    bright = (mode == 'BRIGHT' or mode == 'BOTH')
    workingDims = tuple((e + 2 * radii) for e in img.shape)
    # Set up output and M and O working matrices
    output = np.zeros(img.shape, np.uint8)
    O_n = np.zeros(workingDims, np.int16)
    M_n = np.zeros(workingDims, np.int16)
    # Calculate gradients
    gx = gradx(img)
    gy = grady(img)
    # Find gradient vector magnitude
    gnorms = np.sqrt(np.add(np.multiply(gx, gx), np.multiply(gy, gy)))
    # Use beta to set threshold - speeds up transform significantly
    gthresh = np.amax(gnorms) * beta
    # Find x/y distance to affected pixels
    gpx = np.multiply(np.divide(gx, gnorms, out=np.zeros(gx.shape), where=gnorms != 0),
                      radii).round().astype(int)
    gpy = np.multiply(np.divide(gy, gnorms, out=np.zeros(gy.shape), where=gnorms != 0),
                      radii).round().astype(int)
    # Iterate over all pixels (w/ gradient above threshold)
    for coords, gnorm in np.ndenumerate(gnorms):
        if gnorm > gthresh:
            i, j = coords
            # Positively affected pixel
            if bright:
                ppve = (i + gpx[i, j], j + gpy[i, j])
                O_n[ppve] += 1
                M_n[ppve] += gnorm
            # Negatively affected pixel
            if dark:
                pnve = (i - gpx[i, j], j - gpy[i, j])
                O_n[pnve] -= 1
                M_n[pnve] -= gnorm
    # Abs and normalize O matrix
    O_n = np.abs(O_n)
    O_n = O_n / float(np.amax(O_n))
    # Normalize M matrix
    M_max = float(np.amax(np.abs(M_n)))
    M_n = M_n / M_max
    # Elementwise multiplication
    F_n = np.multiply(np.power(O_n, alpha), M_n)
    # Gaussian blur
    kSize = int(np.ceil(radii / 2))
    kSize = kSize + 1 if kSize % 2 == 0 else kSize
    S = cv2.GaussianBlur(F_n, (kSize, kSize), int(radii * stdFactor))
    return S
img = cv2.imread('data/P1190263.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
result = frst(gray, 60, 2, 0, 1, mode='BOTH')
cv2.imshow("detected circles", result)
cv2.waitKey(0)
I only get this nearly black output (it has some very dark grey shadows). I don't know what to change and would be thankful for help!
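(One thing worth checking, just a guess: frst returns a floating-point array that is not scaled for display, so imshow can render it almost black. Normalising it purely for visualisation might help, e.g.:)
# Visualisation aid only: scale the float result to 0..255 before displaying it.
vis = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
cv2.imshow("frst (normalised)", vis)
cv2.waitKey(0)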

Detect the green lines in this image and calculate their lengths

Sample Images
The image can be noisier at times, when more objects intervene from the background. Right now I am using various techniques in the RGB colour space to detect the lines, but it fails when there is a change in colour due to intervening obstacles from the background. I am using OpenCV and Python.
I have read that HSV is better for colour detection and have used it, but I haven't been successful yet.
I am not able to find a generic solution to this problem. Any hints or clues in this direction would be of great help.
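For context, HSV-based colour masking of the green lines would look roughly like the sketch below (the file name and the hue range are assumptions and would need tuning):
import cv2
import numpy as np

img = cv2.imread('green_lines.png')  # placeholder file name
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# rough green range in OpenCV's HSV (H runs 0..179)
lower_green = np.array([40, 40, 40], np.uint8)
upper_green = np.array([85, 255, 255], np.uint8)
mask = cv2.inRange(hsv, lower_green, upper_green)
cv2.imshow('green mask', mask)
cv2.waitKey(0)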
STILL IN PROGRESS
First of all, an RGB image consists of 3 grayscale images. Since you need the green color, you will deal with only one channel: the green one. To do so, you can split the image using b,g,r = cv2.split(your_image). You will get an output like this if you show the green channel:
After that, you should threshold the image in whatever way you prefer. I prefer Otsu's thresholding in this case. The output after thresholding is:
It's obvious that the thresholded image is extremely noisy, so performing erosion will reduce the noise a little bit. The noise-reduced image will be similar to the following:
I tried using closing instead of dilation, but closing preserves some unwanted noise, so I separately performed erosion followed by dilation. After dilation the output is:
Note: you can do the morphological operations your own way; you can use opening instead of what I did. The results are subjective from one person to another.
Now you can try one of these two methods:
1. Blob Detection.
2. HoughLine Transform.
TODO
Try out these two methods and choose the best.
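For reference, a minimal sketch of the preprocessing steps described above (green channel, Otsu's thresholding, erosion then dilation); the file name and kernel size are assumptions:
import cv2

img = cv2.imread('green_lines.png')  # placeholder file name
b, g, r = cv2.split(img)
# Otsu's thresholding on the green channel
_, thresh = cv2.threshold(g, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# erosion to reduce noise, then dilation to restore the line thickness
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
eroded = cv2.erode(thresh, kernel, iterations=1)
dilated = cv2.dilate(eroded, kernel, iterations=2)
cv2.imshow('cleaned green channel', dilated)
cv2.waitKey(0)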
You should use the fact that you know you are trying to detect a line by using the line Hough transform.
http://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.html
When the obstacle also looks like a line, use the fact that you know approximately the orientation of the green lines.
If you don't know the orientation of the lines, use the fact that there are several green lines with the same orientation and only one line that is the obstacle.
Here is code for what I meant:
import cv2
import numpy as np
# Params
minLineCount = 300 # min number of points along a line with a specific orientation
minArea = 100
# Read img
img = cv2.imread('i.png')
greenChannel = img[:,:,1]
# Do noise reduction
iFilter = cv2.bilateralFilter(greenChannel,5,5,5)
# Threshold data
#ret,iThresh = cv2.threshold(iFilter,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
iThresh = (greenChannel > 4).astype(np.uint8)*255
# Remove small areas
se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
iThreshRemove = cv2.morphologyEx(iThresh, cv2.MORPH_OPEN, se1)
# Find edges
iEdge = cv2.Canny(iThreshRemove,50,100)
# Hough line transform
lines = cv2.HoughLines(iEdge, 1, 3.14/180,75)
# Find the theta with the most lines
thetaCounter = dict()
for line in lines:
    theta = line[0, 1]
    if theta in thetaCounter:
        thetaCounter[theta] += 1
    else:
        thetaCounter[theta] = 1
maxThetaCount = 0
maxTheta = 0
for theta in thetaCounter:
    if thetaCounter[theta] > maxThetaCount:
        maxThetaCount = thetaCounter[theta]
        maxTheta = theta
# Find the rhos that corresponds to max theta
rhoValues = []
for line in lines:
    rho = line[0, 0]
    theta = line[0, 1]
    if theta == maxTheta:
        rhoValues.append(rho)
# Go over all the lines with the specific orientation and count the number of pixels on that line
# if the number is bigger than minLineCount draw the pixels in finaImage
lineImage = np.zeros_like(iThresh, np.uint8)
for rho in range(int(min(rhoValues)), int(max(rhoValues)), 1):
    a = np.cos(maxTheta)
    b = np.sin(maxTheta)
    x0 = round(a*rho)
    y0 = round(b*rho)
    lineCount = 0
    pixelList = []
    for jump in range(-1000, 1000, 1):
        x1 = int(x0 + jump * (-b))
        y1 = int(y0 + jump * (a))
        if x1 < 0 or y1 < 0 or x1 >= lineImage.shape[1] or y1 >= lineImage.shape[0]:
            continue
        if iThreshRemove[y1, x1] == int(255):
            pixelList.append((y1, x1))
            lineCount += 1
    if lineCount > minLineCount:
        for y,x in pixelList:
            lineImage[y, x] = int(255)
# Remove small areas
## Opencv 2.4
im2, contours, hierarchy = cv2.findContours(lineImage,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE )
finalImage = np.zeros_like(lineImage)
finalShapes = []
for contour in contours:
    if contour.size > minArea:
        finalShapes.append(contour)
cv2.fillPoly(finalImage, finalShapes, 255)
## Opencv 3.0
# output = cv2.connectedComponentsWithStats(lineImage, 8, cv2.CV_32S)
#
# finalImage = np.zeros_like(output[1])
# finalImage = output[1]
# stat = output[2]
# for label in range(output[0]):
# if label == 0:
# continue
# cc = stat[label,:]
# if cc[cv2.CC_STAT_AREA] < minArea:
# finalImage[finalImage == label] = 0
# else:
# finalImage[finalImage == label] = 255
# Show image
#cv2.imwrite('finalImage2.jpg',finalImage)
cv2.imshow('a', finalImage.astype(np.uint8))
cv2.waitKey(0)
and the result for the images:
