Automatically remove hot/dead pixels from an image in Python

I am using numpy and scipy to process a number of images taken with a CCD camera. These images have a number of hot (and dead) pixels with very large (or small) values. These interfere with other image processing, so they need to be removed. Unfortunately, though a few of the pixels are stuck at either 0 or 255 and are always at the same value in all of the images, there are some pixels that are temporarily stuck at other values for a period of a few minutes (the data spans many hours).
I am wondering if there is a method for identifying (and removing) the hot pixels already implemented in python. If not, I am wondering what would be an efficient method for doing so. The hot/dead pixels are relatively easy to identify by comparing them with neighboring pixels. I could see writing a loop that looks at each pixel and compares its value to that of its 8 nearest neighbors. Or, it seems nicer to use some kind of convolution to produce a smoother image and then subtract this from the image containing the hot pixels, making them easier to identify.
I have tried this "blurring method" in the code below, and it works okay, but I doubt that it is the fastest. Also, it gets confused at the edge of the image (probably because the gaussian_filter function is taking a convolution, and the convolution gets weird near the edge). So, is there a better way to go about this?
Example code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage

plt.figure(figsize=(8,4))
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)

#make a sample image
x = np.linspace(-5,5,200)
X,Y = np.meshgrid(x,x)
Z = 255*np.cos(np.sqrt(X**2 + Y**2))**2

for i in range(0,10):
    #Add some hot pixels
    Z[np.random.randint(low=0,high=199),np.random.randint(low=0,high=199)] = np.random.randint(low=200,high=255)
    #and dead pixels
    Z[np.random.randint(low=0,high=199),np.random.randint(low=0,high=199)] = np.random.randint(low=0,high=10)

#Then plot it
ax1.set_title('Raw data with hot pixels')
ax1.imshow(Z,interpolation='nearest',origin='lower')

#Now we try to find the hot pixels
blurred_Z = scipy.ndimage.gaussian_filter(Z, sigma=2)
difference = Z - blurred_Z

ax2.set_title('Difference with hot pixels identified')
ax2.imshow(difference,interpolation='nearest',origin='lower')

threshold = 15
hot_pixels = np.nonzero((difference>threshold) | (difference<-threshold))

#Don't include the hot pixels that we found near the edge:
count = 0
for y,x in zip(hot_pixels[0],hot_pixels[1]):
    if (x != 0) and (x != 199) and (y != 0) and (y != 199):
        ax2.plot(x,y,'ro')
        count += 1
print('Detected %i hot/dead pixels out of 20.' % count)

ax2.set_xlim(0,200); ax2.set_ylim(0,200)
plt.show()
And the output:

Basically, I think that the fastest way to deal with hot pixels is just to use a size=2 median filter. Then, poof, your hot pixels are gone and you also kill all sorts of other high-frequency sensor noise from your camera.
If you really want to remove ONLY the hot pixels, then you can subtract the median-filtered image from the original image, as I did in the question, and replace only the outlier values with the values from the median-filtered image. This doesn't work well at the edges, so if you can ignore the pixels along the edge, then this will make things a lot easier.
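For illustration, here is a minimal vectorized sketch of that substitution (the helper name replace_outliers and the fixed threshold are my own placeholder choices, not from the code above; you would tune the cutoff to your data):
import numpy as np
from scipy.ndimage import median_filter

def replace_outliers(image, threshold=50):
    #Sketch: replace pixels that differ from the size=2 median by more
    #than `threshold` with the median value; `threshold` is arbitrary here.
    med = median_filter(image, size=2)
    diff = image.astype(float) - med.astype(float)  #cast so uint8 doesn't wrap
    outliers = np.abs(diff) > threshold
    fixed = image.copy()
    fixed[outliers] = med[outliers]
    return fixed
Because median_filter pads the borders internally (reflect mode by default), this sketch also behaves reasonably at the edges.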
If you want to deal with the edges, you can use the code below. However, it is not the fastest:
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage

plt.figure(figsize=(10,5))
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)

#make some sample data
x = np.linspace(-5,5,200)
X,Y = np.meshgrid(x,x)
Z = 100*np.cos(np.sqrt(X**2 + Y**2))**2 + 50

np.random.seed(1)
for i in range(0,11):
    #Add some hot pixels
    Z[np.random.randint(low=0,high=199),np.random.randint(low=0,high=199)] = np.random.randint(low=200,high=255)
    #and dead pixels
    Z[np.random.randint(low=0,high=199),np.random.randint(low=0,high=199)] = np.random.randint(low=0,high=10)

#And some hot pixels in the corners and edges
Z[0,0]    = 255
Z[-1,-1]  = 255
Z[-1,0]   = 255
Z[0,-1]   = 255
Z[0,100]  = 255
Z[-1,100] = 255
Z[100,0]  = 255
Z[100,-1] = 255

#Then plot it
ax1.set_title('Raw data with hot pixels')
ax1.imshow(Z,interpolation='nearest',origin='lower')

def find_outlier_pixels(data,tolerance=3,worry_about_edges=True):
    #This function finds the hot or dead pixels in a 2D dataset.
    #tolerance is the number of standard deviations used to cut off the hot pixels.
    #If you want to ignore the edges and greatly speed up the code, then set
    #worry_about_edges to False.
    #
    #The function returns a list of hot pixels and also an image with the hot pixels removed
    from scipy.ndimage import median_filter
    blurred = median_filter(data, size=2)
    difference = data - blurred
    threshold = tolerance*np.std(difference)

    #find the hot pixels, but ignore the edges
    hot_pixels = np.nonzero(np.abs(difference[1:-1,1:-1]) > threshold)
    hot_pixels = np.array(hot_pixels) + 1 #+1 because we excluded the one-pixel border

    fixed_image = np.copy(data) #This is the image with the hot pixels removed
    for y,x in zip(hot_pixels[0],hot_pixels[1]):
        fixed_image[y,x] = blurred[y,x]

    if worry_about_edges:
        height,width = np.shape(data)

        ###Now get the pixels on the edges (but not the corners)###
        #left and right sides
        for index in range(1,height-1):
            #left side:
            med = np.median(data[index-1:index+2,0:2])
            diff = np.abs(data[index,0] - med)
            if diff>threshold:
                hot_pixels = np.hstack(( hot_pixels, [[index],[0]] ))
                fixed_image[index,0] = med
            #right side:
            med = np.median(data[index-1:index+2,-2:])
            diff = np.abs(data[index,-1] - med)
            if diff>threshold:
                hot_pixels = np.hstack(( hot_pixels, [[index],[width-1]] ))
                fixed_image[index,-1] = med

        #Then the top and bottom
        for index in range(1,width-1):
            #bottom:
            med = np.median(data[0:2,index-1:index+2])
            diff = np.abs(data[0,index] - med)
            if diff>threshold:
                hot_pixels = np.hstack(( hot_pixels, [[0],[index]] ))
                fixed_image[0,index] = med
            #top:
            med = np.median(data[-2:,index-1:index+2])
            diff = np.abs(data[-1,index] - med)
            if diff>threshold:
                hot_pixels = np.hstack(( hot_pixels, [[height-1],[index]] ))
                fixed_image[-1,index] = med

        ###Then the corners###
        #bottom left
        med = np.median(data[0:2,0:2])
        diff = np.abs(data[0,0] - med)
        if diff>threshold:
            hot_pixels = np.hstack(( hot_pixels, [[0],[0]] ))
            fixed_image[0,0] = med
        #bottom right
        med = np.median(data[0:2,-2:])
        diff = np.abs(data[0,-1] - med)
        if diff>threshold:
            hot_pixels = np.hstack(( hot_pixels, [[0],[width-1]] ))
            fixed_image[0,-1] = med
        #top left
        med = np.median(data[-2:,0:2])
        diff = np.abs(data[-1,0] - med)
        if diff>threshold:
            hot_pixels = np.hstack(( hot_pixels, [[height-1],[0]] ))
            fixed_image[-1,0] = med
        #top right
        med = np.median(data[-2:,-2:])
        diff = np.abs(data[-1,-1] - med)
        if diff>threshold:
            hot_pixels = np.hstack(( hot_pixels, [[height-1],[width-1]] ))
            fixed_image[-1,-1] = med

    return hot_pixels,fixed_image

hot_pixels,fixed_image = find_outlier_pixels(Z)

for y,x in zip(hot_pixels[0],hot_pixels[1]):
    ax1.plot(x,y,'ro',mfc='none',mec='r',ms=10)
ax1.set_xlim(0,200)
ax1.set_ylim(0,200)

ax2.set_title('Image with hot pixels removed')
ax2.imshow(fixed_image,interpolation='nearest',origin='lower',clim=(0,255))
plt.show()
Output:

Related

How to find max value between boundary in segmentation?

Here's my issue: I want to find the actual maximum width of a boundary in a segmented image with an irregular shape.
Below I post an example image I use for testing.
So far I have managed to obtain the boundaries and a skeleton line, but how do I measure the distance between the contours perpendicular to the skeleton line?
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import filters, morphology

def get_skeleton(image_path):
    im = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    binary = im > filters.threshold_otsu(im)
    skeleton = morphology.skeletonize(binary)
    return skeleton

skeleton = get_skeleton(img_path)
plt.imshow(skeleton, cmap="gray")

def get_boundary(image_path):
    reading_Img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    canny_Img = cv2.Canny(reading_Img,90,100)
    contours,_ = cv2.findContours(canny_Img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    canvas = np.zeros_like(reading_Img)
    boundary = cv2.drawContours(canvas, contours, -1, (255, 0, 0), 1)
    return boundary

boundary = get_boundary(img_path)
plt.imshow(boundary)
Sample input image
EDIT:
First of all, thanks for your answer. I would like to add more detail on what I am trying to do.
So I made a segmentation model which detects cracks in concrete (they can be any shape: vertical, horizontal, diagonal, etc.), and now I need to identify their maximum width and draw a line that shows where it occurs.
I found that the medial axis returns the distance from the boundary, and by filtering for the max value I was able to get the width (see the colab below) and its coordinate on the medial axis. Now I need to draw a line connecting the width between the boundaries, but I have no idea how to find the coordinates of such a line.
I thought of an algorithm which starts at the point of maximum distance on the medial axis and expands until it finds a boundary, but I don't know how to implement it.
This image shows what I need to have:
After I find the x and y of the points, I will be able to calculate the Euclidean distance between the 2 points:
dist = sqrt((y2-y1)^2 + (x2-x1)^2)
Please look at my code in colab notebook: https://colab.research.google.com/drive/1NvEyfrxpKGJ1kxjP48PGNB_UUSp6f6Ze?usp=sharing
sample input images:
https://imgur.com/ewTmH8M
https://imgur.com/JRAQCke
https://imgur.com/7QQFfAv
Starting with your approach using the medial axis function, you can:
interpolate the direction of the axis at the point that was found
derive the orthogonal from the direction
look where the orthogonal reaches the boundary.
The example below shows the principle and works with your example images. But I'm sure there will be some boundary conditions that are not yet considered. I leave it to you to make it robust against real-life data.
import cv2
import numpy as np
from skimage.morphology import medial_axis
from skimage import img_as_ubyte
delta = 3 # delta index for interpolation
# get crack
im = cv2.imread("img.png", cv2.IMREAD_GRAYSCALE)
rgb = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB) # rgb just for demo purpose
_, crack = cv2.threshold(im, 127, 255, cv2.THRESH_BINARY)
# get medial axis
medial, distance = medial_axis(im, return_distance=True)
med_img = img_as_ubyte(medial)
med_contours, _ = cv2.findContours(med_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(rgb, med_contours, -1, (255, 0, 0), 1)
med_pts = [v[0] for v in med_contours[0]]
# get point with maximal distance from medial axis
max_idx = np.argmax(distance)
max_pos = np.unravel_index(max_idx, distance.shape)
max_dist = distance[max_pos]
coords = np.array([max_pos[1], max_pos[0]])
print(f"max distance from medial axis to boundary = {max_dist} at {coords}")
# interpolate orthogonal of medial axis at coords
idx = next(i for i, v in enumerate(med_pts) if (v == coords).all())
px1, py1 = med_pts[(idx-delta) % len(med_pts)]
px2, py2 = med_pts[(idx+delta) % len(med_pts)]
orth = np.array([py1 - py2, px2 - px1]) * max(im.shape)
# intersect orthogonal with crack and get contour
orth_img = np.zeros(crack.shape, dtype=np.uint8)
cv2.line(orth_img, coords + orth, coords - orth, color=255, thickness=1)
gap_img = cv2.bitwise_and(orth_img, crack)
gap_contours, _ = cv2.findContours(gap_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
gap_pts = [v[0] for v in gap_contours[0]]
# determine the end points of the gap contour by negative dot product
n = len(gap_pts)
gap_ends = [
    p for i, p in enumerate(gap_pts)
    if np.dot(p - gap_pts[(i-1) % n], gap_pts[(i+1) % n] - p) < 0
]
print(f"Maximum gap found from {gap_ends[0]} to {gap_ends[1]}")
cv2.line(rgb, gap_ends[0], gap_ends[1], color=(0, 0, 255), thickness=1)
cv2.imwrite("test_out.png", rgb)
First thing I did was keep your images greyscale; there is no need to convert to 3 channels to find contours. Second was to convert the boundary image to a binary so that it is the same as the skeleton image. Then I simply added the two to get the combined both image.
I then clocked through each row (as you are looking for perpendicular distances) of the combined both image and looked for elements that were True, i.e. that are either boundary or skeleton pixels. I made a simplifying assumption at this point: I only searched for cases where there is a boundary followed by a single skeleton pixel and then by a second boundary. I appreciate that this may not always be the case, but I leave that particular headache for you to sort out.
After that it's just a case of keeping track of the max and min distances recorded as you go through the image row by row. (Edit: there may be a cleaner way to do this than the way I've done it, but hopefully you get the idea.)
import numpy as np
import matplotlib.pyplot as plt
import cv2
from skimage import filters
from skimage import morphology
def get_skeleton(image_path):
    im = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    binary = im > filters.threshold_otsu(im)
    skeleton = morphology.skeletonize(binary)
    return skeleton

def get_boundary(image_path):
    reading_Img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    canny_Img = cv2.Canny(reading_Img, 90, 100)
    contours,_ = cv2.findContours(canny_Img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    canvas = np.zeros_like(reading_Img)
    boundary = cv2.drawContours(canvas, contours, -1, (255, 0, 0), 1)
    binary = boundary > filters.threshold_otsu(boundary)
    return binary

skeleton = get_skeleton("LtqlM.png")
boundary = get_boundary("LtqlM.png")
both = skeleton + boundary

max_dist = 0
min_dist = 100000
for idx in range(both.shape[0]): # counting through rows
    row = both[idx, :]
    lines = np.where(row==True)[0]
    if len(lines) == 3:
        dist_1 = lines[1] - lines[0]
        dist_2 = lines[2] - lines[1]
        if (dist_1 > dist_2) and dist_1 > max_dist:
            max_dist = dist_1
        if (dist_2 > dist_1) and dist_2 > max_dist:
            max_dist = dist_2
        if (dist_1 < dist_2) and dist_1 < min_dist:
            min_dist = dist_1
        if (dist_2 < dist_1) and dist_2 < min_dist:
            min_dist = dist_2

print("Maximum distance = ", max_dist)
print("Minimum distance = ", min_dist)
plt.imshow(both)
From the pixel with the largest distance, you can explore concentric square layers until you find a background pixel. Then find the background pixel with the shortest Euclidean distance on the last layer. The second endpoint is symmetrical.
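For what it's worth, a rough sketch of that search (all names here are mine; `mask` is assumed to be the binary crack image and `start` the medial-axis point of maximum distance, as (row, col)):
import numpy as np

def find_nearest_background(mask, start):
    #Expand in concentric square layers around `start` until a background
    #(zero/False) pixel is found, then return the closest one on that layer.
    h, w = mask.shape
    y0, x0 = start
    for r in range(1, max(h, w)):
        ys = np.clip(np.arange(y0 - r, y0 + r + 1), 0, h - 1)
        xs = np.clip(np.arange(x0 - r, x0 + r + 1), 0, w - 1)
        #pixels on the square ring: left/right columns plus top/bottom rows
        ring = [(y, x) for y in ys for x in (xs[0], xs[-1])]
        ring += [(y, x) for y in (ys[0], ys[-1]) for x in xs]
        background = [(y, x) for y, x in ring if not mask[y, x]]
        if background:
            dists = [np.hypot(y - y0, x - x0) for y, x in background]
            return background[int(np.argmin(dists))]
    return None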

Using Gaussian Mixture model to create binary image of lungs

I am trying to segment lungs on a CT scan using Sklearn's Gaussian Mixture. The code is running fine but I am getting a very jumbled up output.
Here is the code:
import numpy as np
from skimage import measure
from sklearn import mixture

def GaussianMixtureSegmentaion(image, fill_lung_structures=True):
    mask = np.zeros_like(image)
    for i in range(image.shape[0]):
        twod = image[i,:,:]
        np.random.seed(1)
        gmm = mixture.GaussianMixture(n_components=2, covariance_type='diag', max_iter=100, n_init=1)
        gmm.fit(twod)
        label_data = gmm.predict(twod)
        #label_data = label_data.reshape(twod.shape)
        mask[i,:,:] = label_data

    labels = measure.label(mask)

    # Pick the pixel in the very corner to determine which label is air.
    # Improvement: Pick multiple background labels from around the patient
    # More resistant to "trays" on which the patient lays cutting the air
    # around the person in half
    background_label = labels[0,0,0]

    #Fill the air around the person
    mask[background_label == labels] = 2

    # Method of filling the lung structures (that is superior to something like
    # morphological closing)
    if fill_lung_structures:
        # For every slice we determine the largest solid structure
        for i, axial_slice in enumerate(mask):
            axial_slice = axial_slice - 1
            labeling = measure.label(axial_slice)
            # largest_label_volume is a helper defined elsewhere in my code
            l_max = largest_label_volume(labeling, bg=0)
            if l_max is not None: #This slice contains some lung
                mask[i][labeling != l_max] = 1

    mask -= 1   #Make the image actual binary
    mask = 1-mask # Invert it, lungs are now 1

    # Remove other air pockets inside the body
    labels = measure.label(mask, background=0)
    l_max = largest_label_volume(labels, bg=0)
    if l_max is not None: # There are air pockets
        mask[labels != l_max] = 0

    return mask
The input is a 3D CT scan of the lungs. I iterate through each slice and use GaussianMixture on each slice to try to cluster it into two parts (essentially the light and dark areas). Am I using this in the wrong way?
Attached is a picture of an example of one of the slices of the lung:
I believe this should be easy to do - I just want to make it a binary picture.
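One possible issue worth checking (a guess on my part, not a confirmed diagnosis): scikit-learn's GaussianMixture treats each row of its input as one sample, so gmm.fit(twod) clusters whole image rows rather than individual pixel intensities. To cluster intensities, you would reshape the slice into a column of single-feature samples, roughly like this:
#hedged sketch: cluster per-pixel intensities instead of whole rows
pixels = twod.reshape(-1, 1)   #one sample per pixel, one feature (intensity)
gmm = mixture.GaussianMixture(n_components=2, covariance_type='diag',
                              max_iter=100, n_init=1, random_state=1)
label_data = gmm.fit_predict(pixels)
mask[i, :, :] = label_data.reshape(twod.shape)
With one feature, covariance_type='diag' is redundant, but it is kept here to match the original call.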

Distorting Voronoi With Perlin Noise

According to this blog post, there are several ways to make voronoi cells a bit more dynamic looking. The one that I'm interested in is the first one that they mentioned:
The above illustration is the same Voronoi diagram as the one above, only now, Perlin noise has been used to distort which points belong to which cell. This creates some more interesting borders between cells.
This is kind of easy to accomplish as long as you use a pixel-by-pixel (or tile-by-tile) assignment to the closest voronoi origin, since you can quite simply offset the actual coordinates of the pixel by Perlin noise - quite closely related to distorted Perlin noise.
I have seen similar ideas mentioned elsewhere, but no actual code showing how the Perlin noise is "distorted" or "added" to the voronoi diagram. I've tried applying it through guessing, but have had no luck. The way my code is written, the distance between points is in the hundreds, while the Perlin noise value is only from 0 to 1, so adding or subtracting the noise really doesn't do much. Multiplying just seems to break the voronoi. I've tried scaling the voronoi distance values to be between 0 and 1, or -1 and 1, and then applying the noise, but that didn't work either.
Below is an example of the voronoi diagram and perlin noise I am generating. I would appreciate any feedback or ability to point me in the right direction.
from PIL import Image
import random
import math
import numpy as np
import noise

wid = 500
hei = 250
image = Image.new("RGB",(wid,hei))
world_test = np.zeros(image.size)

scale = 100        # number that determines at what distance to view the noisemap
octaves = 6        # the number of levels of detail you want your perlin noise to have
persistence = 0.5  # number that determines how much each octave contributes to the overall shape (adjusts amplitude)
lacunarity = 2.0   # number that determines how much detail is added or removed at each octave (adjusts frequency)

# Creates perlin noise
for x in range(wid):
    for y in range(hei):
        world_test[x][y] = noise.pnoise2(x/100,
                                         y/100,
                                         octaves = octaves,
                                         persistence = persistence,
                                         lacunarity = lacunarity,
                                         repeatx = wid,
                                         repeaty = hei,
                                         base = 0)

def generate_voronoi_diagram(width, height, num_cells):
    image = Image.new("RGB", (width, height))
    putpixel = image.putpixel
    imgx, imgy = image.size
    nx = []
    ny = []
    nr = []
    ng = []
    nb = []
    #Go through number of cells
    for i in range(num_cells):
        #create a point (x,y) and give it a specific color value
        nx.append(random.randrange(imgx))
        ny.append(random.randrange(imgy))
        nr.append(random.randrange(256))
        ng.append(random.randrange(256))
        nb.append(random.randrange(256))
    #go through each pixel in the image
    for y in range(int(imgy)):
        for x in range(int(imgx)):
            dmin = math.hypot(imgx-1, imgy-1)
            j = -1
            #go through each cell
            for i in range(num_cells):
                # d is distance from each voronoi starting point
                d = math.hypot(nx[i]-x, ny[i]-y)
                # apply perlin distort to d
                d += world_test[x][y]
                #if distance is less than the current min distance,
                #set that point as the owner of this pixel and the new dmin
                if d < dmin:
                    dmin = d
                    j = i
            putpixel((x, y), (nr[j], ng[j], nb[j]))
    image.save("Voronoi_example.png", "PNG")
    image.show()

generate_voronoi_diagram(wid, hei, 30)
I think I found a solution to this question.
If you create separate noise maps for each coordinate (x and y) and add the values of those noise maps to the pixel coordinates during the calculation of d, the distances get distorted by the maps.
I think I found this answer on this site, but I'm not sure:
https://gamedev.stackexchange.com/questions/182582/how-to-distort-an-image-using-perlin-noise
Based on this site I modified your Python code, and it should now work.
from PIL import Image
import random
import math
import numpy as np
import noise

wid = 100
hei = 100
image = Image.new("RGB",(wid,hei))
world_test_x = np.zeros(image.size)
world_test_y = np.zeros(image.size)

scale = 0.1        # number that determines at what distance to view the noisemap
octaves = 6        # the number of levels of detail you want your perlin noise to have
persistence = 0.5  # number that determines how much each octave contributes to the overall shape (adjusts amplitude)
lacunarity = 2.0   # number that determines how much detail is added or removed at each octave (adjusts frequency)
seed = 19829813472
mult = 50          # strength of the distortion

# Creates perlin noise to distort x coordinates
for x in range(wid):
    for y in range(hei):
        world_test_x[x][y] = noise.pnoise2(x/100,
                                           y/100,
                                           octaves = octaves,
                                           persistence = persistence,
                                           lacunarity = lacunarity,
                                           repeatx = wid,
                                           repeaty = hei,
                                           base = 0)*mult

# Creates perlin noise to distort y coordinates
for x in range(wid):
    for y in range(hei):
        world_test_y[x][y] = noise.pnoise2((x+seed)/100,
                                           (y+seed)/100,
                                           octaves = octaves,
                                           persistence = persistence,
                                           lacunarity = lacunarity,
                                           repeatx = wid,
                                           repeaty = hei,
                                           base = 0)*mult

def generate_voronoi_diagram(width, height, num_cells):
    image = Image.new("RGB", (width, height))
    putpixel = image.putpixel
    imgx, imgy = image.size
    nx = []
    ny = []
    nr = []
    ng = []
    nb = []
    nsize = []
    #Go through number of cells
    for i in range(num_cells):
        #create a point (x,y) and give it a specific color value
        nx.append(random.randrange(imgx))
        ny.append(random.randrange(imgy))
        nr.append(random.randrange(256))
        ng.append(random.randrange(256))
        nb.append(random.randrange(256))
        nsize.append(0)
    #go through each pixel in the image
    for y in range(int(imgy)):
        for x in range(int(imgx)):
            dmin = math.hypot(imgx-1, imgy-1)
            j = -1
            #go through each cell
            for i in range(num_cells):
                # d is distance from each voronoi starting point
                # each point gets its coordinates distorted so d also gets distorted
                d = math.hypot(nx[i]-x+world_test_x[x][y], ny[i]-y+world_test_y[x][y])
                #if distance is less than the current min distance,
                #set that point as the owner of this pixel and the new dmin
                if d < dmin:
                    dmin = d
                    j = i
            nsize[j] += 1
            putpixel((x, y), (nr[j], ng[j], nb[j]))
    image.save("Voronoi_example.png", "PNG")
    image.show()
    print(nsize)
    print(nr)

generate_voronoi_diagram(wid, hei, 8)
Here is the Voronoi map
Here is the distorted Voronoi map

Pixels intensity values between two lines

I have created an algorithm that detects the edges of an extruded collagen casing and draws a centerline between these edges on an image: Casing with a centerline.
Here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
img = cv2.imread("C:/Users/5.jpg", cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (1500, 1200))
#ROI
fromCenter = False
r = cv2.selectROI(img, fromCenter)
imCrop = img[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
#Operations on an image
_,thresh = cv2.threshold(imCrop,100,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
kernel = np.ones((5,5),np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
blur = cv2.GaussianBlur(opening,(7,7),0)
edges = cv2.Canny(blur, 0,20)
#Edges localization, packing coords into a list
indices = np.where(edges != [0])
coordinates = list(zip(indices[1], indices[0]))
num = len(coordinates)
#Separating into top and bot edge
bot_cor = coordinates[:int(num/2)]
top_cor = coordinates[-int(num/2):]
#Converting to arrays, sorting
a, b = np.array(top_cor), np.array(bot_cor)
a, b = a[a[:,0].argsort()], b[b[:,0].argsort()]
#Edges approximation by a 5th degree polynomial
min_a_x, max_a_x = np.min(a[:,0]), np.max(a[:,0])
new_a_x = np.linspace(min_a_x, max_a_x, imCrop.shape[1])
a_coefs = np.polyfit(a[:,0],a[:,1], 5)
new_a_y = np.polyval(a_coefs, new_a_x)
min_b_x, max_b_x = np.min(b[:,0]), np.max(b[:,0])
new_b_x = np.linspace(min_b_x, max_b_x, imCrop.shape[1])
b_coefs = np.polyfit(b[:,0],b[:,1], 5)
new_b_y = np.polyval(b_coefs, new_b_x)
#Defining a centerline
midx = [np.average([new_a_x[i], new_b_x[i]], axis = 0) for i in range(imCrop.shape[1])]
midy = [np.average([new_a_y[i], new_b_y[i]], axis = 0) for i in range(imCrop.shape[1])]
plt.figure(figsize=(16,8))
plt.title('Cross section')
plt.xlabel('Length of the casing', fontsize=18)
plt.ylabel('Width of the casing', fontsize=18)
plt.plot(new_a_x, new_a_y,c='black')
plt.plot(new_b_x, new_b_y,c='black')
plt.plot(midx, midy, '-', c='blue')
plt.show()
#Converting coords type to a list (plotting purposes)
coords = list(zip(midx, midy))
points = list(np.int_(coords))
mask = np.zeros((imCrop.shape[:2]), np.uint8)
mask = edges
#Plotting
for point in points:
    cv2.circle(mask, tuple(point), 1, (255,255,255), -1)
for point in points:
    cv2.circle(imCrop, tuple(point), 1, (255,255,255), -1)
cv2.imshow('imCrop', imCrop)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
Now I would like to sum up the intensities of each pixel in the region between the top edge and the centerline (and the same for the region between the centerline and the bottom edge).
Is there any way to limit the ROI to the region between the detected edges and split it into two regions based on the calculated centerline?
Or is there any way to access the pixels which are contained between the edge and the centerline based on their coordinates?
(It's my very first post here, sorry in advance for all the mistakes)
I wrote a somewhat naïve code to get masks for the upper and lower part. My code assumes that the source image will always be like yours: with horizontal stripes.
After applying Canny I get this:
Then I run some loops through the image array to fill the unwanted areas of your image. This is done separately for the upper and lower parts, creating masks. The results are:
Then you can use these masks to sum only the elements you're interested in, using cv.sumElems.
import cv2 as cv

#open as grayscale image
src = cv.imread("colagen.png",cv.IMREAD_GRAYSCALE)

# apply canny and find contours
threshold = 100
canny_output = cv.Canny(src, threshold, threshold * 2)

# find mask for upper part
mask1 = canny_output.copy()
x, y = canny_output.shape
area = 0
for j in range(y):
    area = 0
    for i in range(x):
        if area == 0:
            if mask1[i][j] > 0:
                area = 1
                continue
            else:
                mask1[i][j] = 255
        elif area == 1:
            if mask1[i][j] > 0:
                area = 2
            else:
                continue
        else:
            mask1[i][j] = 255
mask1 = cv.bitwise_not(mask1)

# find mask for lower part
mask2 = canny_output.copy()
x, y = canny_output.shape
area = 0
for j in range(y):
    area = 0
    for i in range(x):
        if area == 0:
            if mask2[-i][j] > 0:
                area = 1
                continue
            else:
                mask2[-i][j] = 255
        elif area == 1:
            if mask2[-i][j] > 0:
                area = 2
            else:
                continue
        else:
            mask2[-i][j] = 255
mask2 = cv.bitwise_not(mask2)

# apply masks and calculate sum of elements in upper and lower part
sums = [0,0]
(sums[0],_,_,_) = cv.sumElems(cv.bitwise_and(src,mask1))
(sums[1],_,_,_) = cv.sumElems(cv.bitwise_and(src,mask2))

cv.imshow('src',src)
cv.imshow('canny',canny_output)
cv.imshow('mask1',mask1)
cv.imshow('mask2',mask2)
cv.imshow('masked1',cv.bitwise_and(src,mask1))
cv.imshow('masked2',cv.bitwise_and(src,mask2))
cv.waitKey()
Alternatives...
Probably there exists some function that fills the areas of the Canny result. I tried cv.fillPoly and cv.floodFill, but didn't manage to make them work easily... But maybe someone else can help you with that...
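For reference, the basic cv.floodFill pattern looks like the sketch below; it only marks everything connected to the seed point, which by itself is not the upper/lower split, so treat it as a starting point rather than a solution (it reuses canny_output from the code above):
import cv2 as cv
import numpy as np

#sketch: flood-fill the region of canny_output reachable from the top-left corner
flood = canny_output.copy()
h, w = flood.shape
ff_mask = np.zeros((h + 2, w + 2), np.uint8)  #floodFill requires a mask 2 px larger than the image
cv.floodFill(flood, ff_mask, (0, 0), 255)     #everything reachable from (0,0) becomes 255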
Edit
Found another way to get the masks with cleaner code, using numpy's np.add.accumulate, then np.clip, and then a modulo operation:
# first divide canny_output by 255 to get 0's and 1's, then perform
# an accumulate addition for each column. Thus you'll get +1 for every
# line, "painting" areas with 1, 2, 3...
a = np.add.accumulate(canny_output/255,0)
# clip values: anything greater than 2 becomes 2
a = np.clip(a, 0, 2)
# perform a modulo, to get areas alternating with 0 or 1; then multiply by 255
a = a%2 * 255
# convert to uint8
mask1 = cv.convertScaleAbs(a)
# to get mask2 (the lower mask) flip the array then do the same as above
a = np.add.accumulate(np.flip(canny_output,0)/255,0)
a = np.clip(a, 0, 2)
a = a%2 * 255
mask2 = cv.convertScaleAbs(np.flip(a,0))
This returns almost the same result. The border of the mask is a little bit different...

Detect the green lines in this image and calculate their lengths

Sample Images
The image can be noisier at times, when more objects intervene from the background. Right now I am using various techniques based on the RGB colour space to detect the lines, but it fails when there is a change in colour due to intervening obstacles from the background. I am using opencv and python.
I have read that HSV is better for colour detection, and I have tried it, but haven't been successful yet.
I am not able to find a generic solution to this problem. Any hints or clues in this direction would be of great help.
STILL IN PROGRESS
First of all, an RGB image consists of 3 grayscale images. Since you need the green color, you will deal only with one channel: the green one. To do so, you can split the image with b,g,r = cv2.split(your_image). You will get an output like this if you show the green channel:
After that you should threshold the image using your desired method. I prefer Otsu's thresholding in this case. The output after thresholding is:
It's obvious that the thresholded image is extremely noisy, so performing erosion will reduce the noise a little bit. The noise-reduced image will be similar to the following:
I tried using closing instead of dilation, but closing preserves some unwanted noise. So I performed erosion followed by dilation separately. After dilation the output is:
Note that you can do the morphological operations your own way. You can use opening instead of what I did. The results are subjective from
one person to another.
Now you can try one of these two methods:
1. Blob Detection.
2. HoughLine Transform.
TODO
Try out these two methods and choose the best.
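As a rough sketch of option 2 (the file name and all thresholds below are placeholders you would tune), cv2.HoughLinesP returns line segments directly, so the lengths you are after fall out of the endpoints:
import cv2
import numpy as np

#assumes `clean` is the binary image after thresholding and morphology
clean = cv2.imread("clean_mask.png", cv2.IMREAD_GRAYSCALE)  #placeholder input
segments = cv2.HoughLinesP(clean, rho=1, theta=np.pi/180, threshold=50,
                           minLineLength=30, maxLineGap=5)
if segments is not None:
    for x1, y1, x2, y2 in segments[:, 0]:
        length = np.hypot(x2 - x1, y2 - y1)
        print("segment (%d,%d)-(%d,%d): length %.1f px" % (x1, y1, x2, y2, length))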
You should use the fact that you know you are trying to detect a line, by using the Hough line transform:
http://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.html
When the obstacle also looks like a line, use the fact that you know approximately the orientation of the green lines.
If you don't know the orientation of the line, use the fact that there are several green lines with the same orientation and only one line that is the obstacle.
Here is code for what I meant:
import cv2
import numpy as np

# Params
minLineCount = 300 # min number of points along a line with a specific orientation
minArea = 100

# Read img
img = cv2.imread('i.png')
greenChannel = img[:,:,1]

# Do noise reduction
iFilter = cv2.bilateralFilter(greenChannel,5,5,5)

# Threshold data
#ret,iThresh = cv2.threshold(iFilter,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
iThresh = (greenChannel > 4).astype(np.uint8)*255

# Remove small areas
se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
iThreshRemove = cv2.morphologyEx(iThresh, cv2.MORPH_OPEN, se1)

# Find edges
iEdge = cv2.Canny(iThreshRemove,50,100)

# Hough line transform
lines = cv2.HoughLines(iEdge, 1, 3.14/180, 75)

# Find the theta with the most lines
thetaCounter = dict()
for line in lines:
    theta = line[0, 1]
    if theta in thetaCounter:
        thetaCounter[theta] += 1
    else:
        thetaCounter[theta] = 1

maxThetaCount = 0
maxTheta = 0
for theta in thetaCounter:
    if thetaCounter[theta] > maxThetaCount:
        maxThetaCount = thetaCounter[theta]
        maxTheta = theta

# Find the rhos that correspond to max theta
rhoValues = []
for line in lines:
    rho = line[0, 0]
    theta = line[0, 1]
    if theta == maxTheta:
        rhoValues.append(rho)

# Go over all the lines with the specific orientation and count the number of pixels on that line;
# if the number is bigger than minLineCount, draw the pixels into lineImage
lineImage = np.zeros_like(iThresh, np.uint8)
for rho in range(int(min(rhoValues)), int(max(rhoValues)), 1):
    a = np.cos(maxTheta)
    b = np.sin(maxTheta)
    x0 = round(a*rho)
    y0 = round(b*rho)
    lineCount = 0
    pixelList = []
    for jump in range(-1000, 1000, 1):
        x1 = int(x0 + jump * (-b))
        y1 = int(y0 + jump * (a))
        if x1 < 0 or y1 < 0 or x1 >= lineImage.shape[1] or y1 >= lineImage.shape[0]:
            continue
        if iThreshRemove[y1, x1] == int(255):
            pixelList.append((y1, x1))
            lineCount += 1
    if lineCount > minLineCount:
        for y,x in pixelList:
            lineImage[y, x] = int(255)

# Remove small areas
## Opencv 2.4
im2, contours, hierarchy = cv2.findContours(lineImage,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_NONE)
finalImage = np.zeros_like(lineImage)
finalShapes = []
for contour in contours:
    if contour.size > minArea:
        finalShapes.append(contour)
cv2.fillPoly(finalImage, finalShapes, 255)

## Opencv 3.0
# output = cv2.connectedComponentsWithStats(lineImage, 8, cv2.CV_32S)
#
# finalImage = np.zeros_like(output[1])
# finalImage = output[1]
# stat = output[2]
# for label in range(output[0]):
#     if label == 0:
#         continue
#     cc = stat[label,:]
#     if cc[cv2.CC_STAT_AREA] < minArea:
#         finalImage[finalImage == label] = 0
#     else:
#         finalImage[finalImage == label] = 255

# Show image
#cv2.imwrite('finalImage2.jpg',finalImage)
cv2.imshow('a', finalImage.astype(np.uint8))
cv2.waitKey(0)
and the result for the images:
