How to detect white space in an image in OpenCV Python?

I have an image:
I have to detect the white space in between and basically partition it into two parts, like this:
This is what I have coded so far, but it only detects the black lines, not the middle white region.
import numpy as np
import cv2
img = cv2.imread('12.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
median = cv2.medianBlur(gray,5)
minLineLength = 250
maxLineGap = 100
# Pass minLineLength and maxLineGap as keyword arguments; passed
# positionally they bind to the wrong parameters of HoughLinesP.
lines = cv2.HoughLinesP(edges, 0.3, np.pi/180, 250,
                        minLineLength=minLineLength, maxLineGap=maxLineGap)
for line in lines:
    x1, y1, x2, y2 = line[0]
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite('newwhite.png',img)

I have a simple solution based on mean values along an axis. I prefer scikit-image over OpenCV, but you can do the same with cv2.
import matplotlib.pyplot
import numpy as np
import skimage.io
import skimage.color
import skimage.morphology
import scipy.signal
img = skimage.io.imread('12.png')
gray = skimage.color.rgb2gray(img)
# Erode so the text merges into large dark areas (a 5x5 square is already quite big!)
eroded = skimage.morphology.erosion(gray, skimage.morphology.square(5))
# Compute mean values along axis 0 or 1
hist = np.mean(eroded, axis=0)
# Search for wide (here 3% of the dimension size) and well-separated
# (here 20% of the dimension size) peaks
peaks, properties = scipy.signal.find_peaks(hist, width=len(hist)*3//100, distance=len(hist)*20//100)
Each peak then corresponds to a white gap along that axis of your image.
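As a minimal continuation (a sketch, assuming a single dominant vertical gap and reusing the variables above), you could partition the image at the first detected peak:
# peaks holds the column indices of the white gaps found above
split_col = peaks[0]
left, right = img[:, :split_col], img[:, split_col:]
skimage.io.imsave('left.png', left)
skimage.io.imsave('right.png', right)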

Related

How to create a semi-transparent pattern with Python PIL?

I have found some examples on this site. I would like to create example 6. Can you help?
Create, as a numpy array, the image of the napkin. The squares have a size of 10×10. You may use the numpy.tile command. Save the resulting image to a file.
In a standard grayscale image, black pixels are 0, gray pixels are 128, and white ones are 255:
import numpy as np
import matplotlib.pyplot as plt
# first create one 20 x 20 tile
a1 = np.zeros((20,20), dtype=int)
a1[10:20,0:10] = a1[0:10,10:20] = 128
a1[10:20,10:20] = 255
# fill the whole 100 x 100 area with the tiles
a = np.tile(a1, (5,5))
# plot and save
plt.imshow(a, 'Greys_r')
plt.savefig('pattern.png')
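Note that plt.savefig writes out the whole figure, axes included; if you only want the bare 100×100 pixel array, plt.imsave('pattern.png', a, cmap='gray') saves just the image.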
You could do this:
from PIL import Image
import numpy as np
# Make grey 2x2 image
TwoByTwo = np.full((2,2), 128, np.uint8)
# Change top-left to black, bottom-right to white
TwoByTwo[0,0] = 0
TwoByTwo[1,1] = 255
# Tile it
tiled = np.tile(TwoByTwo, (5,5))
# Make into PIL Image, rescale in size and save
Image.fromarray(tiled).resize((100,100), Image.NEAREST).save('result.png')
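Neither snippet is actually semi-transparent, though; if you also want transparency (as the title asks), one possible sketch is to convert the pattern to RGBA and set a constant alpha (the value 128 here is just an illustrative 50% opacity):
from PIL import Image
import numpy as np
tiled = np.tile(np.array([[0, 128], [128, 255]], np.uint8), (5, 5))
rgba = Image.fromarray(tiled).convert('RGBA')
rgba.putalpha(128)  # 0 = fully transparent, 255 = fully opaque
rgba.resize((100, 100), Image.NEAREST).save('semi_transparent.png')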

Can I iterate over an image while ignoring black pixels?

I have an image segmented using SLIC from scikit-image, and for every superpixel I get an image where only that superpixel is colored and the rest of the image is black. I want to iterate ONLY over the colored pixels of that one superpixel.
I have tried using a for loop like this:
for i in range(0, mask.shape[0]):
    for j in range(0, mask.shape[1]):
        x, y, z = each_segment[i][j]  # gets the pixel RGB value
        unique_pixel_array = [x, y, z]  # creates a vector that holds those values for each pixel
        if unique_pixel_array != [0, 0, 0]:
            print(unique_pixel_array)
This method works, but it is very inefficient considering it iterates over the entire image; if I have a big image, it will take a very long time to process every superpixel.
Is there a faster and more efficient way to do this?
I will attach the whole code below; maybe you will get a better sense of the whole thing.
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
import matplotlib.pyplot as plt
import numpy as np
import cv2
img = cv2.imread("image.png")
segments = slic(img_as_float(img),n_segments= 7 ,slic_zero=True,sigma =5)
fig = plt.figure("Superpixels -- %d segments" % (22))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(mark_boundaries(img, segments))
plt.axis("off")
plt.show()
for (sp, segVal) in enumerate(np.unique(segments)):
    mask = np.zeros(img.shape[:2], dtype="uint8")
    mask[segments == segVal] = 255
    each_segment = cv2.bitwise_and(img, img, mask=mask)
    for i in range(0, mask.shape[0]):
        for j in range(0, mask.shape[1]):
            x, y, z = each_segment[i][j]
            unique_pixel_array = [x, y, z]
            print(unique_pixel_array)
    cv2.imshow("Mask", mask)
    cv2.imshow("Applied", cv2.bitwise_and(img, img, mask=mask))
    cv2.waitKey(0)
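For what it's worth, a vectorized sketch (not part of the original code, reusing the mask built above): boolean indexing selects the superpixel's pixels in one step and skips the black background entirely, with no Python loop:
# (N, 3) array of the current superpixel's BGR values
superpixel_values = img[mask == 255]
print(superpixel_values)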

Low Pass Filter for blurring an image

I'm trying to blur an image by taking its FFT and applying a low-pass filter that I created, but the output is an image full of gray noise. I'm just trying to follow the basics here, but it seems like there is something wrong with my implementation:
from scipy import fftpack
import numpy as np
import imageio
from PIL import Image, ImageDraw
image1 = imageio.imread('image.jpg',as_gray=True)
#convert image to numpy array
image1_np=np.array(image1)
#fft of image
fft1 = fftpack.fftshift(fftpack.fft2(image1_np))
#Create a low pass filter image
x,y = image1_np.shape[0],image1_np.shape[1]
#size of circle
e_x,e_y=50,50
#create a box
bbox=((x/2)-(e_x/2),(y/2)-(e_y/2),(x/2)+(e_x/2),(y/2)+(e_y/2))
low_pass=Image.new("L",(image1_np.shape[0],image1_np.shape[1]),color=0)
draw1=ImageDraw.Draw(low_pass)
draw1.ellipse(bbox, fill=255)
low_pass_np=np.array(low_pass)
low_pass_fft=fftpack.fftshift(fftpack.fft2(low_pass))
#multiply both the images
filtered=np.multiply(fft1,low_pass_fft)
#inverse fft
ifft2 = abs(fftpack.ifft2(fftpack.ifftshift(filtered)))
#save the image
imageio.imsave('fft-then-ifft.png', ifft2.astype(np.uint8))
As mentioned in comments by Cris Luengo, there are a few things that need to be corrected:
The provided elliptical shape for the low-pass filter makes sense in the frequency domain, so you shouldn't be computing its FFT.
The filter magnitude of 255 scales the results by the same amount. When you store such large values, the uint8 type wraps around to keep only the 8 least significant bits, resulting in something that looks like noise. This can be fixed by simply changing the value of the filter:
draw1.ellipse(bbox, fill=1)
After readjusting the scaling, the computed filtered image may still fall slightly outside the desired 0-255 range in some areas. This creates wrap-around spots (black areas in regions surrounded by white pixels, white areas in regions surrounded by black pixels, or even gradient bands where the image goes from white to black to white). To avoid this, it is common to clip the values to the 0-255 range as follows:
ifft2 = np.real(fftpack.ifft2(fftpack.ifftshift(filtered)))
ifft2 = np.maximum(0, np.minimum(ifft2, 255))
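Equivalently, np.clip(ifft2, 0, 255) performs the same clamping in a single call.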
After making these corrections, you should have the following code:
from scipy import fftpack
import numpy as np
import imageio
from PIL import Image, ImageDraw
image1 = imageio.imread('image.jpg',as_gray=True)
#convert image to numpy array
image1_np=np.array(image1)
#fft of image
fft1 = fftpack.fftshift(fftpack.fft2(image1_np))
#Create a low pass filter image
x,y = image1_np.shape[0],image1_np.shape[1]
#size of circle
e_x,e_y=50,50
#create a box
bbox=((x/2)-(e_x/2),(y/2)-(e_y/2),(x/2)+(e_x/2),(y/2)+(e_y/2))
low_pass=Image.new("L",(image1_np.shape[0],image1_np.shape[1]),color=0)
draw1=ImageDraw.Draw(low_pass)
draw1.ellipse(bbox, fill=1)
low_pass_np=np.array(low_pass)
#multiply both the images
filtered=np.multiply(fft1,low_pass_np)
#inverse fft
ifft2 = np.real(fftpack.ifft2(fftpack.ifftshift(filtered)))
ifft2 = np.maximum(0, np.minimum(ifft2, 255))
#save the image
imageio.imsave('fft-then-ifft.png', ifft2.astype(np.uint8))
And the following filtered image:

How to represent a binary image as a graph, with the axes being the height and width dimensions and the data being the pixels

I am trying to use Python along with OpenCV, numpy and matplotlib to do some computer vision for a robot that will use a railing to navigate. I am currently extremely stuck and have run out of places to look. My current code is:
import cv2
import numpy as np
import matplotlib.pyplot as plt
image = cv2.imread('railings.jpg')
railing_image = np.copy(image)
resized_image = cv2.resize(railing_image,(881,565))
gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blur, 85, 255)
cv2.imshow('test',canny)
image_array = np.array(canny)
nrows, ncols = image_array.shape
count = 0
scan = []
for x in range(0, image_array.shape[1]):
    for y in range(0, image_array.shape[0]):
        if image_array[y, x] == 0:
            count += 1
    scan = [scan, count]
print(scan)
plt.plot([0, count])
plt.axis([0, ncols, 0, nrows])
plt.show()
cv2.waitKey(0)
I am using a Canny image, which is stored in an array of 1s and 0s. The image I need represented is:
The final result should look something like the following image.
I've tried using a histogram function, but I've only managed to get it to output essentially a count of the number of times a 1 or 0 appears.
I would appreciate it if anyone could help me or point me in the right direction toward producing a graph that represents the image pixels on height and width axes.
Thank you
I'm not sure how general this is, but you could just use numpy argmax to get the location of the maximum (like this) in your case. You should avoid loops, as they will be very slow; it is better to use numpy functions. I've imported your image and used the cutoff criterion that a value of 200 or more in the yellow channel is railing:
import cv2
import numpy as np
import matplotlib.pyplot as plt
#This loads the canny image you uploaded
image = cv2.imread('uojHJ.jpg')
#Trim off the top taskbar
trimimage = image[100:, :,0]
#Use argmax with 200 cutoff colour in one channel
maxindex = np.argmax(trimimage[:,:]>200, axis=0)
#Plot graph
plt.plot(trimimage.shape[0] - maxindex)
plt.show()
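(On a boolean array, np.argmax returns the index of the first True along the given axis, so maxindex holds the row of the topmost above-cutoff pixel in each column; subtracting it from the image height flips the curve so the railing's height reads upward on the plot.)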
Where this looks as follows:

Adaptive Histogram Equalization in Python

I am trying to implement adaptive histogram equalization in Python. I take an image, split it into smaller regions, and apply traditional histogram equalization to each. I then combine the smaller images into one and obtain the final resultant image. The final image appears very blocky and has a different contrast level in each region. Is there a way I could maintain uniform contrast across the individual tiles so that the result looks like a single image instead of smaller images stitched together?
import cv2
import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import imsave
from scipy import ndimage
from scipy import misc
import scipy.misc
import scipy
import image_slicer
from image_slicer import join
from PIL import Image
img = 'watch.png'
num_tiles = 25
tiles = image_slicer.slice(img, num_tiles)
for tile in tiles:
    img = scipy.misc.imread(tile.filename)
    hist, bins = np.histogram(img.flatten(), 256, [0, 256])
    cdf = hist.cumsum()
    cdf_normalized = cdf * hist.max() / cdf.max()
    plt.plot(cdf_normalized, color='g')
    plt.hist(img.flatten(), 256, [0, 256], color='g')
    plt.xlim([0, 256])
    plt.legend(('cdf', 'histogram'), loc='upper left')
    cdf_m = np.ma.masked_equal(cdf, 0)
    cdf_o = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
    cdf = np.ma.filled(cdf_o, 0).astype('uint8')
    # Use the equalized CDF as a lookup table for the tile's pixels
    img3 = cdf[img]
    cv2.imwrite(tile.filename, img3)
    tile.image = Image.open(tile.filename)
image = join(tiles)
image.save('watch-join.png')
I reviewed the actual algorithm and came up with the following implementation. I am sure there is a better way to do this. Any suggestions are appreciated.
import numpy as np
import cv2
img = cv2.imread('watch.png', 0)
print(img)
img_size = img.shape
print(img_size)
img_mod = np.zeros((600, 800))
for i in range(0, img_size[0] - 30):
    for j in range(0, img_size[1] - 30):
        kernel = img[i:i + 30, j:j + 30]
        for k in range(0, 30):
            for l in range(0, 30):
                element = kernel[k, l]
                # Rank of this element within its 30x30 neighbourhood
                rank = 0
                for m in range(0, 30):
                    for n in range(0, 30):
                        if kernel[k, l] > kernel[m, n]:
                            rank = rank + 1
                # Note: only the rank from the last (k, l) iteration survives here
                img_mod[i, j] = (rank * 255) / 900
im = np.array(img_mod, dtype=np.uint8)
cv2.imwrite('target.png', im)
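For comparison, OpenCV ships a contrast-limited variant (CLAHE) that interpolates between tiles, which sidesteps both the blockiness and the speed problem of the sliding-window loop above. A minimal sketch (the clip limit and tile grid here are illustrative choices, not prescribed values):
import cv2
img = cv2.imread('watch.png', 0)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cv2.imwrite('watch-clahe.png', clahe.apply(img))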
