Cropping excess padding from binary images keeping a constant margin - python

I'm trying to crop some binary images. The images have a black background and a white silhouette. I want to crop all of my images, removing any excess background. I've tried doing this using the function below; however, the output images tend to also crop the edges of the silhouette, which I would like to keep. I haven't been successful in finding a way to do this. Any ideas on how this could be done?
[example images]
import numpy as np

def crop_image(img, tol):
    mask = img > tol
    return img[np.ix_(mask.any(1), mask.any(0))]

You can get the coordinates of the non-black pixels from the cv2.findNonZero() function, and their bounding box from cv2.boundingRect():
import matplotlib.pyplot as plt
import numpy as np
import cv2

def downloadImage(URL):
    """Downloads the image at the URL and converts it to OpenCV's BGR channel order."""
    from io import BytesIO
    from PIL import Image as PIL_Image
    import requests
    response = requests.get(URL)
    image = PIL_Image.open(BytesIO(response.content))
    return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

URL = "https://i.stack.imgur.com/WgnXW.jpg"

# Read image
img = downloadImage(URL)

# Crop the blank frame around the example image
initialImage = img[11:2330, 11:2208]
fig, ax = plt.subplots(1, 3)
ax[0].imshow(initialImage)
ax[0].set_title('Initial Image')

# Crop the empty space around the silhouette
coords = cv2.findNonZero(cv2.cvtColor(initialImage, cv2.COLOR_BGR2GRAY))
x, y, w, h = cv2.boundingRect(coords)
croppedImg = initialImage[y:y+h, x:x+w]
ax[1].imshow(croppedImg)
ax[1].set_title('Cropped Image')

# Add a constant margin back around the cropped image
extraPixels = 100
paddedImg = np.pad(croppedImg, ((extraPixels, extraPixels), (extraPixels, extraPixels), (0, 0)), 'constant')
ax[2].imshow(paddedImg)
ax[2].set_title('Cropped \nand padded Image')
plt.show()
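If you want the crop and the constant margin in one step, the two stages above can be folded into a small helper (a sketch of the same findNonZero/boundingRect/np.pad approach; the helper name and the margin parameter are my own):

import cv2
import numpy as np

def crop_with_margin(img, margin=100, tol=0):
    """Crop a dark-background image to its non-zero content,
    then pad a constant black margin back on every side."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if img.ndim == 3 else img
    coords = cv2.findNonZero((gray > tol).astype(np.uint8))
    x, y, w, h = cv2.boundingRect(coords)
    cropped = img[y:y + h, x:x + w]
    pad = [(margin, margin), (margin, margin)] + [(0, 0)] * (img.ndim - 2)
    return np.pad(cropped, pad, 'constant')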


How can I display the two images inside of this function?

I provide the following function to flip an image and its corresponding mask. Now I want to display the two flipped images and save them. How can I do that? I need this method in particular because it is related to my research work.
import torch
import torchvision.transforms as transforms
from torchvision.utils import save_image
import cv2
from matplotlib import pyplot as plt
import numpy as np

## define functions
def t_random(min=0, max=1):
    return min + (max - min) * np.random.rand()

def t_randint(min, max):
    return np.random.randint(low=min, high=max)

class augCompose(object):
    def __init__(self, transforms=None):
        self.transforms = transforms

    def __call__(self, img, mask):
        if self.transforms is not None:
            for op, prob in self.transforms:
                if t_random() <= prob:
                    img, mask = op(img, mask)
        return img, mask

def RandomFlip(img, mask, FLIP_LEFT_RIGHT=True, FLIP_TOP_BOTTOM=True):
    if FLIP_LEFT_RIGHT and t_random() < 0.5:
        img = cv2.flip(img, 1)
        mask = cv2.flip(mask, 1)
    if FLIP_TOP_BOTTOM and t_random() < 0.5:
        img = cv2.flip(img, 0)
        mask = cv2.flip(mask, 0)
    return img, mask

# This function reads an image from its path with OpenCV
def Load_Image(Path):
    img = cv2.imread(Path)[:, :, ::-1]  # OpenCV reads images in BGR format,
    # so we use [:, :, ::-1] to convert from BGR to RGB
    return img

## Load the image and mask
img = Load_Image("..........\\6192.jpg")
msk = Load_Image("...........\\6192.bmp")

## Call the function (unpack the returned pair; rebinding the function
## name, as in RandomFlip = RandomFlip(img, msk), would discard it)
img, msk = RandomFlip(img, msk)

## how can I display these two flipped images?
You can plot them with just plt as follows:
# make axes: images are arranged over one row
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
# you can change the size of the plot if required
fig_size = (12, 10)  # example base size
fig.set_figwidth(fig_size[0] * 2)   # where 2 is #columns
fig.set_figheight(fig_size[1] * 1)  # 1 is #rows
# plot the images on each axis
ax1.imshow(img)
# additionally you can specify a colormap, e.g.
ax2.imshow(msk, cmap='binary')
# you can set the title, axis labels, legends, as usual, e.g.
ax1.set_title('Image')
ax2.set_title('Mask')
# remove excess borders
fig.tight_layout()
# finally show the images
plt.show()
If you have more than two images you can rearrange them easily, in a grid-like fashion, by setting nrows and ncols.
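For example, six images in a 2x3 grid (a minimal sketch; imgs here is assumed to be any list of image arrays):

imgs = [img, msk, img, msk, img, msk]  # assumed: any list of image arrays
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(12, 8))
for ax, im in zip(axes.flat, imgs):
    ax.imshow(im)
    ax.axis('off')
fig.tight_layout()
plt.show()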
You can display the images using either OpenCV or Matplotlib.
If you wish to use OpenCV:
## Load the image and mask
img = Load_Image("..........\\6192.jpg")
msk = Load_Image("...........\\6192.bmp")
## Call the function
flp_img, flp_msk = RandomFlip(img, msk)
## Display the image
cv2.imshow('image', flp_img)
cv2.waitKey(0)
## Display the mask
cv2.imshow('mask', flp_msk)
cv2.waitKey(0)
## Save the images (imwrite needs a file extension to pick the format)
cv2.imwrite("flp_img.jpg", flp_img)
cv2.imwrite("flp_msk.bmp", flp_msk)
If you wish to use Matplotlib you can use the plt.imshow function, but the image first needs to be in RGB order rather than the BGR order used by OpenCV (note that the Load_Image helper above already performs this flip with [:, :, ::-1], in which case the extra cvtColor below can be skipped). We will use the same code for saving the images:
## Load the image and mask
img = Load_Image("..........\\6192.jpg")
msk = Load_Image("...........\\6192.bmp")
## Call the function
flp_img, flp_msk = RandomFlip(img, msk)
## Display the image
plt.imshow(cv2.cvtColor(flp_img, cv2.COLOR_BGR2RGB))
plt.show()
## Display the mask
plt.imshow(cv2.cvtColor(flp_msk, cv2.COLOR_BGR2RGB))
plt.show()
## Save the images (imwrite needs a file extension to pick the format)
cv2.imwrite("flp_img.jpg", flp_img)
cv2.imwrite("flp_msk.bmp", flp_msk)

Images are changed to different colors (with Pillow), how to get them back to the original colors?

I am trying to find the dominant color in a frame in a video. This works well; however, my frames are somehow converted into different colors. Yellow/pink becomes blue/purple-ish, but black and white stay the same (so the colors are not simply inverted).
Does anyone know where it comes from and how I can change it so that the original colors are kept? This is my code:
import cv2
from sklearn.cluster import KMeans
from collections import Counter
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches

video = cv2.VideoCapture('video.mp4')

def show_blurred_image(image, dominant_color):
    frame_to_blur = Image.fromarray(image)
    blurred_frame = cv2.blur(image, (200, 200))
    blurred_frame = Image.fromarray(blurred_frame)
    plt.subplot(121), plt.imshow(frame_to_blur), plt.title('Original')
    plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(blurred_frame), plt.title('Blurred')
    plt.xticks([]), plt.yticks([])
    R = round(dominant_color[0])
    G = round(dominant_color[1])
    B = round(dominant_color[2])
    custom_color = '#%02x%02x%02x' % (R, G, B)
    print(custom_color)
    rect = patches.Rectangle((1620, 0), 300, 1080, linewidth=1,
                             fill=True,
                             edgecolor=custom_color,
                             facecolor=custom_color)
    ax = plt.gca()
    ax.add_patch(rect)
    plt.show()

def get_dominant_color(image, k=4, image_processing_size=None):
    """
    takes an image as input
    returns the dominant color of the image as a list

    dominant color is found by running k means on the
    pixels & returning the centroid of the largest cluster

    processing time is sped up by working with a smaller image;
    this resizing can be done with the image_processing_size param
    which takes a tuple of image dims as input

    >>> get_dominant_color(my_image, k=4, image_processing_size=(25, 25))
    [56.2423442, 34.0834233, 70.1234123]
    """
    # resize image if new dims provided
    if image_processing_size is not None:
        image = cv2.resize(image, image_processing_size,
                           interpolation=cv2.INTER_AREA)
    # reshape the image to be a list of pixels
    image = image.reshape((image.shape[0] * image.shape[1], 3))
    # cluster and assign labels to the pixels
    clt = KMeans(n_clusters=k)
    labels = clt.fit_predict(image)
    # count labels to find the most popular
    label_counts = Counter(labels)
    # subset out the most popular centroid
    dominant_color = clt.cluster_centers_[label_counts.most_common(1)[0][0]]
    return list(dominant_color)

dominant_colors = []
show_frame = 10
frame_nb = 0
while video.isOpened():
    ret, frame = video.read()
    if ret == True:
        if frame_nb == show_frame:
            dominant_color = get_dominant_color(frame)
            show_blurred_image(frame, dominant_color)
        frame_nb += 1
    else:
        break
video.release()
cv2.destroyAllWindows()
OpenCV loads images in BGR format, while PIL and Matplotlib work with RGB. If you want to use the libraries together, you need to convert the images into the right color space.
In your case:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
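In the loop above, that conversion would sit right after video.read(), for instance (a sketch using the variable names from the question's code):

while video.isOpened():
    ret, frame = video.read()
    if ret == True:
        # OpenCV delivers BGR frames; convert to RGB before any
        # PIL/Matplotlib work, so the k-means centroid is also an
        # RGB triple, matching the '#%02x%02x%02x' hex formatting
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if frame_nb == show_frame:
            dominant_color = get_dominant_color(frame)
            show_blurred_image(frame, dominant_color)
        frame_nb += 1
    else:
        break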

Can I iterate over an image while ignoring black pixels?

I have an image segmented using SLIC from skimage, and for every superpixel I get an image where only that superpixel is colored and the rest of the image is black. I want to iterate ONLY over the colored pixels of that one superpixel.
I have tried using a for loop like this:
for i in range(0, mask.shape[0]):
    for j in range(0, mask.shape[1]):
        x, y, z = each_segment[i][j]  # gets the pixel RGB value
        unique_pixel_array = [x, y, z]  # holds those values for the current pixel
        if unique_pixel_array != [0, 0, 0]:
            print(unique_pixel_array)
This method works, but it is very inefficient, considering it iterates over the entire image; if I have a big image it will take a very long time to process every superpixel.
Is there a faster and more efficient way to do this?
I will attach the whole code below; maybe it will give you a better sense of the whole thing.
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
import matplotlib.pyplot as plt
import numpy as np
import cv2

img = cv2.imread("image.png")
segments = slic(img_as_float(img), n_segments=7, slic_zero=True, sigma=5)

fig = plt.figure("Superpixels -- %d segments" % (22))
ax = fig.add_subplot(1, 1, 1)
ax.imshow(mark_boundaries(img, segments))
plt.axis("off")
plt.show()

for (sp, segVal) in enumerate(np.unique(segments)):
    mask = np.zeros(img.shape[:2], dtype="uint8")
    mask[segments == segVal] = 255
    each_segment = cv2.bitwise_and(img, img, mask=mask)
    for i in range(0, mask.shape[0]):
        for j in range(0, mask.shape[1]):
            x, y, z = each_segment[i][j]
            unique_pixel_array = [x, y, z]
            print(unique_pixel_array)
    cv2.imshow("Mask", mask)
    cv2.imshow("Applied", cv2.bitwise_and(img, img, mask=mask))
    cv2.waitKey(0)
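A faster alternative (a sketch, not from the original post): NumPy boolean indexing selects exactly the pixels where the mask is set, which replaces the Python-level double loop:

# all pixels of the current superpixel as an (N, 3) array of BGR values
colored_pixels = each_segment[mask == 255]
print(colored_pixels)

# if the (row, col) coordinates are needed too:
ys, xs = np.nonzero(mask)
for y, x in zip(ys, xs):
    b, g, r = each_segment[y, x]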

How to get histogram of intensity of individual masked cells in an image?

OK, so newbie here who has been working on a set of homework problems, with the original post here: How do I make a mask from one image and then transfer it to another? The original idea was to take the DAPI image (grey image) and apply it as a mask to the NPM1 (green) image. After implementing the suggested code from HansHirse (thanks!) along with some other code I had been writing for the homework problem, I finally got a working histogram of all compatible cells in the image. The "compatibility" bit is that any cells touching the border weren't supposed to be counted. However, I still need a way to get histograms of each individual cell as well. I've attached the original images from the post too:
To do this, I tried blob_doh and one other method to get segmented regions of each cell, but I have no idea how to apply these coordinates to the image for the histogram.
PS. The code is a bit messy. I segmented the code such that the blob_doh is near the bottom and the other method is also its own separate piece at the very bottom. Sorry!
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import cv2
import mahotas as mh
import scipy
from scipy import ndimage
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import label2rgb

# Read image into numpy array
image = cv2.imread("NOTREATDAPI.jpg", 0)
dna = np.array(image)  # must be a gray-scale image
plt.show()

# Remove extraneous artifacts from image; set the threshold
dnaf = ndimage.gaussian_filter(dna, 8)  # gaussian filter for general image
T = mh.thresholding.otsu(dnaf)  # set threshold via mahotas otsu thresholding
theta = np.array(dnaf > T)  # mask of values in image above the calculated otsu threshold
cleared = clear_border(theta)  # removes all cells that are in contact with the image border
epsilon = np.array(cleared)  # final masked DAPI product
print("DAPI MASK USING GAUSSIAN FILTER AND OTSU THRESHOLDING")
plt.imshow(epsilon)
plt.show()

# Load and reset original images
image = cv2.imread("NOTREATDAPI.jpg", 0)   # The DAPI Image
image1 = cv2.imread("NOTREATNPM1.jpg", 0)  # The NPM1 Image
print("Original DAPI Image"); plt.imshow(image); plt.show()    # The DAPI Image
print("Original NPM1 Image"); plt.imshow(image1); plt.show()   # The NPM1 Image

# Create an array of bool of same shape as image
maskAboveThreshold = epsilon > 0  # use the mask array from above - include only values above non-masked zeros
print("Final Masked Image of NPM1"); plt.imshow(image1 * maskAboveThreshold, cmap='gray')
plt.show()
True_NPM1 = image1 * maskAboveThreshold  # final masked version of NPM1, back in grayscale

# Create a mask from the masked NPM1 image with binary thresholding
_, mask = cv2.threshold(True_NPM1, 1, 255, cv2.THRESH_BINARY)
# Do some morphological opening to get rid of small artifacts
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN,
                        cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15)))
# Calculate the histogram using the NPM1 image and the obtained binary mask
hist = cv2.calcHist([image1], [0], mask, [256], [0, 256])
# Show bar plot of calculated histogram
plt.bar(np.arange(256), np.squeeze(hist))
plt.show()
# Show mask image
plt.imshow(mask)
plt.show()
# blob_doh way of segmenting the cells ------
import cv2 as cv
from PIL import Image, ImageDraw

image10 = np.array(Image.open("OXALIDAPI.jpg"))
plt.imshow(image10)

# Convert to gaussian image with thresholds
image10 = cv2.imread("OXALIDAPI.jpg", 0)
dna = np.array(image10)  # gray-scale image
plt.show()

# Remove extraneous artifacts from image; set the threshold
dnaf = ndimage.gaussian_filter(dna, 8)  # gaussian filter for general image
T = mh.thresholding.otsu(dnaf)  # set threshold via mahotas otsu thresholding
theta = np.array(dnaf > T)  # mask of values in image above the calculated otsu threshold
cleared = clear_border(theta)  # removes all cells that are in contact with the image border
image = np.array(cleared)  # final masked DAPI product
#print("DAPI MASK USING GAUSSIAN FILTER AND OTSU THRESHOLDING")
plt.imshow(epsilon)
plt.show()

# Convert image to grayscale
image_gray = rgb2gray(image)
plt.imshow(image_gray, cmap="gray")

def plot_blobs(img, blobs):
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(img, interpolation='nearest')
    for blob in blobs:
        y, x, r = blob
        c = plt.Circle((x, y), r * 1.25, color="red", linewidth=1, fill=False)
        ax.add_patch(c)

# blob_doh
blobs_doh = blob_doh(image_gray, min_sigma=10, max_sigma=256,
                     threshold=.025)
plot_blobs(image, blobs_doh)

# get blob coordinates
def filter_blobs(blobs, r_cutoff=5):
    new_blobs = []
    for b in blobs:
        if b[2] > r_cutoff:
            new_blobs.append(b)
    return new_blobs

new_blobs = filter_blobs(blobs_doh)
#plot_blobs(image, new_blobs)
print(new_blobs)
# Other method of segmenting cells. maybe useful?
yeta = cv2.imread("NOTREATDAPI.jpg", 0)
image = np.array(yeta)

# apply threshold
dnaf = ndimage.gaussian_filter(image, 8)
T = mh.thresholding.otsu(dnaf)  # set threshold
plt.imshow(dnaf > T)
epsilon = np.array(dnaf > T)
plt.show()

# remove artifacts connected to image border
cleared = clear_border(epsilon)

# label image regions
label_image = label(cleared)
image_label_overlay = label2rgb(label_image, image=image)

fig, ax = plt.subplots(figsize=(6, 6))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
    # take regions with large enough areas
    if region.area >= 50:
        # draw rectangle around individual cells
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=0.5)
        ax.add_patch(rect)
#ax.set_axis_off()
#plt.tight_layout()
plt.show()
howzer = np.array(image_label_overlay)
What you are looking for is cv2.connectedComponents. Basically, once you have the binary mask that separates the cells, you label each connected component of the mask as one cell:
import cv2
import numpy as np
import matplotlib.pyplot as plt

# dapi: the DAPI image loaded in grayscale,
# e.g. dapi = cv2.imread('NOTREATDAPI.jpg', cv2.IMREAD_GRAYSCALE)
# I choose OTSU instead of plain binary, but they are not much different in this case
_, mask = cv2.threshold(dapi, 25, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# compute the connected components; num_labels is the number of labels,
# markers is an image where each pixel holds its component's label
num_labels, markers = cv2.connectedComponents(mask)
# load the 2nd image in grayscale,
# as your 2nd image is only green/black
npm1 = cv2.imread('npm1.jpg', cv2.IMREAD_GRAYSCALE)
# for your image (and usually), label 0 is the background
for label in range(1, num_labels):
    # compute the histogram over the entire 256 levels of intensity
    hist, bins = np.histogram(npm1[markers == label], bins=range(256))
    # do whatever you like with hist
    # note that bins has 256 edges, so hist holds 255 values
    plt.bar(bins[1:], hist)
    plt.title('cell number: {:}'.format(label))
So, for example, the histograms of the first and second cells: [histogram images]
And the cell markers: [markers image]

Load RGB images as an ndarray, and plot out with color change

I tried to load and plot several images (jpg) from a local folder, and found that the plotted images had changed color. The color-channel correction between OpenCV and Matplotlib has already been done.
How did this happen? How can I correct the colors?
Thanks.
import cv2
from matplotlib import pyplot as plt
import numpy as np
import os

folder = 'New_Web_Image'
img_list = np.empty([0, 32, 32, 3])
for file in os.listdir(folder):
    img = cv2.imread(os.path.join(folder, file))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (32, 32), interpolation=cv2.INTER_AREA)
    #plt.imshow(img)
    #plt.show()  # if I plot the image here, the image shows the right color
    img_list = np.append(img_list, [img[:, :, :]], axis=0)

print(img_list.shape)  # the list's shape checks out right
plt.imshow(img_list[0])
plt.show()  # if I plot the image from the list, the color has changed
Here is the image result inside the loop: [image]
Here is the image from the ndarray list: [image]
It's not a color correction. OpenCV orders layers as BGR, rather than the RGB we usually expect. As long as you're staying within the OpenCV world, that should be fine. But handing an image loaded via cv2.imread() to matplotlib.pyplot steps outside that world, which is why you need
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
to get the layers reordered first.
A bunch of other interesting (and possibly useful) conversions are possible. See http://docs.opencv.org/3.2.0/df/d9d/tutorial_py_colorspaces.html
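For example, you can list every conversion flag your OpenCV build exposes:

import cv2
flags = [f for f in dir(cv2) if f.startswith('COLOR_')]
print(len(flags), flags[:5])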
To half-answer my own question, I've corrected the colors by
loading the images into a plain list and converting it to an ndarray first,
and then changing color & size and plotting the images.
Updated code:
import cv2
from matplotlib import pyplot as plt
import numpy as np
import os

# Load the images
folder = 'New_Web_Image'
img_list = []
for file in os.listdir(folder):
    img = cv2.imread(os.path.join(folder, file))
    if img is not None:
        img_list.append(img)
img_list = np.asarray(img_list)

# Plot the images
n = img_list.shape[0]
fig, axs = plt.subplots(1, n, figsize=(20, 5), dpi=80)
for i in range(n):
    img = img_list[i]
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (32, 32), interpolation=cv2.INTER_AREA)
    axs[i].imshow(img)
plt.show()
Another half question, "how did the color change in the previous code?", is still unclear to me.
Thanks in advance to anyone who would suggest.
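A likely explanation, with a minimal sketch (my note, not from the thread; the dtype behavior below is standard NumPy/Matplotlib): np.empty([0, 32, 32, 3]) defaults to dtype float64, so np.append casts the appended uint8 images to float64, and plt.imshow interprets float image data as values in [0, 1], clipping anything above 1. uint8 data is shown over [0, 255] instead, which is why building a plain list and converting with np.asarray keeps the colors intact:

import numpy as np

img = np.full((32, 32, 3), 200, dtype=np.uint8)  # a bright uint8 test image

appended = np.append(np.empty([0, 32, 32, 3]), [img], axis=0)
print(appended.dtype)  # float64 -> plt.imshow clips values above 1.0

listed = np.asarray([img])
print(listed.dtype)    # uint8 -> plt.imshow shows 0..255 as intended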
