Histogram of a region of an image - python

I want to get the histogram of a region of a numpy image in Python. I found a solution here on how to use a mask.
That solution didn't help me, because with it I would lose the real number of black pixels. Also, the region I want is not necessarily rectangular.

To compute a histogram, use the np.histogram function. It returns the histogram and the bins, so you can store the results and work with them:
hist, bins = np.histogram(arr, bins=bins, range=range)
If you want to plot the results, you can use plt.bar after applying np.histogram, simply passing bins and hist (note that np.histogram returns one more bin edge than counts, so drop the last edge):
plt.bar(bins[:-1], hist)
Another option is matplotlib's plt.hist, which computes the histogram and plots it from the raw data:
plt.hist(arr, bins=bins)
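For a quick sanity check that both routes agree, here is a minimal sketch (the gray values are random, just for illustration):
import numpy as np
import matplotlib.pyplot as plt
arr = np.random.randint(0, 256, size=1000)  # hypothetical 8-bit gray values
hist, bins = np.histogram(arr, bins=256, range=(0, 256))
plt.bar(bins[:-1], hist)  # bins has 257 edges, hist has 256 counts
plt.figure()
plt.hist(arr, bins=256, range=(0, 256))  # same histogram in one call
plt.show()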
Here is a complete example computing the histogram of an image region of any shape:
Code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.misc import face  # deprecated in recent SciPy; use scipy.datasets.face there
from PIL import Image, ImageDraw
# Let's create test image with different colors
img = np.zeros((300, 300, 3), dtype=np.uint8)
img[0:150, 0:150] = [255, 0, 0]
img[0:150, 150:] = [0, 255, 0]
img[150:, :150] = [0, 0, 255]
img[150:, 150:] = [255, 255, 255]
# define our function for preparing the mask
def prepare_mask(polygon, image):
    """Returns a binary mask based on the input polygon, given as a list of vertex coordinates.
    Params:
        polygon (list) - coordinates of the polygon's vertices. Ex: [(x1,y1),(x2,y2),...] or [x1,y1,x2,y2,...]
        image (numpy array) - original image. Used to create a mask of the same size. Shape (H, W, C).
    Output:
        mask (numpy array) - boolean mask. Shape (H, W).
    """
    # create an "empty" pre-mask with the same size as the original image
    width = image.shape[1]
    height = image.shape[0]
    mask = Image.new('L', (width, height), 0)
    # draw the mask based on the polygon
    ImageDraw.Draw(mask).polygon(polygon, outline=1, fill=1)
    # convert to a boolean np array
    mask = np.array(mask).astype(bool)
    return mask
def compute_histogram(mask, image):
    """Returns a histogram for the image region defined by the mask, for each channel.
    Params:
        image (numpy array) - original image. Shape (H, W, C).
        mask (numpy array) - boolean mask. Shape (H, W).
    Output:
        list of tuples, one per channel; each tuple contains 2 arrays: the computed histogram and the bins.
    """
    # apply the boolean mask to the array; this gives an array of shape (N, C)
    region = image[mask]
    red = np.histogram(region[..., 0].ravel(), bins=256, range=[0, 256])
    green = np.histogram(region[..., 1].ravel(), bins=256, range=[0, 256])
    blue = np.histogram(region[..., 2].ravel(), bins=256, range=[0, 256])
    return [red, green, blue]
def plot_histogram(histograms):
    """Plots the histogram computed for each channel.
    Params:
        histograms (list of tuples) - [(red_ch_hist, bins), (green_ch_hist, bins), (blue_ch_hist, bins)]
    """
    colors = ['r', 'g', 'b']
    for hist, ch in zip(histograms, colors):
        plt.bar(hist[1][:256], hist[0], color=ch)
# Create some test masks
red_polygon = [(50, 100), (50, 50), (100, 75)]
green_polygon = [(200, 100), (200, 50), (250, 75)]
blue_polygon = [(50, 250), (50, 200), (100, 225)]
white_polygon = [(200, 250), (200, 200), (250, 225)]
polygons = [red_polygon, green_polygon, blue_polygon, white_polygon]
for polygon in polygons:
    mask = prepare_mask(polygon, img)
    histograms = compute_histogram(mask, img)
    # Let's plot our test results
    plt.figure(figsize=(10, 10))
    plt.subplot(221)
    plt.imshow(img)
    plt.title('Image')
    plt.subplot(222)
    plt.imshow(mask, cmap='gray')
    plt.title('Mask')
    plt.subplot(223)
    plot_histogram(histograms)
    plt.title('Histogram')
    plt.show()
Output:
The final test, on the raccoon:
Code:
raccoon = face()
polygon = [(200, 700), (150, 600), (300, 500), (300, 400), (400, 500)]
mask = prepare_mask(polygon, raccoon)
histograms = compute_histogram(mask, raccoon)
plt.figure(figsize=(10, 10))
plt.subplot(221)
plt.imshow(raccoon)
plt.title('Image')
plt.subplot(222)
plt.imshow(mask, cmap='gray')
plt.title('Mask')
plt.subplot(223)
plot_histogram(histograms)
plt.title('Histogram')
plt.show()
Output:

Let's define the (x, y) coordinates of 5 points (p0, p1, p2, p3, p4) as the corners of the region. We can make a mask using OpenCV's fillPoly function after concatenating the points into an np array. The pixel values of the image can then be filtered with this mask. I use matplotlib's histogram; OpenCV and NumPy also have histogram functions.
import cv2
import numpy as np
from matplotlib import pyplot as plt
img_x = img.copy()
pts = np.concatenate((p0, p1, p2, p3, p4)).reshape((-1, 1, 2))
cv2.fillPoly(img_x, [pts], (255, 255, 255))
# require all 3 channels to be 255, so stray single-channel matches are excluded
mask = (img_x == (255, 255, 255)).all(axis=2)
n, bins, patches = plt.hist(img[mask].ravel(), 256, [0, 256])
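For reference, a minimal end-to-end sketch; the image and the five vertices below are made-up values, only there to make the snippet runnable:
import cv2
import numpy as np
from matplotlib import pyplot as plt
# hypothetical test image and polygon vertices (assumed for illustration)
img = np.full((300, 300, 3), (30, 60, 90), dtype=np.uint8)
p0, p1, p2, p3, p4 = [100, 50], [200, 80], [230, 180], [150, 250], [70, 180]
img_x = img.copy()
pts = np.concatenate((p0, p1, p2, p3, p4)).reshape((-1, 1, 2)).astype(np.int32)
cv2.fillPoly(img_x, [pts], (255, 255, 255))
mask = (img_x == (255, 255, 255)).all(axis=2)
plt.hist(img[mask].ravel(), 256, [0, 256])
plt.show()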

Related

Unwanted image shape cv2

I am trying to generate images to train a neural network.
I make these images by creating an empty np array
image = np.empty((400, 400), np.int32) and drawing the shape on it:
image = cv2.circle(image, (CordX, CordY), rad, color, thickness)
I don't know how to make it 3-dimensional (image.shape is currently (400, 400)), and the output should be (400, 400, 3).
To convert a gray image to RGB, you can simply repeat the single channel 3 times:
# Create additional axis
image = image[:, :, None]
print(image.shape) # (400, 400, 1)
# Repeat 3 times along axis 2
image = image.repeat(3, 2)
print(image.shape) # (400, 400, 3)
Full example:
image = np.zeros((400, 400), np.int32)  # zeros, not empty: np.empty leaves uninitialized garbage values
image = cv2.circle(image, (200, 200), 20, 100, 3)
image = image[:, :, None].repeat(3, 2)
import matplotlib.pyplot as plt
plt.imshow(image)
plt.show()
Alternatively, you could create the RGB image in the first place:
image = np.zeros((400, 400, 3), np.int32)
image = cv2.circle(image, (200, 200), 20, (100, 100, 100), 3)
This is the same image as above.
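As a side note, OpenCV can do the channel expansion for you with cvtColor; a one-line sketch:
import cv2
import numpy as np
gray = np.zeros((400, 400), np.uint8)  # a single-channel image; cvtColor wants uint8/uint16/float32
rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)  # replicates the channel 3 times
print(rgb.shape)  # (400, 400, 3)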

Draw image in rectangle python

I have plotted a rectangle using matplotlib and would like to place an image in it as shown in the image below. Does anyone have an idea how I can achieve this in python?
Here is one way using Python/OpenCV/NumPy. Do a perspective warp of the panda image using its 4 corners and the 4 corners of the rectangle. Then make a mask of the excess regions, which are black in the warped image. Finally, blend the warped image and the background image using the mask.
Input:
Graph Image:
import numpy as np
import cv2
import math
# read image to be processed
img = cv2.imread("panda.png")
hh, ww = img.shape[:2]
# read background image
bck = cv2.imread("rectangle_graph.png")
hhh, www = bck.shape[:2]
# specify coordinates for corners of img in order TL, TR, BR, BL as x,y pairs
img_pts = np.float32([[0,0], [ww-1,0], [ww-1,hh-1], [0,hh-1]])
# manually pick coordinates of corners of rectangle in background image
bck_pts = np.float32([[221,245], [333,26], [503,111], [390,331]])
# compute perspective matrix
matrix = cv2.getPerspectiveTransform(img_pts,bck_pts)
#print(matrix)
# change black and near-black to graylevel 1 in each channel so that no values
# inside panda image will be black in the subsequent mask
img[np.where((img<=[5,5,5]).all(axis=2))] = [1,1,1]
# do perspective transformation setting area outside input to black
img_warped = cv2.warpPerspective(img, matrix, (www,hhh), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))
# make mask for area outside the warped region
# (black in image stays black and rest becomes white)
mask = cv2.cvtColor(img_warped, cv2.COLOR_BGR2GRAY)
mask = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]
mask = cv2.merge([mask,mask,mask])
mask_inv = 255 - mask
# use mask to blend between img_warped and bck
result = (bck * (mask_inv / 255.0) + img_warped * (mask / 255.0)).clip(0, 255).astype(np.uint8)
# save images
cv2.imwrite("panda_warped.png", img_warped)
cv2.imwrite("panda_warped_mask.png", mask)
cv2.imwrite("panda_in_graph.png", result)
# show the result
cv2.imshow("warped", img_warped)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Warped Input:
Mask:
Result:
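Since the mask is a hard 0/255 mask, the float blend above can also be replaced with a straight per-pixel selection; a small sketch reusing the mask, img_warped, and bck variables from the code above:
# pick warped pixels where the mask is white, background pixels elsewhere
result = np.where(mask == 255, img_warped, bck)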
You can use imshow to place the image at a given position. And add a transform to give the image the same rotation as the rectangle.
To steer away from possible copyright issues, the following code uses an image from wikipedia (author: Fernando Revilla):
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.patches import Rectangle
file = 'https://upload.wikimedia.org/wikipedia/commons/thumb/8/82/Giant_Panda_Tai_Shan.JPG/1200px-Giant_Panda_Tai_Shan.JPG'
img = plt.imread(file, format='jpg')
fig, ax = plt.subplots()
# suppose a rectangle was drawn onto the plot
x, y = 20, 30
width, height = 12, 9
angle = 70
rect = Rectangle((x, y), width, height, angle=angle, ec='black', fc='none', lw=3)
ax.add_patch(rect)
# draw the image using the rectangles position and rotation
tr = transforms.Affine2D().translate(-x, -y).rotate_deg(angle).translate(x, y)
ax.imshow(img, extent=[x, x + width, y, y + height], transform=tr + ax.transData)
ax.set_aspect('equal') # keep right angles
ax.relim()
ax.autoscale()
plt.show()

In Python, with OpenCV, how to correctly set the maxRadius parameter of HoughCircles?

So I have this image (480, 640, 3):
And I want to detect the different circles in it (mainly the red one, but that's not the point).
Here is my code :
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('img0.png')
print(img.shape)
sat = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)[:, :, 1]
print(img.shape)
circles = cv2.HoughCircles(sat, cv2.HOUGH_GRADIENT, 1, minDist=30, maxRadius=500)
print(circles)
if circles is not None:  # code stolen from here: https://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/
    # convert the (x, y) coordinates and radius of the circles to integers
    circles = np.round(circles[0, :]).astype("int")
    # loop over the (x, y) coordinates and radius of the circles
    for (x, y, r) in circles:
        # draw the circle in the output image, then draw a rectangle
        # corresponding to the center of the circle
        cv2.circle(img, (x, y), r, (0, 255, 0), 4)
        cv2.rectangle(img, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
plt.imshow(img)
plt.show()
Note that I use the saturation channel as the gray image, since I want the red part of my hand spinner; it is easier that way.
This outputs the following:
Which is not that bad for me (ignore the wrong color scheme, it's OpenCV's BGR displayed as RGB), except that there are circles with too large a radius. The output of print(circles) is:
[[[420.5 182.5 141.3]
[420.5 238.5 84.5]
[335.5 283.5 35. ]
[253.5 323.5 42.7]
[417.5 337.5 43.6]]]
(each entry is [x, y, radius])
Basically, it means that the circles of interest have radii below 50, and I want to get rid of the first two. I wanted to use the maxRadius parameter (notice that in my code it is currently 500). My guess was that if I set maxRadius to 50, it would remove the unwanted circles, but instead it deleted all of them... I have found that with maxRadius at 400 I get an output that "works":
And with maxRadius below 200, no circles are found at all.
What am I missing here?
I am working on Windows, Python 3.7.7, latest version of OpenCV.
The following seems to work in Python/OpenCV.
Read the input
Convert to HSV and extract the saturation channel
Median filter
Do Hough Circles processing
Draw the circles
Save the results
Input
import cv2
import numpy as np
# Read image
img = cv2.imread('circles.png')
hh, ww = img.shape[:2]
# Convert to HSV and extract the saturation channel
sat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:,:,1]
# median filter
median = cv2.medianBlur(sat, 7)
# get Hough circles
min_dist = int(ww/20)
circles = cv2.HoughCircles(median, cv2.HOUGH_GRADIENT, 1, minDist=min_dist, param1=150, param2=50, minRadius=0, maxRadius=0)
print(circles)
# draw circles
result = img.copy()
for circle in circles[0]:
    # draw the circle in the output image, then draw a rectangle
    # corresponding to the center of the circle
    (x, y, r) = circle
    x = int(x)
    y = int(y)
    r = int(r)  # cv2.circle needs an integer radius
    cv2.circle(result, (x, y), r, (0, 255, 0), 4)
    cv2.rectangle(result, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
# save results
cv2.imwrite('circles_saturation.jpg', sat)
cv2.imwrite('circles_median.jpg', median)
cv2.imwrite('circles_result.jpg', result)
# show images
cv2.imshow('sat', sat)
cv2.imshow('median', median)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Saturation image:
Median filtered image:
Results:
Circles data:
[[[258.5 323.5 52.7]
[340.5 193.5 51.3]
[422.5 326.5 34.1]
[333.5 276.5 33.6]]]
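If oversized detections still slip through, you can also simply filter the returned array by radius instead of fighting maxRadius; a small sketch using the circles array from above and the asker's threshold of 50:
circles = circles[0]                   # shape (N, 3): each row is x, y, r
circles = circles[circles[:, 2] < 50]  # keep only circles with radius below 50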

How can I get the RGB color values from inside a contour in an image using OpenCV?

I know some questions like this have already been asked here, but they didn't help me solve my problem. I will appreciate any help.
I'm new to OpenCV.
I have an image and apply some code to get the contours from it. Now I want to get the RGB color values from inside the detected contours. How can I do that?
I did some research and found that this could be solved using contours, so I tried to implement them, and now I finally want to get the color values inside the contours.
Here is my Code:
import cv2
import numpy as np
img = cv2.imread('C:/Users/Rizwan/Desktop/example_strip1.jpg')
img_hsv = cv2.cvtColor(255-img, cv2.COLOR_BGR2HSV)
lower_red = np.array([40, 20, 0])
upper_red = np.array([95, 255, 255])
mask = cv2.inRange(img_hsv, lower_red, upper_red)
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
color_detected_img = cv2.bitwise_and(img, img, mask=mask)
print(len(contours))
for c in contours:
    area = cv2.contourArea(c)
    x, y, w, h = cv2.boundingRect(c)
    ax = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    im = cv2.drawContours(color_detected_img, [box], -1, (255, 0, 0), 2)
cv2.imshow("Cropped", color_detected_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
I expect the output to be the RGB values of the detected color inside the contours.
As asked in the comments, here's a possible solution to extract the BGR(!) values from the pixels of an image inside a previously found contour. The proper detection of the desired colored stripes is omitted here, as also discussed in the comments.
Having an image and a filled mask of a contour, for example from cv2.drawContours, we can simply use NumPy's boolean array indexing by converting the (most likely uint8) mask to a bool_ array.
Here's a short code snippet that uses NumPy's savetxt to store all values in some txt file:
import cv2
import numpy as np
# Some dummy image
img = np.zeros((100, 100, 3), np.uint8)
img = cv2.rectangle(img, (0, 0), (49, 99), (255, 0, 0), cv2.FILLED)
img = cv2.rectangle(img, (50, 0), (99, 49), (0, 255, 0), cv2.FILLED)
img = cv2.rectangle(img, (50, 50), (99, 99), (0, 0, 255), cv2.FILLED)
# Mask of some dummy contour
mask = np.zeros((100, 100), np.uint8)
mask = cv2.fillPoly(mask, np.array([[[20, 20], [30, 70], [70, 50], [20, 20]]]), 255)
# Show only for visualization purposes
cv2.imshow('img', img)
cv2.imshow('mask', mask)
# Convert mask to boolean array
mask = np.bool_(mask)
# Use boolean array indexing to get all BGR values from img within mask
values = img[mask]
# For example, save values to txt file
np.savetxt('values.txt', values)
cv2.waitKey(0)
cv2.destroyAllWindows()
The dummy image looks like this:
The dummy contour mask looks like this:
The resulting values.txt has over 1000 entries; please check yourself. Attention: the values are BGR values; if you need RGB values, convert the image to RGB beforehand.
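If you only need a summary rather than every pixel, cv2.mean with the original uint8 mask (before the bool_ conversion above) returns the average BGR color inside the contour directly:
mean_b, mean_g, mean_r, _ = cv2.mean(img, mask=mask)  # mask must still be uint8 here
print(mean_b, mean_g, mean_r)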
Hope that helps!

Blend overlapping images in python

I am taking two images in Python and overlapping the first image onto the second one. What I would like to do is blend the images where they overlap. Is there a way to do this in Python other than with a for loop?
PIL has a blend function which combines two RGB images with a fixed alpha:
out = image1 * (1.0 - alpha) + image2 * alpha
However, to use blend, image1 and image2 must be the same size. So to prepare your images, you'll need to paste each of them into a new image of the appropriate (combined) size.
Since blending with alpha=0.5 averages the RGB values from both images equally, we need to make two versions of the panorama -- one with img1 on top and one with img2 on top. Then regions with no overlap have RGB values which agree (so their averages remain unchanged), and regions of overlap get blended as desired.
import operator
from PIL import Image
from PIL import ImageDraw
# suppose img1 and img2 are your two images
img1 = Image.new('RGB', size=(100, 100), color=(255, 0, 0))
img2 = Image.new('RGB', size=(120, 130), color=(0, 255, 0))
# suppose img2 is to be shifted by `shift` amount
shift = (50, 60)
# compute the size of the panorama
nw, nh = map(max, map(operator.add, img2.size, shift), img1.size)
# paste img1 on top of img2
newimg1 = Image.new('RGBA', size=(nw, nh), color=(0, 0, 0, 0))
newimg1.paste(img2, shift)
newimg1.paste(img1, (0, 0))
# paste img2 on top of img1
newimg2 = Image.new('RGBA', size=(nw, nh), color=(0, 0, 0, 0))
newimg2.paste(img1, (0, 0))
newimg2.paste(img2, shift)
# blend with alpha=0.5
result = Image.blend(newimg1, newimg2, alpha=0.5)
img1:
img2:
result:
If you have two RGBA images, here is a way to perform alpha compositing.
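A minimal sketch with PIL's Image.alpha_composite, which requires two RGBA images of the same size (the colors here are made up for illustration):
from PIL import Image
base = Image.new('RGBA', (100, 100), (255, 0, 0, 255))     # opaque red
overlay = Image.new('RGBA', (100, 100), (0, 255, 0, 128))  # half-transparent green
result = Image.alpha_composite(base, overlay)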
If you'd like a soft edge when stitching two images together, you could blend them with a sigmoid function.
Here is a simple grayscale example:
import numpy as np
import matplotlib.image
import math
def sigmoid(x):
    y = np.zeros(len(x))
    for i in range(len(x)):
        y[i] = 1 / (1 + math.exp(-x[i]))
    return y
sigmoid_ = sigmoid(np.arange(-1, 1, 1/50))
alpha = np.repeat(sigmoid_.reshape((len(sigmoid_), 1)), repeats=100, axis=1)
image1_connect = np.ones((100, 100))
image2_connect = np.zeros((100, 100))
out = image1_connect * (1.0 - alpha) + image2_connect * alpha
matplotlib.image.imsave('blend.png', out, cmap = 'gray')
If you blend white and black squares, the result will look something like this:
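As a side note, the loop in sigmoid above can be replaced by a vectorized NumPy expression that computes the same ramp without an explicit Python loop:
sigmoid_ = 1 / (1 + np.exp(-np.arange(-1, 1, 1/50)))  # identical values, vectorized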
