I'm using Python to get images from an IP camera over an Ethernet connection and then process them, looking for specific targets. I am using GRIP to generate code that looks for the specific targeted areas. (For those unfamiliar with GRIP: it offers a desktop GUI where you can see a live video feed and adjust parameters until you get the desired output. Then you can auto-generate a piece of code, mine is in Python, that will run that processing 'pipeline' on any image you feed into it from your own code.)
After extensively debugging my connection code, I finally got a working connection that gets the image from the IP camera and sends it into the GRIP pipeline. However, the processing of the image fails with a Segmentation Fault and no indicated line numbers. Here is the pipeline code (auto-generated):
import cv2
import numpy
import math
from enum import Enum
class GripPipeline:
"""
An OpenCV pipeline generated by GRIP.
"""
def __init__(self):
"""initializes all values to presets or None if need to be set
"""
self.__blur_type = BlurType.Median_Filter
self.__blur_radius = 19.81981981981982
self.blur_output = None
self.__hsv_threshold_input = self.blur_output
self.__hsv_threshold_hue = [72.84172661870504, 86.31399317406144]
self.__hsv_threshold_saturation = [199.50539568345323, 255.0]
self.__hsv_threshold_value = [89.43345323741006, 255.0]
self.hsv_threshold_output = None
self.__find_contours_input = self.hsv_threshold_output
self.__find_contours_external_only = False
self.find_contours_output = None
self.__filter_contours_contours = self.find_contours_output
self.__filter_contours_min_area = 500.0
self.__filter_contours_min_perimeter = 0.0
self.__filter_contours_min_width = 0.0
self.__filter_contours_max_width = 1000.0
self.__filter_contours_min_height = 0.0
self.__filter_contours_max_height = 1000.0
self.__filter_contours_solidity = [0, 100]
self.__filter_contours_max_vertices = 1000000.0
self.__filter_contours_min_vertices = 0.0
self.__filter_contours_min_ratio = 0.0
self.__filter_contours_max_ratio = 1000.0
self.filter_contours_output = None
def process(self, source0):
"""
Runs the pipeline and sets all outputs to new values.
"""
# Step Blur0:
self.__blur_input = source0
(self.blur_output) = self.__blur(self.__blur_input, self.__blur_type, self.__blur_radius)
# Step HSV_Threshold0:
self.__hsv_threshold_input = self.blur_output
(self.hsv_threshold_output) = self.__hsv_threshold(self.__hsv_threshold_input, self.__hsv_threshold_hue, self.__hsv_threshold_saturation, self.__hsv_threshold_value)
# Step Find_Contours0:
self.__find_contours_input = self.hsv_threshold_output
(self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only)
# Step Filter_Contours0:
self.__filter_contours_contours = self.find_contours_output
(self.filter_contours_output) = self.__filter_contours(self.__filter_contours_contours, self.__filter_contours_min_area, self.__filter_contours_min_perimeter, self.__filter_contours_min_width, self.__filter_contours_max_width, self.__filter_contours_min_height, self.__filter_contours_max_height, self.__filter_contours_solidity, self.__filter_contours_max_vertices, self.__filter_contours_min_vertices, self.__filter_contours_min_ratio, self.__filter_contours_max_ratio)
@staticmethod
def __blur(src, type, radius):
"""Softens an image using one of several filters.
Args:
src: The source mat (numpy.ndarray).
type: The blurType to perform represented as an int.
radius: The radius for the blur as a float.
Returns:
A numpy.ndarray that has been blurred.
"""
if(type is BlurType.Box_Blur):
ksize = int(2 * round(radius) + 1)
return cv2.blur(src, (ksize, ksize))
elif(type is BlurType.Gaussian_Blur):
ksize = int(6 * round(radius) + 1)
return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
elif(type is BlurType.Median_Filter):
ksize = int(2 * round(radius) + 1)
return cv2.medianBlur(src, ksize)
else:
return cv2.bilateralFilter(src, -1, round(radius), round(radius))
@staticmethod
def __hsv_threshold(input, hue, sat, val):
"""Segment an image based on hue, saturation, and value ranges.
Args:
input: A BGR numpy.ndarray.
hue: A list of two numbers that are the min and max hue.
sat: A list of two numbers that are the min and max saturation.
val: A list of two numbers that are the min and max value.
Returns:
A black and white numpy.ndarray.
"""
out = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)
return cv2.inRange(out, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))
@staticmethod
def __find_contours(input, external_only):
"""Sets the values of pixels in a binary image to their distance to the nearest black pixel.
Args:
input: A numpy.ndarray.
external_only: A boolean. If true only external contours are found.
Return:
A list of numpy.ndarray where each one represents a contour.
"""
if(external_only):
mode = cv2.RETR_EXTERNAL
else:
mode = cv2.RETR_LIST
method = cv2.CHAIN_APPROX_SIMPLE
im2, contours, hierarchy = cv2.findContours(input, mode=mode, method=method)
return contours
@staticmethod
def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,
min_height, max_height, solidity, max_vertex_count, min_vertex_count,
min_ratio, max_ratio):
"""Filters out contours that do not meet certain criteria.
Args:
input_contours: Contours as a list of numpy.ndarray.
min_area: The minimum area of a contour that will be kept.
min_perimeter: The minimum perimeter of a contour that will be kept.
min_width: Minimum width of a contour.
max_width: Maximum width of a contour.
min_height: Minimum height of a contour.
max_height: Maximum height of a contour.
solidity: The minimum and maximum solidity of a contour.
min_vertex_count: Minimum vertex Count of the contours.
max_vertex_count: Maximum vertex Count.
min_ratio: Minimum ratio of width to height.
max_ratio: Maximum ratio of width to height.
Returns:
Contours as a list of numpy.ndarray.
"""
output = []
for contour in input_contours:
x,y,w,h = cv2.boundingRect(contour)
if (w < min_width or w > max_width):
continue
if (h < min_height or h > max_height):
continue
area = cv2.contourArea(contour)
if (area < min_area):
continue
if (cv2.arcLength(contour, True) < min_perimeter):
continue
hull = cv2.convexHull(contour)
solid = 100 * area / cv2.contourArea(hull)
if (solid < solidity[0] or solid > solidity[1]):
continue
if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
continue
ratio = float(w) / h
if (ratio < min_ratio or ratio > max_ratio):
continue
output.append(contour)
return output
BlurType = Enum('BlurType', 'Box_Blur Gaussian_Blur Median_Filter Bilateral_Filter')
I realize that is long; however, I am less familiar with Python than with other languages, so I wanted to include all of it in case someone with more Python experience can spot an error in it.
Here is my code that I have written to get the image and feed it into the pipeline:
import numpy
import math
import cv2
import urllib.request
from enum import Enum
from GripPipeline import GripPipeline
from networktables import NetworkTable
frame = cv2.VideoCapture('https://10.17.11.1')
pipeline = GripPipeline()
def get_image():
img_array = numpy.asarray(bytearray(frame.grab()))
return img_array
while True:
img = get_image()
pipeline.process(img) #where the Segmentation Fault occurs
Does anyone have any idea on what could be causing this or how to fix it?
EDIT: It turns out that the error is coming from something in the second line of the process method, but I still don't know what. If anyone sees any flaws in what's being called there please let me know.
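For reference, a quick way to check what process() actually receives (a debugging sketch, not something I have run yet) would be:
img = get_image()
# Inspect what is being passed to the pipeline; cvtColor/medianBlur expect a
# 2-D or 3-D uint8 image, not a bool or a flat byte array.
print(type(img), getattr(img, 'dtype', None), getattr(img, 'shape', None))
pipeline.process(img)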
Try getting frames as the tutorial suggests. Note that frame is renamed to cap:
cap = cv2.VideoCapture('https://10.17.11.1')
pipeline = GripPipeline()
while True:
ret, img = cap.read()
pipeline.process(img)
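If the stream hiccups, read() can return ret as False and img as None, and passing None into the pipeline will crash inside OpenCV, so a guard is worth adding (a small extension of the snippet above):
while True:
    ret, img = cap.read()
    # Skip frames that failed to decode rather than handing None to OpenCV
    if not ret or img is None:
        continue
    pipeline.process(img)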
I'm currently trying to write something that can extract data from some uncommon graphs in a book. I scanned the pages of the book, and using OpenCV I would like to detect some features of the graphs in order to convert them into usable data. In the left graph I'm looking for the height of the "triangles", and in the right graph the distance from the center to the points where the dotted lines intersect with the gray area. In both cases I would like to convert these values into numeric data for further usage.
The first thing I thought of was detecting the lines of the charts, in the hopes I could somehow measure their length or position. For this I'm using the Hough Line Transform. The following snippet of code shows how far I've gotten already.
import numpy as np
import cv2
# Reading the image
img = cv2.imread('test2.jpg')
# Convert the image to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Apply edge detection
edges = cv2.Canny(gray,50,150,apertureSize = 3)
# Line detection
lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=50,maxLineGap=20)
for line in lines:
x1,y1,x2,y2 = line[0]
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
cv2.imwrite('linesDetected.jpg',img)
The only problem is that this detection is not accurate at all, at least not for me, and in order to extract data from the charts the line detection should be reasonably accurate. Is there any way I could do this? Or is my strategy of detecting lines just wrong in the first place? Should I maybe start by detecting something else, like circles, object sizes, contours or colors?
Using color segmentation is an easy way to convert this graph to data. This method does require some manual annotation. After the graph is segmented, count the pixels for each color. Check out the 'watershed' demo in the demo files that are included in the OpenCV library:
import numpy as np
import cv2 as cv
from common import Sketcher
class App:
def __init__(self, fn):
self.img = cv.imread(fn)
self.img = cv.resize(self.img, (654,654))
h, w = self.img.shape[:2]
self.markers = np.zeros((h, w), np.int32)
self.markers_vis = self.img.copy()
self.cur_marker = 1
self.colors = np.int32( list(np.ndindex(2, 2, 3)) ) * 123
self.auto_update = True
self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)
def get_colors(self):
return list(map(int, self.colors[self.cur_marker])), self.cur_marker
def watershed(self):
m = self.markers.copy()
cv.watershed(self.img, m)
cv.imshow('img', self.img)
overlay = self.colors[np.maximum(m, 0)]
vis = cv.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv.CV_8UC3)
cv.imshow('overlay', np.array(overlay, np.uint8))
cv.imwrite('/home/stephen/Desktop/overlay.png', np.array(overlay, np.uint8))
cv.imshow('watershed', vis)
def run(self):
while cv.getWindowProperty('img', 0) != -1 or cv.getWindowProperty('watershed', 0) != -1:
ch = cv.waitKey(50)
if ch >= ord('1') and ch <= ord('9'):
self.cur_marker = ch - ord('0')
print('marker: ', self.cur_marker)
if self.sketch.dirty and self.auto_update:
self.watershed()
self.sketch.dirty = False
if ch == 27: break
cv.destroyAllWindows()
fn = '/home/stephen/Desktop/test.png'
App(cv.samples.findFile(fn)).run()
The output will be an image like this:
You can count the pixels for each color using this code:
# Extract the values from the image
vals = []
img = cv.imread('/home/stephen/Desktop/overlay.png')
# Get the colors in the image
flat = img.reshape(-1, img.shape[-1])
colors = np.unique(flat, axis=0)
# Iterate through the colors (ignore the first and last colors)
for color in colors[1:-1]:
a,b,c = color
lower = a-1, b-1, c-1
upper = a+1,b+1,c+1
lower = np.array(lower)
upper = np.array(upper)
mask = cv.inRange(img, lower, upper)
vals.append(sum(sum(mask)))
cv.imshow('mask', mask)
cv.waitKey(0)
cv.destroyAllWindows()
And print out the output data using this code:
names = ['alcohol', 'esters', 'biter', 'hoppy', 'acid', 'zoetheid', 'mout']
print(list(zip(names, vals)))
The output is:
[('alcohol', 22118), ('esters', 26000), ('biter', 16245), ('hoppy', 21170), ('acid', 19156), ('zoetheid', 11090), ('mout', 7167)]
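If you need the values as fractions of the chart rather than raw pixel counts, a trivial normalization step (not part of the code above) would be:
total = sum(vals)
# Express each region as a fraction of all counted pixels
fractions = {name: round(val / total, 3) for name, val in zip(names, vals)}
print(fractions)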
I've recorded a video while the bottle was rotated. Then I extracted frames from the video and cut the central block out of each image.
So for all frames I got the following images:
I've tried to stitch them to get panorama, but I got bad results.
I used the following program:
import glob
# from panorama import Panorama
import sys
import numpy
import imutils
import cv2
def readImages(imageString):
images = []
# Get images from arguments.
for i in range(0, len(imageString)):
img = cv2.imread(imageString[i])
images.append(img)
return images
def findAndDescribeFeatures(image):
# Getting gray image
grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find and describe the features.
# Fast: sift = cv2.xfeatures2d.SURF_create()
sift = cv2.xfeatures2d.SIFT_create()
# Find interest points.
keypoints = sift.detect(grayImage, None)
# Computing features.
keypoints, features = sift.compute(grayImage, keypoints)
# Converting keypoints to numbers.
keypoints = numpy.float32([kp.pt for kp in keypoints])
return keypoints, features
def matchFeatures(featuresA, featuresB):
# Slow: featureMatcher = cv2.DescriptorMatcher_create("BruteForce")
featureMatcher = cv2.DescriptorMatcher_create("FlannBased")
matches = featureMatcher.knnMatch(featuresA, featuresB, k=2)
return matches
def generateHomography(allMatches, keypointsA, keypointsB, ratio, ransacRep):
if not allMatches:
return None
matches = []
for match in allMatches:
# Lowe's ratio test
if len(match) == 2 and (match[0].distance / match[1].distance) < ratio:
matches.append(match[0])
pointsA = numpy.float32([keypointsA[m.queryIdx] for m in matches])
pointsB = numpy.float32([keypointsB[m.trainIdx] for m in matches])
if len(pointsA) > 4:
H, status = cv2.findHomography(pointsA, pointsB, cv2.RANSAC, ransacRep)
return matches, H, status
else:
return None
paths = glob.glob("C:/Users/andre/Desktop/Panorama-master/frames/*.jpg")
images = readImages(paths[::-1])
while len(images) > 1:
imgR = images.pop()
imgL = images.pop()
interestsR, featuresR = findAndDescribeFeatures(imgR)
interestsL, featuresL = findAndDescribeFeatures(imgL)
try:
try:
allMatches = matchFeatures(featuresR, featuresL)
_, H, _ = generateHomography(allMatches, interestsR, interestsL, 0.75, 4.0)
result = cv2.warpPerspective(imgR, H,
(imgR.shape[1] + imgL.shape[1], imgR.shape[0]))
result[0:imgL.shape[0], 0:imgL.shape[1]] = imgL
images.append(result)
except TypeError:
pass
except cv2.error:
pass
result = imutils.resize(images[0], height=260)
cv2.imshow("Result", result)
cv2.imwrite("Result.jpg", result)
cv2.waitKey(0)
My result was:
Maybe someone knows how to do this better? I think that using small blocks from each frame should remove the roundness... but so far it hasn't.
Data: https://1drv.ms/f/s!ArcAdXhy6TxPho0FLKxyRCL-808Y9g
I managed to achieve a nice result. I rewrote your code just a little bit; here is the changed part:
def generateTransformation(allMatches, keypointsA, keypointsB, ratio):
if not allMatches:
return None
matches = []
for match in allMatches:
# Lowe's ratio test
if len(match) == 2 and (match[0].distance / match[1].distance) < ratio:
matches.append(match[0])
pointsA = numpy.float32([keypointsA[m.queryIdx] for m in matches])
pointsB = numpy.float32([keypointsB[m.trainIdx] for m in matches])
if len(pointsA) > 2:
transformation = cv2.estimateRigidTransform(pointsA, pointsB, True)
if transformation is None or transformation.shape[1] < 1 or transformation.shape[0] < 1:
return None
return transformation
else:
return None
paths = glob.glob("a*.jpg")
images = readImages(paths[::-1])
result = images[0]
while len(images) > 1:
imgR = images.pop()
imgL = images.pop()
interestsR, featuresR = findAndDescribeFeatures(imgR)
interestsL, featuresL = findAndDescribeFeatures(imgL)
allMatches = matchFeatures(featuresR, featuresL)
transformation = generateTransformation(allMatches, interestsR, interestsL, 0.75)
if transformation is None or transformation[0, 2] < 0:
images.append(imgR)
continue
transformation[0, 0] = 1
transformation[1, 1] = 1
transformation[0, 1] = 0
transformation[1, 0] = 0
transformation[1, 2] = 0
result = cv2.warpAffine(imgR, transformation, (imgR.shape[1] +
int(transformation[0, 2] + 1), imgR.shape[0]))
result[:, :imgL.shape[1]] = imgL
cv2.imshow("R", result)
images.append(result)
cv2.waitKey(1)
cv2.imshow("Result", result)
So the key thing I changed is the transformation of the images. I use estimateRigidTransform() instead of findHomography() to calculate the transformation of the image. From that transformation matrix I extract only the x-coordinate translation, which is in the [0, 2] cell of the resulting affine transformation matrix transformation. I set the other transformation matrix elements as if it were an identity transformation (no scaling, no perspective, no rotation or y translation). Then I pass it to warpAffine() to transform imgR, the same way you did with warpPerspective().
You can do it because you have stable camera and spinning object positions and you capture with a straight front view of the object. It means that you don't have to do any perspective / scaling / rotation image corrections and can just "glue" them together by x axis.
I think your approach fails because you actually observe the bottle with a camera view that is tilted slightly downward, or the bottle is not in the middle of the screen. I'll try to describe that with an image. I depict some text on the bottle in red. For example, the algorithm finds a matching point pair (green) on the bottom of the captured round object. Note that the point moves not only to the right, but also diagonally up. The program then calculates the transformation taking the points which move up slightly into account. This gets worse frame by frame.
The recognition of matching image points also may be slightly inaccurate, so extracting only the x translation is even better because you give the algorithm "a clue" what actual situation you have. This makes it less applicable for another conditions, but in your case it improves the result a lot.
Also, I filter out some incorrect results with the if transformation[0, 2] < 0 check (the object can rotate in only one direction, and the code won't work if that value is negative anyway).
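One caveat: estimateRigidTransform() was removed in newer OpenCV releases. If you are on OpenCV 4.x, estimateAffinePartial2D() is the closest replacement, so the estimation call inside generateTransformation() could be swapped roughly like this (a sketch, the rest of the function stays the same):
# OpenCV 4.x replacement for cv2.estimateRigidTransform(pointsA, pointsB, True);
# it returns a 2x3 affine matrix plus an inlier mask.
transformation, inliers = cv2.estimateAffinePartial2D(pointsA, pointsB, method=cv2.RANSAC)
if transformation is None:
    return None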
Thank you for your time, dear reader.
I'm trying to implement a document scanner in Python/OpenCV, but I'm struggling with varying lighting in the image.
I'm hoping that some kind soul can at least point me in the right direction, because I have no concrete clue how I could improve it, if that is possible or known at all.
I'm using Otsu's binarization for thresholding:
https://docs.opencv.org/3.4.0/d7/d4d/tutorial_py_thresholding.html
My results so far are pretty good:
But for difficult lighting, with either two bright/dark areas or one bright and one dark area for instance (this is an extreme example), it fails:
Playing around with GIMP curves sometimes gets me clear edges; maybe there is a best practice for tackling this problem that I don't know of?
I played around with the code a lot but made no real progress by combining hierarchy / chain approximation / approxPolyDP epsilon methods, etc.
My current code:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import cv2 as cv
import numpy as np
import sys
# zero at the end reads black white
img = cv.imread(sys.argv[1],0)
blur = cv.GaussianBlur(img,(5,5),0)
# find normalized_histogram, and its cumulative distribution function
hist = cv.calcHist([blur],[0],None,[256],[0,256])
hist_norm = hist.ravel()/hist.max()
Q = hist_norm.cumsum()
bins = np.arange(256)
fn_min = np.inf
thresh = -1
for i in range(1,256):
p1,p2 = np.hsplit(hist_norm,[i]) # probabilities
q1,q2 = Q[i],Q[255]-Q[i] # cum sum of classes
b1,b2 = np.hsplit(bins,[i]) # weights
# finding means and variances
m1,m2 = np.sum(p1*b1)/q1, np.sum(p2*b2)/q2
v1,v2 = np.sum(((b1-m1)**2)*p1)/q1,np.sum(((b2-m2)**2)*p2)/q2
# calculates the minimization function
fn = v1*q1 + v2*q2
if fn < fn_min:
fn_min = fn
thresh = i
# find otsu's threshold value with OpenCV function
ret, otsu = cv.threshold(blur,0,255,cv.THRESH_BINARY+cv.THRESH_OTSU)
cv.imwrite('otsu.jpg',otsu)
_, contours, hierarchy = cv.findContours(otsu, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
def biggestRectangle(contours):
biggest = None
max_area = 0
indexReturn = -1
for index in range(len(contours)):
i = contours[index]
area = cv.contourArea(i)
if area > 100:
peri = cv.arcLength(i,True)
approx = cv.approxPolyDP(i,0.1*peri,True)
if area > max_area: #and len(approx)==4:
biggest = approx
max_area = area
indexReturn = index
return indexReturn
indexReturn = biggestRectangle(contours)
hull = cv.convexHull(contours[indexReturn])
orig = cv.imread(sys.argv[1])
cv.imwrite('hola.jpg',cv.drawContours(orig, [hull], 0, (0,255,0),3))
The biggest-rectangle code I copied from here:
How to detect document from a picture in opencv?
All credit to monic! (accepted answer)
I think the best way to achieve this would be to use color masks, but in HSV, so that it stays focused on the color and not on the brightness/contrast.
Keep in mind that OpenCV encodes HSV with these ranges:
H: 0 - 180
S: 0 - 255
V: 0 - 255
Here is how I would do it:
# Convert your image to HSV
imgHsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV);
# Define lower/upper color
lower = np.array([0, 0, 180])
upper = np.array([180, 20, 255])
# Check the region of the image actually with a color in the range defined below
# inRange returns a matrix in black and white
bw = cv2.inRange(imgHsv, lower, upper)
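From that mask you could then look for the page outline in the same way as before, for example (a rough sketch, assuming OpenCV 4.x where findContours returns two values and the page is the largest bright region):
# Find the largest contour in the mask and draw its convex hull on the original image
contours, hierarchy = cv2.findContours(bw, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
page = max(contours, key=cv2.contourArea)
hull = cv2.convexHull(page)
cv2.drawContours(img, [hull], 0, (0, 255, 0), 3)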
Having ordered half a dozen webcams online for a project, I noticed that the colors of their output are not consistent.
To compensate for this, I attempted to take a template image, extract its R, G, and B histograms, and then match the target images' RGB histograms based on it.
This was inspired by the description of the solution to a very similar problem: Comparative color calibration
The perfect solution would look like this:
To try to solve this, I wrote the following script, which performed poorly:
EDIT (thanks to @DanMašek and @api55)
import numpy as np
def show_image(title, image, width = 300):
# resize the image to have a constant width, just to
# make displaying the images take up less screen real
# estate
r = width / float(image.shape[1])
dim = (width, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
# show the resized image
cv2.imshow(title, resized)
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
from matplotlib import pyplot as plt
from scipy.misc import lena, ascent
import cv2
source = cv2.imread('/media/somadetect/Lexar/color_transfer_data/1/frame10.png')
s_b = source[:,:,0]
s_g = source[:,:,1]
s_r = source[:,:,2]
template = cv2.imread('/media/somadetect/Lexar/color_transfer_data/5/frame6.png')
t_b = source[:,:,0]
t_r = source[:,:,1]
t_g = source[:,:,2]
matched_b = hist_match(s_b, t_b)
matched_g = hist_match(s_g, t_g)
matched_r = hist_match(s_r, t_r)
y,x,c = source.shape
transfer = np.empty((y,x,c), dtype=np.uint8)
transfer[:,:,0] = matched_r
transfer[:,:,1] = matched_g
transfer[:,:,2] = matched_b
show_image("Template", template)
show_image("Target", source)
show_image("Transfer", transfer)
cv2.waitKey(0)
Template image :
Target Image:
The Matched Image:
Then I found Adrian's (pyimagesearch) attempt to solve a very similar problem in the following link
Fast Color Transfer
The results seem to be fairly good with some saturation defects. I would welcome any suggestions or pointers on how to address this issue so all web cam outputs could be calibrated to output similar colors based on one template image.
Your script performs poorly because you are using the wrong index.
OpenCV images are BGR, so this was correct in your code:
source = cv2.imread('/media/somadetect/Lexar/color_transfer_data/1/frame10.png')
s_b = source[:,:,0]
s_g = source[:,:,1]
s_r = source[:,:,2]
template = cv2.imread('/media/somadetect/Lexar/color_transfer_data/5/frame6.png')
t_b = source[:,:,0]
t_r = source[:,:,1]
t_g = source[:,:,2]
but this is wrong
transfer[:,:,0] = matched_r
transfer[:,:,1] = matched_g
transfer[:,:,2] = matched_b
since here you are writing the channels in RGB order instead of BGR, so the colors change while OpenCV still treats the array as BGR. That is why it looks weird.
It should be:
transfer[:,:,0] = matched_b
transfer[:,:,1] = matched_g
transfer[:,:,2] = matched_r
As another possible solution, you may check which parameters can be set in your camera. Cameras often have auto parameters that you can set manually so that all of them match. Also, beware of these auto parameters: usually white balance, focus, and others are set automatically, and they may change quite a lot in the same camera from one time to another (depending on illumination, etc.). The sketch below shows how one might try to lock them.
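If the cameras and backend support it, some of these auto parameters can be toggled through VideoCapture properties. Whether they actually take effect is driver-dependent, and CAP_PROP_AUTO_WB needs a reasonably recent OpenCV, so treat this only as a sketch:
import cv2

cap = cv2.VideoCapture(0)
# These property IDs exist in OpenCV, but many webcams/backends silently ignore them
cap.set(cv2.CAP_PROP_AUTO_WB, 0)        # try to disable automatic white balance
cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)      # try to disable autofocus
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1)  # value semantics differ per backend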
UPDATE:
As DanMašek points out, also
t_b = source[:,:,0]
t_r = source[:,:,1]
t_g = source[:,:,2]
is wrong, since r should be index 2 and g should be index 1 (and note that these lines should read from template, not source):
t_b = template[:,:,0]
t_g = template[:,:,1]
t_r = template[:,:,2]
I have attempted a white-patch-based calibration routine. Here is the link: https://theiszm.wordpress.com/tag/white-balance/.
The code snippet follows:
import cv2
import math
import numpy as np
import sys
from matplotlib import pyplot as plt
def hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
# Read original image
im_o = cv2.imread('/media/Lexar/color_transfer_data/5/frame10.png')
im = im_o
cv2.imshow('Org',im)
cv2.waitKey()
B = im[:,:, 0]
G = im[:,:, 1]
R = im[:,:, 2]
R= np.array(R).astype('float')
G= np.array(G).astype('float')
B= np.array(B).astype('float')
# Extract pixels that correspond to pure white R = 255,G = 255,B = 255
B_white = R[168, 351]
G_white = G[168, 351]
R_white = B[168, 351]
print(B_white)
print(G_white)
print(R_white)
# Compensate for the bias using normalization statistics
R_balanced = R / R_white
G_balanced = G / G_white
B_balanced = B / B_white
R_balanced[np.where(R_balanced > 1)] = 1
G_balanced[np.where(G_balanced > 1)] = 1
B_balanced[np.where(B_balanced > 1)] = 1
B_balanced=B_balanced * 255
G_balanced=G_balanced * 255
R_balanced=R_balanced * 255
B_balanced= np.array(B_balanced).astype('uint8')
G_balanced= np.array(G_balanced).astype('uint8')
R_balanced= np.array(R_balanced).astype('uint8')
im[:,:, 0] = (B_balanced)
im[:,:, 1] = (G_balanced)
im[:,:, 2] = (R_balanced)
# Notice saturation artifacts
cv2.imshow('frame',im)
cv2.waitKey()
# Extract the Y plane in original image and match it to the transformed image
im_o = cv2.cvtColor(im_o, cv2.COLOR_BGR2YCR_CB)
im_o_Y = im_o[:,:,0]
im = cv2.cvtColor(im, cv2.COLOR_BGR2YCR_CB)
im_Y = im[:,:,0]
matched_y = hist_match(im_o_Y, im_Y)
matched_y= np.array(matched_y).astype('uint8')
im[:,:,0] = matched_y
im_final = cv2.cvtColor(im, cv2.COLOR_YCR_CB2BGR)
cv2.imshow('frame',im_final)
cv2.waitKey()
The input image is:
The result of the script is:
Thank you all for suggestions and pointers!!
I want to adjust the colour levels of an image in Python. I can use any Python library that can easily be installed on my Ubuntu desktop. I want to do the same as ImageMagick's -level ( http://www.imagemagick.org/www/command-line-options.html#level ). PIL (Python Imaging Library) doesn't seem to have it. I have been calling convert on the image and then reading the file back in again, but that seems wasteful. Is there a better / faster way?
If I understood the -level option of ImageMagick correctly, then the level_image function I provide should do what you want.
A few things to note:
the speed definitely can be improved
it currently only works with RGB images
the algorithm goes through the HSV colorspace, and affects only the V (brightness) component
The code:
import colorsys
class Level(object):
def __init__(self, minv, maxv, gamma):
self.minv= minv/255.0
self.maxv= maxv/255.0
self._interval= self.maxv - self.minv
self._invgamma= 1.0/gamma
def new_level(self, value):
if value <= self.minv: return 0.0
if value >= self.maxv: return 1.0
return ((value - self.minv)/self._interval)**self._invgamma
def convert_and_level(self, band_values):
h, s, v= colorsys.rgb_to_hsv(*(i/255.0 for i in band_values))
new_v= self.new_level(v)
return tuple(int(255*i)
for i
in colorsys.hsv_to_rgb(h, s, new_v))
def level_image(image, minv=0, maxv=255, gamma=1.0):
"""Level the brightness of image (a PIL.Image instance)
All values ≤ minv will become 0
All values ≥ maxv will become 255
gamma controls the curve for all values between minv and maxv"""
if image.mode != "RGB":
raise ValueError("this works with RGB images only")
new_image= image.copy()
leveller= Level(minv, maxv, gamma)
levelled_data= [
leveller.convert_and_level(data)
for data in image.getdata()]
new_image.putdata(levelled_data)
return new_image
If there is some way to do the RGB→HSV conversion (and vice versa) using PIL, then one can split into the H, S, V bands, use the .point method of the V band and convert back to RGB, speeding up the process by a lot; however, I haven't found such a way.
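For what it's worth, newer Pillow versions do expose an "HSV" mode, so a faster variant along those lines might look like this (an untested sketch, assuming Image.convert("HSV") round-trips acceptably for your images):
from PIL import Image

def level_image_fast(image, minv=0, maxv=255, gamma=1.0):
    """Level brightness via the V band of an HSV conversion (sketch)."""
    if image.mode != "RGB":
        raise ValueError("this works with RGB images only")
    leveller = Level(minv, maxv, gamma)
    h, s, v = image.convert("HSV").split()
    # .point() maps each of the 256 possible V values through new_level once,
    # instead of converting every pixel individually in Python
    v = v.point(lambda i: int(255 * leveller.new_level(i / 255.0)))
    return Image.merge("HSV", (h, s, v)).convert("RGB")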
Why not use PythonMagick? It's a Python interface to ImageMagick.
This is the code that I use. Levels are applied 1) to the brightness channel of the HSV image and 2) according to the desired amount of black and white pixels in the result.
The code can be modified to avoid using Pillow, since OpenCV uses NumPy arrays as its internal data; a sketch of that variant follows the code below. If you do so, be aware that OpenCV's native colorspace is BGR, so you will have to change the calls to cv.cvtColor() accordingly.
from PIL import Image
import numpy as np
import cv2 as cv
fileName = 'foo.JPG'
fileOut = 'bar.JPG'
imgPil = Image.open(fileName)
imgCV = np.asarray(imgPil, np.uint8)
hsv = cv.cvtColor(imgCV, cv.COLOR_RGB2HSV)
h,s,v = cv.split(hsv)
ceil = np.percentile(v,95) # 5% of pixels will be white
floor = np.percentile(v,5) # 5% of pixels will be black
a = 255/(ceil-floor)
b = floor*255/(floor-ceil)
v = np.maximum(0,np.minimum(255,v*a+b)).astype(np.uint8)
hsv = cv.merge((h,s,v))
rgb = cv.cvtColor(hsv, cv.COLOR_HSV2RGB)
imgPil = Image.fromarray(rgb)
imgPil.save(fileOut)
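A sketch of the OpenCV-only variant mentioned above, assuming the file is read with cv.imread (which gives BGR), so the cvtColor flags change accordingly:
import numpy as np
import cv2 as cv

img = cv.imread('foo.JPG')                  # BGR, as loaded by OpenCV
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
h, s, v = cv.split(hsv)
ceil = np.percentile(v, 95)                 # 5% of pixels will be white
floor = np.percentile(v, 5)                 # 5% of pixels will be black
a = 255 / (ceil - floor)
b = floor * 255 / (floor - ceil)
# Rescale the brightness channel and clamp to the valid 0-255 range
v = np.clip(v * a + b, 0, 255).astype(np.uint8)
out = cv.cvtColor(cv.merge((h, s, v)), cv.COLOR_HSV2BGR)
cv.imwrite('bar.JPG', out)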
Using code from this link:
# Auto leveling for image
def levels(data, all_same = 0, clip = 0):
if data.mode not in ['RGB', 'CMYK']:
return data
## get redistriputed histogram scalled smoothly
lut = _makelut(data, all_same, clip)
## update image points using histogram
data = data.point(lut)
return data
def _find_hi_lo(lut, clip):
min = None
max = None
for i in range(len(lut)):
if lut[i] > clip:
min = i
break
lut.reverse()
for i in range(len(lut)):
if lut[i] > clip:
max = 255 - i
break
return min, max
def _scale(channels, min, max):
lut = []
# hefny fix
ratio = float(max-min)
if ratio == 0:
ratio = 1
for i in range (channels):
for i in range(256):
value = int((i - min)*(255.0/ratio))
if value < 0:
value = 0
if value > 255:
value = 255
lut.append(value)
return lut
def _makelut(data, all_same, clip):
histogram = data.histogram()
lut = []
r, g, b, k = [], [], [], []
channels = len(histogram) // 256
for i in range(256):
r.append(histogram[i])
g.append(histogram[256+i])
b.append(histogram[512+i])
if channels == 4:
for i in range(256):
k.append(histogram[768+i])
rmin, rmax = _find_hi_lo(r, clip)
gmin, gmax = _find_hi_lo(g, clip)
bmin, bmax = _find_hi_lo(b, clip)
if channels == 4:
kmin, kmax = _find_hi_lo(k, clip)
else:
kmin, kmax = 128, 128
if all_same == 1:
min_max = [rmin, gmin, bmin, kmin, rmax, gmax, bmax, kmax]
min_max.sort()
lut = _scale(channels, min_max[0], min_max[-1])
else:
r_lut = _scale(1, rmin, rmax)
g_lut = _scale(1, gmin, gmax)
b_lut = _scale(1, bmin, bmax)
if channels == 4:
k_lut = _scale(1, kmin, kmax)
lut = []
for i in range (256):
lut.append(r_lut[i])
for i in range (256):
lut.append(g_lut[i])
for i in range (256):
lut.append(b_lut[i])
if channels == 4:
for i in range (256):
lut.append(k_lut[i])
return lut
from PIL import Image
img = Image.open(file_path)
img2 = levels(img)
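To keep the result, you could simply save it afterwards (a one-line addition, the output name is arbitrary):
# Save the auto-levelled result (hypothetical output filename)
img2.save('auto_levelled.jpg')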