I am trying to execute the code from this URL
However, I started getting this error:
error: ..\..\..\..\opencv\modules\ml\src\knearest.cpp:370: error: (-5) Input samples must be floating-point matrix (<num_samples>x<var_count>) in function CvKNearest::find_nearest
I have not made any major changes, but here is what I did:
import scipy as sp
import numpy as np
import cv2

# Load the images
img = cv2.imread("image1.png")

# Convert them to grayscale
imgg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# SURF extraction
surf = cv2.FeatureDetector_create("SURF")
surfDescriptorExtractor = cv2.DescriptorExtractor_create("SURF")
kp = surf.detect(imgg)
kp, descriptors = surfDescriptorExtractor.compute(imgg, kp)

# Setting up samples and responses for kNN
samples = np.array(descriptors)
responses = np.arange(len(kp), dtype=np.float32)

# kNN training
knn = cv2.KNearest()
knn.train(samples, responses)

modelImages = ["image2.png"]

for modelImage in modelImages:
    # Now loading a template image and searching for similar keypoints
    template = cv2.imread(modelImage)
    templateg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    keys = surf.detect(templateg)
    keys, desc = surfDescriptorExtractor.compute(templateg, keys)

    for h, des in enumerate(desc):
        # debug
        print(des.shape)
        des = np.array(des, np.float32).reshape(64L)
        retval, results, neigh_resp, dists = knn.find_nearest(des, 1)
        res, dist = int(results[0][0]), dists[0][0]

        if dist < 0.1:  # draw matched keypoints in red color
            color = (0, 0, 255)
        else:  # draw unmatched in blue color
            # print(dist)
            color = (255, 0, 0)

        # Draw matched key points on original image
        x, y = kp[res].pt
        center = (int(x), int(y))
        cv2.circle(img, center, 2, color, -1)

        # Draw matched key points on template image
        x, y = keys[h].pt
        center = (int(x), int(y))
        cv2.circle(template, center, 2, color, -1)

    cv2.imshow('img', img)
    cv2.imshow('tm', template)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Any help is greatly appreciated.
Thanks in advance!
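For reference: the error says find_nearest wants a 2-D float32 matrix of shape (num_samples, var_count), while reshape(64L) produces a 1-D array. A minimal sketch of the reshape the error message asks for, assuming 64-dimensional (non-extended) SURF descriptors:

# one query row of 64 float32 values, i.e. shape (1, 64), not (64,)
des = np.array(des, dtype=np.float32).reshape(1, 64)
retval, results, neigh_resp, dists = knn.find_nearest(des, 1)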
Currently I am working on Vietnamese text detection in images.
To detect the text in the image I am using PaddleOCR detection, because I need line-by-line detection, and PaddleOCR handles that very reliably. PaddleOCR can also do recognition, but since it wasn't trained on Vietnamese, its recognition results are poor.
So for recognition I am going with VietOCR, which gives excellent results, but the problem with VietOCR is that it only works on cropped images, not on full images.
My plan is to crop the image into multiple pieces using the bounding-box coordinates generated by PaddleOCR.
I am using PaddleOCR for the text detection.
Sample Code
from paddleocr import PaddleOCR, draw_ocr

# PaddleOCR supports Chinese, English, French, German, Korean and Japanese.
# You can set the parameter `lang` as `ch`, `en`, `french`, `german`, `korean`, `japan`
# to switch the language model.
ocr = PaddleOCR(use_angle_cls=True)  # need to run only once to download and load the model into memory
img_path = '/content/im1502.jpg'
result = ocr.ocr(img_path, cls=True)

for idx in range(len(result)):
    res = result[idx]
    for line in res:
        print(line)

# draw result
from PIL import Image

result = result[0]
image = Image.open(img_path).convert('RGB')
boxes = [line[0] for line in result]
txts = [line[1][0] for line in result]
scores = [line[1][1] for line in result]
im_show = draw_ocr(image, boxes, txts=None, scores=None, font_path='/path/to/PaddleOCR/doc/fonts/simfang.ttf')
im_show = Image.fromarray(im_show)
im_show.save('result.jpg')
Bounding Box Image and Results
[[[108.0, 78.0], [289.0, 93.0], [286.0, 131.0], [104.0, 116.0]],
[[51.0, 230.0], [267.0, 235.0], [266.0, 272.0], [50.0, 267.0]],
[[17.0, 304.0], [343.0, 304.0], [343.0, 340.0], [17.0, 340.0]]]
Recognition with VietOCR
import matplotlib.pyplot as plt
from PIL import Image
from vietocr.tool.predictor import Predictor
from vietocr.tool.config import Cfg
config = Cfg.load_config_from_name('vgg_transformer')
# config['weights'] = './weights/transformerocr.pth'
#config['weights'] = 'https://drive.google.com/uc?id=13327Y1tz1ohsm5YZMyXVMPIOjoOA0OaA'
config['cnn']['pretrained']=False
#config['device'] = 'cuda:0'
config['predictor']['beamsearch']=False
detector = Predictor(config)
img = '/content/im1502.jpg'
img = Image.open(img)
plt.imshow(img)
result = detector.predict(img)
result
The result is
SẢNH CHUNG
So can anyone help me with how to crop the image using the PaddleOCR bounding-box coordinates?
Thanks, I figured it out. Here `boxes` are the bounding boxes from PaddleOCR:

import cv2
import numpy as np

img = cv2.imread(img_path)
height, width = img.shape[:2]

for boxs in boxes:
    box = np.array(boxs).astype(np.int32).reshape(-1, 2)
    points = np.array([box])
    mask = np.zeros((height, width), dtype=np.uint8)  # fresh mask for each box
    cv2.fillPoly(mask, points, 255)
    res = cv2.bitwise_and(img, img, mask=mask)
    rect = cv2.boundingRect(points)  # returns (x, y, w, h) of the rect
    cropped = res[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]

Each `cropped` is the part of the image inside one bounding box.
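To close the loop with recognition, each crop can then be handed to VietOCR. A sketch under the assumption that `detector` is the VietOCR `Predictor` built earlier and that the loop above collects its `cropped` arrays into a list (the name `crops` is hypothetical):

from PIL import Image
import cv2

texts = []
for cropped in crops:
    # OpenCV crops are BGR; VietOCR's Predictor expects a PIL (RGB) image
    crop_pil = Image.fromarray(cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB))
    texts.append(detector.predict(crop_pil))
print(texts)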
I am trying to find the dominant color in each frame of a video. This works well; however, my frames somehow end up with different colors: yellow/pink becomes blue/purple-ish, while black and white stay the same (so the colors are not simply inverted).
Does anyone know where this comes from and how I can change it so that the original colors are kept? This is my code:
import cv2
from sklearn.cluster import KMeans
from collections import Counter
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches

video = cv2.VideoCapture('video.mp4')

def show_blurred_image(image, dominant_color):
    frame_to_blur = Image.fromarray(image)
    blurred_frame = cv2.blur(image, (200, 200))
    blurred_frame = Image.fromarray(blurred_frame)

    plt.subplot(121), plt.imshow(frame_to_blur), plt.title('Original')
    plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(blurred_frame), plt.title('Blurred')
    plt.xticks([]), plt.yticks([])

    R = round(dominant_color[0])
    G = round(dominant_color[1])
    B = round(dominant_color[2])
    custom_color = '#%02x%02x%02x' % (R, G, B)
    print(custom_color)

    rect = patches.Rectangle((1620, 0), 300, 1080, linewidth=1,
                             fill=True,
                             edgecolor=custom_color,
                             facecolor=custom_color)
    ax = plt.gca()
    ax.add_patch(rect)
    plt.show()

def get_dominant_color(image, k=4, image_processing_size=None):
    """
    takes an image as input
    returns the dominant color of the image as a list

    dominant color is found by running k means on the
    pixels & returning the centroid of the largest cluster

    processing time is sped up by working with a smaller image;
    this resizing can be done with the image_processing_size param
    which takes a tuple of image dims as input

    >>> get_dominant_color(my_image, k=4, image_processing_size=(25, 25))
    [56.2423442, 34.0834233, 70.1234123]
    """
    # resize image if new dims provided
    if image_processing_size is not None:
        image = cv2.resize(image, image_processing_size,
                           interpolation=cv2.INTER_AREA)

    # reshape the image to be a list of pixels
    image = image.reshape((image.shape[0] * image.shape[1], 3))

    # cluster and assign labels to the pixels
    clt = KMeans(n_clusters=k)
    labels = clt.fit_predict(image)

    # count labels to find most popular
    label_counts = Counter(labels)

    # subset out most popular centroid
    dominant_color = clt.cluster_centers_[label_counts.most_common(1)[0][0]]

    return list(dominant_color)

dominant_colors = []
show_frame = 10
frame_nb = 0

while video.isOpened():
    ret, frame = video.read()
    if ret == True:
        if frame_nb == show_frame:
            dominant_color = get_dominant_color(frame)
            show_blurred_image(frame, dominant_color)
        frame_nb += 1
    else:
        break

video.release()
cv2.destroyAllWindows()
OpenCV loads images in BGR format, while PIL and matplotlib work with RGB. If you want to use the libraries together, you need to convert the images to the right color space.
In your case:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
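Concretely, converting each frame right after video.read() keeps both the KMeans clustering and the matplotlib display in RGB; a sketch of the reworked loop from the question:

while video.isOpened():
    ret, frame = video.read()
    if not ret:
        break
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # BGR (OpenCV) -> RGB (PIL/matplotlib)
    if frame_nb == show_frame:
        dominant_color = get_dominant_color(frame)
        show_blurred_image(frame, dominant_color)
    frame_nb += 1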
I'm trying to configure OpenCV within Python 3.6 to match a character icon (pattern) 1 with a box of characters 2. Nevertheless, the match is quite low, especially for shaded characters like 1.
I tried to solve it by using not only matchTemplate but also histogram comparison; nevertheless, the result is still poor.
I tried grayscale, color, matching just the center of the picture (a cropped face), matching the whole picture, resizing the pattern to the exact dimensions it has in the box... all combinations... and the result is still very random (see the attached image of correlation results).
Thank you in advance for your help!
Here's the code:
import numpy as np
import cv2 as cv
from PIL import Image
import os
box = Image.open("/Users/user/Desktop/dbz/my_box.jpeg")
box.thumbnail((592,1053))
#conditions for each match step
character_threshold = 0.6 #checks in box
hist_threshold = 0.3
for root, dirs, files in os.walk("/Users/user/Desktop/dbz/img/Super/TEQ/"):
    for file in files:
        if not file.startswith("."):
            print("now " + file)
            char = os.path.join(root, file)

            # Opens the character's icon
            character = Image.open(char)
            character.thumbnail((153, 139))

            # Crops the face from the character's icon and converts it to a grayscale CV object
            face = character.crop((22, 22, 94, 94))  # size 72x72 with centered face (should be 22,22,94,94)
            face_array = np.array(face).astype(np.uint8)
            face_array_gray = cv.cvtColor(face_array, cv.COLOR_RGB2GRAY)

            # Converts the character's icon to a grayscale CV object
            character_array = np.array(character).astype(np.uint8)
            character_array_gray = cv.cvtColor(character_array, cv.COLOR_RGB2GRAY)

            # Converts the box screenshot to a grayscale CV object
            box_array = np.array(box).astype(np.uint8)
            box_array_gray = cv.cvtColor(box_array, cv.COLOR_RGB2GRAY)

            # Check whether the face is in the box
            character_score = cv.matchTemplate(box_array[:, :, 2], face_array[:, :, 2], cv.TM_CCOEFF_NORMED)
            if character_score.max() > character_threshold:
                ij = np.unravel_index(np.argmax(character_score), character_score.shape)
                x, y = ij[::-1]  # np returns (row, col), whilst PIL expects (left, upper, right, lower)
                w, h = face_array_gray.shape
                face.show()
                found = box.crop((x, y, x + w, y + h))  # expand border to 25 pixels in each size (Best is (x-20,y-5,x+w,y+h+20))
                #found.show()
                #found_character = np.array(found_character).astype(np.uint8)
                #found_character = cv.cvtColor(found_character, cv.COLOR_RGB2GRAY)
                found_array = np.array(found).astype(np.uint8)
                found_array_gray = cv.cvtColor(found_array, cv.COLOR_RGB2GRAY)

                found_hist = cv.calcHist([found_array], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
                found_hist = cv.normalize(found_hist, found_hist).flatten()
                found_hist_gray = cv.calcHist([found_array_gray], [0], None, [8], [0, 256])
                found_hist_gray = cv.normalize(found_hist_gray, found_hist_gray).flatten()

                face_hist = cv.calcHist([face_array], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
                face_hist = cv.normalize(face_hist, face_hist).flatten()
                face_hist_gray = cv.calcHist([face_array_gray], [0], None, [8], [0, 256])
                face_hist_gray = cv.normalize(face_hist_gray, face_hist_gray).flatten()

                character_hist = cv.calcHist([character_array], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
                character_hist = cv.normalize(character_hist, character_hist).flatten()
                character_hist_gray = cv.calcHist([character_array_gray], [0], None, [8], [0, 256])
                character_hist_gray = cv.normalize(character_hist_gray, character_hist_gray).flatten()

                hist_compare_result_CORREL = cv.compareHist(found_hist_gray, character_hist_gray, cv.HISTCMP_CORREL)
                #hist_compare_result_CHISQR = cv.compareHist(found_hist_gray, character_hist_gray, cv.HISTCMP_CHISQR)
                #hist_compare_result_INTERSECT = cv.compareHist(found_hist_gray, character_hist_gray, cv.HISTCMP_INTERSECT)
                #hist_compare_result_BHATTACHARYYA = cv.compareHist(found_hist_gray, character_hist_gray, cv.HISTCMP_BHATTACHARYYA)

                if (hist_compare_result_CORREL + character_score.max()) > 1:
                    print(f"Found {file} with a score:\n match:{character_score.max()}\n hist_correl: {hist_compare_result_CORREL}\n SUM:{hist_compare_result_CORREL+character_score.max()}", file=open("/Users/user/Desktop/dbz/out.log", "a+"))
(Image 1: the character icon pattern. Image 2: the box of characters.)
Here is a simple example of masked template matching in Python/OpenCV.
Image:
Transparent Template:
Template with alpha removed:
Template alpha channel extracted as mask image:
import cv2
import numpy as np
# read image
img = cv2.imread('logo.png')
# read template with alpha
tmplt = cv2.imread('hat_alpha.png', cv2.IMREAD_UNCHANGED)
hh, ww = tmplt.shape[:2]
# extract template mask as grayscale from alpha channel and make 3 channels
tmplt_mask = tmplt[:,:,3]
tmplt_mask = cv2.merge([tmplt_mask,tmplt_mask,tmplt_mask])
# extract templt2 without alpha channel from tmplt
tmplt2 = tmplt[:,:,0:3]
# do template matching
corrimg = cv2.matchTemplate(img,tmplt2,cv2.TM_CCORR_NORMED, mask=tmplt_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(corrimg)
max_val_ncc = '{:.3f}'.format(max_val)
print("correlation match score: " + max_val_ncc)
xx = max_loc[0]
yy = max_loc[1]
print('xmatch =',xx,'ymatch =',yy)
# draw red bounding box to define match location
result = img.copy()
pt1 = (xx,yy)
pt2 = (xx+ww, yy+hh)
cv2.rectangle(result, pt1, pt2, (0,0,255), 1)
cv2.imshow('image', img)
cv2.imshow('template2', tmplt2)
cv2.imshow('template_mask', tmplt_mask)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('logo_hat_match2.png', result)
Match location on input:
Match Information:
correlation match score: 1.000
xmatch = 417 ymatch = 44
Without the mask, the large green area in the template would mismatch in the input and lower the match score dramatically.
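Note that mask support in matchTemplate is limited to certain methods; TM_SQDIFF and TM_CCORR_NORMED are the documented ones in most OpenCV versions. As a variation on the code above (same img, tmplt2, tmplt_mask), TM_SQDIFF also honors the mask, but there the best match is the minimum:

diffimg = cv2.matchTemplate(img, tmplt2, cv2.TM_SQDIFF, mask=tmplt_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(diffimg)
xx, yy = min_loc  # use min_loc (not max_loc) with TM_SQDIFF
print('xmatch =', xx, 'ymatch =', yy)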
I am trying to crop a face using the facial landmarks identified by dlib. The right eyebrow is causing problems - the crop goes flat across rather than following the eyebrow arc.
What am I doing wrong here?
from imutils import face_utils
import imutils
import numpy as np
import collections
import dlib
import cv2

def face_remap(shape):
    remapped_image = shape.copy()
    # left eye brow
    remapped_image[17] = shape[26]
    remapped_image[18] = shape[25]
    remapped_image[19] = shape[24]
    remapped_image[20] = shape[23]
    remapped_image[21] = shape[22]
    # right eye brow
    remapped_image[22] = shape[21]
    remapped_image[23] = shape[20]
    remapped_image[24] = shape[19]
    remapped_image[25] = shape[18]
    remapped_image[26] = shape[17]
    # neatening
    remapped_image[27] = shape[0]
    return remapped_image

"""
MAIN CODE STARTS HERE
"""
# load the input image, resize it, and convert it to grayscale
image = cv2.imread("images/faceCM1.jpg")
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
out_face = np.zeros_like(image)

# initialize dlib's face detector (HOG-based) and then create the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(SHAPE_PREDICTOR)

# detect faces in the grayscale image
rects = detector(gray, 1)

# loop over the face detections
for (i, rect) in enumerate(rects):
    """
    Determine the facial landmarks for the face region, then convert the
    facial landmark (x, y)-coordinates to a NumPy array
    """
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)

    # initialize mask array
    remapped_shape = np.zeros_like(shape)
    feature_mask = np.zeros((image.shape[0], image.shape[1]))

    # we extract the face
    remapped_shape = face_remap(shape)
    cv2.fillConvexPoly(feature_mask, remapped_shape[0:27], 1)
    feature_mask = feature_mask.astype(bool)
    out_face[feature_mask] = image[feature_mask]
    cv2.imshow("mask_inv", out_face)
    cv2.imwrite("out_face.png", out_face)
sample image of cropped face showing the issue
Using the convex hull formed by the 68 landmarks didn't exactly achieve the desired output, so I took the following approach to this problem using scikit-image instead of OpenCV.
1. Load image and predict 68 landmarks
import dlib
import numpy as np

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

img = dlib.load_rgb_image('mean.jpg')
rect = detector(img)[0]
sp = predictor(img, rect)
landmarks = np.array([[p.x, p.y] for p in sp.parts()])
2. Select the landmarks that represents the shape of the face
(I had to reverse the order of the eyebrows landmarks because the 68 landmarks aren't ordered to describe the face outline)
outline = landmarks[[*range(17), *range(26,16,-1)]]
3. Draw a polygon using these landmarks using scikit-image
import skimage.draw

Y, X = skimage.draw.polygon(outline[:, 1], outline[:, 0])
4. Create a canvas with zeros and use the polygon as mask to original image
cropped_img = np.zeros(img.shape, dtype=np.uint8)
cropped_img[Y, X] = img[Y, X]
For the sake of completeness, I provide below a solution using scipy.spatial.ConvexHull, if this option is still preferred
from scipy.spatial import ConvexHull

vertices = ConvexHull(landmarks).vertices
Y, X = skimage.draw.polygon(landmarks[vertices, 1], landmarks[vertices, 0])
cropped_img = np.zeros(img.shape, dtype=np.uint8)
cropped_img[Y, X] = img[Y, X]
It's because the face shape you are providing is not convex.
fillConvexPoly works correctly only on convex shapes; in this case there is a concave corner (at point #27), and hence the results are messed up.
To fix this, modify the function as
def face_remap(shape):
    remapped_image = cv2.convexHull(shape)
    return remapped_image
This will give you a result that looks like this:
Now you may write some more code to remove the triangular section on the forehead (if you want it that way).
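For context, cv2.convexHull returns its points in a shape that fillConvexPoly accepts directly, so the rest of the question's loop can stay almost unchanged. A minimal usage sketch, assuming the image and shape variables from the question (the bitwise_and step is just one way to apply the mask):

hull = cv2.convexHull(shape)                      # convex outline of all 68 landmarks
feature_mask = np.zeros(image.shape[:2], dtype=np.uint8)
cv2.fillConvexPoly(feature_mask, hull, 255)       # every corner of the hull is convex
out_face = cv2.bitwise_and(image, image, mask=feature_mask)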
I am working on a SURF implementation in OpenCV using Python that detects a template in a given image. I have modified the code so that it takes video capture from the connected webcam, converts the frames into images, and then applies SURF to them. Following is the modified code.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()

    # Convert them to grayscale
    imgg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # SURF extraction
    surf = cv2.SURF()
    kp, descriptors = surf.detect(imgg, None, useProvidedKeypoints=False)

    # Setting up samples and responses for kNN
    samples = np.array(descriptors)
    responses = np.arange(len(kp), dtype=np.float32)

    # kNN training
    knn = cv2.KNearest()
    knn.train(samples, responses)

    # Now loading a template image and searching for similar keypoints
    template = cv2.imread('template.png')
    templateg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    keys, desc = surf.detect(templateg, None, useProvidedKeypoints=False)

    for h, des in enumerate(desc):
        des = np.array(des, np.float32).reshape((1, 128))
        retval, results, neigh_resp, dists = knn.find_nearest(des, 1)
        res, dist = int(results[0][0]), dists[0][0]

        if dist < 0.1:  # draw matched keypoints in red color
            color = (0, 0, 255)
        else:  # draw unmatched in blue color
            print(dist)
            color = (255, 0, 0)

        # Draw matched key points on original image
        x, y = kp[res].pt
        center = (int(x), int(y))
        cv2.circle(img, center, 2, color, -1)

        # Draw matched key points on template image
        x, y = keys[h].pt
        center = (int(x), int(y))
        cv2.circle(template, center, 2, color, -1)

    cv2.imwrite('img', img)
    cv2.imwrite('tm', template)
    cv2.waitKey(0)

cap.release()
But the error I am getting is:
knn.train(samples,responses)
TypeError: data type = 17 is not supported
Does anybody have any idea about this?
OpenCV probably expects regular arrays, but you are passing NumPy arrays instead. Try this:
knn.train(samples.tolist(),responses.tolist())
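If .tolist() does not help: data type = 17 is NumPy's code for an object array, which usually means descriptors did not come back as a clean numeric array. KNearest.train requires float32 (CV_32F) input, so here is a sketch of the explicit cast, assuming the 128-dimensional descriptors from the question:

import numpy as np

# force a contiguous float32 matrix with one row per descriptor;
# KNearest.train only accepts CV_32F data
samples = np.array(descriptors, dtype=np.float32).reshape(-1, 128)
responses = np.arange(len(kp), dtype=np.float32)
knn.train(samples, responses)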