Template matching with updating templates from previous detection - python

I am trying to write code where, after matching against a given template, the detected part of that frame becomes the template for the next frame.
temp = cv2.imread("image.png")
while True:
    try:
        _, frame = cap.read()
        copy = frame.copy()
        w, h = temp.shape[:-1]
        res = cv2.matchTemplate(frame, temp, cv2.TM_CCOEFF_NORMED)
        threshold = 0.75
        # try:
        loc = np.where(res >= threshold)
        print(len(loc))
        for pt in zip(*loc[::-1]):
            # cv2.rectangle(img, pt, (pt[0]+w, pt[1]+h), (0, 255, 255), 2)
            point = pt
            cropped_image = copy[point[1]:point[1]+h, point[0]:point[0]+w]
            temp = cropped_image  # update the template
But with this code the template matching goes in a totally wrong direction, even though if I remove the temp = cropped_image line, the cropped_image itself is actually good.

You can find the x, y, w, h of the matched region with cv2.minMaxLoc():
import cv2
src = cv2.imread("source.png", cv2.IMREAD_GRAYSCALE)
templit = cv2.imread("initial_template.png", cv2.IMREAD_GRAYSCALE)
result = cv2.matchTemplate(src, templit, cv2.TM_SQDIFF_NORMED)
minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
x, y = minLoc
h, w = templit.shape
cropped_img = src[y: y + h, x: x + w]
# do template matching again with cropped_img
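Putting the two together, a minimal sketch of the full update loop could look like this. The video source, the initial template path, and the 0.25 acceptance threshold are all assumptions, not from the original post; with TM_SQDIFF_NORMED the best match is the minimum, so you accept on a small minVal:
import cv2

cap = cv2.VideoCapture("video.mp4")  # assumed video source
templit = cv2.imread("initial_template.png", cv2.IMREAD_GRAYSCALE)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    result = cv2.matchTemplate(gray, templit, cv2.TM_SQDIFF_NORMED)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(result)
    if minVal < 0.25:  # accept only confident matches; threshold is a guess
        x, y = minLoc  # for TM_SQDIFF_NORMED the minimum is the best match
        h, w = templit.shape
        templit = gray[y:y + h, x:x + w].copy()  # .copy() so the template does not alias the frame buffer
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
    cv2.imshow("tracking", frame)
    if cv2.waitKey(30) & 0xFF == 27:  # ESC to quit
        break
Gating the update on the match score keeps one bad match from poisoning every later frame, which is one plausible cause of the drift described in the question.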

Related

Join extracted/split patches to reconstruct an image

I used the code below to extract patches from an image.
Extraction code:
import os
import glob
from PIL import Image

Image.MAX_IMAGE_PIXELS = None  # to avoid image size warning

imgdir = "/path/to/image/folder"
filelist = [f for f in glob.glob(imgdir + "**/*.png", recursive=True)]
savedir = "/path/to/image/folder/output"

start_pos = start_x, start_y = (0, 0)
cropped_image_size = w, h = (256, 256)

for file in filelist:
    img = Image.open(file)
    width, height = img.size
    frame_num = 1
    for col_i in range(0, width, w):
        for row_i in range(0, height, h):
            crop = img.crop((col_i, row_i, col_i + w, row_i + h))
            name = os.path.basename(file)
            name = os.path.splitext(name)[0]
            save_to = os.path.join(savedir, name + "_{:03}.png")
            crop.save(save_to.format(frame_num))
            frame_num += 1
Now I want to reconstruct the image from all those patches extracted before. I've tried two different approaches.
My DB is 120x256x256x3 extracted patches; there are 120 patches to fit into a 3840x2048 shape:
import glob
import os
import cv2
import numpy as np

patches = []
for directory_path in glob.glob('D:\\join_exemplo'):
    for img_path in glob.glob(os.path.join(directory_path, "*.png")):
        img = cv2.imread(img_path, 1)
        patches.append(img)
input_patches = np.array(patches)
First I tried reconstruct_from_patches_2d from sklearn.feature_extraction.image, but got a black image:
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

reconstruct = reconstruct_from_patches_2d(input_patches, input_image)
reconstruct = reconstruct.astype(np.uint8)
Image.fromarray(reconstruct, 'RGB').save(r'D:\join_exemplo\re\re3.png')
I also tried the code below, but got a grayscale tone-pattern image:
input_image = (3840, 2048, 3)
reconstructed_arr = np.zeros(shape=(3840, 2048, 3))
step = 256
for x in range(img.shape[0]):
    for y in range(img.shape[1]):
        x_pos, y_pos = x * step, y * step
        reconstructed_arr[x_pos:x_pos + 512, y_pos:y_pos + 512] = img[x, y, 0, ...]
(input_image == reconstructed_arr).all()  # prints True
cv2.imwrite(r'D:\join_exemplo\re\re.png', reconstructed_arr)
Can someone see what's wrong? Sorry about my bad English.
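For reference, one way to rebuild the image (a sketch, not from the original thread) is to place the patches back in the same column-major order the extraction loops above produce, assuming 3840 is the width, 2048 the height, and the zero-padded filenames sort back into saving order:
import glob
import os
import cv2
import numpy as np

w, h = 256, 256
full_w, full_h = 3840, 2048  # assumed width x height from the question
cols, rows = full_w // w, full_h // h  # 15 * 8 = 120 patches

files = sorted(glob.glob(os.path.join(r'D:\join_exemplo', '*.png')))
reconstructed = np.zeros((full_h, full_w, 3), dtype=np.uint8)
idx = 0
for c in range(cols):  # outer loop over columns, matching the extraction order
    for r in range(rows):
        patch = cv2.imread(files[idx], 1)
        reconstructed[r * h:(r + 1) * h, c * w:(c + 1) * w] = patch
        idx += 1
cv2.imwrite(r'D:\join_exemplo\re\re.png', reconstructed)
The grayscale pattern in the second attempt is consistent with indexing img (a single patch) pixel by pixel instead of iterating over the patch stack, and with writing out a float64 array; iterating over the patch grid and keeping the array uint8 avoids both.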

YUV frame averaging using opencv

I am trying to read a YUV video and then perform frame averaging on it, but I run into problems when doing so. My code is shown below (it was made by combining a YUV reader with code to split a frame into its separate Y, U, V components, plus what I thought was the way to do the averaging).
I get an error that the argument should be int or None, not float, but it doesn't reference a line; I do know it has to do with the part that reads the YUV file. I am trying to average every two frames within the video. The YUV file is 10-bit 4:2:0.
import cv2
import numpy as np


class readYUV:
    def __init__(self, filename, size):
        self.height, self.width = size
        self.frame_len = self.width * self.height * 3 / 2
        self.f = open(filename, 'rb')
        self.shape = (int(self.height * 1.5), self.width)

    def read_raw(self):
        try:
            raw = self.f.read(self.frame_len)
            yuv = np.frombuffer(raw, dtype=np.uint8)
            yuv = yuv.reshape(self.shape)
        except Exception as e:
            print(str(e))
            return False, None
        return True, yuv

    def read(self):
        ret, yuv = self.read_raw()
        if not ret:
            return ret, yuv
        bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_NV21)
        return ret, bgr


def make_lut_u():
    return np.array([[[i, 255 - i, 0] for i in range(256)]], dtype=np.uint8)


def make_lut_v():
    return np.array([[[0, 255 - i, i] for i in range(256)]], dtype=np.uint8)


# otherwise, split the frame into its respective channels
def splitter(img):
    cv2.imshow("frame", img)
    cv2.waitKey(30)
    img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    y, u, v = cv2.split(img_yuv)
    lut_u, lut_v = make_lut_u(), make_lut_v()
    # Convert back to BGR so we can apply the LUT and stack the images
    y = cv2.cvtColor(y, cv2.COLOR_GRAY2BGR)
    u = cv2.cvtColor(u, cv2.COLOR_GRAY2BGR)
    v = cv2.cvtColor(v, cv2.COLOR_GRAY2BGR)
    u_mapped = cv2.LUT(u, lut_u)
    v_mapped = cv2.LUT(v, lut_v)
    result = np.vstack([img, y, u_mapped, v_mapped])
    return result


if __name__ == "__main__":
    filename = "file.yuv"
    size = (3840, 2160)
    cap = readYUV(filename, size)
    while 1:
        ret, frame = cap.read()
        cv2.waitKey(30)
        result1 = splitter(frame)
        ret2, frame2 = cap.read()
        cv2.waitKey(30)
        result2 = splitter(frame2)
        result = (result1 + result2) / 2
        cv2.imwrite('average.png', result)
Thanks in advance
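For what it's worth, the error described above ("argument should be int or None, not float") matches self.frame_len: in Python 3, / always returns a float, and file.read() only accepts an int. A minimal sketch of a reader with that fixed, plus the 10-bit size handling (the class name, the bit_depth parameter, and the end-of-file check are my additions, not from the post):
import numpy as np

class YUVReader:
    """Sketch of a planar YUV 4:2:0 reader; not the original class."""
    def __init__(self, filename, size, bit_depth=10):
        self.height, self.width = size
        # 4:2:0 has 3/2 samples per pixel; // keeps frame_len an int,
        # because file.read() rejects floats in Python 3
        samples = self.width * self.height * 3 // 2
        self.bytes_per_sample = 2 if bit_depth > 8 else 1  # 10-bit samples occupy 2 bytes on disk
        self.frame_len = samples * self.bytes_per_sample
        self.f = open(filename, 'rb')
        self.shape = (self.height * 3 // 2, self.width)

    def read_raw(self):
        raw = self.f.read(self.frame_len)
        if len(raw) < self.frame_len:
            return False, None  # end of file
        dtype = np.uint16 if self.bytes_per_sample == 2 else np.uint8
        yuv = np.frombuffer(raw, dtype=dtype).reshape(self.shape)
        return True, yuv
Two more things worth checking: cv2.cvtColor's YUV-to-BGR conversions expect 8-bit input, so a 10-bit frame needs scaling first (for example (yuv >> 2).astype(np.uint8)), and (result1 + result2) / 2 overflows on uint8 arrays before the division; cv2.addWeighted(result1, 0.5, result2, 0.5, 0) averages without that problem.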

Unable to get the output image

I'm fairly new to Python and OpenCV and I have been experimenting with some code that I found online, so thank you in advance for helping.
Although I'm using the imshow() function defined in OpenCV, I'm unable to display the image.
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import cv2


def main():
    "put img into target img of size imgSize, transpose for TF and normalize gray-values"
    img = cv2.imread("C:\\Users\\bnsid\\Desktop\\a01-003-00-02.png", cv2.IMREAD_GRAYSCALE)
    imgSize = (128, 32)
    dataAugmentation = True
    if img is None:
        img = np.zeros([imgSize[1], imgSize[0]])
    # data augmentation
    if dataAugmentation:
        stretch = (random.random() - 0.5)  # -0.5 .. +0.5
        wStretched = max(int(img.shape[1] * (1 + stretch)), 1)  # random width, but at least 1
        img = cv2.resize(img, (wStretched, img.shape[0]))  # stretch horizontally by factor 0.5 .. 1.5
    # create target image and copy sample image into it
    (wt, ht) = imgSize
    (h, w) = img.shape
    fx = w / wt
    fy = h / ht
    f = max(fx, fy)
    newSize = (max(min(wt, int(w / f)), 1), max(min(ht, int(h / f)), 1))  # scale according to f (result at least 1 and at most wt or ht)
    img = cv2.resize(img, newSize)
    target = np.ones([ht, wt]) * 255
    target[0:newSize[1], 0:newSize[0]] = img
    # transpose for TF
    img = cv2.transpose(target)
    # normalize
    (m, s) = cv2.meanStdDev(img)
    m = m[0][0]
    s = s[0][0]
    img = img - m
    img = img / s if s > 0 else img
    cv2.imshow('Greyscale_Stretched', img)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:  # wait for ESC key to exit
        cv2.destroyAllWindows()
    elif k == ord('s'):  # wait for 's' key to save and exit
        cv2.imwrite('grey.png', img)
        cv2.destroyAllWindows()
Just tested your code. You need to call the main() function somewhere; since you haven't done that, the function is never executed.
Simply add main() at the end of the code, and everything works:
def main():
    # your code here
    print("placeholder")

main()
The main() function you have declared and defined here does not act like the main() entry function in C++. If you would like similar behavior, use this:
def function_name():
    print('placeholder')

if __name__ == '__main__':
    function_name()  # e.g. main()

opencv segment connected characters

I am trying to do character detection for handwritten letters.
For the recognition itself I use Tesseract or an OpenCV SVM, and that has worked fine so far.
Segmenting the letters also works fine until I hit connected ones.
I use the following code to segment letters:
# -*- coding: utf-8 -*-
import numpy as np
import cv2
# from matplotlib import pyplot as plt
from os.path import dirname, join, basename
import sys
from glob import glob

trainpic = []
targetdir = dirname(__file__) + 'tmporigin'
# print glob(join(dirname(__file__)+'/cat','*.jpg'))
img = {}
debug = True
a_num = 0
for fn in glob(join(targetdir, '*')):
    filename = basename(fn)
    trainpic.append(cv2.imread(fn, 0))
    img_rgb = cv2.imread(fn)
    img = cv2.imread(fn, 0)
    image_close = cv2.morphologyEx(img_rgb, cv2.MORPH_CLOSE, np.ones((1, 7), np.uint8))
    # if debug:
    #     cv2.imshow('morphology', image_close)
    #     key = cv2.waitKey(0)
    _, contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    samples = np.empty((0, 100))
    responses = []
    # keys = [i for i in range(48, 58)]
    tmp_list = []
    tmpcount = 0
    for cnt in contours:
        print 'contourarea:%s' % cv2.contourArea(cnt)
        if cv2.contourArea(cnt) > 130:  # 50 300
            [x, y, w, h] = cv2.boundingRect(cnt)
            print 'boundingRect width:%s' % w
            print 'boundingRect height:%s' % h
            if h > 28:
                cv2.rectangle(img_rgb, (x, y), (x + w, y + h), (0, 0, 255), 2)
                roi = img[y:y + h, x:x + w]
                roismall = cv2.resize(roi, (45, 55))
                if debug:
                    cv2.imshow('norm', img_rgb)
                    key = cv2.waitKey(0)
                # tmp_list.append(roi)
                tmpfilename = fn if tmpcount == 0 else fn.rsplit('.', 1)[0] + '_' + str(tmpcount) + '.png'
                cv2.imwrite(tmpfilename, roismall)
                tmpcount += 1
        else:
            print 'contarea less, skip...'
    # print img[num].shape
    a_num += 1
print '%s images processed' % a_num
So it handles letters with space in between fine, like this example (split into D and B):
However, it fails to segment connected letters like this:
I googled a lot about connected letters and found a couple of related links.
I tried a lot, e.g. morphological dilate, erode, open, close, watershed, etc., but nothing fixed my problem.
I use OpenCV 3.2.0 and Python 2.7.10 on my Ubuntu desktop.
Any suggestion is greatly appreciated.
Thanks,
Wesley
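One technique worth trying for the touching letters (a sketch, not a drop-in fix): when a bounding box is much wider than a single character, split it at the thinnest column of the vertical projection profile near each expected character boundary. This assumes a binarized ROI with white ink on black; the 45-pixel character width is taken from the resize in the code above:
import numpy as np

def split_touching(roi_bin, expected_char_w=45):
    # Split a binarized ROI (white ink on black) at column-sum minima.
    h, w = roi_bin.shape
    n_chars = max(1, int(round(w / float(expected_char_w))))
    if n_chars == 1:
        return [roi_bin]
    profile = roi_bin.sum(axis=0)  # total ink per column
    pieces, prev = [], 0
    for k in range(1, n_chars):
        guess = k * w // n_chars  # expected boundary position
        margin = max(1, w // (4 * n_chars))  # search window around the guess
        lo = max(prev + 1, guess - margin)
        hi = min(w - 1, guess + margin)
        cut = lo + int(np.argmin(profile[lo:hi]))  # thinnest column near the guess
        pieces.append(roi_bin[:, prev:cut])
        prev = cut
    pieces.append(roi_bin[:, prev:])
    return pieces
Each returned piece can then go through the same resize-and-save path as roismall above. This heuristic won't handle heavily overlapping strokes, but it is a common first step before heavier methods like oversegmentation with classifier voting.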

Face recognition with multiple faces doesn't detect multiple faces

I've written a small program which detects faces and saves them to a training file for recognition.
I have some trouble with this algorithm: sometimes it throws an error that LBPH::train was fed empty data, which is wrong.
OpenCV Error: Unsupported format or combination of formats (Empty training data was given. You'll need more than one sample to learn a model.) in cv::LBPH::train, file ........\opencv\modules\contrib\src\facerec.cpp, line 917
Traceback (most recent call last):
Moreover, the algorithm detects multiple faces but recognizes them all as the same face, which is wrong.
Could someone give me a hint on what I'm missing?
import cv2
import os
import numpy as np
import sys

i = 0
global allFaces
global first
first = True
allFaces = []
cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
recognizer = cv2.createLBPHFaceRecognizer()
font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_COMPLEX_SMALL, 1, 1, 0, 1)
id = 0


class Face:
    def __init__(self, id, face):
        self.id = id
        self.face = face
        self.gatheredFaces = []

    def main(self):
        print("main")

    def getFace(self):
        return self.face

    def setKnownFace(self):
        self.known = False

    def getKnownFace(self):
        return self.known

    def getId(self):
        return self.id

    def setFacesInfo(self, frame, face):
        x, y, h, w = face
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.gatheredFaces.append(gray[y:y+h, x:x+w])
        # count = 0
        # while (count != 10):
        #     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #     cv2.imshow("frame in set", frame)
        #     faces = faceCascade.detectMultiScale(gray)
        #     for face in faces:
        #         self.gatheredFaces.append(gray[y:y+h, x:x+w])
        #     cv2.imshow("gathered Faces", self.gatheredFaces[0])
        #     cv2.imwrite("dataSet/User" + str(self.getId()) + ".jpg", gray)
        #     count = count + 1
        #     cv2.waitKey(30)

    def getFacesInfo(self):
        return self.gatheredFaces

    def trainDetector(self):
        faceSamples = []
        Ids = []
        print("length of gathered faces:")
        print(len(allFaces[0].getFacesInfo()))
        for i in range(len(allFaces)):
            temp = allFaces[i].getFacesInfo()
            for j in range(len(temp)):
                imageNP = np.array(temp[j], 'uint8')
                id = allFaces[i].getId()
                faces = faceCascade.detectMultiScale(imageNP)
                for (x, y, h, w) in faces:
                    faceSamples.append(imageNP)
                    Ids.append(id)
        recognizer.train(faceSamples, np.array(Ids))
        recognizer.save('recognizer/train.yml')

    def updateDetector(self):
        recognizer.load('recognizer/train.yml')
        faceSamples = []
        Ids = []
        for i in range(len(allFaces)):
            temp = allFaces[i].getFacesInfo()
            for j in range(len(temp)):
                imageNP = np.array(temp[j], 'uint8')
                id = allFaces[i].getId()
                faces = faceCascade.detectMultiScale(imageNP)
                for (x, y, h, w) in faces:
                    faceSamples.append(imageNP)
                    Ids.append(id)
        recognizer.update(faceSamples, np.array(Ids))
        recognizer.save('recognizer/train.yml')


while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("actual Frame", frame)
    cv2.imshow("gray", gray)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    print(faces)
    for face in faces:
        x, y, h, w = face
        temp = Face(id, frame[y:y+h, x:x+w])
        allFaces.append(temp)
        temp = None
        id = id + 1

    ### Detector
    detector = cv2.SIFT()
    FLANN_INDEX_KDTREE = 0
    flannParam = dict(algorithm=FLANN_INDEX_KDTREE, tree=5)
    flann = cv2.FlannBasedMatcher(flannParam, {})
    trainImg = allFaces[0].getFace()
    trainKP, trainDecs = detector.detectAndCompute(trainImg, None)
    if (len(allFaces) == 1) and first:
        print("only one object in allFaces")
        for i in range(10):
            print(i)
            allFaces[0].setFacesInfo(frame, face)
        allFaces[0].trainDetector()
        first = False
    else:
        for i in range(len(allFaces)):
            QueryImg = cv2.cvtColor(allFaces[i].getFace(), cv2.COLOR_BGR2GRAY)
            queryKP, queryDesc = detector.detectAndCompute(QueryImg, None)
            matches = flann.knnMatch(queryDesc, trainDecs, k=2)
            goodMatch = []
            for m, n in matches:
                if m.distance < 0.75 * n.distance:
                    goodMatch.append(m)
            if len(goodMatch) > 30:
                print("good match")
                # allFaces[i].
                tp = []
                qp = []
                for m in goodMatch:
                    tp.append(trainKP[m.trainIdx].pt)
                    qp.append(queryKP[m.queryIdx].pt)
                tp, qp = np.float32((tp, qp))
                H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)
                allFaces.pop(len(allFaces) - 1)
                break
            else:
                print("bad match")
                for i in range(10):
                    allFaces[len(allFaces) - 1].setFacesInfo(frame, face)
                allFaces[len(allFaces) - 1].updateDetector()
                cv2.waitKey(10)

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        tempid, conf = recognizer.predict(gray[y:y+h, x:x+w])
        cv2.cv.PutText(cv2.cv.fromarray(frame), str(tempid), (x, y + h), font, (0, 0, 255))
        cv2.waitKey(30)
    cv2.imshow("detectedFace", frame)
    cv2.waitKey(30)
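Regarding the "Empty training data" error (an observation, not from an accepted answer in the thread): cv::LBPH::train raises exactly that message when the sample list it receives is empty, and in the code above faceSamples stays empty whenever detectMultiScale finds nothing inside the already-cropped face images. A small guard sketch (the helper name is mine):
import numpy as np

def safe_train(recognizer, faceSamples, ids, model_path='recognizer/train.yml'):
    # LBPH refuses an empty sample list, so skip training instead of crashing
    if len(faceSamples) == 0:
        print("no usable face samples collected, skipping train/update")
        return False
    recognizer.train(faceSamples, np.array(ids))
    recognizer.save(model_path)
    return True
Since the crops already come from a detection, re-running detectMultiScale on them mostly drops samples; appending the crop directly would keep the list populated.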
