Save images in a loop with different names - Python

I have a problem with saving cropped images in a loop. My code:
def run(self, image_file):
    print(image_file)
    cap = cv2.VideoCapture(image_file)
    while(cap.isOpened()):
        ret, frame = cap.read()
        if ret == True:
            img = frame
            min_h = int(max(img.shape[0] / self.min_height_dec, self.min_height_thresh))
            min_w = int(max(img.shape[1] / self.min_width_dec, self.min_width_thresh))
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(gray, 1.3, minNeighbors=5, minSize=(min_h, min_w))
            images = []
            for i, (x, y, w, h) in enumerate(faces):
                images.append(self.sub_image('%s/%s-%d.jpg' % (self.tgtdir, self.basename, i + 1), img, x, y, w, h))
            print('%d faces detected' % len(images))
            for (x, y, w, h) in faces:
                self.draw_rect(img, x, y, w, h)
            # Fix in case nothing found in the image
            outfile = '%s/%s.jpg' % (self.tgtdir, self.basename)
            cv2.imwrite(outfile, img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    cv2.destroyAllWindows()
    return images, outfile
I have a loop over every frame that crops out the faces. The problem is that every cropped image and every full picture gets the same name, so in the end I only have the faces from the last frame. How should I fix this code to save all the cropped faces and pictures?

You are saving each file with the same name, so you keep overwriting the previously saved image:
outfile = '%s/%s.jpg' % (self.tgtdir, self.basename)
Change that line to the following to add a random string to the name:
outfile = '%s/%s.jpg' % (self.tgtdir, self.basename + str(uuid.uuid4()))
You will also need to import uuid at the top of your file.
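For reference, a minimal standalone sketch of the same idea ('input.jpg' and the output/ directory are just placeholders for a frame and an existing target folder):
import uuid
import cv2

img = cv2.imread('input.jpg')              # stands in for a frame from the video
outfile = 'output/%s.jpg' % uuid.uuid4()   # a new random name on every call
cv2.imwrite(outfile, img)
Because uuid4() is random, each saved face or frame gets its own file instead of overwriting the previous one.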

You may use the datetime module to get the current time with sub-second precision, which avoids name conflicts; build the name before saving the image:
from datetime import datetime
outfile = '%s/%s.jpg' % (self.tgtdir, self.basename + str(datetime.now()))
cv2.imwrite(outfile, img)
You can also use other techniques such as uuid4 to get a unique random id for each frame, but since those names are random it can be cumbersome on some platforms to display the files in sorted order, so using a timestamp in the name should get the job done.
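One caveat worth adding as a suggestion: str(datetime.now()) contains spaces and colons, which are not valid in Windows file names, so formatting the timestamp explicitly is safer. A sketch along the lines of the snippet above:
from datetime import datetime

# %f appends microseconds, so frames saved within the same second still get distinct
# names, and the format string avoids characters that are illegal in Windows file names.
stamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
outfile = '%s/%s_%s.jpg' % (self.tgtdir, self.basename, stamp)
cv2.imwrite(outfile, img)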

import numpy as np
import cv2

cap = cv2.VideoCapture(0)
i = 0
while(True):
    # Capture frame-by-frame
    i = i + 1
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    # Save the grayscale frame under a new, numbered name on every iteration
    cv2.imwrite("template {0}.jpg".format(i), gray)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
--- Code by rohbhot
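A small aside, offered as a suggestion rather than part of the answer above: zero-padding the counter keeps the saved files in the expected order when sorted by name:
# "template 00001.jpg", "template 00002.jpg", ... sort correctly as plain strings
cv2.imwrite("template {0:05d}.jpg".format(i), gray)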

I think this will be helpful:
import cv2

vid = cv2.VideoCapture("video.mp4")
d = 0
ret, frame = vid.read()
while ret:
    filename = "images/file_%d.jpg" % d
    cv2.imwrite(filename, frame)   # the counter d makes every name unique
    d += 1
    ret, frame = vid.read()        # read the next frame; the loop ends when this fails
vid.release()
This will save every frame under a different name.
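One general cv2.imwrite behaviour worth noting here (not specific to this answer): imwrite does not create missing directories and simply returns False when the target folder does not exist, so creating images/ up front avoids silently losing frames:
import os

os.makedirs("images", exist_ok=True)          # create the output folder if it is not there yet
ok = cv2.imwrite("images/file_0.jpg", frame)  # returns False instead of raising on a bad path
if not ok:
    print("imwrite failed - check the path and the image data")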

With matplotlib you can build the file name from variables in the same way:
plt.savefig('/content/drive/MyDrive/Colab Notebooks/res_data/dimers/' + str(yname) + '_' + str(xname) + '_' + str(dimername), bbox_inches='tight')
which saves to a path such as:
/content/drive/MyDrive/Colab Notebooks/res_data/dimers/zeta_beta_GA.png
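A slightly tidier way to build the same name, sketched with the variable names from the snippet above:
import os

outdir = '/content/drive/MyDrive/Colab Notebooks/res_data/dimers'
outfile = os.path.join(outdir, '%s_%s_%s.png' % (yname, xname, dimername))
plt.savefig(outfile, bbox_inches='tight')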

Related

How do I add an image overlay to my live video using cv2?

This is my code. I've looked at some tutorials but can't find what I'm looking for.
I want to overlay the Frame.png image on my webcam feed. I tried to add the image directly but that didn't work either. If possible, is there a way not to overlay it, but to keep the image at a certain coordinate in the live webcam window?
import cv2
import numpy as np

def detect_and_save():
    alpha = 0.2
    beta = 1 - alpha
    cap = cv2.VideoCapture(0)
    sciframe = cv2.imread('Frame.png')
    classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    while True:
        ret, frame = cap.read()
        overlay = frame.copy()
        output = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = classifier.detectMultiScale(gray, 1.5, 5)
        cv2.putText(output, "HUD Test", (175, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 50, 50), 3)
        for face in faces:
            x, y, w, h = face
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 200, 0), -1)
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 0, 0), 1)
            cv2.rectangle(overlay, (x, y - 20), (x + w, y), (25, 20, 0), -1)
            cv2.addWeighted(overlay, alpha, output, beta, 0, output)
            cv2.putText(output, "Human", (x + 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)
        if not ret:
            continue
        cv2.imshow("HUD", output)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            cv2.imwrite('./images/CID_{}.png'.format(time.strftime('%d%m%y_%H_%M_%S')), output)
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    import time
    detect_and_save()
You can directly place one image on top of another at any coordinate in OpenCV.
cap = cv2.VideoCapture(0)
im_height = 50  # define your top image size here
im_width = 50
im = cv2.resize(cv2.imread("Frame.png"), (im_width, im_height))
while True:
    ret, frame = cap.read()
    # Paste into the top-left corner (rows 0:im_height, columns 0:im_width);
    # pick any other region, e.g. 200:250, to place it elsewhere.
    frame[0:im_height, 0:im_width] = im
    cv2.imshow("live camera", frame)
    if cv2.waitKey(1) == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
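If you want the pasted image to look like a semi-transparent HUD element instead of a hard rectangle, one option (a sketch, not part of the original answer) is to blend only that region with cv2.addWeighted:
# Blend the small image into the top-left region: 60% camera frame, 40% overlay image.
roi = frame[0:im_height, 0:im_width]
frame[0:im_height, 0:im_width] = cv2.addWeighted(roi, 0.6, im, 0.4, 0)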

How to capture photos with cv2.imwrite()?

cv2.imwrite() is not working. I am trying to take 100 photos when a face is detected.
Here is the code given:
import cv2
import datetime

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
while True:
    _, frame = cap.read()
    greyImg = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    face = face_cascade.detectMultiScale(greyImg, 1.3, 5)
    for x, y, w, h in face:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 5)
        time_stamp = datetime.datetime.now().strftime("%D-%m-%Y")
        file_name = f"{time_stamp}-face.jpg"
        for i in range(100):
            cv2.imwrite(file_name, greyImg)
    cv2.imshow("Face recogniton", cv2.flip(frame, 1))
    if cv2.waitKey(1) == ord("q"):
        break
The main bug in your code is that you loop 100 times saving the same image under the same name.
This code is supposed to solve your issue:
import cv2
from datetime import datetime
import numpy as np

cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
faces_counter: int = 0
while True:
    _, frame = cap.read()
    greyImg = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    face = face_cascade.detectMultiScale(greyImg, 1.3, 5)
    if np.any(face):
        faces_counter += 1
        if faces_counter > 100:
            break
    for x, y, w, h in face:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 5)
        now = datetime.now()
        current_time = now.strftime("%H_%M_%S")
        file_name = f"Img_{current_time}_{faces_counter}-face.png"
        cv2.imwrite(f"{str(file_name)}", frame)
    cv2.imshow("Face recogniton", cv2.flip(frame, 1))
    if cv2.waitKey(1) == ord("q"):
        break

How to use the webcam to capture an image and extract the information on it using python?

I was trying to capture an image from the webcam and extract the text on it using Python.
Here is the code:
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pytesseract
from PIL import Image
from pytesseract import image_to_string

pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

def main():
    # Use the attached camera to capture images
    # 0 stands for the first one
    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        ret, frame = cap.read()
        print(ret)
        print(frame)
    else:
        ret = False
    img1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # img = Image.open('image.jpg')
    text = pytesseract.image_to_string(img1)
    print(text)
    # plt.imshow(img1)
    # plt.title('Color Image RGB')
    # plt.xticks([])
    # plt.yticks([])
    # plt.show()
    cap.release()

if __name__ == "__main__":
    main()
The code didn't work. I watched a couple of videos on YouTube, and I saw that people typically use Image.open("image.jpg") to open an image that is stored on the computer. But I need to capture the image from the webcam and extract the information from it, so that method won't work in my situation. Is there a way to combine the two, i.e. capture the image using cv2 and extract the text using pytesseract.image_to_string()?
Can you please try replacing this line,
text = pytesseract.image_to_string(img1)
with this one,
text = pytesseract.image_to_string(Image.fromarray(img1))
Or use the working code snippet here (your code, copied and updated a little):
def main():
    # Use the attached camera to capture images
    # 0 stands for the first one
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ret, frame = cap.read()
        img1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        text = pytesseract.image_to_string(Image.fromarray(img1))
        cv2.imshow('frame', img1)
        if cv2.waitKey(0) & 0xFF == ord('q'):
            return None
        print("Extracted Text: ", text)
    cap.release()
Hope this helps.
I used a while loop because with an if condition I did not get a result; I am still trying to figure out why.
import cv2
import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'

framewidth = 640
frameheight = 480
cap = cv2.VideoCapture(0)
cap.set(3, framewidth)   # frame width
cap.set(4, frameheight)  # frame height

while True:
    success, img = cap.read()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # print(pytesseract.image_to_string(img))

    ## detecting characters
    # hImg, wImg, _ = img.shape
    # boxes = pytesseract.image_to_boxes(img)
    # for b in boxes.splitlines():
    #     # print(b)
    #     b = b.split(' ')
    #     print(b)
    #     x, y, w, h = int(b[1]), int(b[2]), int(b[3]), int(b[4])
    #     cv2.rectangle(img, (x, hImg - y), (w, hImg - h), (0, 0, 255), 3)
    #     cv2.putText(img, b[0], (x, hImg - y + 25), cv2.FONT_HERSHEY_COMPLEX, 1, (50, 100, 255), 2)

    ## detecting words
    hImg, wImg, _ = img.shape
    boxes = pytesseract.image_to_data(img)
    for x, b in enumerate(boxes.splitlines()):
        if x != 0:
            b = b.split()
            print(b)
            if len(b) == 12:
                x, y, w, h = int(b[6]), int(b[7]), int(b[8]), int(b[9])
                cv2.rectangle(img, (x, y), (w + x, h + y), (0, 0, 255), 3)
                cv2.putText(img, b[11], (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (50, 100, 255), 2)

    ## detecting digits
    # hImg, wImg, _ = img.shape
    # cong = r'--oem 3 --psm 6 outputbase digits'
    # boxes = pytesseract.image_to_data(img, config=cong)
    # for x, b in enumerate(boxes.splitlines()):
    #     if x != 0:
    #         b = b.split()
    #         print(b)
    #         if len(b) == 12:
    #             x, y, w, h = int(b[6]), int(b[7]), int(b[8]), int(b[9])
    #             cv2.rectangle(img, (x, y), (w + x, h + y), (0, 0, 255), 3)
    #             cv2.putText(img, b[11], (x, y), cv2.FONT_HERSHEY_COMPLEX, 1, (50, 100, 255), 2)

    # reading text, don't delete it
    # print(pytesseract.image_to_boxes(img))

    cv2.imshow("video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
I don't know how to phrase a proper answer, but here is my code and it works fine; enjoy.

Capturing a screenshot when a face is detected

I have to write code that captures a screenshot when a face is detected in a video, so that the image can be used for an image recognition dataset.
I made a program that captures all frames, but I need it to capture only when a face is detected.
import cv2

cap = cv2.VideoCapture('test.mp4')
count = 0
while cap.isOpened():
    ret, frame = cap.read()
    cv2.imshow('window-name', frame)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.imwrite("frame%d.jpg" % count, frame)
        count = count + 1
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()  # destroy all the opened windows
I just tried your code and it has one small mistake: you don't specify the XML classifier path correctly. I fixed it by using the full path to where the XML file is located, and it works successfully.
import cv2
import numpy as np

cap = cv2.VideoCapture('test.mp4')
#cap = cv2.VideoCapture(0)  # I tried using the webcam and it works too
count = 0
while cap.isOpened():
    ret, frame = cap.read()
    cv2.imshow('window-name', frame)
    # Below you have to insert the full path of the XML file; this one is mine
    face_cascade = cv2.CascadeClassifier('C:/ProgramData/Anaconda2/pkgs/opencv-3.2.0-np111py27_0/Library/etc/haarcascades/haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.imwrite("frame%d.jpg" % count, frame)
        count = count + 1
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()  # destroy all the opened windows
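If OpenCV was installed from pip (the opencv-python package), you can avoid hard-coding the path altogether; a small sketch, assuming that package is in use:
import cv2

# cv2.data.haarcascades points at the folder where the pip package ships the XML files
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(cascade_path)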

Track an image in a video and replace it with another image using OpenCV

I have to track a window in a video and need to paste an image onto that window. I have used CamShift to track the window, but it does not track it correctly. My window is brown, so I have given the following colour range:
np.array((110, 0, 0)) -- lower
np.array((130, 255, 255)) -- upper
I have read many documents on OpenCV but am not able to figure out which method to follow. I am using OpenCV 2.4.9 with Python.
Below is the code I tried. Please help me figure out the exact location of the window.
#!/usr/bin/env python
import numpy as np
import cv2

cap = cv2.VideoCapture("myvideo.mp4")

# take first frame of the video
ret, frame = cap.read()
#print frame
#print ret

# setup initial location of window
r, h, c, w = 157, 40, 337, 40
track_window = (c, r, w, h)

# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((110, 0, 0)), np.array((130, 255, 255)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [255], [0, 255])
cv2.imshow('img2', roi_hist)
#print roi_hist
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Setup the termination criteria, either 10 iterations or move by at least 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

i = 1
while(1):
    ret, frame = cap.read()
    if ret == True:
        i += 1
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 255], 1)
        # apply meanshift to get the new location
        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
        #print track_window
        # Draw it on the image
        x, y, w, h = track_window
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('img2', frame)
        k = cv2.waitKey(200) & 0xff
        if k == 27:
            break
        else:
            # print "comes here2"
            cv2.imwrite(str(i) + "test.jpg", frame)
            #break
    else:
        break
cv2.destroyAllWindows()
cap.release()
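One way to sanity-check the colour range (a suggestion, not part of the original question): in OpenCV's HSV space hue runs from 0 to 179, and hues around 110-130 correspond to blue, while brown usually sits near the orange hues (roughly 10-20). It is therefore worth printing the actual HSV values of the window region on the first frame before choosing the inRange bounds:
# Inspect the HSV values inside the tracked region to choose a sensible range.
hsv_roi = cv2.cvtColor(frame[r:r+h, c:c+w], cv2.COLOR_BGR2HSV)
print('hue min/max:', hsv_roi[:, :, 0].min(), hsv_roi[:, :, 0].max())
print('sat min/max:', hsv_roi[:, :, 1].min(), hsv_roi[:, :, 1].max())
print('val min/max:', hsv_roi[:, :, 2].min(), hsv_roi[:, :, 2].max())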
