def Read_img(path):
img = cv2.imread(path)
(h, w) = img.shape[:2]
WIDTH = 500
RATIO = WIDTH / float(w)
HEIGHT = int(h * RATIO) + 50
return cv2.resize(img, (WIDTH, HEIGHT))
for names in os.listdir(known_faces_dir):
print(names)
for file_name in os.listdir(f'{known_faces_dir}/{names}'):
print(file_name)
image = Read_img(f'{known_faces_dir}/{names}')
Here is the code my Read_img function is the problem it returns an error when i run the image variable
here is the error
File "Facial_Rec.py", line 23, in <module>
image = Read_img(f'{known_faces_dir}/{names}')
File "Facial_Rec.py", line 12, in Read_img`enter code here`
(h, w) = img.shape[:2]
AttributeError: 'NoneType' object has no attribute 'shape'
PLEASE HELP!
when you try to open an image but the path is not correct, OpenCV does not tell you that it could not find the image, it returns None. So when you want to check the shape you get an error
if the type is None just continue and pass to the next loop
if type(img) is None:
continue
Related
import cv2
import numpy as np
from math import ceil
import os
dst = "C:\OpencvPython\frames\slide" # Images destination
images = os.listdir(dst) # Get their names in a list
length = len(images)
result = np.zeros((360,360,3), np.uint8) # Image window
of size (360, 360)
i = 1
a = 1.0 # alpha
b = 0.0 # beta
img = cv2.imread(dst + images[i])
img = cv2.resize(img, (360, 360))
# Slide Show Loop
while(True):
if(ceil(a)==0):
a = 1.0
b = 0.0
i = (i+1)%length # Getting new image from directory
img = cv2.imread(dst + images[i])
img = cv2.resize(img, (360, 360))
a -= 0.01
b += 0.01
# Image Transition from one to another
result = cv2.addWeighted(result, a, img, b, 0)
cv2.imshow("Slide Show", result)
key = cv2.waitKey(1) & 0xff
if key==ord('q'):
break
cv2.destroyAllWindows()
[ WARN:0#0.007] global D:\a\opencv-python\opencv-python\opencv\modules\imgcodecs\src\loadsave.cpp (239) cv::findDecoder imread_('C:/OpencvPython/frames/slidedownload (2).jpg'): can't open/read file: check file path/integrity
cv2.error: OpenCV(4.6.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\resize.cpp:4052: error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize'
The issue is with the path, you can replace img = cv2.imread(dst + images[i]) with
img = cv2.imread(os.path.join(dst, images[i]))
Using + operator will directly add 2nd sting to the base path, for example if dst = "C:\OpencvPython\frames\slide" and images[i] = "img1.png" then dst + images[i] would become "C:\OpencvPython\frames\slideimg1.png" hence leading to file not found error while os.path.join will generate correct path.
I've run into issues with win32gui when trying to grab a real-time video stream of an application. I've seen I can use ImageGrab from PIL and based on this video Computer Screen Recording using Python & OpenCV I think I can use it instead of win32gui
I'm trying to learn python by writing a bot, the below code should grab images from a specified folder, load them into an array, converts them into a format OpenCV can use and then attempts to find any or all of them on my application window haystack
I can't find any details on google of the error I'm getting:
C:\Users\coyle\OneDrive\froggy-pirate-master\avoidShips>C:/Users/coyle/AppData/Local/Programs/Python/Python39/python.exe c:/Users/coyle/OneDrive/froggy-pirate-master/avoidShips/avoidships4.py
Traceback (most recent call last):
File "c:\Users\coyle\OneDrive\froggy-pirate-master\avoidShips\avoidships4.py", line 41, in <module>
loadImages()
File "c:\Users\coyle\OneDrive\froggy-pirate-master\avoidShips\avoidships4.py", line 22, in loadImages
return matchTemplate(image_list)
File "c:\Users\coyle\OneDrive\froggy-pirate-master\avoidShips\avoidships4.py", line 32, in matchTemplate
result = cv.matchTemplate(haystack, needle_img, cv.TM_CCOEFF_NORMED)
cv2.error: OpenCV(4.5.1) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-wvn_it83\opencv\modules\imgproc\src\templmatch.cpp:588: error: (-215:Assertion failed) corr.rows <= img.rows + templ.rows - 1 && corr.cols <= img.cols + templ.cols - 1 in function 'cv::crossCorr'
And my code:
def loadImages():
# Intialise empty array
image_list = []
# Get list of all images in directory
directory = glob.glob(r"C:\Users\*.png")
# Add images to image_list
for img in directory:
ship_img = cv.imread(img, 0)
image_list.append(ship_img)
return matchTemplate(image_list)
def matchTemplate(image_list):
# Video Loop
while True:
haystack_img = ImageGrab.grab()
haystack_img_np = np.array(haystack_img)
haystack = cv.cvtColor(haystack_img_np, cv.COLOR_BGR2GRAY)
# Object Detection
for ships in image_list:
needle_img = cv.imread(str(image_list), cv.IMREAD_UNCHANGED)
result = cv.matchTemplate(haystack, needle_img, cv.TM_CCOEFF_NORMED)
cv.imshow('Result', haystack)
if cv.waitKey(1) == 27:
break
cv.destroyAllWindows()
loadImages()
matchTemplate()
As a test, I've tried doing the same thing using static images and it works so I'm not sure where I'm going wrong.
import cv2 as cv
import glob
# load source images
directory = glob.glob(r'C:\Users\*.jpg')
# empty list to store the source images
image_list = []
for img in directory:
ships_img = cv.imread(img, 0)
image_list.append(ships_img)
haystack_img = cv.imread(r'C:\Users\both.jpg')
haystack_img = cv.cvtColor(haystack_img, cv.COLOR_BGR2GRAY)
#loop for matching
for ships in image_list:
#save the dimensions of the needle images
(H, W) = ships.shape[:2]
result = cv.matchTemplate(haystack_img, ships, cv.TM_CCOEFF)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
top_left = max_loc
bottom_right = (top_left[0] + W, top_left[1] + H)
cv.rectangle(haystack_img, top_left, bottom_right, 255, 2)
cv.imshow('Result', haystack_img)
cv.waitKey(0)
I can't test it but you simply try to load image which you already have in memory
You have
needle_img = cv.imread(str(image_list), cv.IMREAD_UNCHANGED)
but image_list has already loaded image, not filenames.
Besides imread() needs sinlge filename but you try use it with some list converted to string.
You should use directly
needle_img = ships
I think it should be
def loadImages():
# Intialise empty array
image_list = []
# Get list of all images in directory
directory = glob.glob(r"C:\Users\*.png")
# Add images to image_list
for img in directory:
ship_img = cv.imread(img, 0) # <-- here you load all images
image_list.append(ship_img)
return image_list # I preferr to send back data instead of running `matchTemplate`
def matchTemplate(image_list):
# Video Loop
while True:
haystack_img = ImageGrab.grab()
haystack_img_np = np.array(haystack_img)
haystack = cv.cvtColor(haystack_img_np, cv.COLOR_BGR2GRAY)
# Object Detection
for ships in image_list:
# you don't have to load images because you already have them in `image_list`
#needle_img = cv.imread(str(image_list), cv.IMREAD_UNCHANGED)
needle_img = ships
result = cv.matchTemplate(haystack, needle_img, cv.TM_CCOEFF_NORMED)
cv.imshow('Result', haystack)
if cv.waitKey(1) == 27:
break
cv.destroyAllWindows()
# --- main ---
image_list = loadImages()
matchTemplate(image_list)
BTW:
In normal open(), read() you get error if it has problem to open or read file but in OpenCV imread() does't raise error when it can't load image but it gives None - but you don't check if you get None - and you don't know that there was problem to load it - and later when you try to use this value in next command (matchTemplate) then it shows error. But real problem was with imread()
When I am processing a bunch of images, on one of them I get this error
File "/home/tensorflowpython/firstmodel/yololoss.py", line 153, in data_generator
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
File "/home/tensorflowpython/firstmodel/yololoss.py", line 226, in get_random_data
image = image.resize((nw,nh), Image.BICUBIC)
File "/home/tensorflowpython/kenv/lib/python3.6/site-packages/PIL/Image.py", line 1858, in resize
self.load()
File "/home/tensorflowpython/kenv/lib/python3.6/site-packages/PIL/ImageFile.py", line 247, in load
"(%d bytes not processed)" % len(b)
OSError: image file is truncated (25 bytes not processed)
I have already tried the solution suggested here but it doesn't work
my code looks like this
from PIL import Image
def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
Image.LOAD_TRUNCATED_IMAGES = True
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
try:
image.load()
except IOError:
pass # You can always log it to logger
if not random:
# resize image
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
image_data=0
if proc_img:
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image)/255.
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
box[:, [0,2]] = box[:, [0,2]]*scale + dx
box[:, [1,3]] = box[:, [1,3]]*scale + dy
box_data[:len(box)] = box
return image_data, box_data
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC) #error occurs here
The difference between my error and the previous solution is, mine says OS error and the solution is for IO error
EDIT: I have figured out the image that is causing this error, it can be downloaded from this link
I tried the solution that you linked with the truncated image and it worked. You made a slight mistake when trying to apply this solution: you have to set ImageFile.LOAD_TRUNCATED_IMAGES=True, not Image.LOAD_TRUNCATED_IMAGES.
LOAD_TRUNCATED_IMAGES does not originally exist in Image module, so when you do Image.LOAD_TRUNCATED_IMAGES=True you set a new variable which is not used by the library.
So I think you juste have to do that:
from PIL import ImageFile, Image
ImageFile.LOAD_TRUNCATED_IMAGES = True
image = Image.open("00090.jpg")
# resize now doesn't fail
image.resize((h, w), Image.BICUBIC)
I'm creating a function (in Python) that expects/receives a single image of multiple human faces in it, and returns multiple smaller images (one image per human face). I am able to do a cv2.imshow inside the function and see the expected smaller images, but when I attempt a cv2.imshow from outside the function, it does not work (unable to see the smaller image, and get a TypeError instead). Would appreciate some guidance.
def stills(user_image):
#sub_frames = []
fqp_image_src = (user_image)
raw_pic = cv2.imread(fqp_image_src)
mpic = cv2.resize(raw_pic,(0,0), fx=0.30, fy=0.30)
mpic_rgb = cv2.cvtColor(mpic, cv2.COLOR_BGR2RGB)
face_boxes = haar_cascade_face.detectMultiScale(mpic_rgb, scaleFactor = 1.2, minNeighbors = 5)
count = int(len(face_boxes))
for i in range(count):
face_box = face_boxes[i]
final = cv2.rectangle(mpic, (face_box[0], face_box[1]), ((face_box[0]+face_box[2]),(face_box[1]+face_box[3])), (0,255,0),2)
sub_frame = final[face_box[1]:(face_box[1]+face_box[3]), face_box[0]:(face_box[0]+face_box[2])]
#sub_frames.append(sub_frame)
cv2.imshow('frame', sub_frame) # this works
cv2.waitKey()
return (sub_frame, final)
# calling the function
something = stills("abc.jpg")
cv2.imshow('frame',something) # this does not work
cv2.waitKey()
TypeError: Expected cv::UMat for argument 'mat'
This will do what you expected, just whit some simplification and with full file paths
.
One of the key erros was give detectMultiScale a colored image, the imput shuld have 1 dimension, with brigtness (gray scales).
In order to display a colored image with the faces in a box a copy of the image is needed to convert into gar scales and detect, giving coordenates to draw in the colored image.
import cv2
import os
# Take as a global the dir in witch is this file
PATH = os.path.dirname(os.path.abspath(__file__))
haar_cascade_face = cv2.CascadeClassifier(os.path.join(PATH, 'haarcascade_frontalface_alt.xml'))
def stills(user_image):
image = os.path.join(PATH, user_image)
image = cv2.imread(image)
image = cv2.resize(image, (0, 0), fx=0.30, fy=0.30)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
face_boxes = haar_cascade_face.detectMultiScale(gray_image, scaleFactor=1.073, minNeighbors=8)
final = image # make the funtion alwais give a image
sub_frames = []
# Check if there are faces
if len(face_boxes) > 0:
for x, y, w, h in face_boxes:
final = cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
sub_frame = image[y:y+h, x:x+w]
sub_frames.append([x, y, x+w, y+h])
cv2.imshow('sub_frame', sub_frame)
# cv2.waitKey() # No need to wait the user
else:
print('No faces found')
return (sub_frames, final)
if __name__ == '__main__':
fragments, final = stills("abc.jpg")
cv2.imshow('frame', final)
cv2.waitKey()
Why does image.getdata() return different result second time I invoke it?
from PIL import Image
def write():
image = Image.open('image.jpg')
newimage = Image.new(image.mode, image.size)
pixels = [p for p in image.getdata()]
for i in range(100):
pixels[i] = (255,255,255)
newimage.putdata(pixels)
newimage.save('newimage.jpg')
print(list(newimage.getdata())[0:10])
def read():
image = Image.open('newimage.jpg')
pixels = [p for p in image.getdata()]
print(list(image.getdata())[0:10])
write()
read()
It gives me the following result:
Why is second set of data differ from the first one?