I have a face_recognition script and I am trying to convert some images from BGR to RGB. I run it with: python3 encode_faces.py --dataset dataset --encodings encodings.pickle. Is there a way to get past the error below?
OpenCV(3.4.1) Error: Assertion failed (scn == 3 || scn == 4) in cvtColor, file /tmp/opencv-20180529-55469-97fkx6/opencv-3.4.1/modules/imgproc/src/color.cpp, line 11115
Traceback (most recent call last):
File "encode_faces.py", line 38, in <module>
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
cv2.error: OpenCV(3.4.1) /tmp/opencv-20180529-55469-97fkx6/opencv-3.4.1/modules/imgproc/src/color.cpp:11115: error: (-215) scn == 3 || scn == 4 in function cvtColor
This is my source code:
# import the necessary packages
# run it like this:
# python3 encode_faces.py --dataset dataset --encodings encodings.pickle
from imutils import paths
import face_recognition
import argparse
import pickle
import cv2
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--dataset", required=True,
    help="path to input directory of faces + images")
ap.add_argument("-e", "--encodings", required=True,
    help="path to serialized db of facial encodings")
ap.add_argument("-d", "--detection-method", type=str, default="cnn",
    help="face detection model to use: either `hog` or `cnn`")
args = vars(ap.parse_args())
# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(args["dataset"]))
# initialize the list of known encodings and known names
knownEncodings = []
knownNames = []
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
    # extract the person name from the image path
    print("[INFO] processing image {}/{}".format(i + 1,
        len(imagePaths)))
    name = imagePath.split(os.path.sep)[-2]
    # load the input image and convert it from BGR (OpenCV ordering)
    # to dlib ordering (RGB)
    image = cv2.imread(imagePath)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input image
    boxes = face_recognition.face_locations(rgb,
        model=args["detection_method"])
    # compute the facial embedding for the face
    encodings = face_recognition.face_encodings(rgb, boxes)
    # loop over the encodings
    for encoding in encodings:
        # add each encoding + name to our set of known names and
        # encodings
        knownEncodings.append(encoding)
        knownNames.append(name)
# dump the facial encodings + names to disk
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
f = open(args["encodings"], "wb")
f.write(pickle.dumps(data))
f.close()
When I add print(image.shape) just before the conversion, the output is:
[INFO] quantifying faces...
[INFO] processing image 1/1401
libpng warning: iCCP: known incorrect sRGB profile
(1080, 1920, 3)
[INFO] processing image 2/1401
Traceback (most recent call last):
File "encode_faces.py", line 38, in <module>
print(image.shape)
AttributeError: 'NoneType' object has no attribute 'shape'
It seems there was a problem with the photos themselves; they had to be recalibrated. I ran another script to re-save the photos and create a new dataset, and the second run worked.
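For anyone hitting the same tracebacks: both errors are consistent with cv2.imread() returning None for a file it cannot decode, since cvtColor and .shape then operate on None. A minimal sketch of a guard inside the image loop that skips such files instead of crashing (the diagnosis is my assumption, not something confirmed beyond the output above):

# inside the loop over imagePaths: cv2.imread() returns None instead of
# raising when a file cannot be decoded, so check before converting
image = cv2.imread(imagePath)
if image is None:
    print("[WARN] skipping unreadable image: {}".format(imagePath))
    continue
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)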
Related
After getting my facial capture component to work (I can gather photos for the dataset), I am trying to run a trainer.py file that will train the model, but I am having some issues with my code. I am new to all of this, so someone experienced with OpenCV, face recognition and Python might be able to help me.
Here is my code.
https://pastebin.com/uSX0Z8qa
import cv2
import numpy as np
from PIL import Image
import os
# Path for face image database
path = "dataset"
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
# function to get the images and label data
def getImagesAndLabels(path):
    imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
    faceSamples=[]
    ids = []
    for imagePath in imagePaths:
        PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale
        img_numpy = np.array(PIL_img,'uint8')
        id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(img_numpy)
        for (x,y,w,h) in faces:
            faceSamples.append(img_numpy[y:y+h,x:x+w])
            ids.append(id)
    return faceSamples,ids
print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces, ids = getImagesAndLabels(path)
recognizer.train(faces, np.array(ids))
# Save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi
# Prints the number of faces trained and end program
print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
And here is the error I am getting after running it
[INFO] Training faces. It will take a few seconds. Wait ...
Traceback (most recent call last):
  File "c:\Users\Ben\Desktop\Code\OpenCV\trainer\trainer.py", line 34, in <module>
    faces, ids = getImagesAndLabels(path)
  File "c:\Users\Ben\Desktop\Code\OpenCV\trainer\trainer.py", line 25, in getImagesAndLabels
    faces = detector.detectMultiScale(img_numpy)
cv2.error: ... error: (-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
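This !empty() assertion usually means the cascade XML was never loaded: cv2.CascadeClassifier("haarcascade_frontalface_alt.xml") silently creates an empty classifier when the file is not in the directory the script is run from, and detectMultiScale then fails. A small sketch of a more defensive way to load it, assuming the opencv-python package (which ships the cascades under cv2.data.haarcascades):

import os
import cv2

# build an absolute path to the cascade bundled with opencv-python and
# fail early with a clear message if it still cannot be loaded
cascade_path = os.path.join(cv2.data.haarcascades, "haarcascade_frontalface_alt.xml")
detector = cv2.CascadeClassifier(cascade_path)
if detector.empty():
    raise IOError("Could not load cascade file: {}".format(cascade_path))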
import imgaug.augmenters as iaa
import cv2
import glob
from tkinter import Frame
from tkinter import Text
from tkinter import Label
# 1. Load Dataset
images = []
images_path = glob.glob("images/*.jpg")
for img_path in images_path:
    img = cv2.imread(img_path)
    images.append(img)
# 2. Image Augmentation
augmentation = iaa.Sequential([
    # 1. Flip
    iaa.Fliplr(0.5),
    iaa.Flipud(0.5),
    # 2. Affine
    iaa.Affine(translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
               rotate=(-30, 30),
               scale=(0.5, 1.5)),
    # 3. Multiply
    iaa.Multiply((0.8, 1.2)),
    # 4. Linearcontrast
    iaa.LinearContrast((0.6, 1.4)),
    # Perform methods below only sometimes
    iaa.Sometimes(0.5,
        # 5. GaussianBlur
        iaa.GaussianBlur((0.0, 3.0))
    )
])
# 3. Show Images
counter = 0
while True:
    augmented_images = augmentation(images=images)
    for img in augmented_images:
        counter += 1
        cv2.imwrite(str(counter) + ".jpg", frame)
        cv2.imwrite('Desktop/images/dog.jpg', img) # desired save location
        cv2.waitKey(0)
"Traceback (most recent call last):
File "C:/Users/MC/PycharmProjects/pythonProject/Python_Augmentation.py", line 48, in
cv2.imwrite(str(counter) + ".jpg", frame)
NameError: name 'frame' is not defined"
The image augmentation code ran well, but when I tried to save the augmented images, this error occurred. I also tried a capital 'F' instead of lowercase 'f' (Frame), but then I got another error.
"Traceback (most recent call last):
File "C:/Users/MC/PycharmProjects/pythonProject/Python_Augmentation.py", line 49, in
cv2.imwrite(str(counter) + ".jpg", Frame)
cv2.error: OpenCV(4.5.4-dev) :-1: error: (-5:Bad argument) in function 'imwrite'
Overload resolution failed:
img is not a numpy array, neither a scalar
Expected Ptr<cv::UMat> for argument 'img'"
Any help is appreciated. Thanks in advance.
The import was:
from tkinter import Frame
but on line 48 you are using frame (lowercase 'f'), which is never defined anywhere in the script. When you capitalize it, you pass tkinter's Frame class to cv2.imwrite, which is why the second error says the argument is not a numpy array. The augmented image inside your loop is held in the variable img, so that is what should be written.
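A minimal sketch of the corrected save loop, assuming you simply want every augmented image written to a numbered file (the single pass instead of while True, and dropping the unused tkinter imports, are my choices, not from the original code):

counter = 0
augmented_images = augmentation(images=images)
for img in augmented_images:
    counter += 1
    # write the augmented image itself (img), not the undefined name frame
    cv2.imwrite(str(counter) + ".jpg", img)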
I am trying to use SVG image files to train a model in Python. When I run the code below in a Jupyter notebook, I get the error below. Image.open() also does not work here. Is there a way to get this code working for .svg image files?
from PIL import Image
from svglib.svglib import svg2rlg  # svg2rlg comes from the svglib package
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt

drawing = svg2rlg("/content/America_Online_logo.svg")
base_url = "https://upload.wikimedia.org/wikipedia/commons/0/09/America_Online_logo.svg"
base_image_path = tf.keras.utils.get_file(fname="America_Online_logo.svg", origin=base_url)
Image.open(base_image_path)
a = plt.imread(base_image_path)
from keras.optimizers import gradient_descent_v2
width, height = keras.preprocessing.image.load_img(base_image_path).size
I get the following error:
UnidentifiedImageError                    Traceback (most recent call last)
<ipython-input-39-a31080608fad> in <module>()
    from keras.optimizers import gradient_descent_v2
    width, height = keras.preprocessing.image.load_img(base_image_path).size
    img_nrows = 400
    img_ncols = int(width * img_nrows / height)

2 frames

/usr/local/lib/python3.7/dist-packages/PIL/Image.py in open(fp, mode)
    warnings.warn(message)
    raise UnidentifiedImageError("cannot identify image file %r" % (filename if filename else fp))
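PIL, plt.imread and keras's load_img all fail here because SVG is a vector format that none of them can decode. One possible workaround (my suggestion, not from the original post) is to rasterize the SVG first with svglib and reportlab, which the snippet above already starts to do with svg2rlg, and then load the resulting PNG as a normal bitmap:

from svglib.svglib import svg2rlg
from reportlab.graphics import renderPM
from PIL import Image

# rasterize the SVG to a PNG file, then open the PNG with PIL as usual
drawing = svg2rlg("/content/America_Online_logo.svg")
renderPM.drawToFile(drawing, "/content/America_Online_logo.png", fmt="PNG")
img = Image.open("/content/America_Online_logo.png")
width, height = img.size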
import tensorflow as tf
import os
import numpy as np
import os, glob, cv2
import os.path
import sys, argparse
dir_path= os.path.dirname(os.path.realpath("cat2.jpeg"))
image_path = sys.argv[1]
filename = os.path.join(dir_path, image_path)
image_size = 128
num_channels = 3
images = []
image = cv2.imread(filename)
image = cv2.resize(image, (image_size, image_size), cv2.INTER_LINEAR)
...
error Traceback (most recent call last)
<ipython-input-41-c0a159fd3a64> in <module>()
1 # resizing the image
----> 2 image = cv2.resize(image, (image_size, image_size), cv2.INTER_LINEAR)
3 images.append(image)
4 images = np.array(images, dtype= np.uint8)
5 images = images.astype('float32')
error: /home/travis/miniconda/conda-bld/conda_1485299292920/work/opencv-3.2.0/modules/imgproc/src/imgwarp.cpp:3492: error: (-215) ssize.width > 0 && ssize.height > 0 in function resize
I've looked at other similar posts and tried to debug this error. Can anyone point out how to get rid of it by looking at the code above? Is this some internal problem, or is there an issue with the code?
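For what it's worth, the (-215) ssize.width > 0 && ssize.height > 0 assertion in resize almost always means the image passed to it is empty, i.e. cv2.imread returned None because the filename built from sys.argv[1] does not point to a readable image. A minimal sketch of the checks I would add before resizing (the error messages and the keyword interpolation argument are my additions):

import os
import sys
import cv2

image_size = 128
image_path = sys.argv[1]
dir_path = os.path.dirname(os.path.realpath("cat2.jpeg"))
filename = os.path.join(dir_path, image_path)

# make sure the file exists and could actually be decoded before resizing;
# cv2.imread() silently returns None for a missing or unreadable file,
# which is what triggers the ssize.width > 0 && ssize.height > 0 assertion
if not os.path.isfile(filename):
    sys.exit("File not found: {}".format(filename))
image = cv2.imread(filename)
if image is None:
    sys.exit("OpenCV could not decode the image: {}".format(filename))

# pass the interpolation flag by keyword; the third positional argument
# of cv2.resize is dst, not interpolation
image = cv2.resize(image, (image_size, image_size), interpolation=cv2.INTER_LINEAR)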
from PIL import Image
band2 = Image.open('band2.tif')
band3 = Image.open('band3.tif')
band4 = Image.open('band4.tif')
img = Image.merge("RGB",(band4,band3,band2))
The files band2.tif, band3.tif and band4.tif were downloaded from USGS (https://earthexplorer.usgs.gov/).
They may have some differences compared to a normal .TIF.
The error information is:
/usr/bin/python3.5 /home/lixingang/workspace/20170405/main.py
Traceback (most recent call last):
File "/home/lixingang/workspace/20170405/main.py", line 5, in <module>
img = Image.merge("RGB",(band4,band3,band2))
File "/usr/lib/python3/dist-packages/PIL/Image.py", line 2388, in merge
raise ValueError("mode mismatch")
ValueError: mode mismatch
Process finished with exit code 1
You need to convert each channel into a luminosity channel. So instead of this:
band2 = Image.open('band2.tif')
You need do this:
band2 = Image.open('band2.tif').convert('L')
Do the same for the other channels. When merging, the band order (R, G, B) also has to be considered.
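Putting it together, a minimal sketch of the full fix, keeping the same band-to-channel mapping as in the question (band4 = R, band3 = G, band2 = B); the output filename is just an example:

from PIL import Image

# convert every band to a single-channel luminance image ('L') so that
# all three inputs share the same mode before merging
band2 = Image.open('band2.tif').convert('L')
band3 = Image.open('band3.tif').convert('L')
band4 = Image.open('band4.tif').convert('L')

# merge in R, G, B order: band4 -> R, band3 -> G, band2 -> B
img = Image.merge("RGB", (band4, band3, band2))
img.save('merged_rgb.tif')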