AttributeError: 'tuple' object has no attribute 'write', instance segmentation in Python

I have used the code from this blog post: https://learnopencv.com/deep-learning-based-object-detection-and-instance-segmentation-using-mask-r-cnn-in-opencv-python-c/
titled "Deep learning based Object Detection and Instance Segmentation using Mask R-CNN in OpenCV" (Python). I am using a live stream and want to do object detection and instance segmentation on it. I modified the code below; the rest is the same as explained in the blog.
input_path = 'rtsp://...'
cap = cv.VideoCapture(input_path)
print(cap.isOpened())

# We need to set resolutions,
# so convert them from float to integer.
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
size = (frame_width, frame_height)

# cv2.VideoWriter(filename, fourcc, fps, frameSize)
result = cv.VideoWriter('sample.avi',
                        cv.VideoWriter_fourcc(*'MJPG'),
                        22, size), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

while True:
    ret, frame = cap.read()
    if ret:
        # You can do processing on this frame variable
        blob = cv.dnn.blobFromImage(frame, swapRB=True, crop=False)
        # Set the input to the network
        net.setInput(blob)
        # Run the forward pass to get output from the output layers
        boxes, masks = net.forward(['detection_out_final', 'detection_masks'])
        # Extract the bounding box and mask for each of the detected objects
        postprocess(boxes, masks)
        # Put efficiency information.
        t, _ = net.getPerfProfile()
        label = 'Mask-RCNN : Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
        cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
        result.write(frame.astype(np.uint8))
        cv.imshow("winName", frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

result.release()
cap.release()
cv.destroyAllWindows()
I am getting the error below while running this:
AttributeError                            Traceback (most recent call last)
<ipython-input-10-9712242a2634> in <module>
     36         cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
     37
---> 38         result.write(frame)
     39
     40         cv.imshow("winName", frame)

AttributeError: 'tuple' object has no attribute 'write'
How do I correct this error?

result is a tuple of length 2, whereas it should be a single cv.VideoWriter. You can change line 38 to:
result[0].write(frame.astype(np.uint8))
The value round(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) does not seem to do anything, so you can remove it by replacing lines 12-14 with:
result = cv.VideoWriter('sample.avi',
                        cv.VideoWriter_fourcc(*'MJPG'),
                        22, size)

In this line you are creating a tuple:
result = cv.VideoWriter('sample.avi', cv.VideoWriter_fourcc(*'MJPG'), 22, size), round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
Instead, simply do:
result = cv.VideoWriter('sample.avi', cv.VideoWriter_fourcc(*'MJPG'), 22, size)
variableX = round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
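As a sanity check (a minimal sketch; the filename, codec, and FPS are taken from the question), you can confirm that result really is a writer before entering the loop:
result = cv.VideoWriter('sample.avi',
                        cv.VideoWriter_fourcc(*'MJPG'),
                        22, size)
# A cv.VideoWriter exposes isOpened(); a tuple would raise AttributeError here too.
if not result.isOpened():
    raise RuntimeError('VideoWriter failed to open')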

Related

Can't convert object of type 'function' to 'str' for 'text'

Good day, everyone. I'm new to Python programming, so sorry if this is basic. Can someone help me? I want to display the data that PySerial reads from my Arduino temperature sensor, but I don't know how.
Here is the code that reads the temperature over PySerial:
def tempe():
    import serial
    import time
    ser = serial.Serial('COM5', 9600)
    time.sleep(2)
    data = []                  # empty list to store the data
    for i in range(50):
        b = ser.readline()     # read a byte string
        string = b.rstrip()    # remove \n and \r
        temp = string          # <= this data here I want to show in my OpenCV window
        data.append(string)    # add to the end of data list
        time.sleep(0.1)        # wait (sleep) 0.1 seconds
    ser.close()
And here's the whole code where I want to show it via putText in OpenCV:
def offrecog():
    screen2.destroy()  # <= don't mind this
    screen.destroy()   # <= don't mind this
    def recog2(img, classifier, scaleFactor, miNeighbors, color, text, clf):
        image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        features = classifier.detectMultiScale(image, scaleFactor, miNeighbors)
        for (x, y, w, h) in features:
            cv2.rectangle(img, (x, y), (x+w, y+h), color, 2)
            id, pred = clf.predict(image[y:y+h, x:x+w])
            confidence = int(100*(1 - pred/300))
            databases = mysql.connector.connect(
                host="localhost",
                user="userdata",
                password="",
                database="facerecog"
            )
            mycursor = databases.cursor()
            mycursor.execute("SELECT names FROM record WHERE ids= " + str(id))
            datas = mycursor.fetchone()
            datas = "+".join(datas)
            cursor2 = databases.cursor()
            cursor2.execute("SELECT course_year FROM record WHERE ids= " + str(id))
            datas1 = mycursor.fetchone()
            datas1 = "+".join(datas1)
            cursor3 = databases.cursor()
            cursor3.execute("SELECT positions FROM record WHERE ids= " + str(id))
            datas2 = mycursor.fetchone()
            datas2 = "+".join(datas2)
            if confidence > 70:
                cv2.putText(img, datas, (x, y+205), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, cv2.LINE_AA)
                cv2.putText(img, datas1, (x, y+230), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, cv2.LINE_AA)
                cv2.putText(img, datas2, (x, y+250), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, cv2.LINE_AA)
                cv2.putText(img, tempe, (x, y+280), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, cv2.LINE_AA)
                markattend(datas, datas1, datas2)
            else:
                cv2.putText(img, "UNKNOWN", (x, y+205), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
        return img
    faceCascade = cv2.CascadeClassifier("C:\\Users\\So_Low\\Desktop\\final_recog\\haarcascade_frontalface_default.xml")
    clf = cv2.face.LBPHFaceRecognizer_create()
    clf.read("trained.xml")
    video_capture = cv2.VideoCapture(0)
    while True:
        ret, img = video_capture.read()
        img = recog2(img, faceCascade, 1.3, 4, (255, 255, 255), "Face", clf)
        cv2.imshow("FACE RECOGNITION", img)
        if cv2.waitKey(1) & 0xFF == ord('!'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
    screen2.destroy()
and I got this error when I run it:
File "c:\Users\So_Low\Desktop\Offrecog\offrecog.py", line 97, in recog2
cv2.putText(img, wew, (x,y+280), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, cv2.LINE_AA)
cv2.error: OpenCV(4.5.3) :-1: error: (-5:Bad argument) in function 'putText'
> Overload resolution failed:
> - Can't convert object of type 'function' to 'str' for 'text'
> - Can't convert object of type 'function' to 'str' for 'text'
[ WARN:0] global C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-u4kjpz2z\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
[Done] exited with code=0 in 14.22 seconds
[Running] python -u "c:\Users\So_Low\Desktop\Offrecog\offrecog.py"
Exception in Tkinter callback
Traceback (most recent call last):
File "I:\Python\lib\tkinter\__init__.py", line 1883, in __call__
return self.func(*args)
File "c:\Users\So_Low\Desktop\Offrecog\offrecog.py", line 136, in login_verify
offrecog()
File "c:\Users\So_Low\Desktop\Offrecog\offrecog.py", line 113, in offrecog
img = recog2(img, faceCascade, 1.3, 4, (255,255,255), "Face", clf)
File "c:\Users\So_Low\Desktop\Offrecog\offrecog.py", line 95, in recog2
cv2.putText(img, temps, (x,y+280), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, cv2.LINE_AA)
cv2.error: OpenCV(4.5.3) :-1: error: (-5:Bad argument) in function 'putText'
> Overload resolution failed:
> - Can't convert object of type 'module' to 'str' for 'text'
> - Can't convert object of type 'module' to 'str' for 'text'
Even if I don't put the temperature code into a function, it runs the PySerial part first, before the OpenCV part.
Please help, I don't know what to do.
Presumably you want to sample your temperature 50 times and then return a single value?
def get_temp(ser, num_samples=50):
    float_vals = [float(ser.readline()) for _ in range(num_samples)]
    avg_val = sum(float_vals) / len(float_vals)
    return str(avg_val)  # convert to string for OpenCV
Then in your OpenCV call, use get_temp(ser) instead of tempe, where ser is a serial instance that's already open.
If taking 50 samples is too slow, you can always take fewer samples, e.g. get_temp(ser, 5) to take only 5 samples. If you want the median instead of the mean, use numpy.median rather than computing it by hand (for the mode you would need scipy.stats.mode, since NumPy has no mode function); numpy.mean is also likely faster than calculating the average manually.
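For concreteness, a minimal self-contained sketch of the call site (the COM5 port and 9600 baud rate come from the question; the blank frame is only a stand-in for the camera image):
import cv2
import numpy as np
import serial

def get_temp(ser, num_samples=5):
    vals = [float(ser.readline()) for _ in range(num_samples)]
    return str(sum(vals) / len(vals))

ser = serial.Serial('COM5', 9600)        # open once, outside the frame loop
img = np.zeros((480, 640, 3), np.uint8)  # stand-in frame, just for demonstration

temp_text = get_temp(ser)                # a str, which is what putText expects
cv2.putText(img, temp_text, (50, 280), cv2.FONT_HERSHEY_SIMPLEX,
            0.8, (255, 255, 255), 2, cv2.LINE_AA)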
Your function tempe doesn't seem to be returning anything.
Why don't you try adding return data at the end of the function (after ser.close()) ?
def tempe():
    import serial
    import time
    ser = serial.Serial('COM5', 9600)
    time.sleep(2)
    data = []                  # empty list to store the data
    for i in range(50):
        b = ser.readline()     # read a byte string
        string = b.rstrip()    # remove \n and \r
        temp = string          # <= this data here I want to show in my OpenCV window
        data.append(string)    # add to the end of data list
        time.sleep(0.1)        # wait (sleep) 0.1 seconds
    ser.close()
    return data                # <= try adding this
Note that you can't pass this list directly to putText, since cv2 only accepts strings for the text argument. You can iterate over the list with for text in tempe(): and do whatever you want with each value; it depends on your specific use case.
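A small follow-up sketch, assuming tempe() now returns the list of byte strings built above (inside recog2, where img, x, y, and color are already in scope):
readings = tempe()                     # list of byte strings, e.g. [b'23.5', b'23.6', ...]
latest = readings[-1].decode('ascii')  # putText needs a str, so decode the bytes
cv2.putText(img, latest, (x, y+280), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2, cv2.LINE_AA)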

OpenCV imread error while trying it in real-time

Code:
import face_recognition as fr
import os
import cv2
import face_recognition
import numpy as np
from time import sleep

def get_encoded_faces():
    encoded = {}
    for dirpath, dnames, fname in os.walk("./faces"):
        for f in fname:
            if f.endswith(".jpg") or f.endswith(".png"):
                face = fr.load_image_file("faces/" + f)
                encoding = fr.face_encodings(face)[0]
                encoded[f.split(".")[0]] = encoding
    return encoded, fname

def unknown_image_encoded(img):
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]
    return encoding

def classify_face(im):
    faces, fname = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())
    img = cv2.imread(im, 1)
    face_locations = face_recognition.face_locations(img)
    unknown_face_encodings = face_recognition.face_encodings(img, face_locations)
    face_names = []
    for face_encoding in unknown_face_encodings:
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
        cv2.rectangle(img, (left-20, bottom-15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(img, name, (left-20, bottom+15), font, 1.0, (255, 255, 255), 2)
    return face_names, fname

cap = cv2.VideoCapture(0)
while True:
    ret, image = cap.read()
    recog, fname = classify_face(image)
    print(recog)
    cv2.imshow(fname, image)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
video.release()
cv2.destroyAllWindows()
Error:
Traceback (most recent call last):
File "face.py", line 70, in <module>
recog, fname = classify_face(image)
File "face.py", line 37, in classify_face
img = cv2.imread(im, 1)
SystemError: <built-in function imread> returned NULL without setting an error
[ WARN:0] global C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-wbmte9m7\opencv\modules\videoio\src\cap_msmf.cpp (435) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
The code works properly with an image, but when I try to use it with video/real-time it throws this error.
I guess imread requires a path instead of the image that is passed to it; is there any other workaround?
I am trying to recognize faces in real time, and the major issue was detecting unknown faces, so when I started coding for real time I got this error.
The code and the error message don't agree. Are you running an older version of the code?
Error message:
File "face.py", line 37, in classify_face
    img = cv2.imread(im, 1)
Code:
img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
For debugging, it may be helpful to display the frame received from the camera with code like the following:
ret, image = cap.read()
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', grey)
cv2.waitKey()
cv2.imread(im, 1) requires im to be the filename (datatype: string) of the image that you want to read.
Since you use cap = cv2.VideoCapture(0), you don't need to read images from files anymore: the image that you want to classify is returned as an array from cap.read().
To fix your code for use with cv2.VideoCapture, remove img = cv2.imread(im, 1) from your classify_face method and change the method definition to
def classify_face(img):
instead of
def classify_face(im):
Note that the 0 argument of cv2.VideoCapture refers to reading the live video stream from the camera with index 0.
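Put together, the top of the fixed function would look roughly like this (a sketch of the answer's suggestion; the rest of the body stays as in the question):
def classify_face(img):
    # img is already a BGR array returned by cap.read(), so no imread call is needed
    faces, fname = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())
    face_locations = face_recognition.face_locations(img)
    # ... rest of the body unchanged ...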

How to explicitly access the MJPEG backend for VideoCapture in OpenCV

When I execute the following:
availableBackends = [cv2.videoio_registry.getBackendName(b) for b in cv2.videoio_registry.getBackends()]
print(availableBackends)
I get ['FFMPEG', 'GSTREAMER', 'INTEL_MFX', 'V4L2', 'CV_IMAGES', 'CV_MJPEG'].
If I now try:
print(cv2.CAP_FFMPEG)
print(cv2.CAP_GSTREAMER)
print(cv2.CAP_INTEL_MFX)
print(cv2.CAP_V4L2)
print(cv2.CAP_IMAGES)
print(cv2.CAP_MJPEG)
all work except the last one:
AttributeError: module 'cv2.cv2' has no attribute 'CAP_MJPEG'
How can I explicitly set cv2.CAP_MJPEG backend (cv2.CAP_CV_MJPEG does not work either)?
You can see all the flags here.
It looks like cv2.CAP_OPENCV_MJPEG is what you are looking for.
The following test creates a synthetic MJPEG AVI video file, and then reads it back using the cv2.CAP_OPENCV_MJPEG backend:
import numpy as np
import cv2

#availableBackends = [cv2.videoio_registry.getBackendName(b) for b in cv2.videoio_registry.getBackends()]
#print(availableBackends)

print('cv2.CAP_OPENCV_MJPEG = ' + str(cv2.CAP_OPENCV_MJPEG))

input_filename = 'vid.avi'

# Generate a synthetic video file to be used as input:
###############################################################################
width, height, n_frames = 640, 480, 100  # 100 frames, resolution 640x480

# Use Motion JPEG codec (for testing)
synthetic_out = cv2.VideoWriter(input_filename, cv2.VideoWriter_fourcc(*'MJPG'), 25, (width, height))

for i in range(n_frames):
    img = np.full((height, width, 3), 60, np.uint8)
    cv2.putText(img, str(i+1), (width//2-100*len(str(i+1)), height//2+100), cv2.FONT_HERSHEY_DUPLEX, 10, (30, 255, 30), 20)  # Green number
    synthetic_out.write(img)

synthetic_out.release()
###############################################################################

# Read the video using the CAP_OPENCV_MJPEG backend
###############################################################################
cap = cv2.VideoCapture(input_filename, cv2.CAP_OPENCV_MJPEG)

# Keep iterating until read() fails
while True:
    ret, frame = cap.read()  # Read a frame from the video
    if ret:
        cv2.imshow('frame', frame)  # Display frame for testing
        cv2.waitKey(100)  # Wait 100 msec (for debugging)
    else:
        break

cap.release()  # Release resources
###############################################################################

error: C:\projects\opencv-python\opencv\modules\imgproc\src\color.cpp:11111: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor

I have followed a tutorial on creating a training set to recognize a face.
Here is the code:
import cv2
import numpy as np

# Load HAAR face classifier
face_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')

# Load functions
def face_extractor(img):
    # Function detects faces and returns the cropped face
    # If no face detected, it returns the input image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if faces is ():
        return None
    # Crop all faces found
    for (x, y, w, h) in faces:
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face

# Initialize Webcam
cap = cv2.VideoCapture(0)
count = 0

# Collect 100 samples of your face from webcam input
while True:
    ret, frame = cap.read()
    if face_extractor(frame) is not None:
        count += 1
        face = cv2.resize(face_extractor(frame), (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Save file in specified directory with unique name
        file_name_path = r'C:\Users\madhumani\path\\' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)
        # Put count on images and display live count
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)
    else:
        print("Face not found")
        pass
    if cv2.waitKey(1) == 13 or count == 100:  # 13 is the Enter Key
        break

cap.release()
cv2.destroyAllWindows()
print("Collecting Samples Complete")
Whenever I try to run this code in my Jupyter notebook I get this error:
error: C:\projects\opencv-python\opencv\modules\imgproc\src\color.cpp:11111: error: (-215) scn == 3 || scn == 4 in function cv::cvtColor
I tried to figure out the issue by searching on Stack Overflow and other sites, but even after searching I couldn't understand the reason behind it.
Here is the complete traceback of the error:
error                                     Traceback (most recent call last)
<ipython-input-13-6aa561124bc3> in <module>()
     30
     31     ret, frame = cap.read()
---> 32     if face_extractor(frame) is not None:
     33         count += 1
     34         face = cv2.resize(face_extractor(frame), (200, 200))

<ipython-input-13-6aa561124bc3> in face_extractor(img)
     11
     12     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
---> 13     faces = face_classifier.detectMultiScale(gray, 1.3, 5)
     14
     15     if faces is ():
I have tried adding the v2.4.13 DLL files to the Python root directory, as well as adding them to the environment variables, but it's still of no use.
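No answer is recorded for this question here, but the (-215) assertion in cvtColor means its input is not a 3- or 4-channel image, which commonly happens when cap.read() fails and frame is None. A hypothetical guard (not from the original post) makes that easy to check:
ret, frame = cap.read()
if not ret or frame is None:
    print("Camera frame could not be read")  # capture failed; cvtColor would raise on None
else:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)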

OpenCV tracker: The model is not initialized in function init

On the first frame of a video, I'm running an object detector that returns the bounding box of an object like this:
<type 'tuple'>: ((786, 1225), (726, 1217), (721, 1278), (782, 1288))
I want to pass this bounding box as the initial bounding box to the tracker. However, I get the following error:
OpenCV Error: Backtrace (The model is not initialized) in init, file /Users/jenkins/miniconda/1/x64/conda-bld/conda_1486588158526/work/opencv-3.1.0/build/opencv_contrib/modules/tracking/src/tracker.cpp, line 81
Traceback (most recent call last):
File "/Users/mw/Documents/Code/motion_tracking/motion_tracking.py", line 49, in <module>
tracker.init(frame, bbox)
cv2.error: /Users/jenkins/miniconda/1/x64/conda-bld/conda_1486588158526/work/opencv-3.1.0/build/opencv_contrib/modules/tracking/src/tracker.cpp:81: error: (-1) The model is not initialized in function init
The frame shape is 1080 x 1920 and the values I'm passing into tracker look like this:
I'm not sure whether the order in which I'm sending the bounding box is wrong, or whether I'm doing something else wrong.
tracker = cv2.Tracker_create("MIL")
init_once = False
while True:
(grabbed, frame) = camera.read()
if not grabbed:
break
symbols = scan(frame)
for symbol in symbols:
if not init_once:
bbox = (float(symbol.location[0][0]), float(symbol.location[0][1]), float(symbol.location[2][0]), float(symbol.location[2][1]))
tracker.init(frame, bbox)
init_once = True
break
# draw_symbol(symbol, frame)
_, newbox = tracker.update(frame)
if _:
top_left = (int(newbox[0]), int(newbox[1]))
bottom_right = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
cv2.rectangle(frame, top_left, bottom_right, (200, 0, 0))
cv2.imshow("asd", frame)
out.write(frame)
out.release()
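No answer is recorded here either, but note that tracker.init expects an axis-aligned (x, y, width, height) rectangle, while the detector above returns four corner points. A hypothetical conversion (an assumption about the intended fix, not from the original post) could look like:
# corners is the 4-point tuple from scan(), e.g. ((786, 1225), (726, 1217), (721, 1278), (782, 1288))
xs = [p[0] for p in corners]
ys = [p[1] for p in corners]
bbox = (float(min(xs)), float(min(ys)),
        float(max(xs) - min(xs)), float(max(ys) - min(ys)))  # (x, y, w, h)
tracker.init(frame, bbox)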
