Decode a Data Matrix code from a webcam using Python and the pylibdmtx library

Here is the code where I decode an image, but I don't know how to decode from the webcam.
import numpy as np
import cv2
from pylibdmtx import pylibdmtx

if __name__ == '__main__':
    image = cv2.imread('new.jpg', cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    msg = pylibdmtx.decode(thresh)
    print(msg)
Result
[Decoded(data=b'09903010917072337', rect=Rect(left=1, top=7, width=128, height=122))]

This should work with your camera. Don't forget to import the pylibdmtx library:
import numpy as np
import cv2
from pylibdmtx import pylibdmtx

capture = cv2.VideoCapture(0)
while True:
    ret, frame = capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    msg = pylibdmtx.decode(thresh)
    print(msg)
    cv2.imshow('frame', frame)  # show the feed so waitKey can pick up key presses
    if cv2.waitKey(1) & 0xFF == ord('q'):
        # press Q in order to stop the feed
        break

capture.release()
cv2.destroyAllWindows()
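If you also want visual feedback, here is a minimal sketch along the same lines that overlays the decoded data on the live preview. The timeout argument (in milliseconds) limits how long the decoder spends on each frame; drop it if your pylibdmtx version does not accept it.

import cv2
from pylibdmtx import pylibdmtx

# Sketch: decode Data Matrix codes from the webcam and draw the decoded text
# on the preview window. Press Q to quit.
capture = cv2.VideoCapture(0)
while True:
    ret, frame = capture.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    results = pylibdmtx.decode(gray, timeout=100)  # cap decode time per frame (ms)
    for i, result in enumerate(results):
        text = result.data.decode('utf-8', errors='replace')
        cv2.putText(frame, text, (10, 30 + 30 * i),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    cv2.imshow('Data Matrix', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()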

Related

Detect if an object is big enough in OpenCV

I need some help here, please :)
I have this piece of code with OpenCV:
import cv2
import numpy as np
import time

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    low_red = np.array([100, 100, 100])
    high_red = np.array([225, 225, 225])
    mask = cv2.inRange(frame, low_red, high_red)
    hasColor = np.sum(mask)
    if hasColor > 1000000:
        print(f'Hand opened, infos : {hasColor}')
    elif hasColor > 500000 and hasColor < 1000000:
        print(f'Hand closed, infos : {hasColor}')
    cv2.imshow("Camera", frame)
    cv2.imshow("Mask", mask)
    if cv2.waitKey(1) & 0xFF == ord('e'):
        break
I would like it to detect only objects of a certain size (like a hand) and not the smaller ones.
Thanks for the help :)
EDIT: I made some good progress but still don't know how to get the size:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    low = np.array([100, 100, 100])
    high = np.array([225, 225, 225])
    mask = cv2.inRange(frame, low, high)
    cv2.imshow("Camera", frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    ret, thresh = cv2.threshold(gray, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for i in range(len(contours)):
        x, y, w, h = cv2.boundingRect(contours[i])
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Mask", mask)
    cv2.imshow("Hull", frame)
    if cv2.waitKey(1) & 0xFF == ord('e'):
        break
So I think I have to get the value of the bounding box, but I don't know how.
Problem solved thanks to a kind human.
I just had to add the following check in the for loop:
if h > 150 and w > 150:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
I just had to check the size, thanks for your help!
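If filtering on the bounding box is too coarse, an alternative is to filter contours by their pixel area with cv2.contourArea. Here is a sketch of that approach; the MIN_AREA value of 5000 is just an illustrative threshold you would tune for your camera and distance:

import cv2
import numpy as np

# Sketch: keep only contours whose area exceeds a minimum number of pixels.
MIN_AREA = 5000  # example value, tune for your setup

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv2.contourArea(contour) < MIN_AREA:
            continue  # skip small objects
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Hull", frame)
    if cv2.waitKey(1) & 0xFF == ord('e'):
        break

cap.release()
cv2.destroyAllWindows()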

cv2 issue for face-detection algorithm

I have a face detection program.
I tried to run the code but it's not working.
import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer/trainer.yml")
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
cam = cv2.VideoCapture(0)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)

while True:
    ret, im = cam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
        Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
        if conf < 50:
            if Id == 1:
                Id = "chandra"
            elif Id == 2:
                Id = "vamsi"
        else:
            Id = "Unknown"
        cv2.putText(im, str(Id), (x, y + h), fontFace, fontScale, fontColor)
    cv2.imshow('im', im)
    if cv2.waitKey(10) == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
I got this error:
(-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
I'm using OpenCV (cv2) and Python 3.7.
Maybe this will help:
try:
    ret, im = cam.read()
except:
    continue
Please use the code below to check whether any images are coming from your camera:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
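For what it's worth, the "(-215:Assertion failed) !empty()" error usually means the cascade XML file was not found at the path you gave, so the classifier is empty when detectMultiScale is called. Here is a minimal check, assuming your OpenCV install ships the bundled cascades under cv2.data.haarcascades (adjust the path if you keep your own copy of the file):

import cv2

# Sketch: load the cascade from OpenCV's bundled data directory and verify it
# actually loaded before calling detectMultiScale.
cascadePath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

if faceCascade.empty():
    raise IOError("Could not load cascade file: " + cascadePath)

print("Cascade loaded OK")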

Using an IP webcam for live streaming in OpenCV

When I run this program it uses the front camera of my Android phone, but I want to process video from the back camera. How should I do it?
import cv2
import numpy as np
import urllib

url = 'http://192.168.1.183:8080/shot.jpg'
while True:
    imgResp = urllib.urlopen(url)
    img = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    img1 = cv2.imdecode(img, -1)
    hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
    lower_red = np.array([30, 150, 50])
    upper_red = np.array([255, 255, 180])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(img1, img1, mask=mask)
    cv2.imshow('img', res)
    cv2.waitKey(10)
import cv2
import numpy as np

url = 'http://192.168.1.183:8080/video'
cap = cv2.VideoCapture(url)
while True:
    _, img1 = cap.read()
    hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
    lower_red = np.array([30, 150, 50])
    upper_red = np.array([255, 255, 180])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(img1, img1, mask=mask)
    cv2.imshow('img', res)
    cv2.waitKey(10)
I passed the URL into VideoCapture() and then read the frames. It worked.
Added code:
url = 'http://192.168.1.183:8080/video'
cap = cv2.VideoCapture(url)
(Inside the while loop)
_, img1 = cap.read()
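If you are using the common IP Webcam Android app, the front/back camera choice is made in the app's own settings on the phone; OpenCV just consumes whatever the phone streams. On the OpenCV side it is worth guarding against the stream failing to open or dropping. A small sketch, assuming the same /video endpoint as above:

import cv2

# Sketch: read the phone's video stream and handle dropped frames gracefully.
# Which physical camera is shown is chosen on the phone, not here.
url = 'http://192.168.1.183:8080/video'
cap = cv2.VideoCapture(url)

if not cap.isOpened():
    raise IOError("Could not open stream: " + url)

while True:
    ok, frame = cap.read()
    if not ok:
        print("Lost the stream, stopping")
        break
    cv2.imshow('phone camera', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()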

Adjusting RGB on PIL ImageGrab Module Python

I am trying to make a screencast script with ImageGrab, but I am getting weird blue-tinted images.
import cv2
import numpy as np
from PIL import ImageGrab

out = cv2.VideoWriter('record.avi', 2, 8.0, (1366, 768))
while True:
    img = ImageGrab.grab(bbox=(0, 0, 1366, 768))  # x, y, w, h
    img_np = np.array(img)
    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
    out.write(img_np)
    cv2.imshow("Recorder", frame)
    key = cv2.waitKey(1)
    if key == 27:
        break

cv2.waitKey(0)
cv2.destroyAllWindows()
RGB_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
out.write(RGB_img)
This will work.
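ImageGrab returns frames in RGB channel order while cv2.VideoWriter and cv2.imshow expect BGR, which is why the recording looks blue. Here is a self-contained sketch of the corrected loop; the 1366x768 resolution and the XVID codec are just example values to match the question:

import cv2
import numpy as np
from PIL import ImageGrab

# Sketch: record the screen to an AVI file with correct colors.
width, height = 1366, 768
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('record.avi', fourcc, 8.0, (width, height))

while True:
    img = ImageGrab.grab(bbox=(0, 0, width, height))
    img_np = np.array(img)                           # RGB channel order from PIL
    frame = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)  # BGR for VideoWriter/imshow
    out.write(frame)
    cv2.imshow("Recorder", frame)
    if cv2.waitKey(1) == 27:  # Esc key
        break

out.release()
cv2.destroyAllWindows()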

Finding circles in a live video feed using Python 2.7.11 and OpenCV 3.0.0

I need help finding circles in a live video feed from my webcam. I just need feedback from Python that a circle has or has not been detected. Also, what is the best method for finding the size of a circle in pixels for better detection? My code so far:
import cv2
import numpy as np
import sys

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    cimg = frame.copy()
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 200, 100, 100, 200)
    if circles == 1:
        print('Circle true')
    else:
        print('No circle')
    cv2.imshow('video', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
There it is!
import cv2
import numpy as np
import sys

cap = cv2.VideoCapture(0)
while True:
    gray = cv2.medianBlur(cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY), 5)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10)  # returns [[[x, y, radius], ...]] or None
    if circles is not None:
        print "Circle there!"
    cv2.imshow('video', gray)
    if cv2.waitKey(1) == 27:  # Esc key
        break

cap.release()
cv2.destroyAllWindows()
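For the size question: HoughCircles returns the radius of each detected circle in pixels as the third value of each row, so you can read the size straight off the result. Here is a sketch along those lines; the Hough parameters are illustrative and would need tuning for your scene:

import cv2
import numpy as np

# Sketch: report each detected circle's centre and radius (in pixels) and draw it.
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.medianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), 5)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=100, param2=50, minRadius=10, maxRadius=200)
    if circles is not None:
        for circle in np.around(circles[0]).astype(int):
            x, y, r = int(circle[0]), int(circle[1]), int(circle[2])
            print("Circle at ({}, {}) with radius {} px".format(x, y, r))
            cv2.circle(frame, (x, y), r, (0, 255, 0), 2)
    cv2.imshow('video', frame)
    if cv2.waitKey(1) == 27:  # Esc key
        break

cap.release()
cv2.destroyAllWindows()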
