Using an IP webcam for live streaming in OpenCV - Python

When I run this program it uses the front camera of my Android phone, but I want to do the video processing on the back camera. How should I do it?
import cv2
import numpy as np
import urllib  # Python 2; on Python 3 use urllib.request and urllib.request.urlopen below

url = 'http://192.168.1.183:8080/shot.jpg'

while True:
    # fetch a single JPEG snapshot from the IP webcam and decode it
    imgResp = urllib.urlopen(url)
    img = np.array(bytearray(imgResp.read()), dtype=np.uint8)
    img1 = cv2.imdecode(img, -1)

    # keep only the pixels inside the HSV colour range
    hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
    lower_red = np.array([30, 150, 50])
    upper_red = np.array([255, 255, 180])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(img1, img1, mask=mask)

    cv2.imshow('img', res)
    cv2.waitKey(10)

import cv2
import numpy as np

url = 'http://192.168.1.183:8080/video'
cap = cv2.VideoCapture(url)

while True:
    # read a frame directly from the MJPEG video stream
    _, img1 = cap.read()

    hsv = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
    lower_red = np.array([30, 150, 50])
    upper_red = np.array([255, 255, 180])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(img1, img1, mask=mask)

    cv2.imshow('img', res)
    cv2.waitKey(10)
I passed the URL into VideoCapture() and then read the frames from it. It worked.
Added code:
url = 'http://192.168.1.183:8080/video'
cap = cv2.VideoCapture(url)
(Inside while loop)
_, img1 = cap.read()
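If the stream occasionally drops, cap.read() can return False and a None frame, and cvtColor will then raise. Here is a minimal defensive sketch of the same loop; the reconnect behaviour is my assumption, not part of the original answer (the URL is the one from the question):

import cv2

url = 'http://192.168.1.183:8080/video'
cap = cv2.VideoCapture(url)

while True:
    ok, frame = cap.read()
    if not ok or frame is None:
        # stream hiccup: reopen the capture instead of crashing in cvtColor
        cap.release()
        cap = cv2.VideoCapture(url)
        continue

    cv2.imshow('frame', frame)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()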

Related

Color enhancement using PIL: How to compute color value?

I am using these functions to modify the brightness, contrast, and sharpness of an image. My default values are 128, 24, and 4 respectively, and I extracted those values from the image. I don't know how to extract the value of the color so that I can modify it in the same way.
import numpy as np
import cv2
import matplotlib.pyplot as plt
from PIL import Image, ImageEnhance

def brightness_enhancer(img, br):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img)
    enhancer = ImageEnhance.Brightness(img)
    factor = 128 / br
    return enhancer.enhance(factor)

def contrast_enhancer(img, ct):
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #img = Image.fromarray(img)
    enhancer = ImageEnhance.Contrast(img)
    factor = 24 / ct
    return enhancer.enhance(factor)

def sharpness_enhancer(img, sh):
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #img = Image.fromarray(img)
    enhancer = ImageEnhance.Sharpness(img)
    factor = 4 / sh
    return enhancer.enhance(factor)

def color_enhancer(img):
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #img = Image.fromarray(img)
    enhancer = ImageEnhance.Color(img)
    factor = 0.5  # enhancement of the image
    return enhancer.enhance(factor)
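The brightness, contrast, and sharpness factors above are each computed as a target value divided by a value measured from the image. One hedged way to get an analogous measurement for color is the mean saturation of the image in HSV, which ImageEnhance.Color effectively scales. This is an assumption about what the "color value" should be, not something stated in the question; the snippet reuses the imports from the code above and the helper names are hypothetical:

def measure_color(img_bgr):
    # mean saturation in HSV as a rough "colorfulness" value (0-255)
    hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
    return hsv[:, :, 1].mean()

def color_enhancer2(img, cl, target=128):
    # hypothetical variant of color_enhancer: scale toward a target saturation,
    # mirroring how brightness_enhancer uses factor = 128 / br
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img)
    enhancer = ImageEnhance.Color(img)
    factor = target / cl
    return enhancer.enhance(factor)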

Unable to count objects in an image using OpenCV Python

I used the code below to count the cigarettes in the image below using OpenCV in Python, but it didn't work: it only finds a few of them. I don't know what the issue is. Please help me.
import numpy as np
import cv2
from PIL import Image
import sys

# raw strings so '\t' and '\b' in the Windows paths are not treated as escapes
Path = r'D:\Artificial intelligence\Phyton'
filename = 'Test.png'
img = cv2.imread(r'D:\Artificial intelligence\Phyton\Test.png')
img1 = cv2.imread(r'D:\Artificial intelligence\Phyton\Test.png')

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY)
img[thresh == 255] = 0
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
erosion = cv2.erode(img, kernel, iterations=1)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\Test112.png', erosion)

def findcircles(img, contours):
    minArea = 300
    minCircleRatio = 0.5
    for contour in contours:
        (x, y), radius = cv2.minEnclosingCircle(contour)
        center = (int(x), int(y))
        radius = int(radius)
        if radius > 5:
            continue
        cv2.circle(img1, center, 1, (191, 255, 0), 2)
    cv2.imwrite(r'D:\Artificial intelligence\Phyton\Test11234.png', img1)

img = cv2.imread(r'D:\Artificial intelligence\Phyton\Test112.png')
cv2.imwrite(r'D:\Artificial intelligence\Phyton\org.png', img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 199, 255, cv2.THRESH_BINARY_INV)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\threshold.png', threshold)
blur = cv2.medianBlur(gray, 7)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\blur.png', blur)
laplacian = cv2.Laplacian(blur, -1, ksize=5, delta=-50)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\laplacian.png', laplacian)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
dilation = cv2.dilate(laplacian, kernel, iterations=1)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\dilation.png', dilation)
result = cv2.subtract(threshold, dilation)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\result.png', result)
contours, hierarchy = cv2.findContours(result, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
findcircles(gray, contours)
Image: (screenshot attached in the original post)
My result: (screenshot attached in the original post)
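One hedged alternative for counting round, tightly packed objects such as cigarette tips is cv2.HoughCircles on a blurred grayscale image. This is a sketch rather than a definitive fix; the radius and threshold parameters are illustrative guesses that would need tuning on the actual picture, and the file paths are the ones from the question:

import cv2
import numpy as np

img = cv2.imread(r'D:\Artificial intelligence\Phyton\Test.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray, 5)

# HOUGH_GRADIENT looks for circular edges; minDist stops the same tip
# from being counted twice. All numeric parameters are rough guesses.
circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, dp=1, minDist=15,
                           param1=100, param2=20, minRadius=5, maxRadius=20)

count = 0
if circles is not None:
    circles = np.uint16(np.around(circles))
    for x, y, r in circles[0]:
        cv2.circle(img, (x, y), r, (0, 255, 0), 2)
        count += 1

print('detected circles:', count)
cv2.imwrite(r'D:\Artificial intelligence\Phyton\hough_result.png', img)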

Read text below barcode pytesseract python

I am trying to get the number below a barcode in an image. I have tried the same code with some other images and it works fine, but not for this one.
Here's the image.
And here is the code so far:
import cv2
import re
import pytesseract

def readNumber():
    image = cv2.imread(sTemp)  # sTemp holds the image path elsewhere in the script
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (3, 3), 0)
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
    invert = 255 - opening
    data = pytesseract.image_to_string(invert, lang='eng', config='--psm 6 -c tessedit_char_whitelist=0123456789')
    print(data)
    try:
        data = re.findall(r'(\d{9})\D', data)[0]
    except:
        data = ''
    return data
And I call it with this line:
readNumber()
Here's another example
This is the last example, I promise.
I tried this with the third example and it works
img = cv2.imread("thisimage.png")
blur = cv2.GaussianBlur(img, (3,3), 0)
#gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
txt = pytesseract.image_to_string(blur)
print(txt)
But how do I adapt the code so that it works for all three cases?
I tried the code below but couldn't get the third case to work:
import pytesseract, cv2, re

def readNumber(img):
    img = cv2.imread(img)
    gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    try:
        txt = pytesseract.image_to_string(gry)
        #txt = re.findall('(\d{9})\D', txt)[0]
    except:
        thr = cv2.adaptiveThreshold(gry, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 51, 4)
        txt = pytesseract.image_to_string(thr, config="digits")
        #txt = re.findall('(\d{9})\D', txt)[0]
    return txt

# M5Pr5 191876320
# RWgrP 202131290
# 6pVH4 193832560
print(readNumber('M5Pr5.png'))
You don't need any preprocessing or special configuration for this input image, since there are no artifacts in it.
import cv2
import pytesseract
img = cv2.imread("RWgrP.png")
gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
txt = pytesseract.image_to_string(gry)
print(txt)
Result:
202131290
My pytesseract version is 4.1.1
Update-1
The second image requires preprocessing. If you apply adaptive thresholding, the output also contains unwanted characters. Therefore, if you set the configuration to digits, the result will be:
193832560
Update-2
For the third image, you need to change the adaptive method; using ADAPTIVE_THRESH_MEAN_C will result in:
191876320
The rest is the same.
Code:
import cv2
import pytesseract
img = cv2.imread("6pVH4.png")
gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thr = cv2.adaptiveThreshold(gry, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 51, 4)
txt = pytesseract.image_to_string(thr, config="digits")
print(txt)
cv2.imshow("thr", thr)
cv2.waitKey(0)
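To tie the three cases together, here is a hedged sketch of a single readNumber() that tries plain grayscale first and then falls back to the two adaptive-threshold variants from the answer. The nine-digit regex and the filenames come from the question; the fallback order and the use of the digits config on the thresholded images are assumptions, not something the answer specifies:

import re
import cv2
import pytesseract

def readNumber(path):
    img = cv2.imread(path)
    gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # (preprocessed image, tesseract config) pairs, tried in order:
    # plain grayscale first, then the two adaptive-threshold variants
    attempts = [
        (gry, ''),
        (cv2.adaptiveThreshold(gry, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 51, 4), 'digits'),
        (cv2.adaptiveThreshold(gry, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                               cv2.THRESH_BINARY, 51, 4), 'digits'),
    ]

    for image, cfg in attempts:
        txt = pytesseract.image_to_string(image, config=cfg)
        match = re.findall(r'(\d{9})', txt)
        if match:
            return match[0]
    return ''

# expected per the question: 191876320, 202131290, 193832560
for name in ('M5Pr5.png', 'RWgrP.png', '6pVH4.png'):
    print(name, readNumber(name))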

Decode DM code by webcam using python and pylibdmtx library

Here is the code where I decode an image, but I don't know how to decode from the webcam.
import numpy as np
import cv2
from pylibdmtx import pylibdmtx

if __name__ == '__main__':
    image = cv2.imread('new.jpg', cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    msg = pylibdmtx.decode(thresh)
    print(msg)
Result
[Decoded(data=b'09903010917072337', rect=Rect(left=1, top=7, width=128, height=122))]
This should work with your camera. Don't forget to import the pylibdmtx library as well.
import numpy as np
import cv2
from pylibdmtx import pylibdmtx  # needed for pylibdmtx.decode below

capture = cv2.VideoCapture(0)
while True:
    ret, frame = capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    msg = pylibdmtx.decode(thresh)
    print(msg)
    # a window is needed so that waitKey actually receives key presses
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        # press Q in order to stop the feed
        break
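One practical refinement, which is my assumption rather than part of the original answer: pylibdmtx.decode can take a long time on frames that contain no Data Matrix code, and the decoded payload arrives as bytes. A hedged sketch that limits decode time per frame and prints the decoded text only when something was found (pylibdmtx's timeout argument is in milliseconds; 100 ms is an arbitrary choice, and you can drop the argument if your version does not accept it):

import cv2
from pylibdmtx import pylibdmtx

capture = cv2.VideoCapture(0)
while True:
    ret, frame = capture.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # cap the time spent searching each frame for a code
    results = pylibdmtx.decode(thresh, timeout=100)
    for r in results:
        print(r.data.decode('utf-8'), r.rect)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()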

Reading a video file with VideoCapture

Why is it that if I read a video file using cv2.VideoCapture('video.avi') the FPS is very low? I want the FPS to be the same as when I play the video in a video player (30 fps). What changes should I make to achieve this? By the way, I am using a Raspberry Pi 3 with Python.
import cv2
import numpy as np

cap = cv2.VideoCapture('Fchecking.avi')
kernel = np.ones((5, 5), np.uint8)

while True:
    # Take each frame
    ret, frame = cap.read()
    if not ret:
        break  # end of file: cvtColor would fail on a None frame

    img2gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 140, 255, cv2.THRESH_BINARY)
    w3w = cv2.adaptiveThreshold(mask, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 47, 2)
    cv2.imshow("mask", mask)
    mask_inv = cv2.bitwise_not(w3w)
    img2_fg = cv2.bitwise_and(frame, frame, mask=mask_inv)
    hsv = cv2.cvtColor(img2_fg, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0, 58, 130])
    upper_red = np.array([255, 255, 255])
    erosion = cv2.erode(mask, kernel, iterations=3)
    rmask = cv2.inRange(hsv, lower_red, upper_red)
    mask2 = cv2.morphologyEx(rmask, cv2.MORPH_CLOSE, kernel)
    final = cv2.bitwise_and(frame, frame, mask=mask2)
    cv2.imshow('final', final)
    cv2.imshow('original', frame)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()
Why is it that if I read a video file using cv2.VideoCapture('video.avi') the FPS is very low?
The answer to this question is the large chunk of code that you have between reading the frame and showing it on the screen. When you watch the video in a standard video player, it doesn't do any of this processing.
img2gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
ret,mask = cv2.threshold(img2gray,140,255,cv2.THRESH_BINARY)
w3w = cv2.adaptiveThreshold(mask,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,47,2)
cv2.imshow("mask", mask)
mask_inv = cv2.bitwise_not(w3w)
img2_fg = cv2.bitwise_and(frame, frame, mask=mask_inv)
hsv = cv2.cvtColor(img2_fg, cv2.COLOR_BGR2HSV)
lower_red= np.array([0,58,130])
upper_red = np.array([255,255,255])
erosion = cv2.erode(mask,kernel,iterations = 3)
rmask = cv2.inRange(hsv, lower_red, upper_red)
mask2 = cv2.morphologyEx(rmask, cv2.MORPH_CLOSE, kernel)
final = cv2.bitwise_and(frame, frame, mask=mask2)
What changes should I make to achieve this?
It's likely that:
If you reduce the processing, it will speed up.
If you increase the processing power, it will speed up.
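As a diagnostic, you can time the work between cap.read() and imshow and compare it against the roughly 33 ms per frame that 30 fps playback allows. This is a hedged sketch, not something from the original answer; the grayscale conversion stands in for the heavy processing from the question:

import time
import cv2

cap = cv2.VideoCapture('Fchecking.avi')
while True:
    ret, frame = cap.read()
    if not ret:
        break

    start = time.time()
    # ... the heavy processing from the question would go here ...
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # stand-in for the real work
    elapsed_ms = (time.time() - start) * 1000.0

    # at 30 fps the budget is about 33 ms per frame
    print('processing took %.1f ms' % elapsed_ms)

    cv2.imshow('original', frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()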
