Adjusting RGB on PIL ImageGrab Module Python

I am trying to make a screencast script with ImageGrab, but I am getting weird blue-tinted images.
import cv2
import numpy as np
from PIL import ImageGrab

out = cv2.VideoWriter('record.avi', 2, 8.0, (1366, 768))
while True:
    img = ImageGrab.grab(bbox=(0, 0, 1366, 768))  # left, top, right, bottom
    img_np = np.array(img)
    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
    out.write(img_np)
    cv2.imshow("Recorder", frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
cv2.waitKey(0)
cv2.destroyAllWindows()

ImageGrab returns an RGB image, while OpenCV treats frames as BGR, so swap the channels before writing:
RGB_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
out.write(RGB_img)
This will work.
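For reference, here is the same loop with the conversion applied before writing, as a minimal sketch using the variable names from the question (cv2.COLOR_BGR2RGB simply swaps the R and B channels, so it works in this direction too; out.release() is added so the file gets finalized):

import cv2
import numpy as np
from PIL import ImageGrab

out = cv2.VideoWriter('record.avi', 2, 8.0, (1366, 768))
while True:
    img = ImageGrab.grab(bbox=(0, 0, 1366, 768))     # grabbed as RGB
    img_np = np.array(img)
    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)  # swap channels so OpenCV sees BGR
    out.write(frame)
    cv2.imshow("Recorder", frame)
    if cv2.waitKey(1) == 27:  # Esc
        break
out.release()
cv2.destroyAllWindows()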

Related

Why isn't pytesseract recognizing this image?

It can read other images fine; it just cannot read this one.
from pytesseract import pytesseract
import cv2
import numpy as np
from PIL import Image
import os

path_to_tesseract = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
pytesseract.tesseract_cmd = path_to_tesseract

img = cv2.imread(r'C:\Users\Owner\Desktop\Coding\PNGs\tugteam project\tugteam2.png')
grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(thresh, img) = cv2.threshold(grayImage, 127, 255, cv2.THRESH_BINARY)
img = cv2.bitwise_not(img)
img = cv2.resize(img, (600, 400))

cv2.imshow('asd', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

text = pytesseract.image_to_string(img)
print(text)
I tried resizing the image and converting it to grayscale, both black-on-white and white-on-black, but nothing works.
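One common adjustment, offered here only as a suggestion, is to upscale the grayscale image before thresholding and to pass Tesseract a page segmentation mode; the --psm 7 value below is an assumption that the image contains a single line of text:

import cv2
from pytesseract import pytesseract

pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

img = cv2.imread(r'C:\Users\Owner\Desktop\Coding\PNGs\tugteam project\tugteam2.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Enlarge before thresholding so the glyphs have more pixels to work with
gray = cv2.resize(gray, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

# --psm 7 tells Tesseract to treat the image as a single text line (an assumption here)
text = pytesseract.image_to_string(binary, config='--psm 7')
print(text)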

crop face from video and save as image

I am trying to crop a face every time it appears in a video, but it does not seem to be creating the image file.
I have included an imshow line to check whether an image is being created from the bounding boxes, and a cropped face does show up. Any help would be appreciated.
Code:
import numpy as np
import cv2
import os

detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture("smallvid.mp4")

while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    count = 1
    for (x, y, w, h) in faces:
        cropped = img[y:y+h, x:x+w]
        cv2.imwrite("cropped_face" + str(id) + ".png", cropped)
        count = count + 1
        cv2.imshow(out_dir + str(count), cropped)  # show an image of each face
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)  # create bounding box around face
    cv2.imshow('frame', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
You can crop just like this:
img[y:y+h, x:x+w]
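As for the file not being saved, a likely cause (an assumption, since no traceback is shown) is that str(id) formats the built-in id function rather than a number, which produces an invalid filename; using the count variable, and keeping it across frames, gives each face its own file:

count = 1  # defined once, before the while loop, so filenames stay unique across frames

# inside the detection loop:
for (x, y, w, h) in faces:
    cropped = img[y:y+h, x:x+w]
    cv2.imwrite("cropped_face" + str(count) + ".png", cropped)  # e.g. cropped_face1.png
    count = count + 1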

cv2 issue for face-detection algorithm

I have a face detection program.
I tried to run the code but it's not working.
import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer/trainer.yml")
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

cam = cv2.VideoCapture(0)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)

while True:
    ret, im = cam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x+w, y+h), (225, 0, 0), 2)
        Id, conf = recognizer.predict(gray[y:y+h, x:x+w])
        if conf < 50:
            if Id == 1:
                Id = "chandra"
            elif Id == 2:
                Id = "vamsi"
        else:
            Id = "Unknown"
        cv2.putText(im, str(Id), (x, y+h), fontFace, fontScale, fontColor, 2)
    cv2.imshow('im', im)
    if cv2.waitKey(10) == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
I got this error:
(-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
I'm using OpenCV (cv2) and Python 3.7.
Maybe this will help:
try:
    ret, im = cam.read()
except:
    continue
Please use the code below to check whether you are getting any images from your camera:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
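The (-215) !empty() assertion itself usually means the cascade XML file could not be found, so the classifier is empty. A minimal check, assuming a recent opencv-python build where cv2.data is available:

import cv2

# Load the cascade bundled with the opencv-python package
cascadePath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

# If loading failed, detectMultiScale would raise the same assertion error
if faceCascade.empty():
    raise IOError("Could not load cascade file: " + cascadePath)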

Getting TypeError: Expected cv::UMat for argument 'src' while casting bgr to rgb

I want to convert BGR to RGB, but I'm getting a "TypeError: Expected cv::UMat for argument 'src'" error.
pip freeze:
greenlet==0.4.15
msgpack==0.6.1
mss==4.0.3
numpy==1.17.0
opencv-python==4.1.0.25
Pillow==6.1.0
pywin32==224
import numpy as np
import cv2
from mss import mss
from PIL import Image
from win32api import GetSystemMetrics

sct = mss()
(w, h) = (GetSystemMetrics(0) // 2, GetSystemMetrics(1) * 2 // 3)
(margin_l, margin_t) = (GetSystemMetrics(0) // 4, GetSystemMetrics(1) // 3)

while True:
    monitor = {
        'top': margin_t,
        'left': margin_l,
        'width': w,
        'height': h,
    }
    img = Image.frombytes('RGB', (w, h), sct.grab(monitor).rgb)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.imshow('DEBUG', np.array(img))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
The line img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) doesn't work to change the colorspace; without it the code runs just fine, but all reds show up as blue.
A better way to capture the screen with correct colors is below:
# -*- coding: utf-8 -*-
import numpy as np
from PIL import ImageGrab
import cv2
from win32api import GetSystemMetrics

bbox = (GetSystemMetrics(0)//5, GetSystemMetrics(1)//3, GetSystemMetrics(0)//1.3, GetSystemMetrics(1))

while True:
    printscreen = np.array(ImageGrab.grab(bbox=bbox))
    printscreen = cv2.cvtColor(printscreen, cv2.COLOR_BGR2RGB)
    cv2.imshow('window', printscreen)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
This code works for me, with adjustments:
Moved out the monitor definition outside the while.
Used the with context manager for MSS.
Removed the use of PIL (it slows down the whole process).
Removed the access to img.rgb (it slows down the whole process).
monitor = {
    'top': margin_t,
    'left': margin_l,
    'width': w,
    'height': h,
}

with mss() as sct:
    while True:
        # Grab it
        img = np.array(sct.grab(monitor))
        # Convert from BGRA to RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
        # Display
        cv2.imshow('DEBUG', img)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
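For completeness, the original TypeError comes from passing a PIL Image to cv2.cvtColor, which expects a NumPy array (or cv::UMat). Converting first, as a small sketch inside the original loop and with its variable names, would also work:

img = Image.frombytes('RGB', (w, h), sct.grab(monitor).rgb)
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)  # PIL gives RGB; cv2.imshow expects BGR
cv2.imshow('DEBUG', img)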

Finding circles in a live video feed using python 2.7.11 and opencv 3.0.0

I need help finding circles in a live video feed from my webcam. I just need feedback from Python that a circle has or has not been detected. Also, what is the best method for finding the size of the circle in pixels for better detection? My code so far:
import cv2
import numpy as np
import sys

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    cimg = frame.copy()
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 200, 100, 100, 200)
    if circles == 1:
        print('Circle true')
    else:
        print('No circle')
    cv2.imshow('video', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
There it is!

import cv2
import numpy as np
import sys

cap = cv2.VideoCapture(0)

while True:
    gray = cv2.medianBlur(cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY), 5)
    # HoughCircles returns [[[Xpos, Ypos, Radius], ...]] or None when nothing is found
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10)
    if circles is not None:
        print("Circle There !")
    cv2.imshow('video', gray)
    if cv2.waitKey(1) == 27:  # Esc key
        break

cap.release()
cv2.destroyAllWindows()
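Regarding the size of the circle in pixels: each circle returned by cv2.HoughCircles is an (x, y, radius) triple, so the radius is already available. A small sketch (drawing on the gray preview purely for illustration):

if circles is not None:
    circles = np.round(circles[0, :]).astype("int")
    for (x, y, r) in circles:
        print("Circle at ({}, {}) with radius {} px".format(x, y, r))
        cv2.circle(gray, (x, y), r, 255, 2)  # draw the detected circle on the preview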
