Have to reset resolution between each picture with PiCamera - python

I am using the Picamera library in Python in combination with the new HQ camera, and I found that if I want to
use the image port and take pictures in a loop, I must set the resolution again and again inside the loop. Is this normal behaviour? It seems wasteful to have to set it over and over (I checked the call that setting the resolution makes, and it is not a fast one).
Here is the code I am using:
from picamera.array import PiRGBArray
from picamera.camera import PiCamera

with PiCamera() as camera:
    with PiRGBArray(camera) as output:
        #camera.resolution = (1280, 720)  # THIS DOES NOT WORK
        while not self._stop_camera_stream:
            camera.resolution = (1280, 720)  # BUT THIS DOES
            camera.capture(output, 'rgb')
            print('Captured %dx%d image' % (output.array.shape[1], output.array.shape[0]))
            # The lines from here on are for a preview inside a PyQt5 window
            image = output.array
            h, w, ch = image.shape
            bytesPerLine = ch * w
            convertedToQtFormat = QImage(image, w, h, bytesPerLine, QImage.Format_RGB888)
            p = convertedToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
            self.changePixmap.emit(p)
            output.truncate(0)
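For comparison, a minimal sketch (untested) of picamera's documented capture_continuous pattern, which sets the resolution once and reuses the same PiRGBArray between frames; whether it sidesteps the per-capture resolution reset on the HQ camera is exactly the open question here. The self._stop_camera_stream flag is the one from the code above.

from picamera.array import PiRGBArray
from picamera.camera import PiCamera

with PiCamera() as camera:
    camera.resolution = (1280, 720)          # set once, outside the loop
    with PiRGBArray(camera) as output:
        for _ in camera.capture_continuous(output, format='rgb', use_video_port=True):
            image = output.array             # process / preview the frame here
            output.truncate(0)               # reuse the buffer for the next frame
            if self._stop_camera_stream:
                break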

Related

The video I recorded via webcam with opencv does not open

I tried to record webcam video to a file using OpenCV in Python, but I could not open the resulting file with any of my video players. The recording itself seems to work fine, but when I stop it and look at the file, it does not open. I guess there are some codec issues; I also tried (*'XVID') with the .avi format, but that changed nothing.
Here is the code, please help:
from tkinter import *
from PIL import ImageTk, Image
import cv2
import threading

root = Tk()
root.geometry("750x500")
root.configure(bg="#0059aa")

# camera
camera_frame = LabelFrame(root, text=u"KAMERA STREAMING",
                          border=2,
                          width=398,
                          height=265)
camera_frame.place(x=183, y=33)
camera_label = Label(camera_frame, width=55, height=14)
camera_label.grid(row=0, column=0)

global capture
capture = cv2.VideoCapture(0)
# edit: commenting out the following two lines solved it
# capture.set(3,250)
# capture.set(4,225)

global out
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('blabla.mp4', fourcc, 20.0, (640, 480))

global stopCam
stopCam = False

def show_frames():
    global capture
    # read the capture
    ret, frame = capture.read()
    # turn it into an image and display it
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    height, width, channels = cv2image.shape
    img = Image.fromarray(cv2image)
    imgtk = ImageTk.PhotoImage(image=img)
    camera_label.imgtk = imgtk
    camera_label.configure(image=imgtk, width=width, height=height)
    # record
    global out
    out.write(frame)
    # quit
    if stopCam:
        out.release()
        capture.release()
        cv2.destroyAllWindows()
        return
    camera_label.after(20, show_frames)

p1 = threading.Thread(target=show_frames)
buttonLabel = Label(camera_frame)
buttonLabel.grid(row=1, column=0)
connectButton = Button(buttonLabel, text=u"connect", command=p1.start, width=14)
connectButton.grid(row=0, column=0)
stopButton = Button(buttonLabel, text=u"stop", command=lambda: globals().update(stopCam=True), width=14)
stopButton.grid(row=0, column=1)
root.mainloop()
Edit (also how it was solved):
I looked at some code that worked properly and saw that the capture.set() calls were the difference. When I commented out the capture.set() lines, I had no problems with either streaming or recording. Now the main problem is that I have to show the video in a label of a certain size; without set(), the video gets too big. How can I solve that now?
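A minimal sketch (untested) of one way to square those two requirements, reusing the names from the snippet above: create the VideoWriter with the capture's real frame size, and shrink only the copy that goes into the Tkinter label. The (398, 265) preview size here is just the label frame's dimensions, used as an example.

width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter('blabla.mp4', fourcc, 20.0, (width, height))

ret, frame = capture.read()
if ret:
    out.write(frame)                           # full-size frame goes into the file
    preview = cv2.resize(frame, (398, 265))    # shrunk copy for the label only
    cv2image = cv2.cvtColor(preview, cv2.COLOR_BGR2RGB)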

OpenCV Video Capture not working for Android | Kivy, Buildozer

I am building an app that has several video filters, but it does not work on Android because capture = cv.VideoCapture(0) does not get access to the Android camera.
The code below shows how I edit the video footage of the app at the moment. On PC it works perfectly, but after building it there is no video footage on Android.
The buildozer spec has the camera permission, by the way.
import cv2 as cv
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.image import Image

capture = cv.VideoCapture(0)

class BinaryCam(Image):
    def on_kv_post(self, base_widget):
        # self.capture = cv.VideoCapture(0)
        # cv.namedWindow("CV2 Image")
        Clock.schedule_interval(self.update, 1.0 / 33.0)

    def update(self, dt):
        # display image from cam in opencv window
        ret, frame = capture.read()
        if ret == True:
            # cv.imshow("CV2 Image", frame)
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            # convert it to texture
            adaptive_thresh = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 3)
            buf1 = cv.flip(adaptive_thresh, 0)
            buf = buf1.tobytes()
            texture1 = Texture.create(size=(adaptive_thresh.shape[1], adaptive_thresh.shape[0]),
                                      colorfmt='luminance')  # grayscale, so there is no BGR here
            # if working on RASPBERRY PI, use colorfmt='rgba' here instead, but stick with "bgr" in blit_buffer.
            texture1.blit_buffer(buf, colorfmt='luminance', bufferfmt='ubyte')  # replacing texture
            # display image from the texture
            self.texture = texture1
Or maybe there is a way to get the camera frames from the Kivy Camera widget and edit them with OpenCV?
But I would really prefer the OpenCV approach I showed here.
Thank you
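On the Kivy-Camera idea mentioned above, a hedged sketch (untested on a real device): read the pixels of Kivy's own Camera widget, which does have Android providers, and hand them to OpenCV as a numpy array. That you already have a playing kivy.uix.camera.Camera instance is an assumption here.

import numpy as np
import cv2 as cv

def frame_from_kivy_camera(cam):
    # cam is assumed to be a kivy.uix.camera.Camera widget with play=True
    tex = cam.texture
    if tex is None:
        return None
    frame = np.frombuffer(tex.pixels, dtype=np.uint8)   # texture.pixels is RGBA bytes
    frame = frame.reshape(tex.height, tex.width, 4)
    gray = cv.cvtColor(frame, cv.COLOR_RGBA2GRAY)
    return cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                                cv.THRESH_BINARY, 11, 3)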

[ERROR] libpng warning: image width is zero in IHDR using OpenCV and Python

I'm working on a small script where a GPIO trigger takes pictures with 3 webcams on an RPi 3 using OpenCV.
Here is my code:
import numpy as np
import cv2
import datetime
import RPi.GPIO as GPIO
import os
import os.path
from time import gmtime, strftime
import time

# init GPIO to get the trigger for cooling pause
GPIO_trigger = 21
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_trigger, GPIO.IN, pull_up_down=GPIO.PUD_UP)

# Create a list with all plugged-in cameras
usblist = [0, 1, 2]
PATH = "/home/pi/opencv-3.4.3/python_project/"

def csnip(channel):
    # Number of frames to throw away while the camera adjusts to light levels
    ramp_frames = 30
    for Kn in usblist:
        # set the file name from the camera name and a timestamp
        Kn = str(Kn)
        time_stamp = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
        Kameran = "KRASP"
        name = str(time_stamp) + "KRASP" + Kn
        directory = str(PATH + "KRASP" + Kn)
        if not os.path.exists(directory):
            os.makedirs(directory)
        camera_port = Kn
        # Number of discarded frames
        ramp_frames = 30
        camera = cv2.VideoCapture(camera_port)
        def make_1080p():
            camera.set(3, 1920)
            camera.set(4, 1080)
        def make_720p():
            camera.set(3, 1280)
            camera.set(4, 720)
        def make_480p():
            camera.set(3, 640)
            camera.set(4, 480)
        def change_res(width, height):
            camera.set(3, width)
            camera.set(4, height)
        make_720p()
        #change_res(1280, 720)
        # Captures a single image from the camera
        def get_image():
            retval, im = camera.read()
            return im
        for i in range(ramp_frames):
            temp = get_image()
        # Take the actual image we want to keep
        camera_capture = get_image()
        file = PATH + "KRASP" + Kn + "/" + name + ".png"
        print("Saving pic...")
        cv2.imwrite(file, camera_capture)
        del(camera)
        time.sleep(2)

# When a falling edge is detected on the GPIO, call the callback function and debounce for 2 s
GPIO.add_event_detect(GPIO_trigger, GPIO.FALLING, callback=csnip, bouncetime=2000)

try:
    while True:
        time.sleep(0.01)
except KeyboardInterrupt:
    GPIO.cleanup()  # clean up GPIO on CTRL+C exit
GPIO.cleanup()  # clean up GPIO on normal exit
The code runs fine, but I cannot open the picture. I get the following console output:
Saving pic...
libpng warning: Image width is zero in IHDR
libpng warning: Image height is zero in IHDR
libpng error: Invalid IHDR data
What is really weird is that I have a similar script that works perfectly, with the same functions etc.:
import numpy as np
import cv2
import datetime
#import RPi.GPIO as GPIO
import os
import os.path
from time import gmtime, strftime

#ini GPIO to get the trigger for cooling pause
#GPIO_trigger=7
#GPIO.setmode(GPIO.BOARD)
#GPIO.setup(GPIO_trigger, GPIO.IN)
#
Camera = int(input("How many Cameras are plugged in RPi?"))
i = 0
# Create a list with all plugged-in cameras
usblist = []
PATH = "/home/pi/opencv/KameraPi/"

def set_name(Kn):
    # set the file name from the camera name and a timestamp
    Kn = str(Kn)
    time_stamp = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
    Kameran = "KRASP"
    name = time_stamp + Kameran + Kn
    return name

def img_save(set_name):
    # Camera number Kn
    camera_port = Kn
    # Number of discarded frames
    ramp_frames = 30
    camera = cv2.VideoCapture(camera_port)
    def make_1080p():
        camera.set(3, 1920)
        camera.set(4, 1080)
    def make_720p():
        camera.set(3, 1280)
        camera.set(4, 720)
    def make_480p():
        camera.set(3, 640)
        camera.set(4, 480)
    def change_res(width, height):
        camera.set(3, width)
        camera.set(4, height)
    make_720p()
    #change_res(1280, 720)
    # Captures a single image from the camera
    def get_image():
        retval, im = camera.read()
        return im
    for i in range(ramp_frames):
        temp = get_image()
    # Take the actual image we want to keep
    camera_capture = get_image()
    file = str(PATH + usblist[Kn] + "/" + set_name + ".png")
    print("Saving pic...")
    cv2.imwrite(file, camera_capture)
    del(camera)

# Add cameras to the list
while i < Camera:
    usbname = "KRASP" + str(i)
    usblist.append(usbname)
    i = i + 1
listlength = len(usblist)

# Create a folder for each camera in the list if it doesn't exist already
for usb in usblist:
    directory = str(PATH + usb)
    if not os.path.exists(directory):
        os.makedirs(directory)

# Temporary solution: take and save a picture for the chosen camera (max 4 cameras)
while True:
    print(usblist)
    mchoice = int(input("Take a picture with what Camera? "))
    if mchoice == '':
        break
    else:
        mchoice = int(float(mchoice))
    if mchoice == 0:
        Kn = mchoice
        img_save(set_name(Kn))
    elif mchoice == 1:
        Kn = mchoice
        img_save(set_name(Kn))
    elif mchoice == 2:
        Kn = mchoice
        img_save(set_name(Kn))
    elif mchoice == 3:
        Kn = mchoice
        img_save(set_name(Kn))
The last script runs on user input, but it should do the same thing... and there is no error with this one.
Does someone have a clue what is wrong with the triggered one?
Thank you
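One observation (not a confirmed answer): in the triggered version, Kn is converted to a string before being passed to cv2.VideoCapture, while the working version passes the integer index. If the capture fails to open, camera.read() returns empty frames, and writing one produces exactly this zero-width/zero-height libpng error. A small defensive sketch, reusing Kn and file from the snippet above:

import cv2

camera = cv2.VideoCapture(int(Kn))                     # integer index, not the string str(Kn)
if not camera.isOpened():
    raise RuntimeError("camera %s failed to open" % Kn)
retval, im = camera.read()
if not retval or im is None or im.size == 0:
    raise RuntimeError("camera %s returned an empty frame" % Kn)
cv2.imwrite(file, im)
camera.release()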

Stream from a thermal camera (IP camera) in Python, connection error

I want to stream from a thermal camera, which usually exports its frames as grayscale frames.
The thermal camera is an IP camera. I tried different code and packages, but got no output.
When I change the code a little to view a USB camera instead, it works normally, so any help please.
This is the code I have tried:
import sys
sys.path.append('C:\Python27\Lib\site-packages')
import Tkinter as tk
import cv2
from PIL import Image, ImageTk

i = 0
window = tk.Tk()
window.title('thermal image')
var = tk.IntVar()
width, height = 800, 600

cap = cv2.VideoCapture(0)
cap.open("http://169.254.110.119/")

left_label = tk.Label(window)
left_label.pack(side="left")
right_label = tk.Label(window)
right_label.pack(side="right")

def show_frame():
    _, frame = cap.read()
    print frame
    if frame != None:
        frame = cv2.flip(frame, 1)
        img = Image.fromarray(frame)
        imgtk = ImageTk.PhotoImage(image=img)
        left_label.imgtk = imgtk
        left_label.configure(image=imgtk)
    left_label.after(10, show_frame)

show_frame()
window.mainloop()
I think the image from the website is not being grabbed in the code; what worked for me was:
img_requested = requests.get(url)
img_arr = np.array(bytearray(img_requested.content), dtype=np.uint8)
frame = cv2.imdecode(img_arr, -1)
And there you get the frame (color pictures/video). Keep in mind that you need to import requests and numpy as np.
If you are using IP Webcam, it is important not to forget the '/shot.jpg' at the end of the URL, like this: 'http://190.160.0.0:8080/shot.jpg', so that it effectively grabs the image.
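Putting the lines above into a simple display loop, a minimal sketch (the URL is the example address from above, not a real endpoint):

import cv2
import numpy as np
import requests

url = 'http://190.160.0.0:8080/shot.jpg'
while True:
    img_requested = requests.get(url)
    img_arr = np.array(bytearray(img_requested.content), dtype=np.uint8)
    frame = cv2.imdecode(img_arr, -1)
    if frame is None:                      # skip bad responses instead of crashing
        continue
    cv2.imshow('thermal stream', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()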

Capture video data from screen in Python

Is there a way with Python (maybe with OpenCV or PIL) to continuously grab frames of all or a portion of the screen, at least at 15 fps or more? I've seen it done in other languages, so in theory it should be possible.
I do not need to save the image data to a file. I actually just want it to output an array containing the raw RGB data (like in a numpy array or something) since I'm going to just take it and send it to a large LED display (probably after re-sizing it).
With all of the above solutions, I was unable to get a usable frame rate until I modified my code in the following way:
import numpy as np
import cv2
from mss import mss
from PIL import Image

bounding_box = {'top': 100, 'left': 0, 'width': 400, 'height': 300}

sct = mss()

while True:
    sct_img = sct.grab(bounding_box)
    cv2.imshow('screen', np.array(sct_img))

    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        cv2.destroyAllWindows()
        break
With this solution, I easily get 20+ frames/second.
For reference, check this link: OpenCV/Numpy example with mss
There is another solution with mss which provides a much better frame rate. (Tested on a MacBook Pro with macOS Sierra.)
import numpy as np
import cv2
from mss import mss
from PIL import Image

mon = {'left': 160, 'top': 160, 'width': 200, 'height': 200}

with mss() as sct:
    while True:
        screenShot = sct.grab(mon)
        img = Image.frombytes(
            'RGB',
            (screenShot.width, screenShot.height),
            screenShot.rgb,
        )
        cv2.imshow('test', np.array(img))
        if cv2.waitKey(33) & 0xFF in (
            ord('q'),
            27,
        ):
            break
You will need to use ImageGrab from the Pillow (PIL) library and convert the capture to a numpy array. Once you have the array you can do whatever you like with it using OpenCV. I converted the capture to grayscale and used imshow() as a demonstration.
Here is a quick snippet to get you started:
from PIL import ImageGrab
import numpy as np
import cv2

img = ImageGrab.grab(bbox=(100, 10, 400, 780))  # bbox specifies the region as (left, top, right, bottom)
img_np = np.array(img)                          # this is the array obtained from the conversion
frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
cv2.imshow("test", frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can put that in a loop at whatever frequency you like to keep capturing frames. After that you just encode the frames into a video; don't forget to add, before the loop:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
vid = cv2.VideoWriter('output.avi', fourcc, 6, (640,480))
and inside the loop you can add:
vid.write(frame) #the edited frame or the original img_np as you please
UPDATE
The end result looks something like this (if what you want is a stream of frames, that is; storing it as a video is just a demonstration of using OpenCV on the captured screen):
from PIL import ImageGrab
import numpy as np
import cv2

while True:
    img = ImageGrab.grab(bbox=(100, 10, 400, 780))  # bbox specifies the region as (left, top, right, bottom)
    img_np = np.array(img)
    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
    cv2.imshow("test", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # waitKey(0) would pause on every frame
        break
cv2.destroyAllWindows()
Hope that helps
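For completeness, a rough sketch (untested) that puts the loop and the VideoWriter fragments above together. Note that the writer's frame size has to match the frames you actually write, and a grayscale frame would need isColor=False, so this version keeps the frames in color:

from PIL import ImageGrab
import numpy as np
import cv2

first = np.array(ImageGrab.grab(bbox=(100, 10, 400, 780)))   # bbox = (left, top, right, bottom)
height, width = first.shape[:2]                              # writer size must match the frames
fourcc = cv2.VideoWriter_fourcc(*'XVID')
vid = cv2.VideoWriter('output.avi', fourcc, 6, (width, height))

while True:
    img_np = np.array(ImageGrab.grab(bbox=(100, 10, 400, 780)))
    frame = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)          # keep 3 channels for the color writer
    vid.write(frame)
    cv2.imshow("test", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

vid.release()
cv2.destroyAllWindows()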
Based on this post and other posts, I made something like this.
It takes screenshots and writes them into a video file without saving the images to disk.
import cv2
import numpy as np
import os
import time
import pyautogui

output = "video.avi"
img = pyautogui.screenshot()
img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
# get info from img
height, width, channels = img.shape
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter(output, fourcc, 20.0, (width, height))

while True:
    try:
        img = pyautogui.screenshot()
        image = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
        out.write(image)
        time.sleep(0.5)  # the original StopIteration(0.5) had no effect; a short pause was presumably intended
    except KeyboardInterrupt:
        break

out.release()
cv2.destroyAllWindows()
You can try this code, as it is working for me. I've tested it on Linux.
import numpy as np
import cv2
from mss import mss
from PIL import Image

sct = mss()

while 1:
    w, h = 800, 640
    monitor = {'top': 0, 'left': 0, 'width': w, 'height': h}
    img = Image.frombytes('RGB', (w, h), sct.grab(monitor).rgb)
    cv2.imshow('test', cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
Make sure that the following packages are installed:
Pillow, opencv-python, numpy, mss
You can try this:
import mss
import numpy

with mss.mss() as sct:
    monitor = {'top': 40, 'left': 0, 'width': 800, 'height': 640}
    img = numpy.array(sct.grab(monitor))
    print(img)
I tried all of the above, but they did not give me a real-time screen update.
You can try this. This code is tested, works successfully, and also gives you a good FPS; you can judge that yourself from the time each loop takes.
import numpy as np
import cv2
from PIL import ImageGrab as ig
import time

last_time = time.time()
while True:
    screen = ig.grab(bbox=(50, 50, 800, 640))
    print('Loop took {} seconds'.format(time.time() - last_time))
    cv2.imshow("test", np.array(screen))
    last_time = time.time()
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
If anyone is looking for a much easier and faster way to grab the screen as frames in Python, try the ScreenGear API from my high-performance video-processing vidgear library, in just a few lines of Python code, on any machine (tested on all platforms, including Windows 10, macOS Sierra and Linux Mint), and enjoy threaded screen-casting.
Note: It also supports multiple backends and screens out of the box.
# import required libraries
from vidgear.gears import ScreenGear
import cv2

# define dimensions of screen w.r.t to given monitor to be captured
options = {'top': 40, 'left': 0, 'width': 100, 'height': 100}

# open video stream with defined parameters
stream = ScreenGear(logging=True, **options).start()

# loop over
while True:

    # read frames from stream
    frame = stream.read()

    # check for frame if Nonetype
    if frame is None:
        break

    # {do something with the frame here}

    # Show output window
    cv2.imshow("Output Frame", frame)

    # check for 'q' key if pressed
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

# close output window
cv2.destroyAllWindows()

# safely close video stream
stream.stop()
VidGear library Docs: https://abhitronix.github.io/vidgear
ScreenGear API: https://abhitronix.github.io/vidgear/latest/gears/screengear/overview/
More examples: https://abhitronix.github.io/vidgear/latest/gears/screengear/usage/
I've tried ImageGrab from PIL and it gave me 20 fps, which is OK, but using the win32 libraries gave me 40+ fps, which is amazing!
I used this code by Frannecklp, but it didn't work quite right, so I needed to modify it:
- Firstly, pip install pywin32 in case you are using the libraries.
- Import the libraries like this instead:
import cv2
import numpy as np
from win32 import win32gui
from pythonwin import win32ui
from win32.lib import win32con
from win32 import win32api
For getting a single screen image, do:
from grab_screen import grab_screen
import cv2
img = grab_screen()
cv2.imshow('frame',img)
and for getting frames:
while True:
    # frame = grab_screen((0, 0, 100, 100))
    frame = grab_screen()
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q') or x > 150:
        break
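For reference, a hedged sketch of what such a grab_screen helper typically looks like, based on the widely shared win32 pattern this answer refers to (not necessarily Frannecklp's exact code); the optional region argument is an assumption:

import numpy as np
import cv2
from win32 import win32gui, win32api
from pythonwin import win32ui
from win32.lib import win32con

def grab_screen(region=None):
    hwin = win32gui.GetDesktopWindow()
    if region:
        left, top, right, bottom = region
        width, height = right - left, bottom - top
    else:
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)
    img = np.frombuffer(bmp.GetBitmapBits(True), dtype=np.uint8)
    img = img.reshape(height, width, 4)            # BGRA rows from the bitmap
    # release GDI resources before returning
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())
    return cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)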
This task is very simple with OpenCV; we are just capturing screenshots in a loop and converting them into frames. I created a timer for the screen recording: at the start you enter how many seconds you want to record. Here is the code.
import cv2
import numpy as np
import pyautogui
from win32api import GetSystemMetrics
import time

# Take the resolution from the system automatically
w = GetSystemMetrics(0)
h = GetSystemMetrics(1)
SCREEN_SIZE = (w, h)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("recording.mp4", fourcc, 20.0, (SCREEN_SIZE))
tim = time.time()
tp = int(input('How many times you want to record screen?->(Define value in Seconds): '))
tp = tp + tp
f = tim + tp

while True:
    img = pyautogui.screenshot()
    frame = np.array(img)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    out.write(frame)
    tu = time.time()
    if tu > f:
        break

cv2.destroyAllWindows()
out.release()
So that's how you can use a timer in screen recording. You don't need imshow() here, because showing the capture on screen would endlessly record the recording itself, so the output video would look weird.
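A compact sketch of the same timed loop (the 10-second duration is just an example), reusing out, cv2, np and pyautogui from the snippet above and comparing against a single stop time instead of tracking tim, tp and f:

duration = 10                                  # example: record for 10 seconds
stop_at = time.time() + duration
while time.time() < stop_at:
    frame = cv2.cvtColor(np.array(pyautogui.screenshot()), cv2.COLOR_RGB2BGR)
    out.write(frame)
out.release()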
