Python Harvesters Image Acquisition GigeCam

I am trying to get an image from my GigE camera. In the camera's own software it works just fine, but when I grab it with Harvesters the image has a weird grid, and I don't know why it is there or how to remove it. I need this for a stereo vision project. Any ideas?
Don't mind the brightness; I tried it with a higher exposure as well and it did not change a thing. :D
[image: captured frame showing the grid artifact]
import genicam.genapi as ge
import cv2
from harvesters.core import Harvester
import matplotlib.pyplot as plt
import numpy as np
# Create a Harvester object:
h = Harvester()
# Load a GenTL Producer; you can load many more if you want to:
h.add_file("C:/Program Files/MATRIX VISION/mvIMPACT Acquire/bin/x64/mvGenTLProducer.cti")
# Enumerate the available devices that GenTL Producers can handle:
h.update()
# Select a target device and create an ImageAcquirer object that
# controls the device:
ia = h.create(0)
ia2 = h.create(1)
# Configure the target device; it looks very small but this is just
# for demonstration:
ia.remote_device.node_map.Width.value = 1456
ia.remote_device.node_map.Height.value = 1088
# ia.remote_device.node_map.PixelFormat.symbolics
ia.remote_device.node_map.PixelFormat.value = 'BayerRG8'
ia2.remote_device.node_map.Width.value = 1456
ia2.remote_device.node_map.Height.value = 1088
# ia2.remote_device.node_map.PixelFormat.symbolics
ia2.remote_device.node_map.PixelFormat.value = 'BayerRG8'
ia.remote_device.node_map.ChunkSelector.value = 'ExposureTime'
ia.remote_device.node_map.ExposureTime.set_value(100000.0)
ia2.remote_device.node_map.ChunkSelector.value = 'ExposureTime'
ia2.remote_device.node_map.ExposureTime.set_value(100000.0)
# Allow the ImageAcquirer object to start image acquisition:
ia.start()
ia2.start()
# We are going to fetch a buffer filled up with an image:
# Note that you'll have to queue the buffer back to the
# ImageAcquirer object once you have consumed it; the
# with statement takes care of that for you:
while True:
    with ia.fetch() as buffer:
        component = buffer.payload.components[0]
        _2d = component.data.reshape(1088, 1456)
        img = _2d
        img = cv2.resize(img, (640, 480))
        cv2.imshow('right', img)
        cv2.imwrite('test_left.png', img)
        cv2.waitKey(10)
    with ia2.fetch() as buffer:
        component = buffer.payload.components[0]
        _2d = component.data.reshape(component.height, component.width)
        img2 = _2d
        img2 = cv2.resize(img2, (640, 480))
        cv2.imshow('left', img2)
        cv2.imwrite('test_right.png', img2)
        cv2.waitKey(10)
ia.stop()
ia2.stop()
ia.destroy()
ia2.destroy()
h.reset()

The grid was just the raw Bayer pattern: I had to convert the image to grayscale or RGB with cvtColor, and it's working.
Thanks anyway.
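For reference, a minimal sketch of that conversion (assuming BayerRG8 frames as configured above; note that OpenCV's Bayer constant naming does not always line up with GenICam's, so the right COLOR_Bayer* variant may take some trial):
import cv2

# component.data is the raw 8-bit Bayer mosaic fetched by Harvesters above
raw = component.data.reshape(1088, 1456)

# Demosaic to BGR (OpenCV's native channel order)
bgr = cv2.cvtColor(raw, cv2.COLOR_BayerRG2BGR)

# Or collapse the mosaic to a single grayscale channel instead
gray = cv2.cvtColor(raw, cv2.COLOR_BayerRG2GRAY)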

Related

Raspberry Pi CSI camera to USB webcam in Python

I'm trying to use an example from
https://github.com/ageitgey/face_recognition
for face detection on a Raspberry Pi.
This is the 'facerec_on_raspberry_pi.py' code:
# This is a demo of running face recognition on a Raspberry Pi.
# This program will print out the names of anyone it recognizes to the console.
# To run this, you need a Raspberry Pi 2 (or greater) with face_recognition and
# the picamera[array] module installed.
# You can follow this installation instructions to get your RPi set up:
# https://gist.github.com/ageitgey/1ac8dbe8572f3f533df6269dab35df65
import face_recognition
import picamera
import numpy as np
# Get a reference to the Raspberry Pi camera.
# If this fails, make sure you have a camera connected to the RPi and that you
# enabled your camera in raspi-config and rebooted first.
camera = picamera.PiCamera()
camera.resolution = (320, 240)
output = np.empty((240, 320, 3), dtype=np.uint8)
# Load a sample picture and learn how to recognize it.
print("Loading known face image(s)")
obama_image = face_recognition.load_image_file("obama_small.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Initialize some variables
face_locations = []
face_encodings = []
while True:
    print("Capturing image.")
    # Grab a single frame of video from the RPi camera as a numpy array
    camera.capture(output, format="rgb")
    # Find all the faces and face encodings in the current frame of video
    face_locations = face_recognition.face_locations(output)
    print("Found {} faces in image.".format(len(face_locations)))
    face_encodings = face_recognition.face_encodings(output, face_locations)
    # Loop over each face found in the frame to see if it's someone we know.
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s)
        match = face_recognition.compare_faces([obama_face_encoding], face_encoding)
        name = "<Unknown Person>"
        if match[0]:
            name = "Barack Obama"
        print("I see someone named {}!".format(name))
The code works fine with the CSI camera, but how can I change it to work with a USB webcam?
Thanks
You can use the OpenCV API directly.
Instead of creating a PiCamera object like:
camera = picamera.PiCamera()
camera.resolution = (320, 240)
output = np.empty((240, 320, 3), dtype=np.uint8)
Try:
import cv2
camera = cv2.VideoCapture(0) # 0 is the first camera
Then, when reading an image, replace:
camera.capture(output, format="rgb")
With:
_, output = camera.read()  # read the image at the camera's native resolution
output = cv2.resize(output, (320, 240))  # resize to the dimensions the rest of the code expects
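Putting it together, a minimal sketch of the adapted capture loop (one addition of mine: OpenCV delivers frames in BGR order while face_recognition expects RGB, hence the cvtColor call):
import cv2
import face_recognition

camera = cv2.VideoCapture(0)  # 0 is the first camera

while True:
    ret, frame = camera.read()  # BGR frame at native resolution
    if not ret:
        break
    frame = cv2.resize(frame, (320, 240))
    output = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # face_recognition expects RGB
    face_locations = face_recognition.face_locations(output)
    print("Found {} faces in image.".format(len(face_locations)))

camera.release()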

Trying to encrypt video frames with RSA; getting garbage instead of original data after decrypting

I am writing a script to encrypt and decrypt video using the RSA algorithm in Python. I extract the frames from the video, encrypt each image individually, and then combine the images into a video. Then I read the frames of the encrypted video again, but when I apply the decryption key I do not get back the original image. However, when I apply the same key to any of the images from which the video was made, I do get back the original image. Say image1 is an encrypted frame used to build the encrypted video: applying the key to image1 restores the original. Now say image2 is the same frame read back from the encrypted video: applying the key to it produces an even more scrambled image. Here's the code:
import cv2
import numpy
import os
import imageio
import time
from tkinter.filedialog import askopenfilename
from tkinter.ttk import *
from tkinter import *
from tkinter import filedialog
from tqdm import tqdm
from tkinter import messagebox
import subprocess
def load_image_decrypt(folder):
    videofile = 'envid.avi'
    try:
        if not os.path.exists('Dedata'):
            os.makedirs('Dedata')
    except OSError:
        messagebox.showinfo('Error Occurred', 'Error: Creating directory of decrypted data')
    vid_to_image(videofile)
    for filename1 in tqdm(os.listdir(folder)):
        imgV = imageio.imread(os.path.join(folder, filename1), format='PNG-FI')
        if imgV is not None:
            RGBdecryption(imgV, filename1)
        else:
            break
    vidname = 'devid.avi'
    image_to_vid(dedata2, vidname)  # NOTE: dedata2 is not defined in this snippet (presumably the './Dedata' folder)
    messagebox.showinfo('Finish!', 'Decryption Done successfully!')

def RGBdecryption(img, filename):
    img1 = img
    img = img.astype(numpy.uint16)
    img1 = img1.tolist()
    # Apply modular exponentiation (the RSA step) to every channel of every pixel
    for i1 in tqdm(range(len(img1))):
        for j1 in range(len(img1[i1])):
            for k1 in range(len(img1[i1][j1])):
                x1 = img1[i1][j1][k1]
                x1 = pow(x1, 16971, 25777)
                img1[i1][j1][k1] = x1
    img1 = numpy.array(img1).astype(numpy.uint16)
    name = './Dedata/' + str(filename)
    imageio.imwrite(name, img1, format='PNG-FI')

def vid_to_image(filename):
    # Playing video from file:
    cap = cv2.VideoCapture(filename)
    try:
        if not os.path.exists('data'):
            os.makedirs('data')
            messagebox.showinfo('Info!', 'Data directory is created where the frames are stored')
    except OSError:
        print('Error: Creating directory of data')
    currentFrame = 0
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break
        # Save the current frame as a png file
        name = './data/frame' + str(currentFrame) + '.png'
        print('Creating...' + name)
        imageio.imwrite(name, frame, format='PNG-FI')
        # To stop duplicate images
        currentFrame += 1
    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()

def image_to_vid(folder, vidname):  # creates a video out of the images stored in the folder
    image_folder = folder
    video_name = vidname
    sort_image = []
    images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
    print(images)
    print('\n\n')
    # Sort the frames numerically by reconstructing the expected file names
    for i in range(0, 1000):
        for j in range(len(images)):
            name = 'frame' + str(i) + '.png'
            if str(images[j]) == str(name):
                sort_image.append(images[j])
    print(sort_image)
    frame = cv2.imread(os.path.join(image_folder, sort_image[0]))
    height, width, layers = frame.shape
    video = cv2.VideoWriter(video_name, 0, 29, (width, height))  # 29 is the fps of the original video; the 0 is the fourcc codec code
    for image in sort_image:
        video.write(cv2.imread(os.path.join(image_folder, image)))
    cv2.destroyAllWindows()
    video.release()

data = './data'
load_image_decrypt(data)
I do not know where I am going wrong. I am new to OpenCV and video processing. Any help will be appreciated. Thank you.
Video frames are subject to lossy compression. So you cannot feed a codec some binary data under the guise of images, encode it and expect to get exactly the same binary data back when playing the resulting video.
Your best bet is to encrypt the video file as a whole as per Encryption of video files? or How can I Encrypt Video in Real Time?. It will need to be decrypted to be playable; this is apparently what OSX's "content protection" does, encrypting and decrypting data transparently.
A (paywalled) IEEE article, Video Encryption Based on OpenCV - IEEE Conference Publication, says they applied an Arnold transform to the image data. It is a transposition cipher and as such can be broken. Its main strength seems rather to be that it makes content unintelligible in regular playback while preserving the image characteristics critical for video codecs (lighting, frame differences), and it doesn't require the exact ciphertext for decryption, so it isn't damaged beyond repair by the lossy compression.
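To illustrate the whole-file approach, a minimal sketch using the cryptography package's Fernet recipe (an assumption on my part: Fernet is symmetric authenticated encryption, and in practice RSA would only wrap such a key rather than encrypt the video bytes directly):
from cryptography.fernet import Fernet

key = Fernet.generate_key()  # store this safely; it is needed to decrypt
f = Fernet(key)

# Encrypt the container as opaque bytes -- no per-frame processing
with open('video.avi', 'rb') as src:
    token = f.encrypt(src.read())
with open('video.avi.enc', 'wb') as dst:
    dst.write(token)

# Decrypt back to a playable file
with open('video.avi.enc', 'rb') as src:
    data = f.decrypt(src.read())
with open('video_restored.avi', 'wb') as dst:
    dst.write(data)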

Asynchronous task in AWS Lambda

I am currently working on a Python project for a DeepLens camera on AWS. I try to save images, put them in a list, and in parallel upload them to AWS S3.
When the image capture and the S3 upload run sequentially, everything works. It is when I try to run both functions asynchronously that it stops working.
Indeed, only one of the two threads starts: I capture images but I cannot send them to S3.
Do you think it is necessary to use a library other than threads, or does the error come from my code?
Thank you.
#*****************************************************
#                                                     *
# Copyright 2018 Amazon.com, Inc. or its affiliates.  *
# All Rights Reserved.                                *
#                                                     *
#*****************************************************
""" A sample lambda for face detection"""
from threading import Thread, Event, Timer
import os
import json
import numpy as np
import awscam
import cv2
import greengrasssdk
import boto3
from botocore.session import Session
import time

# ----------------------------------- Function ------------------------------------
liste_of_frame = []

def write_image_to_s3(img):
    session = Session()
    s3 = session.create_client('s3')
    file_name = 'DeepLens/image-' + time.strftime("%Y%m%d-%H%M%S") + '.jpg'
    # You can control the size and quality of the image
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 100]
    _, jpg_data = cv2.imencode('.jpg', img, encode_param)
    response = s3.put_object(ACL='public-read', Body=jpg_data.tostring(),
                             Bucket='deeplens-sagemaker-f2e', Key=file_name)
    image_url = 'https://s3.amazonaws.com/deeplens-sagemaker-f2e/' + file_name
    return image_url

def upload_image():
    client.publish(topic=iot_topic, payload='upload_image')
    if len(liste_of_frame) > 0:
        for i, img in enumerate(liste_of_frame):
            write_image_to_s3(img)
            try:
                liste_of_frame.pop(i)
            except:
                pass
    return liste_of_frame

def capture_img(model_type, output_map, client, iot_topic, local_display, model_path, model, detection_threshold, input_height, input_width):
    # client.publish(topic=iot_topic, payload='inside function')
    ret, frame = awscam.getLastFrame()
    if not ret:
        raise Exception('Failed to get frame from the stream')
    # Resize frame to the same size as the training set.
    frame_resize = cv2.resize(frame, (input_height, input_width))
    # Run the images through the inference engine and parse the results using
    # the parser API. Note it is possible to get the output of doInference
    # and do the parsing manually, but since it is an ssd model,
    # a simple API is provided.
    parsed_inference_results = model.parseResult(model_type,
                                                 model.doInference(frame_resize))
    # Compute the scale in order to draw bounding boxes on the full resolution image.
    yscale = float(frame.shape[0] / input_height)
    xscale = float(frame.shape[1] / input_width)
    # Dictionary to be filled with labels and probabilities for MQTT
    cloud_output = {}
    # Get the detected faces and probabilities
    for obj in parsed_inference_results[model_type]:
        if obj['prob'] > detection_threshold:
            cloud_output[output_map[obj['label']]] = obj['prob']
            # client.publish(topic=iot_topic, payload='Ajout a la liste')
            liste_of_frame.append(frame)
    # Set the next frame in the local display stream.
    local_display.set_frame_data(frame)
    # Send results to the cloud
    # client.publish(topic=iot_topic, payload=json.dumps(cloud_output))

def init_greengrass():
    # This face detection model is implemented as single shot detector (ssd).
    model_type = 'ssd'
    output_map = {1: 'face'}
    # Create an IoT client for sending messages to the cloud.
    client = greengrasssdk.client('iot-data')
    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
    # Create a local display instance that will dump the image bytes to a FIFO
    # file so that the image can be rendered locally.
    local_display = LocalDisplay('480p')
    local_display.start()
    # The sample projects come with optimized artifacts, hence only the artifact
    # path is required.
    model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml'
    # Load the model onto the GPU.
    client.publish(topic=iot_topic, payload='Loading face detection model')
    model = awscam.Model(model_path, {'GPU': 1})
    client.publish(topic=iot_topic, payload='Face detection model loaded')
    # Set the threshold for detection
    detection_threshold = 0.5
    # The height and width of the training set images
    input_height = 300
    input_width = 300
    return model_type, output_map, client, iot_topic, local_display, model_path, model, detection_threshold, input_height, input_width

# --------------------------------------- End Function -----------------------------------

class LocalDisplay(Thread):
    """ Class for facilitating the local display of inference results
        (as images). The class is designed to run on its own thread. In
        particular the class dumps the inference results into a FIFO
        located in the tmp directory (which lambda has access to). The
        results can be rendered using mplayer by typing:
        mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg
    """
    def __init__(self, resolution):
        """ resolution - Desired resolution of the project stream """
        # Initialize the base class, so that the object can run on its own thread.
        super(LocalDisplay, self).__init__()
        # List of valid resolutions
        RESOLUTION = {'1080p': (1920, 1080), '720p': (1280, 720), '480p': (858, 480)}
        if resolution not in RESOLUTION:
            raise Exception("Invalid resolution")
        self.resolution = RESOLUTION[resolution]
        # Initialize the default image to be a white canvas. Clients
        # will update the image when ready.
        self.frame = cv2.imencode('.jpg', 255 * np.ones([640, 480, 3]))[1]
        self.stop_request = Event()

    def run(self):
        """ Overridden method that continually dumps images to the desired
            FIFO file.
        """
        # Path to the FIFO file. The lambda only has permissions to the tmp
        # directory. Pointing to a FIFO file in another directory
        # will cause the lambda to crash.
        result_path = '/tmp/results.mjpeg'
        # Create the FIFO file if it doesn't exist.
        if not os.path.exists(result_path):
            os.mkfifo(result_path)
        # This call will block until a consumer is available
        with open(result_path, 'w') as fifo_file:
            while not self.stop_request.isSet():
                try:
                    # Write the data to the FIFO file. This call will block
                    # meaning the code will come to a halt here until a consumer
                    # is available.
                    fifo_file.write(self.frame.tobytes())
                except IOError:
                    continue

    def set_frame_data(self, frame):
        """ Method updates the image data. This currently encodes the
            numpy array to jpg but can be modified to support other encodings.
            frame - Numpy array containing the image data of the next frame
            in the project stream.
        """
        ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))
        if not ret:
            raise Exception('Failed to set frame data')
        self.frame = jpeg

    def join(self):
        self.stop_request.set()

def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        model_type, output_map, client, iot_topic, local_display, model_path, model, detection_threshold, input_height, input_width = init_greengrass()
        # Do inference until the lambda is killed.
        while True:
            t1 = Thread(target=capture_img, args=[model_type, output_map, client, iot_topic, local_display, model_path, model, detection_threshold, input_height, input_width])
            t2 = Thread(target=upload_image)
            t1.start()
            t2.start()
            t1.join()
            t2.join()
    except Exception as ex:
        client.publish(topic=iot_topic, payload='Error in face detection lambda: {}'.format(ex))

greengrass_infinite_infer_run()
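For what it's worth, a minimal sketch of the capture/upload split described above, using the standard library's thread-safe queue.Queue instead of a shared list (popping from liste_of_frame while enumerating it, as upload_image does, is race-prone); get_frame and send_to_s3 are hypothetical stand-ins for the DeepLens-specific calls:
import queue
import threading
import time

frames = queue.Queue()  # thread-safe replacement for the shared liste_of_frame

def get_frame():
    # Hypothetical stand-in for awscam.getLastFrame()
    return b'frame-bytes'

def send_to_s3(frame):
    # Hypothetical stand-in for write_image_to_s3()
    print('uploaded', len(frame), 'bytes')

def capture_loop():
    while True:
        frames.put(get_frame())
        time.sleep(0.1)

def upload_loop():
    while True:
        frame = frames.get()  # blocks until a frame is available
        send_to_s3(frame)
        frames.task_done()

threading.Thread(target=capture_loop, daemon=True).start()
threading.Thread(target=upload_loop, daemon=True).start()
time.sleep(1)  # let the demo run briefly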

RuntimeError: Expected writable numpy.ndarray with shape set

I am trying to load some images into an RDD and use the face recognition library (https://github.com/ageitgey/face_recognition) to compare different images. The following code works:
import face_recognition
import numpy as np
from io import BytesIO
from PIL import Image
from pyspark import SparkContext
sc = SparkContext(appName="LoadingImage")
images = sc.binaryFiles("./images/")
image_to_array = lambda rawdata: np.asarray(Image.open(BytesIO(rawdata)))
i_arr = images.values().map(image_to_array)
new_encoding = face_recognition.face_encodings(i_arr.first())
next_encoding = face_recognition.face_encodings(i_arr.first())
result = face_recognition.compare_faces([new_encoding[0]], next_encoding[0])
print(result)
However, when I try to map the face_encodings function over all the elements inside the RDD, it always gives me an error:
RuntimeError: Expected writable numpy.ndarray with shape set.
img_to_encodings = lambda img: face_recognition.face_encodings(img)[0]
i_arrm = i_arr.map(img_to_encodings)
result = face_recognition.compare_faces([i_arrm.first()], i_arrm.first())
print(result)
The error is from the dlib library, but I reckon I did something wrong with Spark. Any idea how to solve this?
The frame returned by picamera has its writable flag set to false, i.e. WRITEABLE: False.
Set the flag to true so that the face_recognition package can use it. Code snippet:
image.setflags(write=True)
For demo code, have a look:
#import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
#initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
#allow the camera to warmup
time.sleep(0.1)
#capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    image = frame.array
    image.setflags(write=True)
    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # if the q key was pressed, break from the loop
    if key == ord("q"):
        break
cv2.destroyAllWindows()
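The same writable-array requirement applies to the Spark case in the question: arrays that come out of an RDD map can be read-only, and dlib needs a writable buffer. A minimal sketch, assuming a plain copy is sufficient:
import numpy as np
import face_recognition

# np.copy returns a fresh, writable array, which is what dlib requires
img_to_encodings = lambda img: face_recognition.face_encodings(np.copy(img))[0]
i_arrm = i_arr.map(img_to_encodings)
result = face_recognition.compare_faces([i_arrm.first()], i_arrm.first())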

Palette method using numpy

I am trying to apply the numpy palette method to an OpenCV-processed video (references: this question and this tutorial). I aim to replace all frame pixels of a certain color range with another color. The code below is an example that replaces black with green. Unfortunately, my code raises an error on the line:
image[np.where((image==[0,0,0]).all(axis=2))]=green
The error: exceptions.ValueError: axis(=2) out of bounds
I am running Python 2.7 with PyScripter, and I find it odd because the code worked before and I did not make any major modification to it. Can someone please help me? I am quite stuck on this one...
My code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##IMPORTS
import cv2.cv as cv
import numpy as np
import sys
import time
##VARIABLES
#colors
green = [0,255,0]
##MAIN
#video file input
frames = raw_input('Please input video file:')
if not frames:
    print "This program requires a file as input!"
    sys.exit(1)
#create window
cv.NamedWindow("image", 1)
#File capture
vidFile = cv.CaptureFromFile(frames)
nFrames = int( cv.GetCaptureProperty( vidFile, cv.CV_CAP_PROP_FRAME_COUNT ) )
fps = cv.GetCaptureProperty( vidFile, cv.CV_CAP_PROP_FPS )
waitPerFrameInMillisec = int( 1/fps * 1000/1 )
#time adjustment, frame capture
for f in xrange( nFrames ):
    frame = cv.QueryFrame( vidFile )
    # create the images we need
    image = cv.CreateImage (cv.GetSize (frame), 8, 3)
    # copy the frame, so we can draw on it
    if not frame:
        break
    else:
        cv.Copy (frame, image)
    #get pixel HSV colors
    rows,cols = cv.GetSize(frame)
    image = np.asarray(image)
    image[np.where((image==[0,0,0]).all(axis=2))] = green
    image = cv.fromarray(image)
    #show the image
    cv.ShowImage("image", image)
    #quit command ESC
    if cv.WaitKey(waitPerFrameInMillisec)==27:
        break
    else:
        cv.WaitKey(waitPerFrameInMillisec) % 0x100
I solved it. In fact, the answer was not a modification of the line raising the exception, but rather a modification of the arguments passed to that line. Indeed, it seems that the '[:,:]' slice is required in OpenCV when converting a cvMat to a NumPy array and back.
Here is the corrected code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##IMPORTS
import cv2.cv as cv
import numpy as np
import sys
##VARIABLES
#colors
green = [0,255,0]
##MAIN
#start video stream analysis
frames = raw_input('Please enter video file:')
if not frames:
    print "This program requires a file as input!"
    sys.exit(1)
# first, create the necessary windows
cv.NamedWindow ('image', cv.CV_WINDOW_AUTOSIZE)
#File capture
vidFile = cv.CaptureFromFile(frames)
nFrames = int( cv.GetCaptureProperty( vidFile, cv.CV_CAP_PROP_FRAME_COUNT ) )
fps = cv.GetCaptureProperty( vidFile, cv.CV_CAP_PROP_FPS )
waitPerFrameInMillisec = int( 1/fps * 1000/1 )
for f in xrange( nFrames ):
    #time adjustment, frame capture
    sec = f/fps
    frame = cv.QueryFrame( vidFile )
    # create the images we need
    image = cv.CreateImage (cv.GetSize (frame), 8, 3)
    # copy the frame, so we can draw on it
    if not frame:
        break
    else:
        cv.Copy (frame, image)
    #Replace pixel colors
    rows,cols = cv.GetSize(frame)
    image = np.asarray(image[:,:])
    image[np.where((image==[0,0,0]).all(axis=2))] = green
    image = cv.fromarray(image[:,:])
    #show the image
    cv.ShowImage("image", image)
    #quit command ESC
    if cv.WaitKey(waitPerFrameInMillisec)==27:
        break
    else:
        cv.WaitKey(waitPerFrameInMillisec) % 0x100
cv.DestroyAllWindows()
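For reference, the same pixel replacement in the modern cv2 API (a sketch under the assumption that the legacy cv2.cv module is unavailable; 'input.avi' is a hypothetical file name, and frames come back as NumPy arrays directly, so no cvMat conversion is needed):
import cv2
import numpy as np

cap = cv2.VideoCapture('input.avi')  # hypothetical input file
green = (0, 255, 0)

while True:
    ret, frame = cap.read()  # frame is already a NumPy BGR array
    if not ret:
        break
    # Replace every pure-black pixel with green
    frame[np.all(frame == [0, 0, 0], axis=2)] = green
    cv2.imshow('image', frame)
    if cv2.waitKey(30) == 27:  # ESC to quit
        break

cap.release()
cv2.destroyAllWindows()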
