Error capturing HC-SR04 sensor with PyMata4 - Python

I'm coding an application that moves a servo motor based on license plate recognition, and it should only close the gate if the sensor distance is greater than 120. The code recognizes the board and turns the motor, but when it reads the sensor to check whether it can turn the motor again, it raises the following error:
Traceback (most recent call last):
File "C:/Users/Star/Desktop/Arquivos Programação Phyton/TCC/app.py", line 48, in <module>
reconhecePlaca()
File "C:/Users/Star/Desktop/Arquivos Programação Phyton/TCC/app.py", line 32, in reconhecePlaca
if(UNO.Sensor().capturaSensor() == True):
File "C:\Users\Star\Desktop\Arquivos Programação Phyton\TCC\arduino.py", line 43, in __init__
Config.__init__(self)
File "C:\Users\Star\Desktop\Arquivos Programação Phyton\TCC\arduino.py", line 10, in __init__
self.board = pymata4.Pymata4()
File "C:\Users\Star\anaconda3\envs\Py38\lib\site-packages\pymata4\pymata4.py", line 235, in __init__
self._find_arduino()
File "C:\Users\Star\anaconda3\envs\Py38\lib\site-packages\pymata4\pymata4.py", line 404, in _find_arduino
raise RuntimeError(f'arduino_instance_id does not match '
RuntimeError: arduino_instance_id does not match a value on the boards.
These are my main classes:
import time
import cv2
import processamento_img as pi
import reconhecimento_caracter as rc
import arduino as UNO


def reconhecePlaca():
    # portao = UNO.Portao()
    # sensor = UNO.Sensor()
    reconhece_caracter = rc.Reconhece()
    webcam = cv2.VideoCapture(0)
    if webcam.isOpened():
        validacao, frame = webcam.read()
        while validacao:
            validacao, frame = webcam.read()
            conts = pi.encontrar_contornos(pi.preProcessamentoContornos(frame))
            for c in conts:
                peri = cv2.arcLength(c, True)
                if peri > 120:
                    aprox = cv2.approxPolyDP(c, 0.03 * peri, True)
                    if len(aprox) == 4:
                        (x, y, alt, larg) = cv2.boundingRect(c)
                        cv2.rectangle(frame, (x, y), (x + alt, y + larg), (0, 255, 0), 2)
                        roi = frame[y + 3:(y + larg) - 3, x + 5:(x + alt) - 5]
                        pi.preProcessamentoPlaca(roi)
                        if (reconhece_caracter.validaPlaca()):
                            UNO.Portao().abrePortao()
                            # print('reconheceu')
                            time.sleep(4)
                            if (UNO.Sensor().capturaSensor() == True):
                                time.sleep(3)
                                UNO.Portao().fechaPortao()
                        else:
                            print("SINAL NÃO ABRE PORTÃO")
            cv2.imshow("SmartEntry", frame)
            key = cv2.waitKey(50)
            if key == 27:
                break
    webcam.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    reconhecePlaca()
The class that controls the Arduino:
import sys
from pymata4 import pymata4
import time


class Config():
    def __init__(self):
        self.pin = 7
        self.triggerPin = 11
        self.echoPin = 12
        self.board = pymata4.Pymata4()
        self.board.set_pin_mode_servo(self.pin)
        self.i = 0
        self.distance = 0


class Portao(Config):
    def __init__(self):
        Config.__init__(self)

    def rotateServo(self, angle):
        self.board.servo_write(self.pin, angle)
        time.sleep(0.003)

    def abrePortao(self):
        try:
            for i in range(self.i, 180):
                self.rotateServo(i)
        except KeyboardInterrupt:
            self.board.shutdown()
            sys.exit(0)

    def fechaPortao(self):
        try:
            for i in range(180, 1, -1):
                self.rotateServo(i)
        except KeyboardInterrupt:
            self.board.shutdown()
            sys.exit()


class Sensor(Config):
    def __init__(self):
        Config.__init__(self)

    def the_callback(self, data):
        self.distance = data[2]

    def capturaSensor(self):
        self.board.set_pin_mode_sonar(self.triggerPin, self.echoPin, self.the_callback)
        while (self.distance <= 120):
            try:
                time.sleep(2)
                self.board.sonar_read(self.triggerPin)
            except Exception:
                self.board.shutdown()
        if (self.distance > 120):
            return True
I'm Brazilian, here are some terms in the code to make your life easier:
Portao = Gate
Sensor = Sensor
abrePortao = openGate
fechaPortao = closeGate
capturaSensor = captureSensor
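One likely cause worth checking (an assumption on my part, not confirmed by the traceback alone): every UNO.Portao() and UNO.Sensor() call runs Config.__init__, which opens a brand-new pymata4.Pymata4() connection while the first one still holds the serial port, so the second connection then fails the arduino_instance_id check. A minimal sketch of opening the connection once and sharing it, keeping the names from arduino.py (the optional com_port argument is a standard Pymata4 parameter):

# arduino.py (sketch): open the Pymata4 connection once and share it
from pymata4 import pymata4


class Config():
    _board = None  # one shared connection for the whole program

    def __init__(self):
        self.pin = 7
        self.triggerPin = 11
        self.echoPin = 12
        if Config._board is None:
            # assumption: let pymata4 autodetect, or pin the port with com_port='COM3'
            Config._board = pymata4.Pymata4()
        self.board = Config._board
        self.board.set_pin_mode_servo(self.pin)
        self.i = 0
        self.distance = 0

In app.py, the commented-out portao = UNO.Portao() and sensor = UNO.Sensor() lines at the top of reconhecePlaca() point in the same direction: create the objects once before the loop and reuse them, instead of constructing new ones for every plate.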

Related

How to fix this red and blue stripe in Python RGBMatrix on Raspberry Pi

Two months ago I ordered 3 RGB matrix panels plus an Adafruit Matrix Bonnet for the Raspberry Pi to set up a matrix display showing my Google Calendar. Suddenly the display shows a big red stripe in the upper half, and I can no longer control the brightness; if the brightness is too low, nothing but the red stripe is visible. I ordered a second copy of the bonnet, hoping I had accidentally destroyed something while soldering or through too-high current from my power supply (currently 5 V, max 0.7 A; it used to be higher, 12 V). At the software level I tried a few different arguments, with no difference.
Here are some images.
At the beginning of the project everything was fine, no stripe, nothing, and then this appeared suddenly.
I use one P4 matrix from Adafruit and one unbranded one from eBay.
Thank you for helping me.
Regards, Anika
I currently use the RGBMatrix lib with:
sudo python3 time.py --led-cols=64 --led-gpio-mapping=adafruit-hat-pwm --led-slowdown-gpio=5 --led-no-drop-privs --led-pwm-bits=1 --led-chain=2 --led-panel-type=FM6126A --led-pixel-mapper=Rotate:180
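(Not from the original post: if you want to rule out samplebase's argument parsing, the same flags can be set programmatically. A minimal sketch, with attribute names as in the rpi-rgb-led-matrix Python bindings; rows = 32 is an assumption, since the command does not set --led-rows.)

from rgbmatrix import RGBMatrix, RGBMatrixOptions

options = RGBMatrixOptions()
options.rows = 32                               # assumption: 64x32 panels
options.cols = 64                               # --led-cols=64
options.chain_length = 2                        # --led-chain=2
options.hardware_mapping = 'adafruit-hat-pwm'   # --led-gpio-mapping=adafruit-hat-pwm
options.gpio_slowdown = 5                       # --led-slowdown-gpio=5
options.pwm_bits = 1                            # --led-pwm-bits=1
options.panel_type = 'FM6126A'                  # --led-panel-type=FM6126A
options.pixel_mapper_config = 'Rotate:180'      # --led-pixel-mapper=Rotate:180
options.drop_privileges = False                 # --led-no-drop-privs

matrix = RGBMatrix(options=options)             # then draw on matrix.CreateFrameCanvas()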
This is the Python code for driving two matrices in a chain:
#!/usr/bin/env python
# Display a runtext with double-buffering.
from samplebase import SampleBase
from rgbmatrix import graphics, RGBMatrix, RGBMatrixOptions
from datetime import datetime, timedelta
import requests, json, time, _thread
##################################################################################################
##################################################################################################
##################################################################################################
class Color():
    GREEN = graphics.Color(0 , 255, 0)
    BLACK = graphics.Color(0 , 0 , 0)
##################################################################################################
##################################################################################################
##################################################################################################
class Pref(SampleBase):
    #TEXT_TIME_REVERSED = False
    #FIRST_EVENT_DETAILS = False
    INLINE_TIME = True
    INLINE_TIME_COL = 6.5
    BEGIN_SECOND_DETAIL = 60
    MINUTE_COUNT_MAX = 60
    MINUTE_BLINK_MAX = 15
    NIGHT_OFF = 22
    DAY_ON = 4
##################################################################################################
##################################################################################################
##################################################################################################
class MatrixCal(SampleBase):
    def __init__(self, *args, **kwargs):
        super(MatrixCal, self).__init__(*args, **kwargs)
        #self.parser.add_argument("-t", "--text", help="The text to scroll on the RGB LED panel", default="Hello world!")

    def run(self):
        #make canvas and graphics
        offscreen_canvas = self.matrix.CreateFrameCanvas()
        secondary_canvas = self.matrix.CreateFrameCanvas()
        #font
        font_height = 8
        font_width = 5
        font = graphics.Font()
        font.LoadFont("../../../fonts/5x8_custom.bdf")
        global data, dt0
        data = {"events":[], "annc":[], "todos":[]}
        data_ttl = 1 #minute
        data_error_retry = 10 #minutes
        #some vars
        format_date_time_second = '%Y-%m-%dT%H:%M:%S'
        format_time = "%H:%M"
        pos = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        skip_next_event = False
        screen_width = offscreen_canvas.width
        screen_height = offscreen_canvas.height
        screen_main = screen_width
        if Pref.INLINE_TIME: screen_main = screen_width - font_width*Pref.INLINE_TIME_COL
        tlen = 0
        f_dub_ev = False
        f_gehen_ev = False
        f_no_blink = False

        # DEF ###################################################################################
        def drawText(_x, _y, _text, _color=Color.GREEN, _font=font):
            return graphics.DrawText(offscreen_canvas, _font, _x, _y,_color, _text)

        def drawLine(_x, _y, _w, _h, _color):
            graphics.DrawLine(offscreen_canvas, _x, _y, _w, _h, _color)

        def clearArea(x, y, w, h, _color=Color.BLACK):
            for j in range(0, h):
                graphics.DrawLine(offscreen_canvas, x, y+j, x+w-1, y+j, _color)

        def showMinuteCount(time_diff, ev):
            #show minutes count
            if dt_start>dt0 and (time_diff<=Pref.MINUTE_COUNT_MAX or ('description' in ev and "display_type_duration" in ev['description'])):
                if sec%2==0 and time_diff<=Pref.MINUTE_BLINK_MAX and not f_no_blink :
                    return ""
                else:
                    if time_diff==0:return "jetzt"
                    else: return "ʼn{: >2d}mʼn".format(time_diff)
            elif dt_start<dt0 or ('description' in ev and "display_type_duration" in ev['description']):
                time_diff = int((dt_end-dt0).seconds/60)
                if time_diff<=Pref.MINUTE_COUNT_MAX:
                    return str(time_diff) + "mʼn"
                else:
                    if "schlafen" in ev['summary']:
                        return "noch " + str(dt_end-dt0)[0:4] +"h"
                    return "-"+dt_end.strftime("%H:%M")
            else:
                return dt_start.strftime("%H:%M")

        def getData(a):
            #api access
            deploment_id = "THATS_CONFIDENTIAL"
            access_token = "THAT_TOO"
            url = "https://script.google.com/macros/s/"+deploment_id+"/exec?token="+access_token
            next_data_load = datetime.now();
            global data
            while True:
                #load data from api
                dt0 = datetime.now()
                if(dt0 > next_data_load):
                    print(dt0.strftime("%H:%M:%S") + " aquiring new data. next in " + str(data_ttl)+" Minute")
                    try:
                        r = requests.get(url)
                        r.raise_for_status()
                        data = json.loads(r.text)['data']
                    except requests.exceptions.HTTPError as errh:
                        print ("Http Error:",errh)
                        data['annc'].append({"type":"warning", "text":"HTTP " + str(errh.response.status_code) + " - Es gab ein Problem beim empfangen der Daten"})
                    except requests.exceptions.ConnectionError as errc:
                        print ("Error Connecting:",errc)
                        data['annc'].append({"type":"warning", "text":"Es gab ein Problem bei der Verbindung"})
                    except requests.exceptions.Timeout as errt:
                        print ("Timeout Error:",errt)
                        data['annc'].append({"type":"warning", "text":"Zeitüberschreitung beim laden der Daten"})
                    except requests.exceptions.RequestException as err:
                        print ("OOps: Something Else",err)
                        data['annc'].append({"type":"warning", "text":"Es gab ein Problem beim laden der Daten"})
                    except Exception as err:
                        print ("OOps: Something unexpected happend",err)
                    finally:
                        next_data_load = datetime.now() + timedelta(minutes=data_ttl);
                #end if
                time.sleep(1)
            #end while
        #end def getData

        #def getWeather():
        #Nachmittags wird es bewölkt. 🌡 min 13°C, max 23°C, ⌀ 20°C
        #########################################################################################
        #start data thread
        _thread.start_new_thread(getData,(99,))
        #data thread
        while True:
            offscreen_canvas.Clear()
            if len(data['todos'])>0:screen_main=64
            else: screen_main = screen_width
            #determine whether main screen is large enough to display events with inline time
            Pref.INLINE_TIME=screen_main>64;
            #current date and time
            dt0 = datetime.now()
            sec = int(datetime.now().strftime("%S"))
            hour = int(datetime.now().strftime("%H"))
            ms = int(datetime.now().strftime("%f")[:-3])
            #night-time brightness shutdown
            #if False and Pref.NIGHT_OFF <= hour or hour < Pref.DAY_ON: self.matrix.brightness = 10
            #else:
            #self.matrix.brightness = 80
            #reset screen vars
            screen_y = font_height-1
            detail_line = not Pref.INLINE_TIME
            #print max 4 independent lines
            index = 0
            line = 0
            #when no data available dim screen and only print time
            if len(data['events'])==0:
                #self.matrix.brightness = 10
                graphics.DrawText(offscreen_canvas, font, screen_width-( 5 *font_width), font_height-1 ,Color.GREEN, datetime.now().strftime("%H:%M"))
            else:
                for todo in data['todos']:
                    t_title = todo['title']
                    if index==0:
                        clearArea(screen_main, 0, screen_width-screen_main, 8, Color.GREEN)
                        graphics.DrawText(offscreen_canvas, font, screen_width-( 12 *font_width), font_height-1 ,Color.BLACK, datetime.now().strftime("%d.%m. %H:%M"))
                        screen_y += font_height
                    #runtext optimization
                    if len(t_title)*font_width>screen_main-16: t_title = t_title+" "+t_title
                    else: pos[index+4] = screen_main + font_width+2
                    #display todo title
                    tlen = drawText(pos[index+4], screen_y, t_title)
                    clearArea(0, 8, screen_main+8, 32)
                    #move line if it is longer than the screen
                    if tlen > screen_main-16:
                        pos[index+4] -= 1
                        if (tlen/2-(font_width))*-1+screen_main+8> pos[index+4]:
                            pos[index+4] = screen_main + font_width+12
                    #graphics.DrawText(secondary_canvas, font, pos[index+4], screen_y ,Color.GREEN, todo['title'])
                    #graphics.DrawText(offscreen_canvas, font, screen_main+2, 15 ,Color.GREEN, "-")
                    graphics.DrawText(offscreen_canvas, font, screen_main+2, 15 ,Color.GREEN, "-")
                    if len(data['todos'])>1:graphics.DrawText(offscreen_canvas, font, screen_main+2, 23 ,Color.GREEN, "-")
                    if len(data['todos'])>2:graphics.DrawText(offscreen_canvas, font, screen_main+2, 31 ,Color.GREEN, "-")
                    drawLine(screen_main, 0, screen_main, screen_height, Color.GREEN)
                    screen_y += font_height
                    t_title = ""
                    index +=1
                    if screen_y>32:break
                #end for todo
                if len(data['todos'])>0:
                    clearArea(0, screen_main, screen_width-screen_main, screen_height)
                #reset screen_y
                screen_y = font_height-1
                index =0
                for ev in data['events']:
                    #events params
                    dt_start = datetime.strptime(ev['begin'][0:18], format_date_time_second)
                    dt_end = datetime.strptime(ev['end'][0:18], format_date_time_second)
                    time_diff = int((dt_start-dt0).seconds/60)
                    ev_summary = ev['summary']
                    #skip ended events or when event is from main cal and dark blue
                    if dt_end<=dt0 or time_diff==0 or ev_summary.startswith('#') or ('color' in ev and ev['color'] == 9): continue
                    #parallel events
                    b0 = data['events'][index]['begin']==data['events'][index+1]['begin']
                    b1 = (datetime.strptime(data['events'][index]['begin'][0:18], format_date_time_second)<dt0 and datetime.strptime(data['events'][index+1]['begin'][0:18], format_date_time_second)<dt0)
                    if (b0 or b1) and data['events'][index]['end']==data['events'][index+1]['end']:
                        f_dub_ev = True
                        index +=1 #skip ev
                        continue
                    #event details
                    if f_dub_ev: ev_summary = data['events'][index-1]['summary'] + " & " + ev_summary;
                    #skip gehen event and attach it to next event
                    if ev_summary.replace(">", "")=="gehen":
                        f_gehen_ev = True
                        data['events'][index+1]['travel_start'] = ev['begin']
                        continue
                    if ev_summary.startswith("-"):f_no_blink = True
                    #replace control chars
                    if ev_summary.startswith(">"):
                        #minute_blink_max = 15
                        ev_summary = ev_summary[1:len(ev_summary)]
                    ev_summary = ev_summary.replace(">>", "")
                    ev_summary = ev_summary.replace("schlafen>", "schlafen")
                    if f_no_blink:ev_summary = ev_summary[1:len(ev_summary)]
                    #runtext optimization
                    if len(ev_summary)*font_width>screen_main and (line>1 and f_dub_ev): ev_summary = ev_summary+" "+ev_summary
                    else: pos[line] = 0
                    #display main
                    tlen = drawText(pos[line], screen_y, ev_summary)
                    #move line if it is longer than the screen
                    if tlen > screen_main and (detail_line or Pref.INLINE_TIME):
                        pos[line] -= 1
                        if (tlen/2-(font_width))*-1>pos[line]:
                            pos[line] = font_width+4
                    #calculate time difference
                    # show second detail line when the time has come
                    if time_diff<=Pref.BEGIN_SECOND_DETAIL or dt_start<dt0 and dt_end>dt0: detail_line=not Pref.INLINE_TIME
                    if Pref.INLINE_TIME:
                        x = screen_width-( Pref.INLINE_TIME_COL *font_width)
                        clearArea(x, screen_y-7, Pref.INLINE_TIME_COL*font_width, font_height)
                        text = showMinuteCount(time_diff, ev)
                        x = screen_width-( len(text) *font_width)
                        drawText (x, screen_y, text)
                    if detail_line or Pref.INLINE_TIME:
                        if detail_line:
                            detail_line = False
                            screen_y += font_height
                            text = showMinuteCount(time_diff, ev)
                        if 'travel_start' in ev:
                            if Pref.INLINE_TIME: screen_y += font_height
                            blink_go = 15
                            if data['events'][index-1]['summary']=='>>>gehen': blink_go = 60
                            dt_travel = datetime.strptime(ev['travel_start'][0:18], format_date_time_second)
                            travel_diff = int((dt_travel-dt0).seconds/60)
                            hz=sec%20<=9
                            if travel_diff<=15: hz=sec%2==0;
                            #elif travel_diff<15: hz=sec%4<=1;
                            if Pref.INLINE_TIME:text = ""
                            else: text = " >"+text
                            if travel_diff <= Pref.MINUTE_COUNT_MAX:
                                if hz and travel_diff<=blink_go :
                                    #text = text
                                    if not Pref.INLINE_TIME:text = "gehen" + text
                                #alternate blinking "gehen" with time/"jetzt"
                                elif not Pref.INLINE_TIME:
                                    if travel_diff==0: text="jetzt" + text
                                    else: text= str(travel_diff) + "mʼn" + text
                                #alternate blinking "gehen" with time/"jetzt gehen" and a blank line
                                elif Pref.INLINE_TIME:
                                    if travel_diff<=0 : text="jetzt gehen"
                                    else: text= "gehen ʼn{: >2d}mʼn".format(travel_diff)
                            elif not Pref.INLINE_TIME:
                                text = dt_travel.strftime("%H:%M") + text
                            else:
                                text = "gehen "+dt_travel.strftime("%H:%M")
                        #draw the text to the line
                        tl = int(len(text))
                        drawText( screen_main-( tl *font_width), screen_y, text)
                    #end if detail_line or Pref.INLINE_TIME:
                    f_dub_ev = False
                    f_gehen_ev = False
                    f_no_blink = False
                    #incrementation & loop break conditions
                    index +=1
                    line +=1
                    screen_y += font_height
                    if screen_y>screen_height:break
                #end for
            #end else of events len == 0
            #annc = [{"type":"warning", "text":"Es dd"}]
            #runtext announcement
            if len(data['annc'])>0 and False:
                #clear last line
                clearArea(0, screen_height-font_height, screen_width, 8)
                #get text, double it for smooth runtext animation
                annc_text = data['annc'][0]['text']
                annc_text = annc_text+" "+annc_text
                alen = len(annc_text)*font_width
                pos[-1] -= 1
                if (alen/2-(2*font_width))*-1>pos[-1]:
                    pos[-1] = 2*font_width+4
                graphics.DrawText(offscreen_canvas, font, pos[-1], screen_height-1 ,Color.GREEN, annc_text)
                clearArea(0, screen_height-font_height, 8, 8)
                #graphics.DrawText(offscreen_canvas, font, 0, screen_height-1 ,Color.GREEN, "ⓘ")
                graphics.DrawText(offscreen_canvas, font, 0, screen_height-1 ,Color.GREEN, "⚠")
            #offscreen_canvas = self.matrix.SwapOnVSync(secondary_canvas)
            offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)
            time.sleep(0.07)
        #end while
    #end def run(self)

# Main function
if __name__ == "__main__":
    mcal = MatrixCal()
    if (not mcal.process()):
        mcal.print_help()

Click automatically with object detection by pixel color

My project uses object detection by pixel color to identify some things, and I would like the mouse to click the left button automatically when the identified object passes under my cursor. How do I do that, preferably using mouse_event?
import time
import win32api
from win32gui import GetDC
from PIL import ImageGrab
import numpy as np
import overlay


def filter_detection_result(matrix, result):
    mask = np.isin(matrix, result)
    detection_matrix = np.zeros(matrix.shape, dtype=int)
    np.place(detection_matrix, mask, result)
    return detection_matrix


def rgb_2_int(rgb):
    rgb_int = rgb[0]
    rgb_int = (rgb_int << 8) + rgb[1]
    rgb_int = (rgb_int << 8) + rgb[2]
    return rgb_int


def img_2_matrix(img):
    rgb_matrix = np.asarray(img).reshape((radius * 2) ** 2, 3)
    int_matrix = np.array(list(map(rgb_2_int, rgb_matrix)))
    return int_matrix.reshape((radius * 2), (radius * 2))


def detection(field_of_view):
    matrix = img_2_matrix(ImageGrab.grab(field_of_view))
    result = np.asarray(np.intersect1d(matrix, config))
    if result.size >= 120:
        detected_matrix = filter_detection_result(matrix, result)
        i, j = np.where(detected_matrix != 0)
        print(i)
        _overlay.create_box(fov, win32api.RGB(255, 255, 255))


_overlay = overlay.Overlay(GetDC(0))
radius = 20
try:
    config = np.loadtxt('config.txt', dtype=int, delimiter='\n')
    print('[+] loaded')
except Exception as err:
    print(err)

while True:
    x, y = win32api.GetCursorPos()
    fov = (x - radius, y - radius, x + radius, y + radius)
    detection(fov)
    time.sleep(.15)
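A hedged sketch of the clicking part, using win32api.mouse_event as requested; where to call it is an assumption on my part (for example inside detection()'s result.size >= 120 branch, since the grabbed region is already centred on the cursor):

import win32api
import win32con


def click_left():
    # press and release the left mouse button at the current cursor position
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)


# e.g. inside detection():
#     if result.size >= 120:
#         click_left()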

I need help: TypeError: Fishing_Location() missing 1 required positional argument: 'self'

#imports
from pynput.mouse import Button, Controller
import cv2 as cv
import time
import mss
import pyaudio
import math
import struct
import numpy as np
import time
import pyautogui
from sshkeyboard import listen_keyboard, stop_listening

mouse = Controller()

#variables
Threshold = 30
SHORT_NORMALIZE = (1.0/32768.0)
chunk = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
swidth = 2
bait=0
bait_total=0
FishingSpots = []
fishing_location1=[]
fishing_location2=[]
fishing_location3=[]


def press(key):
    if key == "1":
        FishingSpots.append(mouse.position)
        time.sleep(1)
        print(FishingSpots)
        if len(FishingSpots)>=3:
            fishing_location1, fishing_location2,fishing_location3 = [FishingSpots[i] for i in (0, 1, 2)]
            print('location taken')
            stop_listening()


listen_keyboard(on_press=press)


#mouse
def mouse_down():
    mouse.press(Button.left)


def mouse_up():
    mouse.release(Button.left)


class Fishing:
    @staticmethod
    def Fishing_Location(self):
        while 1:
            pos=0
            if pos==0:
                mouse.position = (fishing_location1)
                mouse_down()
                time.sleep(1)
                mouse_up()
                pos=pos+1
                break
            if pos==1:
                mouse.position = (fishing_location2)
                mouse_down()
                time.sleep(1)
                mouse_up()
                pos=pos+1
                break
            if pos==3:
                mouse.position = (fishing_location3)
                mouse_down()
                time.sleep(1)
                mouse_up()
                break

    def rms(frame):
        count = len(frame) / swidth
        format = "%dh" % (count)
        shorts = struct.unpack(format, frame)
        sum_squares = 0.0
        for sample in shorts:
            n = sample * SHORT_NORMALIZE
            sum_squares += n * n
        rms = math.pow(sum_squares / count, 0.5)
        return rms * 1000

    def __init__(self):
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=pyaudio.paInt16,
                                  channels=CHANNELS,
                                  rate=RATE,
                                  input=True,
                                  output=True,
                                  frames_per_buffer=chunk)

    def listen(self):
        while True:
            input = self.stream.read(chunk)
            rms_val = self.rms(input)
            print(rms_val)
            if rms_val > Threshold:
                mouse_down()
                time.sleep(0.1)
                mouse_up()
                break

    def minigame():
        mouse_down()
        while 1:
            #minigame
            needle_img = cv.imread('bob.png', cv.IMREAD_GRAYSCALE)
            method = cv.TM_CCOEFF_NORMED
            #screenshot full screen
            with mss.mss() as sct:
                region = {'top': 380, 'left': 420, 'width': 172, 'height': 25}
                fullscreen = sct.grab(region)
                fullscreen=np.array(fullscreen)
                fullscreen = cv.cvtColor(fullscreen, cv.COLOR_BGR2GRAY)
            resultfull = cv.matchTemplate(fullscreen, needle_img, method)
            min_val, max_val, min_loc, max_loc = cv.minMaxLoc(resultfull)
            #minigame function
            threshold = 0.7
            if max_loc >= (137,6) and max_val>=0.7:
                mouse_up()
            elif max_val<= threshold:
                time.sleep(2)
                break
            else:
                mouse_down()


fish=Fishing()
max_time = 300
start_time = time.time()
while (time.time() - start_time) < max_time:
    print('Fishing Started 1')
    fish.Fishing_Location()
    print('fish threw 2')
    time.sleep(0.5)
    print('starting audio detection')
    fish.listen()
    ('fish located')
    fish.minigame()
    bait=bait+1
    bait_total=bait_total+1
    if bait >= 10:
        pyautogui.keyDown('1')
        time.sleep(1)
        pyautogui.keyUp('1')
        print('bait used')
        bait=bait-10
    if bait_total>=100:
        pyautogui.keyDown('i')
        time.sleep(0.5)
        pyautogui.rightClick()  # need to add position
        time.sleep(0.5)
        pyautogui.keyUp('i')
        bait_total=bait_total-100
    else:
        time.sleep(5)
        print('food used')
        pyautogui.keyDown('2')
        time.sleep(1)
        pyautogui.keyUp('2')
I am getting this error: TypeError: Fishing_Location() missing 1 required positional argument: 'self'. Please help.
In the provided code Fishing_Location is a static method. Static methods in Python don't take self as the first argument. You need to either remove the @staticmethod decorator or remove the self argument from the method, i.e. def Fishing_Location().
When you call fish.Fishing_Location() in the while loop you're not passing any value. However, Fishing_Location is a static method with a self parameter declared and never used, so the call raises a TypeError because nothing is supplied for self.
So basically all you need to do is remove self from the Fishing_Location method.
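A minimal sketch of the fix described above, runnable on its own (the coordinates are placeholders; the question's version would use fishing_location1 and the mouse_down/mouse_up helpers):

import time
from pynput.mouse import Button, Controller

mouse = Controller()


class Fishing:
    @staticmethod
    def Fishing_Location():          # static method: no self parameter
        mouse.position = (100, 200)  # placeholder coordinates
        mouse.press(Button.left)
        time.sleep(1)
        mouse.release(Button.left)

    # Alternatively, drop @staticmethod and keep self:
    # def Fishing_Location(self): ...


Fishing.Fishing_Location()  # works with @staticmethod; with self you would call Fishing().Fishing_Location()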

Convert Output Thermal Camera to RGB in OpenCV

I'm working with a Lepton 2.5 thermal camera. When I run the code below, the output is grayscale (attached image 1).
Maybe somebody can help me adjust this code so the output is in RGB color (attached image 2). I really appreciate any assistance. Sorry if I'm not very competent in Python or OpenCV; I try my best.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from uvctypes import *
import time
import cv2
import numpy as np
try:
    from queue import Queue
except ImportError:
    from Queue import Queue
import platform

BUF_SIZE = 2
q = Queue(BUF_SIZE)


def py_frame_callback(frame, userptr):
    array_pointer = cast(frame.contents.data, POINTER(c_uint16 * (frame.contents.width * frame.contents.height)))
    data = np.frombuffer(
        array_pointer.contents, dtype=np.dtype(np.uint16)
    ).reshape(
        frame.contents.height, frame.contents.width
    )  # no copy

    # data = np.fromiter(
    #     frame.contents.data, dtype=np.dtype(np.uint8), count=frame.contents.data_bytes
    # ).reshape(
    #     frame.contents.height, frame.contents.width, 2
    # )  # copy

    if frame.contents.data_bytes != (2 * frame.contents.width * frame.contents.height):
        return

    if not q.full():
        q.put(data)


PTR_PY_FRAME_CALLBACK = CFUNCTYPE(None, POINTER(uvc_frame), c_void_p)(py_frame_callback)


def ktof(val):
    return (1.8 * ktoc(val) + 32.0)


def ktoc(val):
    return (val - 27315) / 100.0


def raw_to_8bit(data):
    cv2.normalize(data, data, 0, 65535, cv2.NORM_MINMAX)
    np.right_shift(data, 8, data)
    return cv2.cvtColor(np.uint8(data), cv2.COLOR_GRAY2RGB)


def display_temperature(img, val_k, loc, color):
    val = ktoc(val_k)  # select between ktoc or ktof
    cv2.putText(img, "{0:.1f} Celcius".format(val), loc, cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
    x, y = loc
    cv2.line(img, (x - 2, y), (x + 2, y), color, 1)
    cv2.line(img, (x, y - 2), (x, y + 2), color, 1)


def main():
    ctx = POINTER(uvc_context)()
    dev = POINTER(uvc_device)()
    devh = POINTER(uvc_device_handle)()
    ctrl = uvc_stream_ctrl()

    res = libuvc.uvc_init(byref(ctx), 0)
    if res < 0:
        print("uvc_init error")
        exit(1)

    try:
        res = libuvc.uvc_find_device(ctx, byref(dev), PT_USB_VID, PT_USB_PID, 0)
        if res < 0:
            print("uvc_find_device error")
            exit(1)

        try:
            res = libuvc.uvc_open(dev, byref(devh))
            if res < 0:
                print("uvc_open error")
                exit(1)

            print("device opened!")

            # device format
            print_device_info(devh)
            print_device_formats(devh)

            frame_formats = uvc_get_frame_formats_by_guid(devh, VS_FMT_GUID_Y16)
            if len(frame_formats) == 0:
                print("device does not support Y16")
                exit(1)

            libuvc.uvc_get_stream_ctrl_format_size(devh, byref(ctrl), UVC_FRAME_FORMAT_Y16,
                frame_formats[0].wWidth, frame_formats[0].wHeight, int(1e7 / frame_formats[0].dwDefaultFrameInterval)
            )

            res = libuvc.uvc_start_streaming(devh, byref(ctrl), PTR_PY_FRAME_CALLBACK, None, 0)
            if res < 0:
                print("uvc_start_streaming failed: {0}".format(res))
                exit(1)

            try:
                while True:
                    data = q.get(True, 500)
                    if data is None:
                        break
                    data = cv2.resize(data[:, :], (640, 480))
                    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(data)
                    img = raw_to_8bit(data)
                    display_temperature(img, minVal, minLoc, (255, 0, 0))
                    display_temperature(img, maxVal, maxLoc, (0, 0, 255))
                    cv2.imshow('Lepton Radiometry', img)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                cv2.destroyAllWindows()
            finally:
                libuvc.uvc_stop_streaming(devh)

            print("done")
        finally:
            libuvc.uvc_unref_device(dev)
    finally:
        libuvc.uvc_exit(ctx)


if __name__ == '__main__':
    main()
This is the sample output as a grayscale image.
This is the sample output as an RGB image (produced with different code).
Thanks to Peter Gibson.
You can find the answer at
https://learnopencv.com/applycolormap-for-pseudocoloring-in-opencv-c-python/
You just have to take the final image variable that gets shown, pass it through
im_color = cv2.applyColorMap(img, cv2.COLORMAP_JET)
and send im_color to cv2.imshow.
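A short, self-contained demo of the same idea (cv2.applyColorMap maps an 8-bit image to a pseudocolor image); in the question's loop the same call would sit between raw_to_8bit() and cv2.imshow():

import cv2
import numpy as np

# fake grayscale frame: a horizontal gradient, just to show the mapping
gray = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
im_color = cv2.applyColorMap(gray, cv2.COLORMAP_JET)  # grayscale -> JET pseudocolor

# In the question's code (hypothetical placement):
#   img = raw_to_8bit(data)
#   im_color = cv2.applyColorMap(img, cv2.COLORMAP_JET)
#   cv2.imshow('Lepton Radiometry', im_color)

cv2.imshow('JET demo', im_color)
cv2.waitKey(0)
cv2.destroyAllWindows()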

Error while testing the handwritten recognition project

This is the code for testing handwritten character recognition in OpenCV Python using KNN.
import cv2
import numpy as np
import operator
import os

MIN_CONTOUR_AREA = 100
RESIZED_IMAGE_WIDTH = 20
RESIZED_IMAGE_HEIGHT = 30


class ContourWithData():
    npaContour = None
    boundingRect = None
    intRectX = 0
    intRectY = 0
    intRectWidth = 0
    intRectHeight = 0
    fltArea = 0.0

    def calculateRectTopLeftPointAndWidthAndHeight(self):
        [intX, intY, intWidth, intHeight] = self.boundingRect
        self.intRectX = intX
        self.intRectY = intY
        self.intRectWidth = intWidth
        self.intRectHeight = intHeight

    def checkIfContourIsValid(self):
        if self.fltArea < MIN_CONTOUR_AREA: return False
        return True


def main():
    allContoursWithData = []
    validContoursWithData = []

    try:
        npaClassifications = np.loadtxt("classifications1.txt", np.float32)
    except:
        print "error, unable to open classifications.txt, exiting program\n"
        os.system("pause")
        return
    # end try

    try:
        npaFlattenedImages = np.loadtxt("flattened_images1.txt", np.float32)
    except:
        print "error, unable to open flattened_images.txt, exiting program\n"
        os.system("pause")
        return
    # end try

    npaClassifications = npaClassifications.reshape((npaClassifications.size, 1))

    kNearest = cv2.ml.KNearest_create()
    kNearest.train(npaFlattenedImages, cv2.ml.ROW_SAMPLE, npaClassifications)

    imgTestingNumbers = cv2.imread("count.png")
    if imgTestingNumbers is None:
        print "error: image not read from file \n\n"
        os.system("pause")
        return
    # end if

    imgGray = cv2.cvtColor(imgTestingNumbers, cv2.COLOR_BGR2GRAY)
    imgBlurred = cv2.GaussianBlur(imgGray, (5,5), 0)
    imgThresh = cv2.adaptiveThreshold(imgBlurred,
                                      255,
                                      cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                      cv2.THRESH_BINARY_INV,
                                      11,
                                      2)
    imgThreshCopy = imgThresh.copy()

    imgContours, npaContours, npaHierarchy = cv2.findContours(imgThreshCopy,
                                                              cv2.RETR_EXTERNAL,
                                                              cv2.CHAIN_APPROX_SIMPLE)

    for npaContour in npaContours:
        contourWithData = ContourWithData()
        contourWithData.npaContour = npaContour
        contourWithData.boundingRect = cv2.boundingRect(contourWithData.npaContour)
        contourWithData.calculateRectTopLeftPointAndWidthAndHeight()
        contourWithData.fltArea = cv2.contourArea(contourWithData.npaContour)
        allContoursWithData.append(contourWithData)
    # end for

    for contourWithData in allContoursWithData:
        if contourWithData.checkIfContourIsValid():
            validContoursWithData.append(contourWithData)
        # end if
    # end for

    validContoursWithData.sort(key = operator.attrgetter("intRectX"))

    strFinalString = ""

    for contourWithData in validContoursWithData:
        cv2.rectangle(imgTestingNumbers,
                      (contourWithData.intRectX, contourWithData.intRectY),
                      (contourWithData.intRectX + contourWithData.intRectWidth, contourWithData.intRectY + contourWithData.intRectHeight),
                      (0, 255, 0),
                      2)
        imgROI = imgThresh[contourWithData.intRectY : contourWithData.intRectY + contourWithData.intRectHeight,
                           contourWithData.intRectX : contourWithData.intRectX + contourWithData.intRectWidth]
        imgROIResized = cv2.resize(imgROI, (RESIZED_IMAGE_WIDTH, RESIZED_IMAGE_HEIGHT))
        npaROIResized = imgROIResized.reshape((1, RESIZED_IMAGE_WIDTH * RESIZED_IMAGE_HEIGHT))
        npaROIResized = np.float32(npaROIResized)
        retval, npaResults, neigh_resp, dists = kNearest.findNearest(npaROIResized, k = 1)
        strCurrentChar = str(chr(int(npaResults[0][0])))
        strFinalString = strFinalString + strCurrentChar
    # end for

    print "\n" + strFinalString + "\n"

    cv2.imshow("imgTestingNumbers", imgTestingNumbers)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    return


if __name__ == "__main__":
    main()
# end if
And I'm getting the following error:
Traceback (most recent call last):
File "C:\Users\malmad\Desktop\cip\Testing1.py", line 141, in
main()
File "C:\Users\malmad\Desktop\cip\Testing1.py", line 60, in main
kNearest.train(npaFlattenedImages, cv2.ml.ROW_SAMPLE, npaClassifications)
error: C:\builds\master_PackSlaveAddon-win32-vc12-static\opencv\modules\ml\src\data.cpp:290: error: (-215) (layout == ROW_SAMPLE && responses.rows == nsamples) || (layout == COL_SAMPLE && responses.cols == nsamples) in function cv::ml::TrainDataImpl::setData
How should I proceed? Sorry for the bad indentation! I'm new to Stack Overflow.
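The assertion in the error says the number of response rows must equal the number of training samples. One hedged way to confirm the two text files are out of sync is to print the shapes before calling train (same file names as in the code above; the assert mirrors the check that cv2.ml enforces):

import numpy as np

# Load the training data the same way the script does and compare row counts.
npaClassifications = np.loadtxt("classifications1.txt", np.float32)
npaFlattenedImages = np.loadtxt("flattened_images1.txt", np.float32)
npaClassifications = npaClassifications.reshape((npaClassifications.size, 1))

print("flattened images: {}".format(npaFlattenedImages.shape))
print("classifications:  {}".format(npaClassifications.shape))

# KNearest.train(..., cv2.ml.ROW_SAMPLE, responses) needs one label per sample row;
# if the counts differ, regenerate both files together from the same training run.
assert npaFlattenedImages.shape[0] == npaClassifications.shape[0]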
