I am new here and also new to Python (^_^')
I have a question about my code.
My code runs in an infinite loop: when it finds a red pixel in a saved screenshot, it sends me a message. It works the first time a red pixel appears, but if I test another red pixel, or delete the last detected red pixel and then reuse it, the code stops with this error:
Warning (from warnings module):
File "C:\Users\Desktop\DCN.py", line 126
comparison_dcn = check_dcn == control_dcn
DeprecationWarning: elementwise comparison failed; this will raise an error in the future.
Traceback (most recent call last):
File "C:\Users\Desktop\DCN.py", line 127, in <module>
equal_dcn= comparison_dcn.all()
AttributeError: 'bool' object has no attribute 'all'
My idea was to create a NumPy array to store the coordinates (x, y) and check whether they already exist in that array, so the same pixel is not reported to me twice...
I tried to figure out the problem, but it is beyond my Python experience so far...
I hope my English is understandable XD
Can someone kindly help me with my code and explain my issue?
# libraries
import mss
import mss.tools
from PIL import Image
import psutil
import time
import cv2
import numpy as np

# global variables
loop = 1
check_dcn = np.column_stack((0, 0))
counter_dcn = 0

while loop == 1:
    # detect red pixel
    def detect_color(rgb, filename):
        img = Image.open(filename)
        img = img.convert('RGBA')
        data = img.getdata()
        for item in data:
            if item[0] == rgb[0] and item[1] == rgb[1] and item[2] == rgb[2]:
                return True
        return False

    with mss.mss() as sct:
        # The screen part to capture
        monitor = {"top": 190, "left": 0, "width": 1920, "height": 840}
        output = "example.png".format(**monitor)
        # Grab the data
        sct_img = sct.grab(monitor)
        # Save to the picture file
        mss.tools.to_png(sct_img.rgb, sct_img.size, output=output)

    print(detect_color((255, 102, 102), 'example.png'))  # dcn red pixel

    # dcn alarm detected
    if detect_color((255, 102, 102), 'example.png'):
        pixel_img = cv2.imread('example.png')
        pop = [102, 102, 255]  # BGR order
        X, Y = np.where(np.all(pixel_img == pop, axis=2))  # coordinates
        control_dcn = np.column_stack((X, Y))  # assign coordinates
        print(control_dcn)  # test
        if counter_dcn == 0:
            counter_dcn = 1
            check_dcn = control_dcn
            print("first round dcn")  # test
            print(check_dcn)  # test
        ### looking for a solution here to empty comparison_dcn
        comparison_dcn = check_dcn == control_dcn
        equal_dcn = comparison_dcn.all()
        if equal_dcn:
            print("red pixel already reported, waiting 20 seconds")  # test
            time.sleep(20)
        else:
            check_dcn = np.column_stack((X, Y))
            print("red pixel added, waiting 5 seconds")  # test
            print(check_dcn)  # test
            time.sleep(5)
    else:
        print("Nothing, waiting 10 seconds")
        time.sleep(10)
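For context on the quoted error (a sketch of the NumPy behaviour involved, not a rework of the script): when two arrays cannot be broadcast against each other, == returns a plain Python bool instead of an element-wise array, and a bool has no .all(); that is exactly the DeprecationWarning and AttributeError above. Using np.array_equal, or tracking already-reported pixels as a set of tuples, avoids the problem. The coordinate values below are made up for illustration:

import numpy as np

# Two previously reported pixels vs. three newly detected pixels:
check_dcn = np.array([[10, 30], [20, 40]])              # shape (2, 2)
control_dcn = np.array([[10, 30], [20, 40], [50, 60]])  # shape (3, 2)

# Shapes cannot be broadcast, so this yields a plain bool (with the
# DeprecationWarning shown above), and calling .all() on it then fails
comparison = check_dcn == control_dcn

# np.array_equal compares shape and contents without that pitfall
print(np.array_equal(check_dcn, control_dcn))           # False

# To check whether a single (x, y) pair was already reported, a set of
# tuples avoids whole-array comparisons entirely:
reported = {tuple(row) for row in check_dcn}
print((10, 30) in reported)                             # True
print((50, 60) in reported)                             # False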
I'm coding a Python program to automate image processing tasks (thresholding images, detecting borders, optical flow, etc.) using PySimpleGUI, OpenCV and NumPy. However, when I run the full program and press the button that calls the border-detection function from the graphical interface, only a black box appears, even though running the border-detection function on its own works without any problem. Here is the code of the border-detection function:
def correl_Sobel(image_filename=None):
    image = cv2.imread(image_filename)
    img_gray = (image[:,:,0]/3 + image[:,:,1]/3 + image[:,:,2]/3)
    img_th = img_gray
    img_th[img_th < 120] = 0
    img_th[img_th != 0] = 255
    img_th = abs(img_th)
    vertFilter = [[-1,-2,-1],[0,0,0],[1,2,1]]
    horiFilter = [[-1,0,1],[-2,0,2],[-1,0,1]]
    img_edges = np.zeros_like(image)
    n, m = img_gray.shape
    for x in range(3, n-2):
        for y in range(3, m-2):
            pixels = img_th[x-1:x+2, y-1:y+2]
            verticalPixels = vertFilter*pixels
            vertScore = (verticalPixels.sum()+4)/8
            horizontalPixels = horiFilter*pixels
            horiScore = (horizontalPixels.sum()+4)/8
            # formula
            edgeScore = (vertScore**2 + horiScore**2)**0.5
            # three-layered images
            img_edges[x,y] = [edgeScore]*3
    # pixel values / maximum value
    img_edges = img_edges/img_edges.max()
    return img_edges
And here is the code for the GUI and button implementation:
prev_filename = correlated = thresholded = convolution = None
while True:
    # read window
    event, values = window.read()
    if event == '-EXIT-' or event == sg.WIN_CLOSED:
        break
    if event == '-FOLDER-':
        folder = values['-FOLDER-']
        img_types = (".png", ".jpg", "jpeg", ".tiff", ".bmp")
        try:
            flist0 = os.listdir(folder)
        except:
            continue
        fnames = [f for f in flist0 if os.path.isfile(
            os.path.join(folder, f)) and f.lower().endswith(img_types)]
        # update file directory
        window['-FILE LIST-'].update(fnames)
    # Select a file
    elif event == '-FILE LIST-':
        try:
            filename = os.path.join(values['-FOLDER-'], values['-FILE LIST-'][0])
            image = cv2.imread(filename)
            window['-IN-'].update(data=cv2.imencode('.png', image)[1].tobytes())
            window['-OUT-'].update(data='')
        except:
            continue
    # edge detection button
    elif event == '-CONV-':
        try:
            if values['-FILE LIST-']:
                filename = os.path.join(values['-FOLDER-'], values['-FILE LIST-'][0])
                correlated = correl_Sobel(filename)
                # plt.imshow(correlated)
                window['-IN-'].update(data=cv2.imencode('.png', image)[1].tobytes())
                window['-OUT-'].update(data=cv2.imencode('.png', correlated)[1].tobytes())
            else:
                continue
        except:
            continue
Here's what happens when I press the edge detection button: screenshot
And here's what it should look like (I got this image by running only the function above): edgeDetectedImg
Thanks in advance
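For what it's worth, a likely cause (an assumption, since only the screenshots are referenced) is that correl_Sobel returns a float image normalized to [0, 1]: plt.imshow displays that range fine, but it is not something cv2.imencode can turn into a visible 8-bit PNG, so the '-OUT-' element ends up black. A minimal sketch of rescaling to uint8 before encoding, with the helper name to_uint8 made up for illustration:

import cv2
import numpy as np

def to_uint8(img):
    # Rescale a float image in [0, 1] to 0-255 and cast to uint8 for encoding
    return (np.clip(img, 0.0, 1.0) * 255).astype(np.uint8)

# Usage inside the '-CONV-' branch (sketch):
# correlated = correl_Sobel(filename)                      # float array in [0, 1]
# png_bytes = cv2.imencode('.png', to_uint8(correlated))[1].tobytes()
# window['-OUT-'].update(data=png_bytes)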
I'm trying to automate a piano game with Python, but when I run the code I get an error, as shown in the attached picture.
Please help me fix this error. I am using Python 3.8.4.
import mss as mss
import numpy as np
from cv2 import cv2
import time
import pyautogui
import keyboard

pyautogui.PAUSE = 0.005

def take_screenshot():
    with mss.mss() as sct:
        filename = sct.shot(output="fullscreen.png")
        return filename

# take_screenshot()

def get_frame(region):
    with mss.mss() as sct:
        screen = np.array(sct.grab(region))
        screen_grayscale = cv2.cvtColor(screen, cv2.COLOR_BGR2BGRA)
        # print(screen_grayscale.shape)
        # cv2.imwrite('region.png', screen_grayscale)
        return screen_grayscale

def detect_tiles(frame):
    for x in range(frame.shape[0]):
        for y in range(frame.shape[1]):
            if frame[x, y] == 1:
                return x, y
    return None

region = {"top": 560, "left": 350, "width": 300, "height": 2}
time.sleep(3)

while True:
    if keyboard.is_pressed('q'):
        break
    start_time = time.time()
    frame = get_frame(region)
    coors = detect_tiles(frame)
    if coors:
        target_x = region['left'] + coors[1] + 1
        target_y = region['top'] + coors[0] + 1
        pyautogui.moveTo(x=target_x, y=target_y)
        pyautogui.mouseDown()
    print("%d FPS" % (1 / (time.time() - start_time)))
my error image:
Your problem seems to stem from the fact that frame[x, y] is not a single value but an array of values (the BGRA channels of that pixel). Hence, when you execute if frame[x, y] == 1 you get an error like ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all().
Depending on what you want to accomplish, you can use one of the following remedies:
Use .all(), which results in True if and only if every element of frame[x, y] equals 1:
if (frame[x, y] == 1).all():
Use .any(), which results in True if any element of frame[x, y] equals 1:
if (frame[x, y] == 1).any():
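As a quick illustration with a made-up BGRA pixel value, the two checks behave like this:

import numpy as np

pixel = np.array([1, 1, 0, 255])   # hypothetical BGRA pixel from the grabbed frame

print((pixel == 1).all())          # False: not every channel equals 1
print((pixel == 1).any())          # True: at least one channel equals 1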
I have an image with a red pixel, and I want another image to be blitted at that red pixel, so I wrote this code:
import sys, pygame
pygame.init()
from pygame.locals import *
import time

# the function with get_at
def colorscan(rect):
    red = (255, 0, 0, 255)
    for x in range(rect[0], rect[0]+rect[2]+1):
        for y in range(rect[1], rect[1]+rect[3]+1):
            print(x, y)
            if tuple(screen.get_at((x, y))) == red:
                print(x, y, "done")
                return (x, y)

def load(path):
    x = pygame.image.load(path)
    return x

beam = load("menu/beam.png")
w_plat = load("menu/w_plat.png")

videoinfo = pygame.display.Info()
fullscreen = pygame.FULLSCREEN
screen = pygame.display.set_mode((videoinfo.current_w, videoinfo.current_h), fullscreen, 32)
time = pygame.time.Clock()

# main loop
while True:
    beamrect = screen.blit(beam, (0, 0))
    xtl, ytl = beamrect.topleft
    w_pos = colorscan((xtl, ytl, 35, +35))
    screen.blit(w_plat, w_pos)
My code is too large, so I've only included the important part here. Anyway, when I run it I get this error:
Traceback (most recent call last):
File "C:\Users\André Luiz\Desktop\Equilibrium\Equilibrium.py", line 171, in
screen.blit(w_plat, w_pos)
TypeError: invalid destination position for blit
After checking, printing w_pos returned None, but I'm sure the red pixel has been "scanned".
I think what happens is that either your for loops
for x in range(rect[0], rect[0]+rect[2]+1):
    for y in range(rect[1], rect[1]+rect[3]+1):
never find a match, or the if statement is never satisfied because its condition is not met:
if tuple(screen.get_at((x, y))) == red:  # not executed if the pixel is not exactly red
That if is the only place where you return a value. When a function finishes without reaching a return statement, it returns None.
colorscan() has no default return value, which is why you get None.
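A small guard in the main loop (a sketch, assuming you simply want to skip the blit whenever no red pixel was found in that frame) avoids the TypeError:

# Only blit the platform when colorscan actually found a red pixel
w_pos = colorscan((xtl, ytl, 35, 35))
if w_pos is not None:
    screen.blit(w_plat, w_pos)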
I'm a beginner in Python, and I've been trying to alter the pixel values of an image as follows. I've been getting an error that says TypeError: an integer is required on the second-to-last line.
How do I sort this out?
This is my code:
from PIL import Image

img = Image.open(r'...')
pix = img.load()

def quantf(pval):
    if pval>=0 and pval<0.25:
        pval=0
    elif pval>=0.25 and pval<0.5:
        pval=0.25
    elif pval>=0.5 and pval<0.75:
        pval=0.5
    elif pval>=0.75 and pval<1:
        pval=0.75
    elif pval==1:
        pval=1

for i in range(0, 31):
    for j in range(0, 31):
        pix[i,j] = quantf(pix[i,j])

img.show()
According to:
http://pillow.readthedocs.io/en/3.4.x/reference/PixelAccess.html#example
After performing an image load each pixel is a tuple when using a multi-band image, otherwise it's an individual value:
from PIL import Image
im = Image.open('hopper.jpg')
px = im.load()
print (px[4,4])
prints:
(23, 24, 68)
or
0.23
You'll need to adjust your quantf(pval) function in order to account for this as well as ensuring that quantf(pval) actually returns a value.
For example:
def quantf(pval):
    if pval[0]>=0 and pval[0]<64:
        pval=(0, pval[1], pval[2])
    elif pval[0]>=64 and pval[0]<128:
        pval=(64, pval[1], pval[2])
    elif pval[0]>=128 and pval[0]<192:
        pval=(128, pval[1], pval[2])
    elif pval[0]>=192 and pval[0]<256:
        pval=(192, pval[1], pval[2])
    return pval
or
def quantf(pval):
    if pval>=0 and pval<0.25:
        pval=0
    elif pval>=0.25 and pval<0.5:
        pval=0.25
    elif pval>=0.5 and pval<0.75:
        pval=0.5
    elif pval>=0.75 and pval<1:
        pval=0.75
    elif pval==1:
        pval=1
    return pval
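A brief usage sketch for the tuple-returning version above (hopper.jpg is a placeholder for the question's image, and the RGB mode is an assumption; substitute your own file and pixel range):

from PIL import Image

img = Image.open('hopper.jpg')        # placeholder multi-band (RGB) image
pix = img.load()

width, height = img.size
for i in range(width):
    for j in range(height):
        pix[i, j] = quantf(pix[i, j])  # quantf now returns a tuple, so the
                                       # assignment no longer receives None

img.show()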
I would like to write a program that captures an image from the webcam and then crops it. After the crop, I do some image processing, and based on the result it will drive my robot. Here is the full program:
import cv2
from cv2 import *
import numpy as np
import pylab
import pymorph
import mahotas
from matplotlib import pyplot
from PIL import Image

# initialize the camera
cam = VideoCapture(0)   # 0 -> index of camera
s, img = cam.read()

# frame captured without any errors
if s:
    imwrite("img.jpg", img)  # save image

# Crop Image
imageFile = "img.jpg"
im1 = Image.open(imageFile)

def imgCrop(im):
    box = (0, 199, 640, 200)
    region = im.crop(box)
    region.save('crop.jpg')

cImg = imgCrop(im1)

# thresholding
def greyImg(im):
    gray = im.convert('L')
    bw = gray.point(lambda x: 0 if x<128 else 255, '1')
    bw.save("bw.jpg")

tImg = greyImg(cImg)

# direction
def find_centroid(im, rez):
    width, height = im.size
    XX, YY, count = 0, 0, 0
    for x in xrange(0, width, rez):
        for y in xrange(0, height, rez):
            if im.getpixel((x, y)) == 255:
                XX += x
                YY += y
                count += 1
    return XX/count, YY/count

print find_centroid(tImg, 1)

def robo_direct():
    cen = find_centroid(im, 1)
    diff = cen[0] - 320
    if diff > 10:
        print 'right'
    if diff < -10:
        print 'left'
    else:
        print 'straight'

print robo_direct()
The error came out like this:
File "compile.py", line 32, in greyImg
gray = im.convert('L')
AttributeError: 'NoneType' object has no attribute 'convert'
That is because im is a None object.
Try the code again with:
print im is None
and you'll see. I don't know about the thresholding part, but you are clearly creating the im object the wrong way.
Your function imgCrop(im1) has no return statement, so it returns None. Your greyImg(im) function likewise has no return statement and also returns None.
To fix that, add return statements to both functions: the first should return region and the second should return bw.
Also, your robo_direct() function should return the direction rather than print it, so that the statement print robo_direct() actually prints the direction.
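A minimal sketch of those fixes (return statements added; passing tImg to find_centroid instead of the undefined im is my assumption, everything else is taken from the question's code):

def imgCrop(im):
    box = (0, 199, 640, 200)
    region = im.crop(box)
    region.save('crop.jpg')
    return region                     # return the cropped image for later steps

def greyImg(im):
    gray = im.convert('L')
    bw = gray.point(lambda x: 0 if x < 128 else 255, '1')
    bw.save("bw.jpg")
    return bw                         # return the thresholded image

def robo_direct():
    cen = find_centroid(tImg, 1)      # assumption: use the thresholded image tImg
    diff = cen[0] - 320
    if diff > 10:
        return 'right'
    elif diff < -10:
        return 'left'
    else:
        return 'straight'

print robo_direct()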