NoneType error on .convert - Python

I would like to write a program that captures an image from a webcam and then crops it. After cropping, I do some image processing, and the result drives my robot. Here is the full program:
import cv2
from cv2 import *
import numpy as np
import pylab
import pymorph
import mahotas
from matplotlib import pyplot
from PIL import Image

# initialize the camera
cam = VideoCapture(0)  # 0 -> index of camera
s, img = cam.read()
# frame captured without any errors
if s:
    imwrite("img.jpg", img)  # save image

# Crop Image
imageFile = "img.jpg"
im1 = Image.open(imageFile)

def imgCrop(im):
    box = (0, 199, 640, 200)
    region = im.crop(box)
    region.save('crop.jpg')

cImg = imgCrop(im1)

# thresholding
def greyImg(im):
    gray = im.convert('L')
    bw = gray.point(lambda x: 0 if x < 128 else 255, '1')
    bw.save("bw.jpg")

tImg = greyImg(cImg)

# direction
def find_centroid(im, rez):
    width, height = im.size
    XX, YY, count = 0, 0, 0
    for x in xrange(0, width, rez):
        for y in xrange(0, height, rez):
            if im.getpixel((x, y)) == 255:
                XX += x
                YY += y
                count += 1
    return XX/count, YY/count

print find_centroid(tImg, 1)

def robo_direct():
    cen = find_centroid(im, 1)
    diff = cen[0] - 320
    if diff > 10:
        print 'right'
    if diff < -10:
        print 'left'
    else:
        print 'straight'

print robo_direct()
The error came out like this:
File "compile.py", line 32, in greyImg
gray = im.convert('L')
AttributeError: 'NoneType' object has no attribute 'convert'

That is because im is None.
Run the code again with:
print im is None
and you'll see. I don't know about the thresholding, but you are clearly creating the im object the wrong way.

Your function imgCrop(im1) has no return statement and therefore returns None. Your greyImg(im) function also has no return statement, so it returns None as well.
To fix that, add return statements to both functions: return region in the first and return bw in the second.
Also, your robo_direct() function should return the direction instead of printing it, so that print robo_direct() actually prints the direction, as sketched below.
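For illustration, a minimal sketch of the corrected functions, assuming the rest of the question's code stays the same (Python 2 style, as in the question):

def imgCrop(im):
    box = (0, 199, 640, 200)
    region = im.crop(box)
    region.save('crop.jpg')
    return region  # return the cropped image instead of the implicit None

def greyImg(im):
    gray = im.convert('L')
    bw = gray.point(lambda x: 0 if x < 128 else 255, '1')
    bw.save("bw.jpg")
    return bw  # return the thresholded image instead of the implicit None

def robo_direct(im):
    cen = find_centroid(im, 1)
    diff = cen[0] - 320
    if diff > 10:
        return 'right'
    elif diff < -10:
        return 'left'
    else:
        return 'straight'

cImg = imgCrop(im1)
tImg = greyImg(cImg)
print robo_direct(tImg)

Here robo_direct takes the thresholded image as a parameter instead of relying on a global im, and uses elif so 'right' and 'straight' are not both reported for the same frame.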

Related

I tried to make a slideshow of the images in a folder with Python using OpenCV, but it doesn't work for me

import cv2
import numpy as np
from math import ceil
import os

dst = "C:\OpencvPython\frames\slide"  # Images destination
images = os.listdir(dst)  # Get their names in a list
length = len(images)
result = np.zeros((360, 360, 3), np.uint8)  # Image window of size (360, 360)
i = 1
a = 1.0  # alpha
b = 0.0  # beta
img = cv2.imread(dst + images[i])
img = cv2.resize(img, (360, 360))

# Slide Show Loop
while(True):
    if(ceil(a) == 0):
        a = 1.0
        b = 0.0
        i = (i + 1) % length  # Getting new image from directory
        img = cv2.imread(dst + images[i])
        img = cv2.resize(img, (360, 360))
    a -= 0.01
    b += 0.01
    # Image Transition from one to another
    result = cv2.addWeighted(result, a, img, b, 0)
    cv2.imshow("Slide Show", result)
    key = cv2.waitKey(1) & 0xff
    if key == ord('q'):
        break
cv2.destroyAllWindows()
[ WARN:0#0.007] global D:\a\opencv-python\opencv-python\opencv\modules\imgcodecs\src\loadsave.cpp (239) cv::findDecoder imread_('C:/OpencvPython/frames/slidedownload (2).jpg'): can't open/read file: check file path/integrity
cv2.error: OpenCV(4.6.0) D:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\resize.cpp:4052: error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize'
The issue is with the path. Replace img = cv2.imread(dst + images[i]) with
img = cv2.imread(os.path.join(dst, images[i]))
The + operator simply appends the second string to the base path: for example, if dst = "C:\OpencvPython\frames\slide" and images[i] = "img1.png", then dst + images[i] becomes "C:\OpencvPython\frames\slideimg1.png", which leads to the file-not-found warning, whereas os.path.join generates the correct path.
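As an extra safeguard (an assumption beyond the original answer), you can also skip files that OpenCV cannot decode; cv2.imread returns None for unreadable files, which is what triggers the later resize assertion:

import os
import cv2

dst = r"C:\OpencvPython\frames\slide"
for name in os.listdir(dst):
    path = os.path.join(dst, name)  # correctly joined path
    img = cv2.imread(path)
    if img is None:  # unreadable or non-image file
        print("Skipping", path)
        continue
    img = cv2.resize(img, (360, 360))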

Problems with red pixel and loop check

I am new here and also in python (^_^')
I have a question about my code.
This is an infinite loop: when my code finds a red pixel in a saved screenshot, it sends me a message. It works when there is a red pixel, but if I try to test another red pixel, or delete the last detected red pixel and then reuse it, my code stops working with this error:
Warning (from warnings module):
File "C:\Users\Desktop\DCN.py", line 126
comparison_dcn = check_dcn == control_dcn
DeprecationWarning: elementwise comparison failed; this will raise an error in the future.
Traceback (most recent call last):
File "C:\Users\Desktop\DCN.py", line 127, in <module>
equal_dcn= comparison_dcn.all()
AttributeError: 'bool' object has no attribute 'all'
My idea was to create a NumPy array to save the coordinates (x, y) and check whether they already exist inside this array, so the same pixel is not reported twice...
I tried to figure out the problem, but it is too early for my Python experience....
I hope my English is understandable XD
Can someone kindly help me with my code and explain my issue?
# libraries
import mss
import mss.tools
from PIL import Image
import psutil
import time
import cv2
import numpy as np

# global variables
loop = 1
check_dcn = np.column_stack((0, 0))
counter_dcn = 0

while loop == 1:
    # detect red pixel
    def detect_color(rgb, filename):
        img = Image.open(filename)
        img = img.convert('RGBA')
        data = img.getdata()
        for item in data:
            if item[0] == rgb[0] and item[1] == rgb[1] and item[2] == rgb[2]:
                return True
        return False

    with mss.mss() as sct:
        # The screen part to capture
        monitor = {"top": 190, "left": 0, "width": 1920, "height": 840}
        output = "example.png".format(**monitor)
        # Grab the data
        sct_img = sct.grab(monitor)
        # Save to the picture file
        mss.tools.to_png(sct_img.rgb, sct_img.size, output=output)

    print(detect_color((255, 102, 102), 'example.png'))  # dcn red pixel

    # dcn alarm detected
    if detect_color((255, 102, 102), 'example.png'):
        pixel_img = cv2.imread('example.png')
        pop = [102, 102, 255]  # BGR order
        X, Y = np.where(np.all(pixel_img == pop, axis=2))  # coordinates
        control_dcn = np.column_stack((X, Y))  # assign coordinates
        print(control_dcn)  # test
        if counter_dcn == 0:
            counter_dcn = 1
            check_dcn = control_dcn
            print("first round dcn")  # test
            print(check_dcn)  # test
        ### looking for solution here to empty comparison_dcn
        comparison_dcn = check_dcn == control_dcn
        equal_dcn = comparison_dcn.all()
        if equal_dcn:
            print("red pixel already reported, waiting 20 seconds")  # test
            time.sleep(20)
        else:
            check_dcn = np.column_stack((X, Y))
            print("red pixel added, waiting 5 seconds")  # test
            print(check_dcn)  # test
            time.sleep(5)
    else:
        print("Nothing, waiting 10 seconds")
        time.sleep(10)
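For reference, a minimal sketch of the kind of check described above, comparing shapes before comparing values so that the == comparison is only used when it yields an array (names follow the question's code; the surrounding loop is assumed unchanged):

import numpy as np

def same_coordinates(check_dcn, control_dcn):
    # Arrays of different shapes cannot hold the same coordinates, and comparing
    # them element-wise may give a plain bool, which has no .all() method.
    if check_dcn.shape != control_dcn.shape:
        return False
    return bool((check_dcn == control_dcn).all())

# Example with two detections of different sizes
a = np.column_stack(([1, 2], [3, 4]))        # 2 detected pixels, shape (2, 2)
b = np.column_stack(([1, 2, 5], [3, 4, 6]))  # 3 detected pixels, shape (3, 2)
print(same_coordinates(a, a))  # True
print(same_coordinates(a, b))  # False, instead of the AttributeError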

How to find if a specific pixel of an image has a higher Red, Green, or Blue value in Python

I am trying to find out whether a certain pixel of an image has more of a red, green, or blue value. I tried the code below:
im = Image.open("image.png")
x = 32
y = 32
pixel = im.load()
rgb = pixel[x,y]
rgb = int(rgb)
if rgb[0] > rgb[1,2]:
    print("Red")
elif rgb[1] > rgb[0,2]:
    print("Green")
elif rgb[2] > rgb[0,1]:
    print("Blue")
But it's giving me this error:
File "d:\path\app.py", line 11, in <module>
rgb = int(rgb)
TypeError: int() argument must be a string, a bytes-like object or a number, not 'tuple'
Please let me know what I'm doing wrong or if there's a better way of doing this!
Thanks
-Daniel
You can simply do:
from PIL import Image

red_image = Image.open("image.png")
red_image_rgb = red_image.convert("RGB")
rgb_pixel_value = red_image_rgb.getpixel((10, 15))
if rgb_pixel_value[0] > rgb_pixel_value[1] and rgb_pixel_value[0] > rgb_pixel_value[2]:
    print("Red")
elif rgb_pixel_value[0] < rgb_pixel_value[2] and rgb_pixel_value[1] < rgb_pixel_value[2]:
    print("Blue")
else:
    print("Green")
Here is an interactive program:
from PIL import Image, ImageTk
import tkinter as tk
from tkinter import filedialog

root = tk.Tk()

def select_image():
    s = filedialog.askopenfilename(filetypes=(("PNG", '*.png'), ("JPEG", '*.jpg')))
    if s != '':
        global red_image
        red_image = Image.open(s)
        image1 = ImageTk.PhotoImage(file=s)
        lbl1.config(image=image1)
        lbl1.image = image1
        root.bind("<Motion>", check_pixel)

def check_pixel(event):
    red_image_rgb = red_image.convert("RGB")
    rgb_pixel_value = red_image_rgb.getpixel((event.x, event.y))
    lbl2.config(text=f"Red: {rgb_pixel_value[0]} Green: {rgb_pixel_value[1]} Blue: {rgb_pixel_value[2]}")
    if rgb_pixel_value[0] > rgb_pixel_value[1] and rgb_pixel_value[0] > rgb_pixel_value[2]:
        lbl3.config(text="Red", fg="Red")
    elif rgb_pixel_value[0] < rgb_pixel_value[2] and rgb_pixel_value[1] < rgb_pixel_value[2]:
        lbl3.config(text="Blue", fg="Blue")
    else:
        lbl3.config(text="Green", fg="green")

button1 = tk.Button(root, text='Select Image', command=select_image)
button1.pack()
lbl1 = tk.Label(root)
lbl1.pack()
lbl2 = tk.Label(root, text="Red: Green: Blue:")
lbl2.pack(side=tk.BOTTOM)
lbl3 = tk.Label(root)
lbl3.pack()
root.mainloop()
What you are doing wrong here is that you are passing a tuple to the int() function, which raises a TypeError.
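For example, the pixel value is already a tuple, and its channels can be read by index (the value below is a hypothetical example):

rgb = (120, 200, 75)  # hypothetical value returned by pixel[x, y]
# int(rgb)            # TypeError: int() argument must be ... not 'tuple'
print(rgb[0], rgb[1], rgb[2])  # access the red, green and blue channels by index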
A better way is to use OpenCV to get the values of R, G and B at your given (x, y):
import cv2

im = cv2.imread("image.png")
x, y = 32, 32
# get the b, g and r values at (x, y); OpenCV loads images in BGR order,
# and NumPy arrays are indexed as [row, column], i.e. [y, x]
b, g, r = im[y, x]
print(f"Pixel at ({x}, {y}) - Red: {r}, Green: {g}, Blue: {b}")
# then do your comparison
# following is an example
if (r >= g) and (r >= b):
    print("Red")
elif (g >= r) and (g >= b):
    print("Green")
else:
    print("Blue")

How to load data from a for loop in Python?

Hello, I am trying to combine detectMultiScale code with calcHist code. When I run this program I can't access 'w' outside the for loop.
import cv2
import numpy as np
from matplotlib import pyplot as plt
import time
import sys
import serial
#execfile("/home/arizal/Documents/Sorting Jeruk/motor1.py")
#ser = serial.Serial('/dev/ttyACM0', 9600)
#Cascade jeruk
jeruk_cascade = cv2.CascadeClassifier('cascade.xml')
camera = cv2.VideoCapture(1)
base1 = cv2.imread('base11.jpg')
base2 = cv2.imread('base22.jpg')
base3 = cv2.imread('base33.jpg')
#Set hist parameters
hist_height = 64
hist_width = 256
nbins = 32
bin_width = hist_width/nbins
hrange = [0,180]
srange = [0,256]
ranges = hrange+srange # ranges = [0,180,0,256]
#Create an empty image for the histogram
e = np.zeros((hist_height,hist_width))
#print ("h : ",h)
#print type(h)
#x=1
This is the detectMultiScale loop:
while 1:
    grabbed, img = camera.read()
    cam = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if not grabbed:
        "Camera could not be started."
        break

    # add this
    # image, reject levels level weights.
    jeruks = jeruk_cascade.detectMultiScale(cam, 1.03, 5)
This is the cascade for loop, which draws a rectangle mark on the object:
    # add this
    for (x, y, w, h) in jeruks:
        cv2.rectangle(img, (x, y), (x + w, y + h), (17, 126, 234), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, 'Jeruk', (x + w, y + h), font, 1, (17, 126, 234), 2, cv2.LINE_AA)  # write the text
        roi_gray = cam[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
And this calculates the histogram when the object is detected:
    if w > 250:
        print ('w', w)
        histcam = cv2.calcHist([cam], [0], None, [nbins], [0, 256])
        cv2.normalize(histcam, histcam, hist_height, cv2.NORM_MINMAX)
        hist = np.int32(np.around(histcam))
But I got this error:
Traceback (most recent call last):
File "/home/arizal/Documents/Sorting Jeruk/doalcoba.py", line 65, in <module>
if w > 250 :
NameError: name 'w' is not defined
Can anyone help me?
I think the problem with your code is indentation. In your code,

for (x,y,w,h) in jeruks:
    ....

and

if w > 250 :
    ....

are at the same level of indentation, so the if only runs after the for loop has finished. If detectMultiScale finds nothing, the loop body never runs, w is never assigned, and you get the NameError. Fix your indentation so the check sits inside the loop:

for (x,y,w,h) in jeruks:
    ....
    if w > 250 :
        print ('w', w)

Let me know if that works.
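Put together, the corrected loop might look roughly like this (a sketch based on the question's code, with the histogram block moved inside the for loop and the grabbed check done before converting the frame):

while 1:
    grabbed, img = camera.read()
    if not grabbed:
        print("Camera could not be started.")
        break
    cam = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    jeruks = jeruk_cascade.detectMultiScale(cam, 1.03, 5)
    for (x, y, w, h) in jeruks:
        cv2.rectangle(img, (x, y), (x + w, y + h), (17, 126, 234), 2)
        if w > 250:  # only large detections
            print('w', w)
            histcam = cv2.calcHist([cam], [0], None, [nbins], [0, 256])
            cv2.normalize(histcam, histcam, hist_height, cv2.NORM_MINMAX)
            hist = np.int32(np.around(histcam))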

OpenCV: ValueError: zero-size array to reduction operation minimum which has no identity while cropping an image

I have batches of 2000 images that need to be cropped. I am trying to do this using OpenCV.
All the images are of the same format.
Sample Image:
I am cropping it to:
It works fine for most of the images, but for some it doesn't.
I keep getting an error:
ValueError: zero-size array to reduction operation minimum which has no identity
Here's the link to the images for which I keep getting the above error.
Here's my code.
import numpy as np
import os, shutil
import sys
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2 as cv
from pprint import pprint
from matplotlib import pyplot as plt

def utilCropImage(img, dictPointValues=None):
    if dictPointValues != None:
        x = dictPointValues['x']
        y = dictPointValues['y']
        w = dictPointValues['w']
        h = dictPointValues['h']
    crop_img = img[y:y+h, x+55:x+w]
    cv.waitKey(0)
    return crop_img

def cropImage(imageToBeCropped):
    img = cv.imread(imageToBeCropped)
    if img is None:
        print("Error reading the image")
        sys.exit(0)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    __, contours, hierarchy = cv.findContours(gray, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
    counter = 0
    for contour in contours:
        counter += 1
        x, y, w, h = cv.boundingRect(contour)
        roi = img[y:y+h, x:x+w]
    return (utilCropImage(img, {'x': x, 'y': y, 'w': w, 'h': h}))

DIR_PATH = 'xxxxxxxxxxxxx/'
OUTPUT_PATH = 'yyyyyy/'
lst = os.listdir(DIR_PATH)
for i in lst:
    image = DIR_PATH + i
    cv.imwrite(OUTPUT_PATH + i, cropImage(image))
    cv.waitKey(0)
Can someone guide me on where exactly I am going wrong? I'm new to OpenCV.
Thanks.
