Variable is not defined even though the variable is defined earlier - Python

I have some code that gives me an "img is not defined" error, even though I defined the image earlier in the code.
What it does is scan an image and generate a maze out of emojis according to the image. The issue is on the line:
for c in xrange if c < img.shape[1]] for r in yrange if r < img.shape[0]]
The code is below; I should also point out that this is all in a class:
import numpy as np
import cv2
import matplotlib.pyplot as plt

def downloadImage(URL):
    """Downloads the image at the URL and converts it to cv2 RGB format"""
    from io import BytesIO
    from PIL import Image as PIL_Image
    import requests
    response = requests.get(URL)
    image = PIL_Image.open(BytesIO(response.content))
    return cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)

URL = "https://cdn.discordapp.com/attachments/670656848256434176/1001139167159406602/maze2.png"
img = downloadImage(URL)

# Convert to 2 colors
img = cv2.cvtColor(np.array(img), cv2.COLOR_BGR2GRAY)
ret3, th3 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY)
# plt.imshow(th3, cmap='gray')

# Detect corners
CornerKernel = np.ones((3, 3), np.uint8)
corner = cv2.filter2D(th3 // 255, -1, CornerKernel)
# A corner adds up to 1 or 9
Corners = np.argwhere((corner == 4) | (corner == 8))
antiCorners = np.argwhere((corner == 1) | (corner == 5))

# For each point in Corners, find the closest point in antiCorners
Corner_antiCorner = []
for point in Corners:
    distances = np.linalg.norm(antiCorners - point, axis=1)
    closest = antiCorners[np.argmin(distances)]
    Corner_antiCorner.append((point + closest) / 2)
    plt.plot([point[1], closest[1]], [point[0], closest[0]], color='r')

# For each point in Corner_antiCorner, find the closest point in Corner_antiCorner
closestCorners = []
for point in Corner_antiCorner:
    distances = np.linalg.norm(Corner_antiCorner - point, axis=1)
    # closest is itself, so the second closest is chosen
    closest = Corner_antiCorner[distances.argsort()[1]]
    closestCorners.append((point, closest))
    plt.plot([point[1], closest[1]], [point[0], closest[0]], color='r')

# Sample of separations dx, dy
dx = np.array([abs(p[1] - q[1]) for p, q in closestCorners])
dy = np.array([abs(p[0] - q[0]) for p, q in closestCorners])
mediandx = np.median(dx[dx > 0])
mediandy = np.median(dy[dy > 0])
print("is this working")
stepY, stepX = int(mediandy), int(mediandx)
xrange = range(stepX // 2, img.shape[0], stepX)
yrange = range(stepY // 2, img.shape[1], stepY)
x, y = [], []
mazeElement = {0: ':black_large_square:', 1: ':white_large_square:'}
print('for loop 1')
for r in yrange:
    for c in xrange:
        x.append(c)
        y.append(r)
plt.scatter(x, y)
print('for loop 2cd')
self.base_map = [[mazeElement[img[r, c] // 255]
                  for c in xrange if c < img.shape[1]] for r in yrange if r < img.shape[0]]
Can someone tell me how to fix this?

Try removing self. before the assignment self.base_map = . self only means something inside a class method. Note also that if this code sits directly in a class body, a list comprehension there cannot see names assigned in the class scope (such as img), because comprehensions get their own scope; that is exactly what produces a NameError like "img is not defined" even though img was assigned a few lines earlier.
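If the code must stay inside a class, a minimal sketch of the usual workaround (the class and method names here are illustrative, not from the original post) is to build the grid inside a method, where ordinary function scoping applies and the comprehension can see local variables:

# Hypothetical sketch: the same comprehension inside a method, where img and
# the ranges are locals, so the comprehension's nested scope can see them.
class MazeBuilder:
    def build(self, img, xs, ys, mazeElement):
        self.base_map = [[mazeElement[img[r, c] // 255]
                          for c in xs if c < img.shape[1]]
                         for r in ys if r < img.shape[0]]
        return self.base_map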

Related

How to place objects randomly at different angles and positions in Python

I want to make a function which generates a dataset: the object will be placed on a black image at different positions, with different angles and sizes, randomly up to 20 times per image, and the x, y, and angle of each placement saved to a text file.
The following image shows five objects at different positions and angles.
import numpy as np
import cv2

patch = cv2.imread('imagersult.png')
img = np.zeros((2048, 2048, 1), dtype="uint8")
Here is how you can use the scipy.ndimage module to rotate your patches:
import numpy as np
import cv2
from random import randrange
from scipy import ndimage

def patch_img(img, patch, amt=5):
    h, w, _ = img.shape
    for _ in range(amt):
        p = ndimage.rotate(patch, randrange(360))
        p_h, p_w, _ = p.shape
        x = randrange(w - p_w)
        y = randrange(h - p_h)
        seg = img[y: y + p_h, x: x + p_w]
        seg[:] = cv2.bitwise_xor(seg, p)

patch = cv2.imread('imagersult.png')
img = np.zeros((2048, 2048, 3), dtype="uint8")
patch_img(img, patch)
cv2.imshow("Image", img)
cv2.waitKey(0)
Outputs for multiple runs:
For grayscale and variation in size of the patches:
import numpy as np
import cv2
from random import randrange, uniform
from scipy import ndimage

def patch_img(img, patch, amt=5):
    h, w = img.shape
    min_scale = 0.5
    max_scale = 2
    for _ in range(amt):
        patch_h, patch_w = patch.shape
        scale = uniform(min_scale, max_scale)
        p = ndimage.rotate(cv2.resize(patch, (int(patch_w * scale), int(patch_h * scale))), randrange(360))
        p_h, p_w = p.shape
        x = randrange(w - p_w)
        y = randrange(h - p_h)
        seg = img[y: y + p_h, x: x + p_w]
        seg[:] = cv2.bitwise_xor(seg, p)

patch = cv2.imread('imagersult.png', 0)
img = np.zeros((2048, 2048), dtype="uint8")
patch_img(img, patch)
cv2.imshow("Image", img)
cv2.imwrite("result.png", img)
cv2.waitKey(0)
Sample output:
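The question also asks to save each placement's x, y, and angle to a text file. A minimal sketch of one way to extend the grayscale version above (the function name, return value, and file name are assumptions, not part of the original answer):

import numpy as np
import cv2
from random import randrange
from scipy import ndimage

# Hypothetical variant of patch_img that records (x, y, angle) per placement.
def patch_img_logged(img, patch, amt=5):
    placements = []
    h, w = img.shape
    for _ in range(amt):
        angle = randrange(360)
        p = ndimage.rotate(patch, angle)
        p_h, p_w = p.shape
        x = randrange(w - p_w)
        y = randrange(h - p_h)
        seg = img[y: y + p_h, x: x + p_w]
        seg[:] = cv2.bitwise_xor(seg, p)
        placements.append((x, y, angle))
    return placements

patch = cv2.imread('imagersult.png', 0)
img = np.zeros((2048, 2048), dtype="uint8")
with open("placements.txt", "w") as f:
    for x, y, angle in patch_img_logged(img, patch, amt=20):
        f.write(f"{x} {y} {angle}\n")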

How to find the angle made by the image with the front-parallel view?

I am working on a problem where I warp the perspective of images to get a front-parallel view. The non-front-parallel plane of the image is rotated by angle alpha about the x-axis and by angle beta about the y-axis, taking the optical axis of the camera as the z-axis. Can we estimate alpha and beta? If so, how? What is the mathematical procedure one should follow to find these angles? Are there any library functions for this?
Thanks in advance for any help!
import cv2
import numpy as np

circles = np.zeros((4, 2), int)  # np.int was removed in recent NumPy; plain int works
counter = 0

def mousePoints(event, x, y, flags, params):
    global counter
    if event == cv2.EVENT_LBUTTONDOWN:
        circles[counter] = x, y
        counter = counter + 1
        print(circles)

img = cv2.imread("DSC_0273.JPG")
img = cv2.resize(img, (1500, 1000))
q = 0
while True:
    if counter == 4:
        q = q + 1
        height1, width1 = 1080, 1920
        pts1 = np.float32([circles[0], circles[1], circles[2], circles[3]])
        width = np.sqrt((circles[1][0] - circles[0][0])**2 + (circles[1][1] - circles[0][1])**2)
        height = np.sqrt((circles[2][1] - circles[0][1])**2 + (circles[2][0] - circles[0][0])**2)
        width = int(np.round(width))
        height = int(np.round(height))
        x1, y1 = circles[0]
        pts2 = np.float32([[x1, y1], [(x1 + width), y1], [(x1 + width), (y1 + height)], [x1, (y1 + height)]])
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        if q == 1:
            print(matrix.shape)
            print(matrix)
        imgOutput = cv2.warpPerspective(img, matrix, (width1, height1))
        cv2.imshow("Output Image ", imgOutput)
    for x in range(0, 4):
        cv2.circle(img, (circles[x][0], circles[x][1]), 3, (0, 255, 0), cv2.FILLED)
    cv2.imshow("Original Image ", img)
    cv2.setMouseCallback("Original Image ", mousePoints)
    cv2.waitKey(1)
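As for library functions: a hedged sketch of one possible route (not from the original post) is cv2.decomposeHomographyMat, which requires the camera intrinsics K; alpha and beta can then be read off as Euler angles of a candidate rotation. This assumes the homography matrix from the loop above is available:

# Hedged sketch: recover candidate rotations from the homography computed above.
# K below is a placeholder; a calibrated camera matrix is needed for meaningful angles.
K = np.eye(3)
num, rotations, translations, normals = cv2.decomposeHomographyMat(matrix, K)
R = rotations[0]  # one of up to four candidate solutions; disambiguate with scene knowledge
alpha = np.degrees(np.arctan2(R[2, 1], R[2, 2]))                     # rotation about the x-axis
beta = np.degrees(np.arctan2(-R[2, 0], np.hypot(R[2, 1], R[2, 2])))  # rotation about the y-axis
print(alpha, beta)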

How to save a specific color image from the output of K-means using Python

I am using the code below for color-based segmentation using K-means. In this code, each cluster is saved into one image. In my case the requirement is a bit different: I want to save only the blue color image. Could you please help me with how I can save only the blue color image?
import numpy as np
import cv2
import pdb
from matplotlib import pyplot as plt

img = cv2.imread('a.png')
Z = np.float32(img.reshape((-1, 3)))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 4
_, labels, centers = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
labels = labels.reshape((img.shape[:-1]))
reduced = np.uint8(centers)[labels]
result = [np.hstack([img, reduced])]
for i, c in enumerate(centers):
    mask = cv2.inRange(labels, i, i)
    mask = np.dstack([mask] * 3)  # Make it 3 channel
    ex_img = cv2.bitwise_and(img, mask)
    ex_reduced = cv2.bitwise_and(reduced, mask)
    result.append(np.hstack([ex_img, ex_reduced]))
pdb.set_trace()
cv2.imwrite('watermelon_out.jpg', np.vstack(result))
Original Image
After using this code I am getting the result linked below:
Expected Result:
This should output only the blue image. First find the center closest to the blue color, then plot only the points in the cluster represented by that center:
import numpy as np
import cv2
import pdb
from matplotlib import pyplot as plt

img = cv2.imread('a.png')
Z = np.float32(img.reshape((-1, 3)))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 4
_, labels, centers = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
labels = labels.reshape((img.shape[:-1]))
reduced = np.uint8(centers)[labels]

# Find the cluster center closest to blue (note: BGR channel order)
blue_dis = 99999999
blue_center = -1
b = (255, 50, 0)
for i, c in enumerate(centers):
    dis = (c[0] - b[0])**2 + (c[1] - b[1])**2 + (c[2] - b[2])**2  # third term fixed: compare the third channels, not the second twice
    if dis < blue_dis:
        blue_center = i
        blue_dis = dis

result = [np.hstack([img, reduced])]
for i, c in enumerate(centers):
    if i != blue_center:
        continue
    mask = cv2.inRange(labels, i, i)
    mask = np.dstack([mask] * 3)  # Make it 3 channel
    ex_img = cv2.bitwise_and(img, mask)
    ex_reduced = cv2.bitwise_and(reduced, mask)
    result.append(np.hstack([ex_img, ex_reduced]))
pdb.set_trace()
cv2.imwrite('watermelon_out.jpg', np.vstack(result))
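If only the blue cluster itself needs to go to disk (rather than the side-by-side montage above), one small addition along these lines should do; the output file name is an assumption:

# Hypothetical follow-up: write just the blue-cluster extraction from the loop above.
cv2.imwrite('blue_only.png', ex_reduced)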

Detection of a leaf on an unpredictable background

A project I have been working on for some time is unsupervised leaf segmentation. The leaves are captured on white or colored paper, and some of them have shadows.
I want to be able to threshold the leaf and also remove the shadow (while preserving the leaf's details); however, I cannot use fixed threshold values due to diseases changing the color of the leaf.
Then I began to research and found a proposal by Horprasert et al. (1999) in "A Statistical Approach for Real-time Robust Background Subtraction and Shadow Detection", which compares areas in the image with the color of the now-known background using the chromacity distortion measure. This measure takes account of the fact that for desaturated colors, hue is not a relevant measure.
Based on it, I was able to achieve the following results:
However, the leaves that are captured on white paper need the Mask V inverted with cv2.bitwise_not(), giving me the result below:
I'm thinking that I'm forgetting some step to get a complete mask that will work for all or most of my leaves. Samples can be found here.
My Code:
import numpy as np
import cv2
import matplotlib.pyplot as plot
import scipy.ndimage as ndimage

def brightness_distortion(I, mu, sigma):
    return np.sum(I * mu / sigma**2, axis=-1) / np.sum((mu / sigma)**2, axis=-1)

def chromacity_distortion(I, mu, sigma):
    alpha = brightness_distortion(I, mu, sigma)[..., None]
    return np.sqrt(np.sum(((I - alpha * mu) / sigma)**2, axis=-1))

def bwareafilt(image):
    # Keep only the largest connected component, like MATLAB's bwareafilt
    image = image.astype(np.uint8)
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=4)
    sizes = stats[:, -1]
    max_label = 1
    max_size = sizes[1]
    for i in range(2, nb_components):
        if sizes[i] > max_size:
            max_label = i
            max_size = sizes[i]
    img2 = np.zeros(output.shape)
    img2[output == max_label] = 255
    return img2

img = cv2.imread("Amostra03.jpeg")
sat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 1]
val = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 2]
sat = cv2.medianBlur(sat, 11)
val = cv2.medianBlur(val, 11)
thresh_S = cv2.adaptiveThreshold(sat, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 401, 10)
thresh_V = cv2.adaptiveThreshold(val, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 401, 10)
mean_S, stdev_S = cv2.meanStdDev(img, mask=255 - thresh_S)
mean_S = mean_S.ravel().flatten()
stdev_S = stdev_S.ravel()
chrom_S = chromacity_distortion(img, mean_S, stdev_S)
chrom255_S = cv2.normalize(chrom_S, chrom_S, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)[:, :, None]
mean_V, stdev_V = cv2.meanStdDev(img, mask=255 - thresh_V)
mean_V = mean_V.ravel().flatten()
stdev_V = stdev_V.ravel()
chrom_V = chromacity_distortion(img, mean_V, stdev_V)
chrom255_V = cv2.normalize(chrom_V, chrom_V, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX).astype(np.uint8)[:, :, None]
thresh2_S = cv2.adaptiveThreshold(chrom255_S, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 401, 10)
thresh2_V = cv2.adaptiveThreshold(chrom255_V, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 401, 10)
images = [img, thresh_S, thresh_V, cv2.bitwise_and(thresh2_S, cv2.bitwise_not(thresh2_V))]
titles = ['Original Image', 'Mask S', 'Mask V', 'S + V']
for i in range(4):
    plot.subplot(2, 2, i + 1)
    if i == 0:
        plot.imshow(images[i])
    else:
        plot.imshow(images[i], cmap='gray')
    plot.title(titles[i])
    plot.xticks([]), plot.yticks([])
plot.show()
Any idea to solve this issue?
Try this one... I'm using grabCut from the OpenCV lib. It's not perfect, but it might be a good start.
import cv2
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
#%matplotlib inline  # uncomment if in a notebook

def mask_leaf(im_name, external_mask=None):
    im = cv2.imread(im_name)
    im = cv2.blur(im, (5, 5))
    height, width = im.shape[:2]
    mask = np.ones(im.shape[:2], dtype=np.uint8) * 2  # start with all possible background
    '''
    #from docs:
    0 GC_BGD defines an obvious background pixel.
    1 GC_FGD defines an obvious foreground (object) pixel.
    2 GC_PR_BGD defines a possible background pixel.
    3 GC_PR_FGD defines a possible foreground pixel.
    '''
    # 2 circles are "drawn" on the mask: a smaller centered one where I assume all pixels are definite foreground, and a bigger circle of probable foreground.
    r = 100
    cv2.circle(mask, (int(width / 2.), int(height / 2.)), 2 * r, 3, -3)  # possible fg
    # next 2 are greens... dark and bright, to increase the number of fg pixels.
    mask[(im[:, :, 0] < 45) & (im[:, :, 1] > 55) & (im[:, :, 2] < 55)] = 1  # dark green
    mask[(im[:, :, 0] < 190) & (im[:, :, 1] > 190) & (im[:, :, 2] < 200)] = 1  # bright green
    mask[(im[:, :, 0] > 200) & (im[:, :, 1] > 200) & (im[:, :, 2] > 200) & (mask != 1)] = 0  # pretty white
    cv2.circle(mask, (int(width / 2.), int(height / 2.)), r, 1, -3)  # fg
    # if you pass in an external mask derived from some other operation, it is factored in here.
    if external_mask is not None:
        mask[external_mask == 1] = 1
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)
    cv2.grabCut(im, mask, None, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_MASK)
    # show mask
    plt.figure(figsize=(10, 10))
    plt.imshow(mask)
    plt.show()
    # mask image
    mask2 = np.where((mask == 1) + (mask == 3), 255, 0).astype('uint8')
    output = cv2.bitwise_and(im, im, mask=mask2)
    plt.figure(figsize=(10, 10))
    plt.imshow(output)
    plt.show()

mask_leaf('leaf1.jpg', external_mask=None)
mask_leaf('leaf2.jpg', external_mask=None)
Addressing the external mask: here's an example of HDBSCAN clustering. I'm not going to go into the details; you can look up the docs and change it or use it as-is.
import hdbscan
from collections import Counter

def hdbscan_mask(im_name):
    im = cv2.imread(im_name)
    im = cv2.blur(im, (5, 5))
    indices = np.dstack(np.indices(im.shape[:2]))
    data = np.concatenate((indices, im), axis=-1)
    data = data[:, 2:]
    data = im.reshape(im.shape[0] * im.shape[1], 3)  # fixed typo: was imb
    clusterer = hdbscan.HDBSCAN(min_cluster_size=1000, min_samples=20)
    clusterer.fit(data)
    plt.figure(figsize=(10, 10))
    plt.imshow(clusterer.labels_.reshape(im.shape[0:2]))
    plt.show()
    height, width = im.shape[:2]
    mask = np.ones(im.shape[:2], dtype=np.uint8) * 2  # start with all possible background
    cv2.circle(mask, (int(width / 2.), int(height / 2.)), 100, 1, -3)  # possible fg
    # grab the cluster numbers inside the circle
    vals_im = clusterer.labels_.reshape(im.shape[0:2])
    vals = vals_im[mask == 1]
    commonvals = []
    cnts = Counter(vals)
    for v, count in cnts.most_common(20):
        # print('%i: %7d' % (v, count))
        if v == -1:
            continue
        commonvals.append(v)
    tst = np.in1d(vals_im, np.array(commonvals))
    tst = tst.reshape(vals_im.shape)
    hmask = tst.astype(np.uint8)
    plt.figure(figsize=(10, 10))
    plt.imshow(hmask)
    plt.show()
    return hmask

hmask = hdbscan_mask('leaf1.jpg')
then to use the initial function with the new mask (output suppressed):
mask_leaf('leaf1.jpg', external_mask=hmask)
This was all made in a notebook from scratch, so hopefully there are no errant variables that choke it up when running it somewhere else. (Note: I did NOT swap BGR to RGB for plt display, sorry.)

How to use PIL (Python Imaging Library) to rotate an image and make the black background transparent

I want to rotate a gray "test" image and paste it onto a blue background image. I can now remove the black color after rotating my gray "test" image, but there is now a white section. How can I use Python to change the "white" section to blue?
Here is my code; can someone help me? I'd appreciate it.
dst_im = Image.new("RGBA", (196,283), "blue" )
im = src_im.convert('RGBA')
rot = im.rotate( angle, expand=1 ).resize(size)
f = Image.new( 'RGBA', rot.size, (255,)*4 )
im2 = Image.composite( rot, f, rot )
im2.convert(src_im.mode)
im2_width, im2_height = im2.size
cut_box = (0, 0, im2_width, im2_height )
paste_box = ( left, top, im2_width+left, im2_height+top )
region = im2.crop( cut_box )
dst_im.paste( region, paste_box )
dst_im.save("test.gif")
I have the impression that your code could be simplified as follows:
from PIL import Image

src_im = Image.open("winter3.jpg")
angle = 45
size = 100, 100

dst_im = Image.new("RGBA", (196, 283), "blue")
im = src_im.convert('RGBA')
rot = im.rotate(angle, expand=1).resize(size)
dst_im.paste(rot, (50, 50), rot)  # rot doubles as its own alpha mask, so the transparent corners keep the blue background
dst_im.save("test.png")
This gives the following result:
Another answer using PIL is clearly more succinct. I had a similar problem, with the image in an ndarray. Yipes, mine came out way more complicated than user1202136's. I'm posting it only because it demonstrates another solution using numpy and array stacking, but user1202136's solution is much better.
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage

def rgba(rgb_img, alpha):
    '''
    ' takes an rgb ndarray r x c x 3 of dtype=uint8
    ' and adds an alpha 0-255 to each pixel
    '''
    rows = len(rgb_img)  # get image dimensions
    columns = len(rgb_img[0])
    rgb_flat = rgb_img.reshape([rows * columns, 3])  # list of rgb pixels
    a = np.zeros([rows * columns, 1], dtype=np.uint8)  # alpha for each pixel
    a.fill(alpha)
    rgba = np.column_stack([rgb_flat, a])  # place 4th column
    return rgba.reshape([rows, columns, 4])  # reform into r x c x 4

def pad_with_transparent_pixels(rgba_img):
    '''
    ' takes an rgba image r x c
    ' and places it within a buffer of [0 0 0 0] to become square,
    ' with sides = diagonal of img
    '''
    rows = len(rgba_img)  # get image dimensions
    columns = len(rgba_img[0])
    diag = (rows**2 + columns**2)**0.5
    diag = int(diag) + 1
    top_pad_height = (diag - rows) // 2 + 1  # integer division for Python 3
    left_pad_width = (diag - columns) // 2 + 1
    top_pad = np.zeros([top_pad_height, diag, 4], dtype=np.uint8)
    left_pad = np.zeros([rows, left_pad_width, 4], dtype=np.uint8)
    right_pad = np.zeros([rows,
                          # assures total width of top_pad for row_stack:
                          diag - left_pad_width - columns,
                          4],
                         dtype=np.uint8)
    center = np.column_stack([left_pad, rgba_img, right_pad])
    return np.row_stack([top_pad, center, top_pad])

def clean_rotate(rgba_img, angle):
    rows = len(rgba_img)
    columns = len(rgba_img[0])
    diag = (rows**2 + columns**2)**.5
    diag = int(diag)
    pad_img = pad_with_transparent_pixels(rgba_img)
    rot_img = scipy.ndimage.rotate(pad_img, angle)
    rot_img_rows = len(rot_img)
    rot_img_columns = len(rot_img[0])
    crop_side = max(1, (rot_img_columns - diag) // 2)  # max to avoid slicing [:0]
    crop_top = max(1, (rot_img_rows - diag) // 2)
    print(diag, crop_side, crop_top)  # print() for Python 3
    return rot_img[crop_top:-crop_top, crop_side:-crop_side]

img = plt.imread('C:\\Users\\bbrown\\Desktop\\Maurine.jpg')  # read in a jpg
figure, axes = plt.subplots(1, 2)  # create 1x2 grid of axes
axes[0].imshow(img)  # place image on first axes
rgba_image = rgba(img, 255)  # create an opaque rgba image
rot_img = clean_rotate(rgba_image, 50)

# make a pattern of 10 images
for i in range(10):
    rot_img = clean_rotate(rgba_image, 5 * i)
    axes[1].imshow(rot_img)
plt.show()
