Array of Matrices in Python - python

as the title says, I'm trying to define a array of matrices (witch represents images) in Python. But, when I try to read a matrix, I got this message: "ImageData instance has no attribute 'getitem'""
I'm starting to study Python these days, so I know that this must be simple for a lot of people, but I don't know what's wrong. This is my code:
ImageData.py
import random
import math
class ImageData:
def __init__ (self, width, height):
self.width = width
self.height = height
self.data = []
for i in range(width):
self.data.append([0] * height)
def set_data (self, x, y, value):
self.data[x][y] = value
def generate_voronoi_diagram (self, seeds):
nx = []
ny = []
nr = []
ng = []
nb = []
for i in range(seeds):
# Generate a cell position
pos_x = random.randrange(self.width)
pos_y = random.randrange(self.height)
nx.append(pos_x)
ny.append(pos_y)
# Save the rgb data
nr.append(random.randrange(256))
ng.append(random.randrange(256))
nb.append(random.randrange(256))
for x in range(self.width):
for y in range(self.height):
# Return the Euclidean norm
d_min = math.hypot(self.width-1, self.height-1)
j = -1
for i in range(seeds):
# The distance from a cell to x, y point being considered
d = math.hypot(nx[i]-x, ny[i]-y)
if d < d_min:
d_min = d
j = i
self.data[x][y] = [nr[j], ng[j], nb[j]]
UncertaintyVisualisaton.py
from PIL import Image
import numpy
import ImageData
def generate_uncertainty_visualisation (images, width, height):
image = Image.new("RGB", (width, height))
putpixel = image.putpixel
r = g = b = []
for i in range(width):
r.append([0] * height)
g.append([0] * height)
b.append([0] * height)
for i in range(len(images)):
image = images[i]
for x in range(width):
for y in range(height):
#Error here
rgb = image[x][y]
r[x][y] += rgb[0]
g[x][y] += rgb[1]
b[x][y] += rgb[2]
for x in range(width):
for y in range(height):
r[x][y] /= len(images)
g[x][y] /= len(images)
b[x][y] /= len(images)
putpixel((x, y), (r[x][y], g[x][y], b[x][y]))
image.save("output.png", "PNG")
if __name__ == "__main__":
width = 10;
height = 10;
entries = []
seeds = numpy.random.poisson(20)
images = 1
for n in range(images):
entry = ImageData.ImageData(width, height)
entry.generate_voronoi_diagram(seeds)
entries.append(entry)
generate_uncertainty_visualisation(entries, width, height)
Any help would be very appreciated.
Thanks.

In UncertaintyVisualisaton.py first you set:
image = Image.new("RGB", (width, height))
And then looping over images you reassign:
image = images[i]
This is probably not want you want.
Also your error:
#Error here
rgb = image[x][y]
is happening because ImageData is not a list. The data attibute in it is:
#no more Error here
rgb = image.data[x][y]

Related

When opening image with PILLOW the image is modified

When opening the postscript image with online tools all the pixels align correctly but when using pillow, the pixels are in different sizes.
[Image of the problem]
[Image of the desired result]
def saveFile(canvas:tk.Canvas):
EpsImagePlugin.gs_windows_binary = r'C:\Program Files\gs\gs9.56.1\bin\gswin64c'
file_name = "img"
canvas.postscript(file=f"images\ps\{file_name}.ps", colormode='color')
psimage=Image.open(f'images\ps\{file_name}.ps')
psimage.save(f'images\png\{file_name}.png', "png")
psimage.close()
Keep in mind the pixels are not the size of a 'real' pixel, they are much bigger and changing the format to 'png' or 'jpg' didn't solve the problem.
If someone knows the solution to this problem, I will greatly appreciate it.
Sorry for the missing information, hopefully this is enough.
Img.ps file as text
And this is how the code generates the .ps file
import tkinter as tk
import random
from save import saveFile
pixels = []
FIELD_SIZE = (1280, 720)
PIXEL_SIZE = 10
class random_pixels():
def draw_full_screen(canvas):
height = round(FIELD_SIZE[1] / PIXEL_SIZE)
width = round(FIELD_SIZE[0] / PIXEL_SIZE)
for y in range(height):
for x in range(width):
color = ["#"+''.join([random.choice('ABCDEF0123456789') for i in range(6)])]
# color = "#444"
x_top_left = x * PIXEL_SIZE + 1
y_top_left = y * PIXEL_SIZE + 1
x_bottom_right = x_top_left + PIXEL_SIZE - 1
y_bottom_right = y_top_left + PIXEL_SIZE - 1
resolution = width * height
util.draw_pixel(canvas, color, x_top_left, x_bottom_right, y_top_left, y_bottom_right, resolution)
canvas.update()
canvas.update()
print("\nPixels drawn!")
print("\nSaving image...")
saveFile(canvas)
canvas.focus_set()
print('\nImage saved!')
class util:
def draw_pixel(canvas:tk.Canvas, color, x0, x, y0, y, pixels_to_draw=1):
pixel = canvas.create_rectangle(x0, y0, x, y, fill=color, outline=color)
pixels.append(pixel)
print(f"{len(pixels)}/{pixels_to_draw} | { round(len(pixels) / pixels_to_draw * 100, 2)}%")
return None
def get_theme(e, canvas, root):
if len(pixels) != 0:
canvas.delete('all')
pixels.clear()
root.focus_set()
if e.char == "g":
random_pixels.draw_full_screen(canvas)
canvas.focus_set()

ModuleNotFoundError: No module named 'preprocessors'

I'm getting module not found error while executing my program.
import numpy as np
import cv2
from preprocessors import x_cord_contour, makeSquare, resize_to_pixel
I am quite sure that you are following along the the Deep Learning course by Rajeev D. And I further guess that you haven't downloaded the VM image as suggested in the video.
The functions x_cord_contour, makeSquare and resize_to_pixel are defined in a custom module. If you are following the course without the VM just copy and paste the functions below into your code and remove the import statement.
import numpy as np
import cv2
def x_cord_contour(contour):
# This function take a contour from findContours
# it then outputs the x centroid coordinates
M = cv2.moments(contour)
return (int(M['m10']/M['m00']))
def makeSquare(not_square):
# This function takes an image and makes the dimenions square
# It adds black pixels as the padding where needed
BLACK = [0,0,0]
img_dim = not_square.shape
height = img_dim[0]
width = img_dim[1]
#print("Height = ", height, "Width = ", width)
if (height == width):
square = not_square
return square
else:
doublesize = cv2.resize(not_square,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
height = height * 2
width = width * 2
#print("New Height = ", height, "New Width = ", width)
if (height > width):
pad = int((height - width)/2)
#print("Padding = ", pad)
doublesize_square = cv2.copyMakeBorder(doublesize,0,0,pad,pad,cv2.BORDER_CONSTANT,value=BLACK)
else:
pad = (width - height)/2
#print("Padding = ", pad)
doublesize_square = cv2.copyMakeBorder(doublesize,pad,pad,0,0,\
cv2.BORDER_CONSTANT,value=BLACK)
doublesize_square_dim = doublesize_square.shape
#print("Sq Height = ", doublesize_square_dim[0], "Sq Width = ", doublesize_square_dim[1])
return doublesize_square
def resize_to_pixel(dimensions, image):
# This function then re-sizes an image to the specificied dimenions
buffer_pix = 4
dimensions = dimensions - buffer_pix
squared = image
r = float(dimensions) / squared.shape[1]
dim = (dimensions, int(squared.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
img_dim2 = resized.shape
height_r = img_dim2[0]
width_r = img_dim2[1]
BLACK = [0,0,0]
if (height_r > width_r):
resized = cv2.copyMakeBorder(resized,0,0,0,1,cv2.BORDER_CONSTANT,value=BLACK)
if (height_r < width_r):
resized = cv2.copyMakeBorder(resized,1,0,0,0,cv2.BORDER_CONSTANT,value=BLACK)
p = 2
ReSizedImg = cv2.copyMakeBorder(resized,p,p,p,p,cv2.BORDER_CONSTANT,value=BLACK)
img_dim = ReSizedImg.shape
height = img_dim[0]
width = img_dim[1]
#print("Padded Height = ", height, "Width = ", width)
return ReSizedImg

Speeding up python image color thresholding/filtering

I am working on a problem where I need to find bounding boxes for white area in an image. Since I am dealing with real-world pictures, I have set a threshold on the RGB values, I create a mask from that and then label it and get bounding box coordinates from that. Here is the code.
import numpy as np
from skimage import io, measure
def bin_labelled_img_to_bboxes(bin_image_labelled):
bboxes = []
for j in np.unique(bin_image_labelled):
if j == 0:
continue
curr = (bin_image_labelled == j)
if np.sum(curr) < 50*50:
continue
indices = np.nonzero(curr)
miny = np.min(indices[0])
minx = np.min(indices[1])
maxy = np.max(indices[0])
maxx = np.max(indices[1])
bboxes.append(((miny, minx), (maxy, maxx)))
return bboxes
class WhiteSeperator(object):
def __init__ (self, img_path):
self.img_path = img_path
self.img = io.imread(self.img_path)
self.bin_image_labelled = np.zeros((self.img.shape[0], self.img.shape[1]))
self.bboxes = []
def get_bin_labelled_img(self):
img = self.img
chan1 = (img[:,:,0] > 200) * (img[:,:,0] <= 255)
chan2 = (img[:,:,0] > 180) * (img[:,:,0] <= 255)
chan3 = (img[:,:,0] > 140) * (img[:,:,0] <= 255)
bin_img = (chan1*chan2*chan3)
bin_image_labelled = measure.label(bin_img)
return bin_image_labelled
def get_white_bboxes(self):
final_white_bboxes = []
self.bin_image_labelled = self.get_bin_labelled_img()
white_bboxes = bin_labelled_img_to_bboxes(self.bin_image_labelled)
for bbox in white_bboxes:
width = bbox[1][1]-bbox[0][1]
height = bbox[1][0]-bbox[0][0]
if height > 80 and width > 200:
self.bboxes.append(bbox)
final_white_bboxes.append(bbox)
return final_white_bboxes
This takes about 3-11 seconds per image for high res images (3000 something x 2000 something). My assumption is that the variance in time per image depends on the number of white bounding boxes found (blaming the bin_labelled_img_to_bboxes function here)
Since I have to do this on video frames, even 3 seconds is super slow. Can the above be done in a more efficient way?

Why is the image being used an image from a previous process?

Quite simply, I'm learning how to edit photos with openCV/numpy.
My question is why is the second function using the image created by the first?
I run two functions - one to color columns in black and white, and a second that colors rows in black and white.
First function runs fine, but the second one uses the image created in the first, so I get rows and columns in black and white.
import cv2
import numpy as np
from matplotlib import pyplot as plt
img_source = "brad.jpg"
def read_image(image_source):
#global img, width, height
img = cv2.imread(image_source, 1)
height, width = img.shape[:2]
print("Image size: x ", width, " y ", height)
return img, width, height
def black_and_white_cols(image_source):
width_adjustment = 100
total_cols = round(width / width_adjustment,0)
edited_image = image_source
bw_image = cv2.imread(img_source, 0)
# The next line is to convert to the right interface
# https://stackoverflow.com/questions/11067962/is-it-possible-to-have-black-and-white-and-color-image-on-same-window-by-using-o
bw_image_b = cv2.cvtColor(bw_image,cv2.COLOR_GRAY2BGR)
for x in range(1, int(total_cols), 2):
top_row = 0
bottom_row = height
left_col = x*width_adjustment
right_col = (x * width_adjustment) + width_adjustment
bw_part = bw_image_b[top_row:bottom_row, left_col:right_col]
edited_image[top_row:bottom_row, left_col:right_col] = bw_part
show_image(edited_image)
def black_and_white_cols(image_source):
width_adjustment = 100
total_cols = round(width / width_adjustment,0)
edited_image = image_source
bw_image = cv2.imread(img_source, 0)
# The next line is to convert to the right interface
# https://stackoverflow.com/questions/11067962/is-it-possible-to-have-black-and-white-and-color-image-on-same-window-by-using-o
bw_image_b = cv2.cvtColor(bw_image,cv2.COLOR_GRAY2BGR)
for x in range(1, int(total_cols), 2):
top_row = 0
bottom_row = height
left_col = x*width_adjustment
right_col = (x * width_adjustment) + width_adjustment
bw_part = bw_image_b[top_row:bottom_row, left_col:right_col]
edited_image[top_row:bottom_row, left_col:right_col] = bw_part
show_image(edited_image)
return edited_image
def black_and_white_rows(image_source):
width_adjustment = 100
edited_image = image_source
total_rows = round(height / width_adjustment,0)
bw_image = cv2.imread(img_source, 0)
bw_image_b = cv2.cvtColor(bw_image,cv2.COLOR_GRAY2BGR)
for x in range(1, int(total_rows), 2):
top_row = x * width_adjustment
bottom_row = (x * width_adjustment) + width_adjustment
left_col = 0
right_col = width
bw_part = bw_image_b[top_row:bottom_row, left_col:right_col]
edited_image[top_row:bottom_row, left_col:right_col] = bw_part
show_image(edited_image)
def show_image(image_source):
cv2.imshow('This is your image', image_source)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
img, width, height = read_image(img_source)
new_image = black_and_white_cols(img)
new_image_2 = black_and_white_rows(img)
This is the image after new_image = black_and_white_cols(img) runs.
and here's after new_image_2 = ... runs.
Why does the second image keep the black and white columns? I'm calling it using the very original img_source image, via read_image. Why is it using the column edited image?
As in the comments, when you do edited_image = image_source, you only copy the pointer to the image array, not cloning the array itself. You can do
edited_image = image_source.copy()
which copies image_source to edited_image.

Python PIL concatenate images

I'm working on a project where I need to concatenate a lot of images (80282). Each image is 256 x 256 pixels, and some of the files are empty (no image), so I need to create a blank image to replace the file. I have the data in this format: data-D0H0-X52773-Y14041
X and Y correspond to the coordinates that I need to concatenate in order. The order is from the top left X52773-Y14314 to the bottom right X52964-Y14041. It is 294 iterations on X and 274 on Y. Here is the code I have written which is not working correctly, I could use any help if you have an idea, currently, my images are not well aligned on Y. For example, the image X10-Y10 is not under the image X10-Y11 as it should. I think I have some problem using correctly the try: and except:
Thanks for you help !
from PIL import Image
width = 75264
height = 70144
new_im = Image.new('RGBA', (75264, 70144))
x_offset = 0
y_offset = 0
coordinate = {}
coordinate['x']=52672
coordinate['y']=14314
#top image line should be from: X52,672-Y14,314 to X52,965-Y14,314
#bottom image line should be from: X52,672-Y14,041 to X52,965-Y14,041
for irow in range(0, 274):
for icol in range(0, 294):
try:
if (x_offset == width):
coordinate['y'] = coordinate['y'] - 1
coordinate['x'] = 52672
img = Image.open("data-D0H0-X"+str(coordinate['x'])+"-Y"+str(coordinate['y'])+".png")
except:
coordinate['x'] = coordinate['x'] + 1
blank = Image.new('RGBA', (256,256))
new_im.paste(blank, (x_offset, y_offset))
x_offset += 256
if (x_offset == width):
x_offset = 0
y_offset += 256
break
new_im.paste(img, (x_offset, y_offset))
x_offset += 256
if (x_offset == width):
x_offset = 0
y_offset += 256
coordinate['x'] = coordinate['x'] + 1
new_im.show()
new_im.save('full_image.png')
EDIT:
Here is the new code I've modified according to your answer. However, I'm still getting an error:
struct.error: 'I' format requires 0 <= number <= 4294967295
Not sure if my coordinate calcul is right now.
CODE:
from PIL import Image
import glob
import imghdr
width = 75264
height = 70144
new_im = Image.new('RGBA', (width, height))
for filename in glob.glob('data-D0H0-X*.png'):
tmp_arr = filename.split('-')
x_coord = int(tmp_arr[2][1:6])
y_coord = int(tmp_arr[3][1:6])
info = imghdr.what(filename)
if (info == "png"):
new_img = Image.open(filename)
else:
new_img = Image.new('RGBA', (256,256))
x_coord = (x_coord-52672)*256
y_coord = (14314-y_coord)*256
print x_coord, y_coord
new_im.paste(new_img, (x_coord, y_coord))
new_im.show()
new_im.save('full_image.png')
Your coordinate arithmetic seems a bit off. Since your images are 256x256 you should never have to inc/dec x and y by 1 as you do in your code.
The code below hasn't been tested but should provide a general outline.
from PIL import Image
import glob
width = 75264
height = 70144
new_im = Image.new('RGBA', (width, height))
for filename in glob.glob('data-D0H0-X*.png'):
tmp_arr = filename.split('-')
x_coord = int(tmp_arr[2][1:])
y_coord = int(tmp_arr[3][1:])
small_img = Image.open(filename)
new_im.paste(small_img, (x_coord, y_coord))
new_im.show()
new_im.save('full_image.png')

Categories

Resources