I have a static image that I would like to animate to appear like this (except starting from a black image, not a white image):
(image is from this post: Create animated gif from static image)
Here is the code:
import random
import imageio
import numpy as np
from PIL import Image
img = Image.open('/Users/tom/Desktop/sink.jpeg')
pixels = img.load()
width, height = img.size
img2 = Image.new('RGB', img.size, color='black')
pixels2 = img2.load()
coord = []
for x in range(width):
    for y in range(height):
        coord.append((x, y))

images = []
while coord:
    x, y = random.choice(coord)
    pixels2[x, y] = pixels[x, y]
    coord.remove((x, y))
    if len(coord) % 500 == 0:
        images.append(np.array(img2))

imageio.mimsave('/Users/tom/Desktop/sink.gif', images)
When I run the code, the script never stops/outputs anything. Anyone know why?
Your code works; it is just very slow. If you are okay with a transparent background, you can do something like this:
import numpy as np
import imageio.v3 as iio
rng = np.random.default_rng()
px_per_iter = 1000
img = iio.imread("imageio:chelsea.png")
n_pixels = img.shape[0] * img.shape[1]
batches = int(np.ceil(n_pixels / px_per_iter)) # number of frames
pixels = rng.permutation(n_pixels) # order in which pixels are revealed
frames = np.zeros((batches + 1, *img.shape[:2], 4), dtype=np.uint8)
for batch_idx in range(batches):
    idx_batch = pixels[px_per_iter*batch_idx:px_per_iter*(batch_idx+1)]
    y_idx, x_idx = np.unravel_index(idx_batch, img.shape[:2])

    frame = frames[batch_idx+1]
    frame[y_idx, x_idx, :3] = img[y_idx, x_idx]
    frame[y_idx, x_idx, 3] = 255  # make added pixels non-transparent

iio.imwrite("fancy.gif", frames, loop=True)
(500kb GIF)
If you need the black background, you can use something like this; however, be aware that it will produce larger files:
import numpy as np
import imageio.v3 as iio
rng = np.random.default_rng()
px_per_iter = 1000
img = iio.imread("imageio:chelsea.png")
n_pixels = img.shape[0] * img.shape[1]
batches = int(np.ceil(n_pixels / px_per_iter)) # number of frames
pixels = rng.permutation(n_pixels) # order in which pixels are revealed
frames = np.zeros((batches + 1, *img.shape), dtype=np.uint8)
for batch_idx in range(batches):
    idx_batch = pixels[px_per_iter*batch_idx:px_per_iter*(batch_idx+1)]
    y_idx, x_idx = np.unravel_index(idx_batch, img.shape[:2])

    frame = frames[batch_idx+1]
    frame[:] = frames[batch_idx]
    frame[y_idx, x_idx] = img[y_idx, x_idx]

iio.imwrite("fancy.gif", frames)
(result exceeds 2MB, which is SO's limit)
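As an aside, the reason the original script never seems to finish is that random.choice(coord) plus coord.remove((x, y)) rescans the list on every iteration, so revealing every pixel costs quadratic time in the number of pixels. If you want to stay with the PIL-based approach from the question, a minimal sketch (assuming the same file paths as in the question) is to shuffle the coordinate list once and consume it in batches:
import random

import imageio
import numpy as np
from PIL import Image

img = Image.open('/Users/tom/Desktop/sink.jpeg')
pixels = img.load()
width, height = img.size

img2 = Image.new('RGB', img.size, color='black')
pixels2 = img2.load()

# Shuffle the coordinates once instead of removing them one at a time.
coords = [(x, y) for x in range(width) for y in range(height)]
random.shuffle(coords)

images = []
for start in range(0, len(coords), 500):
    for x, y in coords[start:start + 500]:
        pixels2[x, y] = pixels[x, y]
    images.append(np.array(img2))

imageio.mimsave('/Users/tom/Desktop/sink.gif', images)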
I am trying to simulate rain using NumPy. They say an image is worth more than a thousand words, so here is a description longer than two thousand words:
I already wrote the code, but I think my implementation is inefficient, so I want to know if NumPy has any built-in functions that can speed up the process:
import numpy as np
from PIL import Image
from random import random, randbytes
def rain(width, strikes=360, color=True, lw=3):
    assert not width % 16
    height = int(width * 9 / 16)
    img = np.zeros((height, width, 3), dtype=np.uint8)
    half = height / 2
    for i in range(strikes):
        x = round(random() * width)
        y = round(height - random() * half)
        x1 = min(x + lw, width - 1)
        if color:
            rgb = list(randbytes(3))
        else:
            rgb = (178, 255, 255)
        img[0:y, x:x1] = rgb
    return img
img1 = Image.fromarray(rain(1920))
img1.show()
img1.save('D:/rain.jpg', format='jpeg', quality=80, optimize=True)
img2 = Image.fromarray(rain(1920, color=False))
img2.show()
img2.save('D:/rain_1.jpg', format='jpeg', quality=80, optimize=True)
I was able to make it 2 to 4 times faster.
Since raindrops never stop in the upper half of the image, the upper half can be filled by stretching a single row of the lower half after all strikes are drawn.
Since broadcasting a tuple of channel values is relatively slow, pack the color into a single 32-bit integer instead.
import numpy as np
from random import random, randbytes


def rain(width=1920, strikes=360, color=True, lw=3):
    assert not width % 16
    height = int(width * 9 / 16)
    img = np.zeros((height, width), dtype=np.uint32)
    half = height / 2
    upper_bottom = int(half) - 1
    alpha = 255 << 24

    # Paint background.
    # The upper half will always be overwritten and can be skipped.
    img[upper_bottom:] = alpha

    for i in range(strikes):
        x = round(random() * width)
        y = round(height - random() * half)
        x1 = min(x + lw, width - 1)
        if color:
            # Pack color into int. See below for details.
            rgb = int.from_bytes(randbytes(3), 'big') + alpha
        else:
            # This is how to pack color into int.
            r, g, b = 178, 255, 255
            rgb = r + (g << 8) + (b << 16) + alpha
        # Only the lower half needs to be painted in this loop.
        img[upper_bottom:y, x:x1] = rgb

    # The upper half can simply be stretched from the lower half.
    img[:upper_bottom] = img[upper_bottom]

    # Unpack uint32 to uint8 x4 without deep copying.
    img = img.view(np.uint8).reshape((height, width, 4))
    return img
Note:
Endianness is ignored, so this may not work on some platforms (see the check sketched after these notes).
Performance is greatly degraded if the image width is very large.
If you are going to convert img to a PIL.Image, compare the performance of that step as well, since it is also improved.
Because the rain strikes overlap each other (which makes removing the for loop hard) and because there are not that many strikes (which leaves little room for improvement), I find it difficult to optimize further. Hopefully this is enough.
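Regarding the endianness note: the uint32 view assumes a little-endian layout, in which the low byte of the packed integer lands in the R channel. A small sanity check you could run (a sketch, not part of the original answer):
import sys

import numpy as np

assert sys.byteorder == 'little', (
    "the uint32 -> uint8 view assumes little-endian byte order; "
    "on big-endian platforms the channels would come out as A, B, G, R"
)

# Pack pure opaque red and confirm the byte view reads back as R, G, B, A.
packed = np.array([255 + (0 << 8) + (0 << 16) + (255 << 24)], dtype=np.uint32)
assert packed.view(np.uint8).tolist() == [255, 0, 0, 255]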
The easiest way to speed up code with NumPy is to use broadcasting and element-wise operations so that less efficient for loops can be avoided. Below is a performance comparison between my algorithm (rain2) and the OP's (rain1):
import numpy as np
import numpy.random as npr
from random import random, randbytes
from PIL import Image
import profile
def rain1(width, strikes=360, color=True, lw=3):
    assert not width % 16
    height = int(width * 9 / 16)
    img = np.zeros((height, width, 3), dtype=np.uint8)
    half = height / 2
    for i in range(strikes):
        x = round(random() * width)
        y = round(height - random() * half)
        x1 = min(x + lw, width - 1)
        if color:
            rgb = list(randbytes(3))
        else:
            rgb = (178, 255, 255)
        img[0:y, x:x1] = rgb
    return img
def rain2(width, strikes=360, color=True, lw=3):
    assert not width % 16
    height = width*9//16
    [inds,] = np.indices((width,))
    img = np.zeros((height, width, 4), dtype=np.uint8)
    img[:, :, 3] = 255
    half = height/2
    # randint from numpy.random lets you
    # define a lower and upper bound,
    # and number of points.
    x = list(set(npr.randint(0, width-lw-1, (strikes,))))
    x = np.sort(x)
    y = npr.randint(half, height, (len(x),))
    if color:
        rgb = npr.randint(0, 255, (len(x), 3), dtype=np.uint8)
    else:
        rgb = np.array([178, 255, 255], dtype=np.uint8)
    for offset in range(lw):
        img[:, x+offset, 3] = 0
        img[:, x+offset, :3] = rgb
    for xi, yi in zip(x, y):
        img[0:yi, xi:xi+lw, 3] = 255
    return img
def example_test_old(strikes, disp_im=True):
    img1 = Image.fromarray(rain1(1920, strikes=strikes))
    if disp_im: img1.show()
    img1.save('rain1.jpg', format='jpeg', quality=80, optimize=True)
    img2 = Image.fromarray(rain1(1920, strikes=strikes, color=False))
    if disp_im: img2.show()
    img2.save('rain1.jpg', format='jpeg', quality=80, optimize=True)


def example_test_new(strikes, disp_im=True):
    img1 = Image.fromarray(rain2(1920, strikes=strikes))
    if disp_im: img1.show()
    img1.save('rain2.png', format='png', quality=80, optimize=True)
    img2 = Image.fromarray(rain2(1920, strikes=strikes, color=False))
    if disp_im: img2.show()
    img2.save('rain2.png', format='png', quality=80, optimize=True)


if __name__ == "__main__":
    # Execute only if this module is not imported into another script
    example_test_old(360)
    example_test_new(360)
    profile.run('example_test_old(100000, disp_im=False)')
    profile.run('example_test_new(100000, disp_im=False)')
On my PC this speeds it up by a factor of 14.5!
Hope this helps.
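If you want wall-clock numbers in addition to the profile output, a small timeit sketch (assuming rain1 and rain2 are defined as above) is:
import timeit

# Time 10 runs of each implementation at the default 360 strikes.
t1 = timeit.timeit('rain1(1920)', globals=globals(), number=10)
t2 = timeit.timeit('rain2(1920)', globals=globals(), number=10)
print(f'rain1: {t1:.3f}s  rain2: {t2:.3f}s  speed-up: {t1 / t2:.1f}x')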
I want to make a function that generates a dataset: the object will be placed on a black image at different positions, with different angles and sizes, randomly up to a maximum of 20 times per image, and the x, y and angle of each placement should be saved to a text file.
The following image shows five objects at different positions and angles.
import numpy as np
import cv2
patch = cv2.imread('imagersult.png')
img = np.zeros((2048, 2048, 1), dtype="uint8")
Here is how you can use the scipy.ndimage module to rotate your patches:
import numpy as np
import cv2
from random import randrange
from scipy import ndimage
def patch_img(img, patch, amt=5):
    h, w, _ = img.shape
    for _ in range(amt):
        p = ndimage.rotate(patch, randrange(360))
        p_h, p_w, _ = p.shape
        x = randrange(w - p_w)
        y = randrange(h - p_h)
        seg = img[y: y + p_h, x: x + p_w]
        seg[:] = cv2.bitwise_xor(seg, p)
patch = cv2.imread('imagersult.png')
img = np.zeros((2048, 2048, 3), dtype="uint8")
patch_img(img, patch)
cv2.imshow("Image", img)
cv2.waitKey(0)
Outputs for multiple runs:
For grayscale and variation in size of the patches:
import numpy as np
import cv2
from random import randrange, uniform
from scipy import ndimage
def patch_img(img, patch, amt=5):
    h, w = img.shape
    min_scale = 0.5
    max_scale = 2
    for _ in range(amt):
        patch_h, patch_w = patch.shape
        scale = uniform(min_scale, max_scale)
        p = ndimage.rotate(cv2.resize(patch, (int(patch_w * scale), int(patch_h * scale))), randrange(360))
        p_h, p_w = p.shape
        x = randrange(w - p_w)
        y = randrange(h - p_h)
        seg = img[y: y + p_h, x: x + p_w]
        seg[:] = cv2.bitwise_xor(seg, p)
patch = cv2.imread('imagersult.png', 0)
img = np.zeros((2048, 2048), dtype="uint8")
patch_img(img, patch)
cv2.imshow("Image", img)
cv2.imwrite("result.png", img)
cv2.waitKey(0)
Sample output:
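The question also asks for the x, y and angle of each placement to be written to a text file, which the code above does not cover. One way to extend it (a sketch; the positions.txt file name and the returned placement list are my additions, not part of the original answer):
import numpy as np
import cv2
from random import randrange
from scipy import ndimage


def patch_img(img, patch, amt=5):
    # Place rotated copies of patch on img and return a list of (x, y, angle).
    h, w = img.shape[:2]
    placements = []
    for _ in range(amt):
        angle = randrange(360)
        p = ndimage.rotate(patch, angle)
        p_h, p_w = p.shape[:2]
        x = randrange(w - p_w)
        y = randrange(h - p_h)
        seg = img[y: y + p_h, x: x + p_w]
        seg[:] = cv2.bitwise_xor(seg, p)
        placements.append((x, y, angle))
    return placements


patch = cv2.imread('imagersult.png')
img = np.zeros((2048, 2048, 3), dtype="uint8")

# Write one "x y angle" line per placement.
with open('positions.txt', 'w') as f:
    for x, y, angle in patch_img(img, patch):
        f.write(f'{x} {y} {angle}\n')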
I have read in some images with the code below. These images are of different sizes. In order to get them to equal sizes, I would like to add a black frame around the images. I found some code to do this for a single image but not for a list as in my case.
import cv2
import numpy
import glob
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
folders = glob.glob(r'path\to\images\*')
imagenames_list = []
for folder in folders:
    for f in glob.glob(folder + '/*.png'):
        imagenames_list.append(f)

read_images = []
for image in imagenames_list:
    read_images.append(cv2.imread(image, cv2.IMREAD_GRAYSCALE))
To add a black frame for a single picture I used this code:
from PIL import Image
import numpy as np
old_im = Image.open('path/to/single/picture/*.png')
old_size = old_im.size
print(old_size)
new_size = (500, 500)
print(new_size)
new_im = Image.new("RGB", new_size)
x = int((new_size[0]-old_size[0])/2)
y = int((new_size[1]-old_size[1])/2)
new_im.paste(old_im, (x,y))
Images read by OpenCV are just NumPy arrays. You can just use NumPy slicing to copy:
import numpy as np


def makeborder(cv2img, new_width, new_height):
    '''
    cv2img: an image returned by cv2.imread()
    '''
    # gray scale or BGR/BGRA
    if len(cv2img.shape) == 2:
        new_shape = (new_height, new_width)
    else:
        new_shape = (new_height, new_width, cv2img.shape[-1])
    new_img = np.zeros(new_shape, dtype=cv2img.dtype)

    # compute the offsets, similar to your x & y
    offset_height = (new_height - cv2img.shape[0]) // 2
    offset_width = (new_width - cv2img.shape[1]) // 2
    # should check offset_height >= 0 and offset_width >= 0
    # but we skip that here
    # ...

    # now we just use numpy slicing to copy
    new_img[offset_height:offset_height + cv2img.shape[0],
            offset_width:offset_width + cv2img.shape[1]] = cv2img
    return new_img
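To apply this to the whole list from the question, a short usage sketch (assuming a 500x500 target size, as in the single-image example above):
framed_images = [makeborder(img, 500, 500) for img in read_images]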
Here I use the PIL library to read and manipulate images. I am confused about how to create a new image from the list containing the binary pixel data, after the image has been converted to binary.
I have tried it, but the resulting image is of type RGB, not a binary image. The following is the code that I wrote:
from PIL import Image
import numpy as np
img = Image.open('data_train/ga.jpeg')
pixels = img.load()
width, height = img.size
all_pixels = []
for x in range(width):
    for y in range(height):
        hpixel = pixels[x, y]
        img_gray = (0.2989 * hpixel[0]) + (0.5870 * hpixel[1]) + (0.1140 * hpixel[2])
        if img_gray >= 110:
            all_pixels.append('1')
        else:
            all_pixels.append('0')

data_isi = {'0': 0,
            '1': 255}
data = [data_isi[letter] for letter in all_pixels]
img_new = Image.fromarray(data)
img_new.save('data_train/gabiner.jpeg')
Updated Answer
As you are required to use a for loop, you could go with something more like this:
#!/usr/bin/env python3
from PIL import Image
# Load image and get dimensions
img = Image.open('start.jpg').convert('RGB')
width, height = img.size
# Actually load input pixels, else PIL is too lazy
imi = img.load()
# List of result pixels
imo = []
for y in range(height):
    for x in range(width):
        R, G, B = imi[x, y]
        gray = (0.2989 * R) + (0.5870 * G) + (0.1140 * B)
        if gray >= 110:
            imo.append(255)
        else:
            imo.append(0)
# Make output image and put output pixels into it
result = Image.new('L', (width,height))
result.putdata(imo)
# Save result
result.save('result.png')
Which turns this start image:
Into this result:
Original Answer
You appear to be converting the image to greyscale and thresholding at 110, which can be done much more simply, and faster, like this:
#!/usr/local/bin/python3
from PIL import Image
# Load image and make greyscale
im = Image.open('image.png').convert('L')
# Threshold to make black and white: "p > 110 and 255" evaluates to 255
# when p > 110 and to False (i.e. 0) otherwise
thr = im.point(lambda p: p > 110 and 255)
# Save result
thr.save('result.png')
I have a collection of individual images in a folder and want to display them in a custom grid (the size and shape of which will vary, but I'll use 4x16 in the code below).
My current code uses matplotlib and NumPy, but it is very slow (over a minute for 64 images) and the resolution of the final image is poor.
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import os
def make_array(folder):
    filename_list = []
    im_list = []
    workingdir = os.getcwd()
    if folder != "":
        workingdir += "/" + folder
    for file in os.listdir(workingdir):
        if file.endswith(".JPG"):
            filename_list.append(file)
    filename_list.sort()
    os.chdir(workingdir)
    for i in range(0, 16):
        im_list.append(np.asarray(Image.open(filename_list[i]).convert('RGB')))
        im_list.append(np.asarray(Image.open(filename_list[i+16]).convert('RGB')))
        im_list.append(np.asarray(Image.open(filename_list[i+32]).convert('RGB')))
        im_list.append(np.asarray(Image.open(filename_list[i+48]).convert('RGB')))
    return np.array(im_list)
def gallery(array, ncols=4):
    nindex, height, width, intensity = array.shape
    nrows = nindex // ncols
    assert nindex == nrows * ncols
    # want result.shape = (height*nrows, width*ncols, intensity)
    result = (array.reshape(nrows, ncols, height, width, intensity)
              .swapaxes(1, 2)
              .reshape(height*nrows, width*ncols, intensity))
    return result
def plot_array(gallery, name):
    f = plt.figure()
    f.set_size_inches(30, 120)
    axes = plt.gca()
    plt.xticks([])
    plt.yticks([])
    plt.imshow(gallery)
    plt.show()
    f.savefig(name, bbox_inches='tight')
# EDIT TO MATCH THE DESIRED PARAMETERS
# Note: the images will be plotted in 'writing order', left to right then top to bottom
name = "4_days_per_particle"  # Name of the output file (.png)
folder = "Pictures_4days"  # Name of folder containing the pictures in the working directory (if not cwd itself)

# Save initial working directory
mainDir = os.getcwd()

# Creates the array of images
array = make_array(folder)

# Reorders the axes to shape the gallery
gal = gallery(array)

# Plots and saves the figure
plot_array(gal, name)

# Cleanup directory
os.chdir(mainDir)
How can I achieve the same result faster and control the output resolution (up to keeping the original resolution of the image files)?
Thank you!
I ended up finding a cleaner way to do this using OpenCV, inspired by this gist:
https://gist.github.com/pgorczak/95230f53d3f140e4939c#file-imgmatrix-py
In my experience, this approach is somewhat faster and bypassing matplotlib enables full control of the output resolution.
In addition, cv2.resize() can be used to rescale the image if necessary and the IMWRITE_JPEG_QUALITY argument can be used to set the JPEG export quality as a handle to control filesize.
import itertools
import cv2
import os
import numpy as np
#User defined variables
dirname = "my_directory" #Name of the directory containing the images
name = "my_image_name" + ".jpg" #Name of the exported file
margin = 20 #Margin between pictures in pixels
w = 8 # Width of the matrix (nb of images)
h = 8 # Height of the matrix (nb of images)
n = w*h
filename_list = []
for file in os.listdir(dirname):
    if file.endswith(".JPG"):
        filename_list.append(file)
filename_list.sort()
print(filename_list)

imgs = [cv2.imread(os.getcwd() + "/" + dirname + "/" + file) for file in filename_list]
#Define the shape of the image to be replicated (all images should have the same shape)
img_h, img_w, img_c = imgs[0].shape
#Define the margins in x and y directions
m_x = margin
m_y = margin
#Size of the full size image
mat_x = img_w * w + m_x * (w - 1)
mat_y = img_h * h + m_y * (h - 1)
#Create a matrix of zeros of the right size and fill with 255 (so margins end up white)
imgmatrix = np.zeros((mat_y, mat_x, img_c),np.uint8)
imgmatrix.fill(255)
#Prepare an iterable with the right dimensions
positions = itertools.product(range(h), range(w))
for (y_i, x_i), img in zip(positions, imgs):
    x = x_i * (img_w + m_x)
    y = y_i * (img_h + m_y)
    imgmatrix[y:y+img_h, x:x+img_w, :] = img
resized = cv2.resize(imgmatrix, (mat_x//3,mat_y//3), interpolation = cv2.INTER_AREA)
compression_params = [cv2.IMWRITE_JPEG_QUALITY, 90]
cv2.imwrite(name, resized, compression_params)
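If you want to keep the original resolution (as asked in the question), you can simply skip the resize step and write the assembled matrix directly; a sketch:
# Write the grid at full resolution; lower IMWRITE_JPEG_QUALITY (0-100) to trade
# quality for file size, or switch to PNG output for a lossless result.
cv2.imwrite(name, imgmatrix, [cv2.IMWRITE_JPEG_QUALITY, 90])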