I'm currently trying to create an image preprocessor that adds specific noise for AI training. In this case, I'm trying to add contour lines over the top of my input image that resemble sketch lines.
So far I have been able to get these lines drawn on my image, but they are very sharp and pixelated, which obviously doesn't resemble real artist-drawn lines. I need some way to apply a slight blur to soften these edges, but so far I have not been able to do this.
Here is a visual guide to what I am trying to achieve:
I need to find the contours in image (A) and draw them onto a new layer with alpha channel (B). I then need to blur these lines (C) and paste it back onto the original image (D).
Here is the section I'm having problems with:
# Convert the image to a NumPy array
augmented_image = np.array(augmented_image)
augmented_shape = augmented_image.shape
# Convert the image to grayscale
grey_image = color.rgb2gray(augmented_image)
# Detect the contours of the image using the Canny edge detector
edges = feature.canny(grey_image, sigma=3)
# Create a blank image with dimensions 256 x 256
blank_image = np.zeros((256, 256, 4))
# Add an alpha channel to the image (convert to 4-channel RGBA)
alpha = np.ones((augmented_image.shape[0], augmented_image.shape[1], 1), dtype=augmented_image.dtype) * 255
augmented_image = np.concatenate([augmented_image, alpha], axis=2)
# Iterate over the contours
for contour in measure.find_contours(edges, 0.8):
# Set offset
offset = 10
# Select a random point along the contour
point = np.random.randint(0, len(contour))
start_row, start_col = contour[point]
start_row = start_row + offset
start_col = start_col + offset
start_row = np.clip(start_row, 0, augmented_shape[0] - 1)
start_col = np.clip(start_col, 0, augmented_shape[0] - 1)
# Select a random point along the contour that is not the same as the first point
point = np.random.randint(0, len(contour))
while point == start_row:
point = np.random.randint(0, len(contour))
end_row, end_col = contour[point]
end_row = end_row + offset
end_col = end_col + offset
end_row = np.clip(end_row, 0, augmented_shape[0] - 1)
end_col = np.clip(end_col, 0, augmented_shape[0] - 1)
# Draw the line on the image using the draw.line function
rr, cc = draw.line(int(start_row), int(start_col), int(end_row), int(end_col))
blank_image[rr, cc] = 30
# Smooth the contour lines using the gaussian function
blank_image = filters.gaussian(blank_image, sigma=1)
# Make sure image is same data-type
blank_image = blank_image.astype(augmented_image.dtype)
# Create a mask for the contour lines
blank_alpha = blank_image[:, :, 3:]
mask = np.any(blank_alpha > 0, axis=2)
# Apply the smooth image to the masked region of the original image
augmented_image[mask] = blank_image[mask]
# Convert image back to 3 layers rgb
augmented_image = augmented_image[:, :, :3]
I know that the problem lies somewhere in the 'mask' variable definition. Something about it being a boolean type just pastes a line of pure black squares on my image rather than the expected blurred line. No amount of messing with layer order or adding extra layers to copy from has fixed this.
Doing this process without trying to blur the lines works great, minus the fact that it's very pixelated and doesn't fit the style of the training data. Blurring the image without trying to re-combine anything produces an adequate blurred line as well, however the entire image is blurred.
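For reference, here is a minimal, self-contained sketch (separate from my actual pipeline) of the kind of blend I am trying to end up with in step (D): instead of a hard boolean mask, use the blurred line layer's alpha channel as a per-pixel weight. The stand-in arrays are just placeholders, and the channel_axis argument assumes scikit-image 0.19 or newer:
import numpy as np
from skimage import filters
# Stand-in data, only to illustrate the blend (not my real images)
base = np.ones((256, 256, 3))               # "original" RGB image, float in [0, 1]
line_layer = np.zeros((256, 256, 4))        # RGBA layer holding the drawn lines
line_layer[100:103, 50:200, :3] = 30 / 255  # dark grey line colour
line_layer[100:103, 50:200, 3] = 1.0        # opaque only where lines were drawn
# Soften the lines; channel_axis=-1 keeps the channels from bleeding into each other
blurred = filters.gaussian(line_layer, sigma=1, channel_axis=-1)
# Per-pixel alpha blend instead of a boolean mask
alpha = blurred[:, :, 3:4]
result = blurred[:, :, :3] * alpha + base * (1 - alpha)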
Here's what I can produce without the blurring process, and a rough idea of what I would like the final product to look like (made in Photoshop):
It's only when I try to mask and combine that this becomes a problem. I will post the full code below for anyone to run on their own system:
import random
import numpy as np
import skimage
from skimage.transform import rotate, resize
from skimage import draw, feature, color, measure, filters, util
from skimage.util import random_noise
import PIL
from PIL import Image
import os
import argparse
import cv2
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from pathlib import Path
import imghdr
# Set parser args
parser = argparse.ArgumentParser()
parser.add_argument("--dirty_dir", help="path to folder containing dirty images")
parser.add_argument("--clean_dir", help="path to folder containing clean images")
parser.add_argument("--dirty_savedir", help="path to dirty output folder")
parser.add_argument("--clean_savedir", help="path to clean output folder")
a = parser.parse_args()
# Set folder paths
dirty_dir = a.dirty_dir + '/'
clean_dir = a.clean_dir + '/'
dirty_savedir = a.dirty_savedir + '/'
clean_savedir = a.clean_savedir + '/'
print(f"Source Folder: {dirty_dir}")
print(f"Source Folder: {clean_dir}")
print(f"Output Folder: {dirty_savedir}")
print(f"Output Folder: {clean_savedir}")
def augment_image(image, filename, clean_dir):
for i in range(8):
# Create list for clean imgs
clean_list = []
# Randomly select a 256x256 region
w, h = image.size[0], image.size[1]
print(w,h)
top = random.randint(0, h - 256)
left = random.randint(0, w - 256)
right = left + 256
bottom = top + 256
dims = [left, top, right, bottom]
print(f'{filename} dimensions:{dims}')
# Add dimensions to clean_list
clean_list.extend(dims)
augmented_image = image.crop((dims))
print(f'{filename} shape: {augmented_image.size}')
# Randomly rotate the image by 90, 180, or 270 degrees
angle = random.choice([0, 90, 180, 270])
augmented_image = augmented_image.rotate(angle)
# Add angle to list
clean_list.append(angle)
# Randomly flip the image horizontally
flip_lr = random.choice([True, False])
if flip_lr == True:
augmented_image = augmented_image.transpose(Image.FLIP_LEFT_RIGHT)
clean_list.append("flip_lr")
else:
clean_list.append("none")
# Randomly flip the image vertically
flip_tb = random.choice([True, False])
if flip_tb == True:
augmented_image = augmented_image.transpose(Image.FLIP_TOP_BOTTOM)
clean_list.append("flip_tb")
else:
clean_list.append("none")
# Convert the image to a NumPy array
augmented_image = np.array(augmented_image)
augmented_shape = augmented_image.shape
# Convert the image to grayscale
grey_image = color.rgb2gray(augmented_image)
# Detect the contours of the image using the Canny edge detector
edges = feature.canny(grey_image, sigma=3)
# Create a blank image with dimensions 256 x 256
blank_image = np.zeros((256, 256, 4))
# Add an alpha channel to the image (convert to 4-channel RGBA)
alpha = np.ones((augmented_image.shape[0], augmented_image.shape[1], 1), dtype=augmented_image.dtype) * 255
augmented_image = np.concatenate([augmented_image, alpha], axis=2)
# Iterate over the contours
for contour in measure.find_contours(edges, 0.8):
# Set offset
offset = 10
# Select a random point along the contour
point = np.random.randint(0, len(contour))
start_row, start_col = contour[point]
start_row = start_row + offset
start_col = start_col + offset
start_row = np.clip(start_row, 0, augmented_shape[0] - 1)
start_col = np.clip(start_col, 0, augmented_shape[0] - 1)
# Select a random point along the contour that is not the same as the first point
point = np.random.randint(0, len(contour))
while point == start_row:
point = np.random.randint(0, len(contour))
end_row, end_col = contour[point]
end_row = end_row + offset
end_col = end_col + offset
end_row = np.clip(end_row, 0, augmented_shape[0] - 1)
end_col = np.clip(end_col, 0, augmented_shape[0] - 1)
# Draw the line on the image using the draw.line function
rr, cc = draw.line(int(start_row), int(start_col), int(end_row), int(end_col))
blank_image[rr, cc] = 30
# Smooth the contour lines using the gaussian function
blank_image = filters.gaussian(blank_image, sigma=1)
# Make sure image is same data-type
blank_image = blank_image.astype(augmented_image.dtype)
# Create a mask for the contour lines
blank_alpha = blank_image[:, :, 3:]
mask = np.any(blank_alpha > 0, axis=2)
# Apply the smooth image to the masked region of the original image
augmented_image[mask] = blank_image[mask]
# Convert image back to 3 layers rgb
augmented_image = augmented_image[:, :, :3]
## Add more noise types (lines, wrinkles, color)/make noise random chance to occur ##
# Add random noise to the image
noise = random_noise(augmented_image, mode='pepper', amount=0.011)
# Convert the noisy image back to a PIL image
augmented_image = np.random.random_sample(augmented_image.shape) * 255
augmented_image = np.array(255 * noise, dtype=np.uint8)
augmented_image = Image.fromarray(augmented_image)
# Save file
augmented_image.save(dirty_savedir + '_' + str(i) + '_' + filename)
print(clean_list)
# Function to mirror edits onto clean images
def clean_aug(clean_dir, clean_list):
# Open clean directory
for filename in os.listdir(f"{clean_dir}"):
# Rule out any weird Mac files
if not filename.startswith("._"):
with Image.open(clean_dir + filename) as image:
# Define clean dimensions
clean_dims = clean_list[0:4]
# Crop image
clean_augmented = image.crop((clean_dims))
# Rotate clean image
clean_augmented = clean_augmented.rotate(clean_list[4])
# Flip clean image
if clean_list[5] == 'flip_lr':
clean_augmented = clean_augmented.transpose(Image.FLIP_LEFT_RIGHT)
if clean_list[6] == 'flip_tb':
clean_augmented = clean_augmented.transpose(Image.FLIP_TOP_BOTTOM)
# Save clean images
clean_augmented.save(clean_savedir + '_' + str(i) + '_' + filename)
print("Clean alterations copied successfully")
clean_aug(clean_dir, clean_list)
# Clean up unnecessary files
def file_scrub():
dirty_dir = dirty_savedir
image_extensions = [".png", ".jpg"]  # add all of your image file extensions here
img_type_accepted_by_tf = ["bmp", "gif", "jpeg", "png"]
for filepath in Path(dirty_dir).rglob("*"):
if filepath.suffix.lower() in image_extensions:
img_type = imghdr.what(filepath)
if img_type is None:
print(f"{filepath} is not an image")
elif img_type not in img_type_accepted_by_tf:
print(f"{filepath} is a {img_type}, not accepted by TensorFlow")
def image_aug(dirty_dir, clean_dir):
for filename in os.listdir(f"{dirty_dir}"):
# Check if the filename starts with "._"
if not filename.startswith("._"):
with Image.open(dirty_dir + filename) as image:
# Open the image
augment_image(image, filename, clean_dir)
image_aug(dirty_dir, clean_dir)
file_scrub()
Apologies for the cumbersome codebase. I just wanted to minimize the risk of discrepancies that could arise from trying to condense my script into excerpts. If there is any clarification I can provide, please let me know!
I have 15 tiles (TIFF files) in a folder and I would like to combine them into a single file, with all of the tiles stitched together as one TIFF image. How do I do that?
What have I tried so far?
import imageio
import os
path = "path/to/dir"
image_path_list = os.listdir(path)
with imageio.get_writer("new_image.tif") as new_image:
for image_path in image_path_list:
image = imageio.imread(path+image_path)
new_image.append_data(image)
This saves the images as separate pages within one TIFF file. I would like to stitch all the images together and save them like the following:
1, 2, 3, ..., 15 represent the tiles. They need to be stitched into a single image.
It seems from your comments that you are prepared to consider a non-Python solution, so I used ImageMagick in the Terminal to montage 15 images as follows:
magick montage -tile 3x -geometry +0+0 09*tif result.tif
To demonstrate how you can lay out 5 images across instead of 3, add a different background and affect the horizontal and vertical spacing differently, here is a variation:
magick montage -background magenta -tile 5x -geometry +5+15 09*tif result.tif
Just FYI, I made the 15 randomly coloured blocks like this:
for x in {a..o} ; do magick xc: +noise random -scale 80x50\! 09$x.tif ; done
Given one directory with 15 images of the same size, using PIL (Pillow) I ended up with:
from PIL import Image
import os
path_to_file ='tiff-files'
images = []
for i in os.listdir(path_to_file):
with Image.open(path_to_file+'/'+i) as im:
images.append(im.copy())
new_image = Image.new(images[0].mode, (images[0].size[0]*3,images[0].size[1]*5))
new_image.paste(images[0])
new_image.paste(images[1],(images[0].size[0]*1,0))
new_image.paste(images[2],(images[0].size[0]*2,0))
new_image.paste(images[3],(0,images[0].size[1]*1))
new_image.paste(images[4],(images[0].size[0]*1,images[0].size[1]*1))
new_image.paste(images[5],(images[0].size[0]*2,images[0].size[1]*1))
new_image.paste(images[6],(0,images[0].size[1]*2))
new_image.paste(images[7],(images[0].size[0]*1,images[0].size[1]*2))
new_image.paste(images[8],(images[0].size[0]*2,images[0].size[1]*2))
new_image.paste(images[9],(0,images[0].size[1]*3))
new_image.paste(images[10],(images[0].size[0]*1,images[0].size[1]*3))
new_image.paste(images[11],(images[0].size[0]*2,images[0].size[1]*3))
new_image.paste(images[12],(0,images[0].size[1]*4))
new_image.paste(images[13],(images[0].size[0]*1,images[0].size[1]*4))
new_image.paste(images[14],(images[0].size[0]*2,images[0].size[1]*4))
new_image.show()
Let me know if it works.
Following Mark Setchell's suggestion, here is a new version; I hope it is better:
from PIL import Image
import os
path_to_file ='tiff-files'
def stich_tile(path_to_file, xx , yy):
images = []
for i in os.listdir(path_to_file):
images.append(i)
if len(images) >= xx*yy:
pass
else:
raise ValueError('not enough images in path_to_file !!!!!!!!!!!')
sq_x = xx
sq_y = yy
img_x = (Image.open(path_to_file+'/'+images[0]).size[0])
img_y = (Image.open(path_to_file+'/'+images[0]).size[1])
img_mode = (Image.open(path_to_file+'/'+images[0]).mode)
new_image = Image.new(img_mode, (img_x*sq_x, img_y*sq_y))
x = 0
y = 0
cnt = 0
for i in images:
with Image.open(path_to_file+'/'+i) as img:
new_image.paste(img, (x,y))
cnt += 1
x += img_x
if cnt == sq_x:
x = 0
y += img_y
cnt = 0
else:
pass
return new_image
stich_tile(path_to_file, 3, 5).show()
And thinking more along the lines of https://stackoverflow.com/a/68468658/2836621
import numpy as np
from PIL import Image
import os
# path_to_file ='tiff-files'
path_to_file ='tiff-files2'
# path_to_file ='tiff-files3'
image = []
for i in os.listdir(path_to_file):
with Image.open(path_to_file+'/'+i) as im:
image.append(im.copy())
w, h = image[0].size
new_image = np.zeros((4 * h, 3 * w)).astype('uint8')
col = 0
row = -1
for i, img in enumerate(image):
if not i % 3 :
row += 1
col = 0
img = np.array(img)
new_image[row * h: (row + 1) * h, col * w: (col + 1) * w] = img
col += 1
image_pillow = Image.fromarray(new_image, mode = 'L')
image_pillow.save('prova.tif', mode = 'L')
image_pillow.show()
Tested with 8-bit grayscale .tif images.
To modify it for RGB and similar, add a third channel:
new_image = np.zeros((3 * h, 3 * w,3)).astype('uint8')
new_image[row * h: (row + 1) * h,col * w: (col + 1) * w,:] = img
Once more, the last example as a function for 8-bit grayscale images:
import numpy as np
from PIL import Image
import os
path_to_file ='tiff-files'
# path_to_file ='tiff-files2'
# path_to_file ='tiff-files3'
# path_to_file ='tiff-files5'
def stich_img(path_to_file, x , y):
image = []
for i in os.listdir(path_to_file):
image.append(path_to_file+'/'+i)
print(image)
if len(image) >= x*y:
pass
else:
# raise ValueError('not enough images in path_to_file !!!!!!!!!!!')
raise ValueError('EXCEPTION not enough images in path_to_file !!!!!!!!!!!', x*y ,'images needed : ', len(image),'images present !!!')
image = image[:x*y] #-----> trim the image list to the required number
with Image.open(image[0]) as img0:
w, h = img0.size
# new_image = np.zeros((4 * h, 3 * w)).astype('uint8')
new_image = np.zeros((y * h, x * w)).astype('uint8')
col = 0
row = -1
for i, imgs in enumerate(image):
with Image.open(imgs) as img:
if not i % x :
row += 1
col = 0
img = np.array(img)
new_image[row * h: (row + 1) * h, col * w: (col + 1) * w] = img
col += 1
image_pillow = Image.fromarray(new_image, mode = 'L')
return image_pillow
img_stiched = stich_img(path_to_file, 3,5)
# img_stiched.save('prova.tif', mode = 'L')
img_stiched.show()
Read all the images into a list. Iterate over this list using two nested for loops, one in range(5) for the rows and one in range(3) for the columns. Use numpy.hstack() and numpy.vstack() to build the final 3x5 image, assuming that the size of each tile image is the same. For example:
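A minimal sketch of that approach, assuming a folder named tiff-files holding the 15 equally sized tiles, read in sorted filename order:
import os
import numpy as np
from imageio import imread, imwrite
path = "tiff-files"  # assumed folder name
tiles = [imread(os.path.join(path, f)) for f in sorted(os.listdir(path))]
rows = []
for r in range(5):                        # 5 mosaic rows
    row_tiles = tiles[r * 3:(r + 1) * 3]  # the 3 tiles belonging to this row
    rows.append(np.hstack(row_tiles))     # stitch them left to right
mosaic = np.vstack(rows)                  # stack the rows top to bottom
imwrite("mosaic.tif", mosaic)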
Using numpy:
This script accepts a generator of images (to work faster with large images). It does not check their sizes in advance. If an image's height does not match the row height, or if the rows do not all have the same width, it will fail.
#!/usr/bin/env python3
import numpy as np
from imageio import imread, imwrite
from pathlib import Path
def tile_images(images, cols):
"""Tile images of same size to grid with given number of columns.
Args:
images (collection of ndarrays)
cols (int): number of columns
Returns:
ndarray: stitched image
"""
images = iter(images)
first = True
rows = []
i = 0
while True:
try:
im = next(images)
print(f"add image, shape: {im.shape}, type: {im.dtype}")
except StopIteration:
if first:
break
else:
im = np.zeros_like(im) # black background
if first:
row = im # start next row
first = False
else:
row = np.concatenate((row, im), axis=1) # append to row
i += 1
if not i % cols:
print(f"row done, shape: {row.shape}")
rows.append(row) # finished row
first = True
tiled = np.concatenate(rows) # stitch rows
return tiled
def main():
images = (imread(f) for f in Path().glob("*.*") if f.suffix in (".jpg", ".png") if f.name != "new.png")
new = tile_images(images, cols=3)
imwrite("new.png", new)
def test():
im1 = np.arange(65536).reshape(256,256)
im2 = np.arange(65536/2).reshape(128,256)
images = [im1,im1,im1,im2,im2,im2]
# works
new = tile_images(images, 3)
imwrite("new.png", new)
# fails
new = tile_images(images, 2)
imwrite("new2.png", new)
if __name__ == "__main__":
main()
# test()
The following elaborates on @saad_saeed's answer.
Note, the following will break:
if your list_of_images doesn't have enough images to build the num_mosaic_rows x num_mosaic_cols mosaic (I've left it to the user to add handling for this, e.g. adding an if/else);
if the images in your list_of_images don't all have the same shape.
def build_mosaic(list_of_images, num_mosaic_rows, num_mosaic_cols):
    list_of_mosaic_rows = []
    for row_number in range(num_mosaic_rows):
        # stitch the images belonging to this row together horizontally
        row_images = list_of_images[row_number * num_mosaic_cols:(row_number + 1) * num_mosaic_cols]
        list_of_mosaic_rows.append(np.hstack(row_images))
    # stack the finished rows vertically
    mosaic = np.vstack(list_of_mosaic_rows)
    return mosaic
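A hypothetical usage example; the tile filenames below are placeholders, and any list of 15 equally shaped arrays will do:
import numpy as np
from imageio import imread, imwrite
# Placeholder filenames; assumes 15 equally sized tiles on disk
list_of_images = [imread(f"tiff-files/tile_{n:02d}.tif") for n in range(1, 16)]
mosaic = build_mosaic(list_of_images, num_mosaic_rows=5, num_mosaic_cols=3)
imwrite("mosaic.tif", mosaic)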
In the example below there are three images on a white background, in order. How can I achieve this in Python using cv2 or PIL, or any working code?
Thank you.
The images must be aligned according to their aspect ratio.
Input = 3 images with a background,
Output = a single image, as shown in the picture above
UPDATE!
In each and every loop iteration, only one image gets pasted onto the background.
from PIL import Image
import cv2
import numpy as np
import os
d=0
folder = 'save'
image_paths = []
for path, subdirs, files in os.walk(folder):
for filename in files:
f = os.path.join(path, filename)
if f.endswith(".jpg"):
image_paths.append(f)
if f.endswith(".png"):
image_paths.append(f)
if f.endswith(".JPG"):
image_paths.append(f)
if f.endswith(".PNG"):
image_paths.append(f)
if f.endswith(".jpeg"):
image_paths.append(f)
if f.endswith(".JPEG"):
image_paths.append(f)
for image in image_paths:
image = cv2.imread(image)
r = 720.0 / image.shape[1]
dim = (720, int(image.shape[0] * r))
resized = cv2.resize(image, dim)
#resized = resized[:,:,0]
h, w, z = resized.shape
back = cv2.imread('template.jpg')
yoff = round((1080-h)/4)
xoff = round((1920-w)/6)
d+=1
result = back.copy()
result[yoff:yoff+h, xoff:xoff+w] = resized
#result = np.stack((result)*3)
cv2.imwrite('saves/resized_centered_%d.jpg'%d, result)
So each input image gets pasted onto a background, but the thing is, I want three images pasted onto the background in order, instead of one image per background.
NOTE: The image on top is just to illustrate what I need help with! You can suggest whatever is possible apart from that!
These lines of code move the image towards the top-left and seat it properly, but likewise I need two more images to be seated at the top-right as well as at the bottom.
yoff = round((1080-h)/4)
xoff = round((1920-w)/6)
I assume some template like this:
The "final image" has dimensions (1920, 1080) (cf. your calculations on xoff and yoff). Since you wrote, you want to keep the aspect ratio for each "single image", you'd need to check both cases: Resize w.r.t. to the single image's width, and if the resulting height is too large, re-resize w.r.t. to the single image's height.
What's left is to track the number of single images per final image inside the loop, and set up proper xoff and yoff values for each of the three cases. Maybe, looking at the code here helps more than long explanations:
import cv2
import numpy as np
import os
folder = 'path/to/your/images'
image_paths = []
for path, subdirs, files in os.walk(folder):
for filename in files:
f = os.path.join(path, filename)
if f.endswith((".jpg", ".png", ".JPG", ".PNG", ".jpeg", ".JPEG")):
image_paths.append(f)
d = 0 # Final image counter
e = 0 # Single image counter
back = np.ones((1080, 1920, 3), np.uint8) * 255 # Background
result = back.copy() # Final image
for i, image in enumerate(image_paths):
# Read image
image = cv2.imread(image)
h, w = image.shape[:2]
# First two single images: Enforce subimage with h_max = 480 and w_max = 900
if e <= 1:
r = 900.0 / w
dim = (900, int(h * r))
if dim[1] > 480:
r = 480.0 / h
dim = (int(w * r), 480)
resized = cv2.resize(image, dim)
hr, wr = resized.shape[:2]
x_off = 40
if e == 0:
y_off = 40
else:
y_off = 560
# Third single image: Enforce subimage with h_max = 1000 and w_max = 900
else:
r = 900.0 / w
dim = (900, int(h * r))
if dim[1] > 1000:
r = 1000.0 / h
dim = (int(w * r), 1000)
resized = cv2.resize(image, dim)
hr, wr = resized.shape[:2]
x_off, y_off = 980, 40
# Add single image to final image
result[y_off:y_off + hr, x_off:x_off + wr] = resized
# Increment single image counter
e += 1
# After three single images: Write final image; start new final image
if (e == 3) or (i == (len(image_paths) - 1)):
cv2.imwrite('resized_centered_%d.jpg' % d, result)
result = back.copy()
d += 1
e = 0
For some random images from my StackOverflow archive, I get the following outputs:
If you want to have different sized boxes or margins around or between the single images, just adapt the corresponding values in the code.
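For example, here is a small sketch of how those values could be named to make them easier to adapt; the numbers simply mirror the ones hard-coded above:
# The layout values used above, written out as named constants
CANVAS_W, CANVAS_H = 1920, 1080
MARGIN = 40                                       # outer margin and gap between boxes
LEFT_BOX_W = 900                                  # width of the two stacked left boxes
LEFT_BOX_H = (CANVAS_H - 3 * MARGIN) // 2         # 480
RIGHT_BOX_W = CANVAS_W - LEFT_BOX_W - 3 * MARGIN  # 900
RIGHT_BOX_H = CANVAS_H - 2 * MARGIN               # 1000
SECOND_Y_OFF = 2 * MARGIN + LEFT_BOX_H            # 560
THIRD_X_OFF = 2 * MARGIN + LEFT_BOX_W             # 980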
----------------------------------------
System information
----------------------------------------
Platform: Windows-10-10.0.16299-SP0
Python: 3.9.1
PyCharm: 2021.1.1
NumPy: 1.20.2
OpenCV: 4.5.1
----------------------------------------
Hello, I want to reflect an object in the image, as in this image: https://i.stack.imgur.com/N9J3I.jpg
How can I get this kind of result?
It is possible that OpenCV does not have a good solution for this; take a closer look at Pillow.
from PIL import Image, ImageFilter
def drop_shadow(image, iterations=3, border=8, offset=(5,5), background_colour=0xffffff, shadow_colour=0x444444):
shadow_width = image.size[0] + abs(offset[0]) + 2 * border
shadow_height = image.size[1] + abs(offset[1]) + 2 * border
shadow = Image.new(image.mode, (shadow_width, shadow_height), background_colour)
shadow_left = border + max(offset[0], 0)
shadow_top = border + max(offset[1], 0)
shadow.paste(shadow_colour, [shadow_left, shadow_top, shadow_left + image.size[0], shadow_top + image.size[1]])
for i in range(iterations):
shadow = shadow.filter(ImageFilter.BLUR)
img_left = border - min(offset[0], 0)
img_top = border - min(offset[1], 0)
shadow.paste(image, (img_left, img_top))
return shadow
drop_shadow(Image.open('boobs.jpg')).save('shadowed_boobs.png')
Here is one way to do the reflection in Python/OpenCV.
One flips the image. Then makes a vertical ramp (gradient) image and puts that into the alpha channel of the flipped image. Then one concatenates the original and the flipped images.
Input:
import cv2
import numpy as np
# set top and bottom opacity percentages
top = 85
btm = 15
# load image
img = cv2.imread('bear2.png')
hh, ww = img.shape[:2]
# flip the input
flip = np.flip(img, axis=0)
# add opaque alpha channel to input
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# make vertical gradient that is bright at top and dark at bottom as alpha channel for the flipped image
gtop = 255*top//100
gbtm = 255*btm//100
grady = np.linspace(gbtm, gtop, hh, dtype=np.uint8)
gradx = np.linspace(1, 1, ww, dtype=np.uint8)
grad = np.outer(grady, gradx)
grad = np.flip(grad, axis=0)
# alternate method
#grad = np.linspace(0, 255, hh, dtype=np.uint8)
#grad = np.tile(grad, (ww,1))
#grad = np.transpose(grad)
#grad = np.flip(grad, axis=0)
# put the gradient into the alpha channel of the flipped image
flip = cv2.cvtColor(flip, cv2.COLOR_BGR2BGRA)
flip[:,:,3] = grad
# concatenate the original and the flipped versions
result = np.vstack((img, flip))
# save output
cv2.imwrite('bear2_vertical_gradient.png', grad)
cv2.imwrite('bear2_reflection.png', result)
# Display various images to see the steps
cv2.imshow('flip',flip)
cv2.imshow('grad',grad)
cv2.imshow('result',result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Ramped (Gradient) Image:
Result:
I want to rotate a gray "test" image and paste it onto a blue background image. Right now I can only remove the black color after rotating my gray "test" image, but there is now a white section. How can I use Python to change the white section to blue?
Here is my code; can someone help me? I'd appreciate it.
dst_im = Image.new("RGBA", (196,283), "blue" )
im = src_im.convert('RGBA')
rot = im.rotate( angle, expand=1 ).resize(size)
f = Image.new( 'RGBA', rot.size, (255,)*4 )
im2 = Image.composite( rot, f, rot )
im2.convert(src_im.mode)
im2_width, im2_height = im2.size
cut_box = (0, 0, im2_width, im2_height )
paste_box = ( left, top, im2_width+left, im2_height+top )
region = im2.crop( cut_box )
dst_im.paste( region, paste_box )
dst_im.save("test.gif")
I have the impression that your code could be simplified as follows:
from PIL import Image
src_im = Image.open("winter3.jpg")
angle = 45
size = 100, 100
dst_im = Image.new("RGBA", (196,283), "blue" )
im = src_im.convert('RGBA')
rot = im.rotate( angle, expand=1 ).resize(size)
dst_im.paste( rot, (50, 50), rot )
dst_im.save("test.png")
This gives the following result:
Another answer using PIL is clearly more succinct. I had a similar problem and had the image in an ndarray. Yipes, mine came out way more complicated than user1202136's. I'm posting it only because it demonstrates another solution using numpy and array stacking, but user1202136's solution is much better.
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage
def rgba(rgb_img, alpha):
'''
' takes an rgb ndarray r x c x 3 of dtype=uint8
' and adds an alpha 0-255 to each pixel
'''
rows = len(rgb_img) # get image dimensions
columns = len(rgb_img[0])
rgb_flat = rgb_img.reshape([rows * columns, 3]) # list of rgb pixels
a = np.zeros([rows*columns, 1], dtype=np.uint8) # alpha for each pixel
a.fill(alpha)
rgba = np.column_stack([rgb_flat, a]) # place 4th column
return rgba.reshape([rows, columns, 4]) # reform into r x c x 4
def pad_with_transparent_pixels(rgba_img):
'''
' takes an rgba image r x c
' and places within a buffer of [ 0 0 0 0] to become square,
' with sides = diagonal of img
'''
rows = len(rgba_img) # get image dimensions
columns = len(rgba_img[0])
diag = (rows**2 + columns**2)**0.5
diag = int(diag) + 1
top_pad_height = (diag-rows)//2 + 1   # integer division so these can be used as array dimensions
left_pad_width = (diag-columns)//2 + 1
top_pad = np.zeros([top_pad_height, diag, 4], dtype=np.uint8)
left_pad = np.zeros([rows, left_pad_width, 4], dtype=np.uint8)
right_pad = np.zeros([rows,
# assures total width of top_pad for row_stack:
diag - left_pad_width - columns,
4 ],
dtype=np.uint8)
center = np.column_stack([left_pad, rgba_img, right_pad])
return np.row_stack([top_pad, center, top_pad])
def clean_rotate(rgba_img,angle):
rows = len(rgba_img)
columns = len(rgba_img[0])
diag = (rows**2 + columns**2)**.5
diag = int(diag)
pad_img = pad_with_transparent_pixels(rgba_img)
rot_img = scipy.ndimage.rotate(pad_img, angle)
rot_img_rows = len(rot_img)
rot_img_columns = len(rot_img[0])
crop_side = max(1,(rot_img_columns - diag) // 2) # max to avoid slicing [:0]
crop_top = max(1,(rot_img_rows - diag) // 2)
print(diag, crop_side, crop_top)
return rot_img[crop_top:-crop_top,crop_side:-crop_side]
img = plt.imread('C:\\Users\\bbrown\\Desktop\\Maurine.jpg') # read in a jpg
figure, axes = plt.subplots(1, 2) # create 1x2 grid of axes
axes[0].imshow(img) # place image on first axes
rgba_image = rgba(img, 255) # create an opaque rgba image
rot_img = clean_rotate(rgba_image,50)
#make a pattern of 10 images
for i in range(10):
rot_img = clean_rotate(rgba_image,5*i)
axes[1].imshow(rot_img)
plt.show()