I'm currently trying to create an image preprocessor that adds specific noise for AI training. In this case, I'm trying to add contour lines over the top of my input image that resemble sketch lines.
So far I have been able to get these lines drawn on my image, but they are very sharp and pixelated, which obviously doesn't resemble real artist-drawn lines. I need some way to apply a slight blur to soften these edges, but so far I have not been able to do this.
Here is a visual guide to what I am trying to achieve:
I need to find the contours in image (A) and draw them onto a new layer with an alpha channel (B). I then need to blur these lines (C) and paste the result back onto the original image (D).
Here is the section I'm having problems with:
# Convert the image to a NumPy array
augmented_image = np.array(augmented_image)
augmented_shape = augmented_image.shape

# Convert the image to grayscale
grey_image = color.rgb2gray(augmented_image)

# Detect the contours of the image using the Canny edge detector
edges = feature.canny(grey_image, sigma=3)

# Create a blank RGBA image with dimensions 256 x 256
blank_image = np.zeros((256, 256, 4))

# Add an alpha channel to the original image (rgb -> rgba)
alpha = np.ones((augmented_image.shape[0], augmented_image.shape[1], 1), dtype=augmented_image.dtype) * 255
augmented_image = np.concatenate([augmented_image, alpha], axis=2)

# Iterate over the contours
for contour in measure.find_contours(edges, 0.8):
    # Set offset
    offset = 10
    # Select a random point along the contour
    start_point = np.random.randint(0, len(contour))
    start_row, start_col = contour[start_point]
    start_row = start_row + offset
    start_col = start_col + offset
    start_row = np.clip(start_row, 0, augmented_shape[0] - 1)
    start_col = np.clip(start_col, 0, augmented_shape[1] - 1)
    # Select a random point along the contour that is not the same as the first point
    end_point = np.random.randint(0, len(contour))
    while end_point == start_point:
        end_point = np.random.randint(0, len(contour))
    end_row, end_col = contour[end_point]
    end_row = end_row + offset
    end_col = end_col + offset
    end_row = np.clip(end_row, 0, augmented_shape[0] - 1)
    end_col = np.clip(end_col, 0, augmented_shape[1] - 1)
    # Draw the line on the image using the draw.line function
    rr, cc = draw.line(int(start_row), int(start_col), int(end_row), int(end_col))
    blank_image[rr, cc] = 30

# Smooth the contour lines using the gaussian function
blank_image = filters.gaussian(blank_image, sigma=1)

# Make sure image is the same data type
blank_image = blank_image.astype(augmented_image.dtype)

# Create a mask for the contour lines
blank_alpha = blank_image[:, :, 3:]
mask = np.any(blank_alpha > 0, axis=2)

# Apply the smoothed lines to the masked region of the original image
augmented_image[mask] = blank_image[mask]

# Convert image back to 3 layers rgb
augmented_image = augmented_image[:, :, :3]
I know that the problem lies somewhere in the 'mask' variable definition. Something about it being a boolean array means the paste just puts a line of pure black squares on my image rather than the expected blurred line. No amount of messing with layer order or adding extra layers to copy from has fixed this.
Doing this process without trying to blur the lines works great, apart from the fact that it's very pixelated and doesn't fit the style of the training data. Blurring the image without trying to re-combine anything also produces an adequate blurred line; however, the entire image gets blurred.
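In case it helps show what I'm aiming for, here is a minimal sketch of the alpha-over compositing I'm trying to reproduce (alpha_over is my own illustrative helper, assuming float RGB/RGBA arrays in [0, 1], not part of my script):
import numpy as np

def alpha_over(base_rgb, overlay_rgba):
    # weight each pixel by the overlay's alpha: full alpha shows the line,
    # zero alpha keeps the original image, fractional alpha blends the two
    alpha = overlay_rgba[:, :, 3:4]
    return overlay_rgba[:, :, :3] * alpha + base_rgb * (1.0 - alpha)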
Here's what I can produce without the blurring process, and a rough idea of what I would like the final product to look like (made in Photoshop).
It's only when I try to mask and combine that this becomes a problem. I will post the full code below for anyone to run on their own system:
import random
import numpy as np
import skimage
from skimage.transform import rotate, resize
from skimage import draw, feature, color, measure, filters, util
from skimage.util import random_noise
import PIL
from PIL import Image
import os
import argparse
import cv2
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from pathlib import Path
import imghdr
# Set parser args
parser = argparse.ArgumentParser()
parser.add_argument("--dirty_dir", help="path to folder containing dirty images")
parser.add_argument("--clean_dir", help="path to folder containing clean images")
parser.add_argument("--dirty_savedir", help="path to dirty output folder")
parser.add_argument("--clean_savedir", help="path to clean output folder")
a = parser.parse_args()
# Set folder paths
dirty_dir = a.dirty_dir + '/'
clean_dir = a.clean_dir + '/'
dirty_savedir = a.dirty_savedir + '/'
clean_savedir = a.clean_savedir + '/'
print(f"Source Folder: {dirty_dir}")
print(f"Source Folder: {clean_dir}")
print(f"Output Folder: {dirty_savedir}")
print(f"Output Folder: {clean_savedir}")
def augment_image(image, filename, clean_dir):
    for i in range(8):
        # Create list for clean imgs
        clean_list = []
        # Randomly select a 256x256 region
        w, h = image.size[0], image.size[1]
        print(w, h)
        top = random.randint(0, h - 256)
        left = random.randint(0, w - 256)
        right = left + 256
        bottom = top + 256
        dims = [left, top, right, bottom]
        print(f'{filename} dimensions: {dims}')
        # Add dimensions to clean_list
        clean_list.extend(dims)
        augmented_image = image.crop(dims)
        print(f'{filename} shape: {augmented_image.size}')
        # Randomly rotate the image by 90, 180, or 270 degrees
        angle = random.choice([0, 90, 180, 270])
        augmented_image = augmented_image.rotate(angle)
        # Add angle to list
        clean_list.append(angle)
        # Randomly flip the image horizontally
        flip_lr = random.choice([True, False])
        if flip_lr:
            augmented_image = augmented_image.transpose(Image.FLIP_LEFT_RIGHT)
            clean_list.append("flip_lr")
        else:
            clean_list.append("none")
        # Randomly flip the image vertically
        flip_tb = random.choice([True, False])
        if flip_tb:
            augmented_image = augmented_image.transpose(Image.FLIP_TOP_BOTTOM)
            clean_list.append("flip_tb")
        else:
            clean_list.append("none")
        # Convert the image to a NumPy array
        augmented_image = np.array(augmented_image)
        augmented_shape = augmented_image.shape

        # Convert the image to grayscale
        grey_image = color.rgb2gray(augmented_image)

        # Detect the contours of the image using the Canny edge detector
        edges = feature.canny(grey_image, sigma=3)

        # Create a blank RGBA image with dimensions 256 x 256
        blank_image = np.zeros((256, 256, 4))

        # Add an alpha channel to the original image (rgb -> rgba)
        alpha = np.ones((augmented_image.shape[0], augmented_image.shape[1], 1), dtype=augmented_image.dtype) * 255
        augmented_image = np.concatenate([augmented_image, alpha], axis=2)

        # Iterate over the contours
        for contour in measure.find_contours(edges, 0.8):
            # Set offset
            offset = 10
            # Select a random point along the contour
            start_point = np.random.randint(0, len(contour))
            start_row, start_col = contour[start_point]
            start_row = start_row + offset
            start_col = start_col + offset
            start_row = np.clip(start_row, 0, augmented_shape[0] - 1)
            start_col = np.clip(start_col, 0, augmented_shape[1] - 1)
            # Select a random point along the contour that is not the same as the first point
            end_point = np.random.randint(0, len(contour))
            while end_point == start_point:
                end_point = np.random.randint(0, len(contour))
            end_row, end_col = contour[end_point]
            end_row = end_row + offset
            end_col = end_col + offset
            end_row = np.clip(end_row, 0, augmented_shape[0] - 1)
            end_col = np.clip(end_col, 0, augmented_shape[1] - 1)
            # Draw the line on the image using the draw.line function
            rr, cc = draw.line(int(start_row), int(start_col), int(end_row), int(end_col))
            blank_image[rr, cc] = 30

        # Smooth the contour lines using the gaussian function
        blank_image = filters.gaussian(blank_image, sigma=1)

        # Make sure image is the same data type
        blank_image = blank_image.astype(augmented_image.dtype)

        # Create a mask for the contour lines
        blank_alpha = blank_image[:, :, 3:]
        mask = np.any(blank_alpha > 0, axis=2)

        # Apply the smoothed lines to the masked region of the original image
        augmented_image[mask] = blank_image[mask]

        # Convert image back to 3 layers rgb
        augmented_image = augmented_image[:, :, :3]
        ## Add more noise types (lines, wrinkles, color)/make noise random chance to occur ##
        # Add random noise to the image
        noise = random_noise(augmented_image, mode='pepper', amount=0.011)
        # Convert the noisy image back to a uint8 PIL image
        augmented_image = np.array(255 * noise, dtype=np.uint8)
        augmented_image = Image.fromarray(augmented_image)
        # Save file
        augmented_image.save(dirty_savedir + '_' + str(i) + '_' + filename)
        print(clean_list)
        # Function to mirror edits onto clean images
        # (nested so it can see the loop index i and the save directory)
        def clean_aug(clean_dir, clean_list):
            # Open clean directory
            for filename in os.listdir(f"{clean_dir}"):
                # Rule out any weird Mac files
                if not filename.startswith("._"):
                    with Image.open(clean_dir + filename) as image:
                        # Define clean dimensions
                        clean_dims = clean_list[0:4]
                        # Crop image
                        clean_augmented = image.crop(clean_dims)
                        # Rotate clean image
                        clean_augmented = clean_augmented.rotate(clean_list[4])
                        # Flip clean image
                        if clean_list[5] == 'flip_lr':
                            clean_augmented = clean_augmented.transpose(Image.FLIP_LEFT_RIGHT)
                        if clean_list[6] == 'flip_tb':
                            clean_augmented = clean_augmented.transpose(Image.FLIP_TOP_BOTTOM)
                        # Save clean images
                        clean_augmented.save(clean_savedir + '_' + str(i) + '_' + filename)
            print("Clean alterations copied successfully")

        clean_aug(clean_dir, clean_list)
# Clean up unnecessary files
def file_scrub():
    dirty_dir = dirty_savedir
    image_extensions = [".png", ".jpg"]  # add all your image file extensions here
    img_type_accepted_by_tf = ["bmp", "gif", "jpeg", "png"]
    for filepath in Path(dirty_dir).rglob("*"):
        if filepath.suffix.lower() in image_extensions:
            img_type = imghdr.what(filepath)
            if img_type is None:
                print(f"{filepath} is not an image")
            elif img_type not in img_type_accepted_by_tf:
                print(f"{filepath} is a {img_type}, not accepted by TensorFlow")

def image_aug(dirty_dir, clean_dir):
    for filename in os.listdir(f"{dirty_dir}"):
        # Skip any weird Mac metadata files
        if not filename.startswith("._"):
            # Open the image and augment it
            with Image.open(dirty_dir + filename) as image:
                augment_image(image, filename, clean_dir)

image_aug(dirty_dir, clean_dir)
file_scrub()
Apologies for the cumbersome codebase. I just wanted to post the full script to minimize the risk of discrepancies arising from condensing it. If there is any clarification I can provide, please let me know!
Hello, I want to reflect an object in an image, as in this example: https://i.stack.imgur.com/N9J3I.jpg
How can I get this kind of result?
OpenCV may not have a good built-in solution for this; take a closer look at Pillow.
from PIL import Image, ImageFilter

def drop_shadow(image, iterations=3, border=8, offset=(5,5), background_colour=0xffffff, shadow_colour=0x444444):
    shadow_width = image.size[0] + abs(offset[0]) + 2 * border
    shadow_height = image.size[1] + abs(offset[1]) + 2 * border
    shadow = Image.new(image.mode, (shadow_width, shadow_height), background_colour)

    shadow_left = border + max(offset[0], 0)
    shadow_top = border + max(offset[1], 0)
    shadow.paste(shadow_colour, [shadow_left, shadow_top, shadow_left + image.size[0], shadow_top + image.size[1]])

    for i in range(iterations):
        shadow = shadow.filter(ImageFilter.BLUR)

    img_left = border - min(offset[0], 0)
    img_top = border - min(offset[1], 0)
    shadow.paste(image, (img_left, img_top))

    return shadow

drop_shadow(Image.open('boobs.jpg')).save('shadowed_boobs.png')
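For the reflection itself, here is a rough Pillow sketch along the same lines (the reflect name, gradient shape, and white background are my own assumptions, not a tested recipe):
from PIL import Image

def reflect(image, gap=0):
    w, h = image.size
    # flip the image and fade it out with a vertical gradient alpha
    flipped = image.transpose(Image.FLIP_TOP_BOTTOM).convert('RGBA')
    gradient = Image.new('L', (1, h))
    gradient.putdata([int(255 * (1 - y / h)) for y in range(h)])
    flipped.putalpha(gradient.resize((w, h)))
    # stack the faded copy under the original on a white background
    out = Image.new('RGBA', (w, 2 * h + gap), (255, 255, 255, 255))
    out.paste(image.convert('RGBA'), (0, 0))
    out.paste(flipped, (0, h + gap), flipped)
    return out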
Here is one way to do the reflection in Python/OpenCV.
Flip the image. Then make a vertical ramp (gradient) image and put it into the alpha channel of the flipped copy. Then concatenate the original and the flipped images.
Input:
import cv2
import numpy as np
# set top and bottom opacity percentages
top = 85
btm = 15
# load image
img = cv2.imread('bear2.png')
hh, ww = img.shape[:2]
# flip the input
flip = np.flip(img, axis=0)
# add opaque alpha channel to input
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
# make vertical gradient that is bright at top and dark at bottom as alpha channel for the flipped image
gtop = 255*top//100
gbtm = 255*btm//100
grady = np.linspace(gbtm, gtop, hh, dtype=np.uint8)
gradx = np.linspace(1, 1, ww, dtype=np.uint8)
grad = np.outer(grady, gradx)
grad = np.flip(grad, axis=0)
# alternate method
#grad = np.linspace(0, 255, hh, dtype=np.uint8)
#grad = np.tile(grad, (ww,1))
#grad = np.transpose(grad)
#grad = np.flip(grad, axis=0)
# put the gradient into the alpha channel of the flipped image
flip = cv2.cvtColor(flip, cv2.COLOR_BGR2BGRA)
flip[:,:,3] = grad
# concatenate the original and the flipped versions
result = np.vstack((img, flip))
# save output
cv2.imwrite('bear2_vertical_gradient.png', grad)
cv2.imwrite('bear2_reflection.png', result)
# Display various images to see the steps
cv2.imshow('flip',flip)
cv2.imshow('grad',grad)
cv2.imshow('result',result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Ramped (Gradient) Image:
Result:
Here I use the PIL library to read and manipulate images. I am confused about how to create a new image from a list of binary pixel values after thresholding.
I have tried it, but the resulting image is RGB, not a binary image. The following is the code that I wrote:
from PIL import Image
import numpy as np

img = Image.open('data_train/ga.jpeg')
pixels = img.load()
width, height = img.size

all_pixels = []
for x in range(width):
    for y in range(height):
        hpixel = pixels[x, y]
        img_gray = (0.2989 * hpixel[0]) + (0.5870 * hpixel[1]) + (0.1140 * hpixel[2])
        if img_gray >= 110:
            all_pixels.append('1')
        else:
            all_pixels.append('0')

data_isi = {'0': 0,
            '1': 255}
data = [data_isi[letter] for letter in all_pixels]
img_new = Image.fromarray(data)
img_new.save('data_train/gabiner.jpeg')
Updated Answer
As you are required to use a for loop, you could go with something more like this:
#!/usr/bin/env python3
from PIL import Image
# Load image and get dimensions
img = Image.open('start.jpg').convert('RGB')
width, height = img.size
# Actually load input pixels, else PIL is too lazy
imi = img.load()
# List of result pixels
imo = []
for y in range(height):
    for x in range(width):
        R, G, B = imi[x, y]
        gray = (0.2989 * R) + (0.5870 * G) + (0.1140 * B)
        if gray >= 110:
            imo.append(255)
        else:
            imo.append(0)

# Make output image and put output pixels into it
# (the outer loop runs over y because putdata fills the image row by row)
result = Image.new('L', (width, height))
result.putdata(imo)

# Save result
result.save('result.png')
Which turns this start image:
Into this result:
Original Answer
You appear to be converting the image to greyscale and thresholding at 110, which can be done much more simply, and faster, like this:
#!/usr/local/bin/python3
from PIL import Image
# Load image and make greyscale
im = Image.open('image.png').convert('L')
# Threshold to make black and white ("p > 110 and 255" short-circuits
# to 0 for dark pixels and 255 for bright ones)
thr = im.point(lambda p: p > 110 and 255)
# Save result
thr.save('result.png')
My machine learning algorithm has already been trained on the 70,000 images in the MNIST database. I want to test it on an image not included in the MNIST dataset. However, my predict function cannot read the array representation of my test image.
How do I test my algorithm on an external image?
Why is my code failing?
P.S. I'm using Python 3.
Error Received:
Traceback (most recent call last):
File "hello_world2.py", line 28, in <module>
print(sgd_clf.predict(arr))
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/linear_model/base.py", line 336, in predict
scores = self.decision_function(X)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/sklearn/linear_model/base.py", line 317, in decision_function
% (X.shape[1], n_features))
ValueError: X has 15 features per sample; expecting 784
Code:
# Common Imports
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.linear_model import SGDClassifier
from PIL import Image
from resizeimage import resizeimage
# loading and learning MNIST data
mnist = fetch_mldata('MNIST original')
x, y = mnist["data"], mnist["target"]
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(x, y)
# loading and converting to array a non-MNIST image of a "5", which is in the same folder
img = Image.open("5.png")
arr = np.array(img)
# trying to predict that the image is a "5"
img = Image.open("5.png")
img = img.convert('L') #makes it greyscale
img = resizeimage.resize_thumbnail(img, [28,28])
arr = np.array(img)
print(sgd_clf.predict(arr)) # ERROR... why????????? How do you fix it?????
It's not simply a matter of resizing; the image needs the digit centered, white on black, etc. I've been working on a function to do this job. This is the current version, which uses OpenCV, although it could do with further improvement (including using PIL instead of OpenCV), but it should give an idea of the steps required.
import cv2
import numpy as np

def open_as_mnist(image_path):
    """
    Assume this is a colour or greyscale image of a digit which has not yet been preprocessed.

    Black and white
    Resize to 20 x 20 (digit in centre, ideally)
    Sharpen
    Add white border to make it 28 x 28
    Convert to white on black
    """
    # open as greyscale
    image = cv2.imread(image_path, 0)

    # crop to the contour with the largest area (helper not shown here)
    cropped = do_cropping(image)

    # resize the image to 20 x 20
    resized20 = cv2.resize(cropped, (20, 20), interpolation=cv2.INTER_CUBIC)
    cv2.imwrite('1_resized.jpg', resized20)

    # gaussian filtering
    blurred = cv2.GaussianBlur(resized20, (3, 3), 0)

    # white digit on black background
    ret, thresh = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY_INV)

    padded = to20by20(thresh)  # helper not shown here
    resized28 = padded_image(padded, 28)

    # normalize the image values to fit in the range [0, 1]
    norm_image = np.asarray(resized28, dtype=np.float32) / 255.

    # flatten the image to a 1-D vector of 784 values and return
    # (values are already in [0, 1], with the digit white on black)
    flat = norm_image.reshape(1, 28 * 28)
    return flat


def padded_image(image, tosize):
    """
    This method adds padding to the image and makes it a tosize x tosize array,
    without losing the aspect ratio.
    Assumes the desired image is square.

    :param image: the input image as a numpy array
    :param tosize: the final dimensions
    """
    # image dimensions
    image_height, image_width = image.shape

    # if not already square then pad to square
    if image_height != image_width:
        # Add padding
        # The aim is to make an image of different width and height a square image.
        # For that, first the bigger attribute among width and height is determined.
        max_index = np.argmax([image_height, image_width])

        # if height is the bigger one, then add padding to width until width
        # becomes equal to height
        if max_index == 0:
            # order of padding is: top, bottom, left, right
            left = int((image_height - image_width) / 2)
            right = image_height - image_width - left
            padded_img = cv2.copyMakeBorder(image, 0, 0,
                                            left,
                                            right,
                                            cv2.BORDER_CONSTANT)

        # else if width is the bigger one, then add padding to height until
        # height becomes equal to width
        else:
            top = int((image_width - image_height) / 2)
            bottom = image_width - image_height - top
            padded_img = cv2.copyMakeBorder(image, top, bottom, 0, 0, cv2.BORDER_CONSTANT)
    else:
        padded_img = image

    # now that it's a square, add any additional padding required
    image_height, image_width = padded_img.shape
    padding = tosize - image_height

    # need to handle the case where padding is not divisible by 2
    left = top = int(padding / 2)
    right = bottom = padding - left
    resized = cv2.copyMakeBorder(padded_img, top, bottom, left, right, cv2.BORDER_CONSTANT)
    return resized
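As a quick sanity check of the padding helper (a hypothetical 20x20 array, just to show the expected shapes, assuming cv2 is imported as above):
import numpy as np

digit20 = np.zeros((20, 20), dtype=np.uint8)  # stand-in for a cropped digit
digit28 = padded_image(digit20, 28)
print(digit28.shape)  # (28, 28)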
If you want to read a picture and then resize it, please try:
In [1]: import PIL.Image as Image
In [2]: img = Image.open('2.jpg', mode='r')
In [3]: img.mode
Out[3]: 'RGB'
In [4]: img.size
Out[4]: (2880, 1800)
In [5]: img_new = img.resize([4000, 4000], Image.ANTIALIAS)
In [6]: img_new2 = img.resize([32, 32], Image.ANTIALIAS)
Docs are here
This is 2.jpg; sorry, it is not a digit.
The picture is from the Internet; I forget the source.
If you encounter an image whose mode is 'RGBA', I recommend converting it to 'RGB' mode first:
newimg = Image.new('RGB', img.size)
newimg.paste(img, mask=img.split()[3])  # use the alpha band as the paste mask
img = newimg
Please try this:
img = Image.open("5.png")
img = img.resize((28,28))
img = img.convert('L') #makes it greyscale
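Note that the classifier still expects a flat row of 784 features rather than a 28x28 array, so (a sketch, assuming the image is now 28x28 greyscale) you would also flatten before predicting:
import numpy as np

arr = np.array(img)        # shape (28, 28)
arr = arr.reshape(1, -1)   # shape (1, 784), matching the MNIST training data
print(sgd_clf.predict(arr))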
I'm trying to use a specific gamma-corrected grayscale implementation, Gleam, to convert an image's pixels to grayscale. How can I do this manually with PIL in Python?
def tau_gamma_correct(pixel_channel):
    pixel_channel = pixel_channel**(1/2.2)
    return pixel_channel

##param: rgb
##result: returns grayscale value
def gleam(rgb):
    #convert rgb tuple to list
    rgblist = list(rgb)
    #gamma correct each rgb channel
    rgblist[0] = tau_gamma_correct(rgblist[0])
    rgblist[1] = tau_gamma_correct(rgblist[1])
    rgblist[2] = tau_gamma_correct(rgblist[2])
    grayscale = 1/3*(rgblist[0] + rgblist[1] + rgblist[2])
    return grayscale
# get a glob list of jpg filenames
files = glob.glob('*.jpg')
for file in files:
    file = open(file)
    filename = file.name
    image = Image.open(file)
    pix = image.load()
    width, height = image.size
    #print(width, height)
    for x in range(0, width):
        for y in range(0, height):
            rgb = pix[x, y]
            #print(rgb)
            # calc new pixel value and set to pixel
            image.mode = 'L'
            pix[x, y] = gleam(rgb)
    image.save(filename + 'gray.gleam' + '.jpg')
    file.close()
SystemError: new style getargs format but argument is not a tuple
I think it is still expecting the RGB tuple.
I found that I could just build another image:
import sys
import os
import glob
import numpy
from PIL import Image

def tau_gamma_correct(pixel_channel):
    # note: applying the exponent directly to 0-255 values compresses the
    # range to roughly 0-12.4, so the saved image will be very dark unless
    # the channels are rescaled to [0, 1] first and back afterwards
    pixel_channel = pixel_channel**(1/2.2)
    return pixel_channel

##param: rgb
##result: returns grayscale value
def gleam(rgb):
    #convert rgb tuple to list
    rgblist = list(rgb)
    #gamma correct each rgb channel
    rgblist[0] = tau_gamma_correct(rgblist[0])
    print('gleamed red ' + str(rgblist[0]))
    rgblist[1] = tau_gamma_correct(rgblist[1])
    print('gleamed green ' + str(rgblist[1]))
    rgblist[2] = tau_gamma_correct(rgblist[2])
    print('gleamed blue ' + str(rgblist[2]))
    grayscale = (rgblist[0] + rgblist[1] + rgblist[2])/3
    print('grayscale ' + str(grayscale))
    return grayscale

# get a glob list of jpg filenames
files = glob.glob('*.jpg')
for file in files:
    file = open(file)
    filename = file.name
    image = Image.open(file)
    pix = image.load()
    width, height = image.size
    new_image = Image.new('L', image.size)
    # row-major matrix of output pixels (height rows of width columns)
    pixelmatrix = numpy.zeros((height, width))
    #print(width, height)
    for x in range(0, width):
        for y in range(0, height):
            rgb = pix[x, y]
            print('current pixel value: ' + str(rgb))
            # calc new pixel value and set to pixel
            gray = gleam(rgb)
            print('changing to pixel value: ' + str(gray))
            pixelmatrix[y, x] = gray
    # putdata expects a flat, row-major sequence, and must run before save
    new_image.putdata(pixelmatrix.flatten())
    new_image.save(filename + 'gray.gleam' + '.jpg')
    file.close()
The problem is that image.mode = 'L' doesn't actually change the type of the image, it just changes the attribute so it's no longer accurate. To change the mode of the image you need to make a new copy with image.convert('L').
Once you have an image in grayscale mode, it won't require a tuple for a pixel value anymore.
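For example (a minimal sketch, assuming some image.jpg on disk):
from PIL import Image

img = Image.open('image.jpg').convert('L')  # a real mode conversion, not just relabelling
pix = img.load()
pix[0, 0] = 128                             # a single int is now a valid pixel value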
Seeing the SystemError: new style getargs format but argument is not a tuple error, it seems that you need to return a tuple, which is represented as:
sample_tuple = (1, 2, 3, 4)
So we edit the gleam() function as:
def gleam(rgb):
    #convert rgb tuple to list
    rgblist = list(rgb)
    #gamma correct each rgb channel
    rgblist[0] = tau_gamma_correct(rgblist[0])
    rgblist[1] = tau_gamma_correct(rgblist[1])
    rgblist[2] = tau_gamma_correct(rgblist[2])
    grayscale = 1/3*(rgblist[0] + rgblist[1] + rgblist[2])
    return (grayscale, )
Keep in mind that when returning a single-element tuple, you need to represent it as:
sample_tuple = (1, )
This is due to the fact that (4) == 4 but (4, ) != 4