How to create a semi-transparent pattern with Python PIL?

I have found some examples on this site. I would like to create example 6. Can you help?
Create, as a numpy array, the image of the napkin. The squares have a size of 10×10. You may use the command numpy.tile. Save the resulting image to a file.

In a standard grayscale image, black pixels are 0, gray pixels are 128, and white ones are 255:
import numpy as np
import matplotlib.pyplot as plt
# first create one 20 x 20 tile
a1 = np.zeros((20,20), dtype=int)
a1[10:20,0:10] = a1[0:10,10:20] = 128
a1[10:20,10:20] = 255
# fill the whole 100 x 100 area with the tiles
a = np.tile(a1, (5,5))
# plot and save
plt.imshow(a, 'Greys_r')
plt.savefig('pattern.png')
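Note that plt.savefig writes the whole figure, axes and padding included; if the file should contain exactly the 100 x 100 pixel array, plt.imsave can be used instead (a small sketch under that assumption):
# Save the bare array, one image pixel per array element
plt.imsave('pattern.png', a, cmap='Greys_r', vmin=0, vmax=255)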

You could do this:
from PIL import Image
import numpy as np
# Make grey 2x2 image
TwoByTwo = np.full((2,2), 128, np.uint8)
# Change top-left to black, bottom-right to white
TwoByTwo[0,0] = 0
TwoByTwo[1,1] = 255
# Tile it
tiled = np.tile(TwoByTwo, (5,5))
# Make into PIL Image, rescale in size and save
Image.fromarray(tiled).resize((100,100), Image.NEAREST).save('result.png')
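If actual semi-transparency is wanted, as the question title suggests, the same tiled values can double as an alpha channel. A minimal sketch, assuming the tiled array from above:
# Build an RGBA image whose alpha channel is the tiled pattern:
# 0 = fully transparent, 128 = half-transparent, 255 = opaque
rgba = np.zeros((tiled.shape[0], tiled.shape[1], 4), np.uint8)
rgba[..., 3] = tiled
Image.fromarray(rgba, 'RGBA').resize((100,100), Image.NEAREST).save('result_rgba.png')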

Related

What is wrong with my focus stacking algorithm?

You can see in the final focus-stacked image that the whole image is in focus. However, pieces of the image are missing, and I have no clue why. The basic steps of my algorithm are:
Access the images. Convert them to grayscale, blur the gray images a bit, then find the Laplacian of each. I store all the Laplacian-filtered images in a list.
Cycle through the pixels of a blank image using for loops. Every iteration creates a list containing the intensities of the gray, blurred, Laplacian-filtered images at that pixel. Find the max intensity, then look at the BGR value of the ORIGINAL image the max intensity came from, and set the blank image's pixel to that BGR value.
Here is my code:
import glob2
import cv2 as cv
import numpy as np

images = glob2.glob("Pics\\step*") # Accesses images in the Pics folder
laps = [] # A list to contain Laplacians of images in Pics
i = 0
for image in images:
    img = cv.imread(image) # Reads image in Pics
    images[i] = img # glob only returns file NAMES (i.e. strings), so replace each name with the loaded image
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Converts image to grayscale
    gauss = cv.GaussianBlur(img, (3,3), 0) # Blurs grayed image a bit
    lap = cv.Laplacian(gauss, cv.CV_64F) # Laplacian of the blurred, gray image
    lap = np.uint8(np.absolute(lap)) # Converts to 8-bit absolute values
    laps.append(lap) # Adds Laplacian to laps
    i += 1
sample = laps[0] # Arbitrarily uses the first image in laps to get the output dimensions
fs = np.zeros((sample.shape[0], sample.shape[1], 3), dtype='uint8') # Creates a blank image with the dimensions of sample
for x in range(sample.shape[0]): # The for loops go through every x and y value
    for y in range(sample.shape[1]):
        intensities = [lap[x, y] for lap in laps] # Intensities of the Laplacian images at this pixel
        color = images[intensities.index(max(intensities))][x, y] # BGR value of the x,y pixel in the ORIGINAL image with the highest intensity
        fs[x, y] = color # Sets pixel of blank fs image to the color of the sharpest source pixel
cv.imshow('FS', fs)
cv.waitKey(0)
Here is what the code produces:
Broken Focus Stacked Image
I was inspired by your code and made this simple script, which seems to work fine (I do not need to align images). Using a mask to select the in-focus pixels may be faster, but I haven't compared the two versions. I would appreciate any advice on how to improve it.
from pathlib import Path
from imageio import imread, imwrite
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2hsv, rgb2gray
from skimage import img_as_float, img_as_ubyte
from scipy.ndimage import gaussian_filter
from skimage.filters.rank import gradient
from skimage.morphology import disk
im_dir = Path("test")
sigma = 3
print("_____ load images _____")
fps = [f for f in im_dir.glob("*.jpg")]
print([f.name for f in fps])
images_rgb = [imread(f) for f in fps]
images_rgb_cube = np.array(images_rgb)
print("images_rgb_cube", images_rgb_cube.shape, images_rgb_cube.dtype)
print("_____ images to grey _____")
#images_grey = [rgb2hsv(im)[:,:,2] for im in images_rgb] # slow
images_grey = [rgb2gray(im) for im in images_rgb] # faster
print("_____ get gradients _____")
selection_element = disk(sigma) # matrix of n pixels with a disk shape
grads = [gradient(im, selection_element) for im in images_grey]
grads = np.array(grads)
print("grads", grads.shape, grads.dtype)
print("_____ get mask _____")
mask_grey = grads.max(axis=0, keepdims=1) == grads # https://stackoverflow.com/questions/47678252/mask-from-max-values-in-numpy-array-specific-axis
mask_rgb = np.repeat(mask_grey[:, :, :, np.newaxis], 3, axis=3)
print("mask_rgb", mask_rgb.shape, mask_rgb.dtype)
print("_____ apply mask _____")
image_sharp = images_rgb_cube * mask_rgb
image_sharp = image_sharp.max(axis=0)
print("image_sharp", image_sharp.shape, image_sharp.dtype)
print("_____ save image _____")
imwrite(im_dir / "stacked.jpeg", image_sharp)
plt.imshow(image_sharp)
plt.show()
print("_____ save masks _____")
print("mask_grey", mask_grey.shape, mask_grey.dtype)
for i in range(mask_grey.shape[0]):
    mask = mask_grey[i]
    fp = im_dir / "{}_mask.jpeg".format(fps[i].stem)
    imwrite(fp, img_as_ubyte(mask))
    print("saved", fp, mask.shape, mask.dtype)

How to detect white space in an image in opencv Python?

I have an image:
I have to detect the white space in between and basically partition it into two parts, like this:
This is what I have coded so far, but it detects only the black lines and not the middle white region.
import numpy as np
import cv2
img = cv2.imread('12.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
median = cv2.medianBlur(gray,5)
minLineLength = 250
maxLineGap = 100
lines = cv2.HoughLinesP(edges,0.3,np.pi/180,250,minLineLength,maxLineGap)
for line in lines:
    x1,y1,x2,y2 = line[0]
    cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)
cv2.imwrite('newwhite.png',img)
I have a simple solution based on mean values along an axis. I prefer scikit-image to OpenCV, but you can use cv2 as well.
import numpy as np
import skimage.io
import skimage.color
import skimage.morphology
import scipy.signal
img = skimage.io.imread('12.png')
gray = skimage.color.rgb2gray(img)
# Erode so the text merges into large dark areas (a 5 x 5 square is already quite big)
eroded = skimage.morphology.erosion(gray, skimage.morphology.square(5))
# Compute mean values along axis 0 or 1
hist = np.mean(eroded, axis=0)
# Search for wide (here 3% of the dimension size) and distant (here 20% of the dimension size) peaks
peaks, properties = scipy.signal.find_peaks(hist, width=len(hist)*3//100, distance=len(hist)*20//100)
Each peak then represents a white band along one dimension of your image.
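As a follow-up sketch, the peak positions can then be used to actually split the image, assuming the peaks above were computed along axis 0 (i.e. over columns) and the first peak marks the gap:
# Split the image at the first detected white band
if len(peaks) > 0:
    split = int(peaks[0])
    left, right = img[:, :split], img[:, split:]
    skimage.io.imsave('left.png', left)
    skimage.io.imsave('right.png', right)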

How to change the pixel colour of an image with PIL?

I'd like to change a pixel and for some reason this isn't working.
from PIL import Image
import numpy
im = Image.open("art\\PlanetX#1.25.png")
a = numpy.asarray(im)
img = Image.fromarray(a)
pixels = img.load()
pixels[0, 0] = (255, 0, 0, 255)
The top-left corner of the PNG should be set to red, but instead I get ValueError: Image is readonly.
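Most likely numpy.asarray returns a read-only view of the image data, so the image built from it cannot be modified in place. A hedged sketch of the simplest fix: copy with numpy.array, edit the array, and only then convert back:
from PIL import Image
import numpy
im = Image.open("art\\PlanetX#1.25.png")
a = numpy.array(im)            # numpy.array copies, so the array is writeable
a[0, 0] = (255, 0, 0, 255)     # set the top-left pixel to red (RGBA image assumed)
Image.fromarray(a).save("result.png")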
If you want to change just a few odd pixels, you can use the rather slow putpixel() like this:
from PIL import Image
# Create blue 30x15 image
im = Image.new('RGB',(30,15),color='blue')
# Change single pixel at 10,0 to red
im.putpixel((10,0),(255,0,0))
Alternatively, you can convert the entire image to a Numpy array and make many more changes, much faster with Numpy functions:
from PIL import Image
import numpy as np
# Create blue 30x15 image
im = Image.new('RGB',(30,15),color='blue')
# Convert to Numpy array
na = np.array(im)
# Change single pixel at x=10, y=0 to green (Numpy indexes [row, col], i.e. [y, x])
na[0,10] = (0,255,0)
# Change whole row to red
na[3] = (255,0,0)
# Change whole column to yellow
na[:,8] = (255,255,0)
# Convert back to PIL Image and save
Image.fromarray(na).save('result.png')

Image to 0,1 text

I need to convert an image to a text file (or an image) that consists of only 0s and 1s. Is there any way to do this programmatically, preferably in Python?
Here is my try:
Step 1: Open the image:
from PIL import Image
srcImage = Image.open("src.jpg")
Step 2: Greyscale the image:
grayImage = srcImage.convert('L')
Step 3: Binarize the image:
binarizedImage = grayImage.point(lambda x: 0 if x<128 else 255, '1')
Now I am stuck converting black points to 1 and white points to 0, and saving this to a text file with the image height mapped to lines (in this example: 174 pixels to 174 lines) and the image width mapped to line length (in this example: 310 pixels to 310 characters), or to a larger image with 0 in place of white points and 1 in place of black points.
A solution to either case would be much appreciated.
Full binarization code (modified version of the PIL way of binarizing):
from PIL import Image
srcImage = Image.open("src.jpg")
grayImage = srcImage.convert('L')
binarizedImage = grayImage.point(lambda x: 0 if x<128 else 255, '1')
binarizedImage.save("binarized.png")
You can use the numpy and scipy libraries for this:
from PIL import Image
from scipy.ndimage import zoom
import numpy as np
srcImage = Image.open("src.jpg")
grayImage = srcImage.convert('L')
array = np.array(grayImage)
array = zoom(array, 310/174)
np.savetxt("binarized.txt", array<128, fmt="%d")
Here np.array converts the PIL Image to a numpy array, zoom interpolates the array by the given scale, array < 128 creates a binary array, and fmt="%d" saves the result as integers.
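If the text file should instead match the original pixel grid exactly (one character per pixel, e.g. 310 pixels to 310 characters), a minimal sketch without the zoom step:
from PIL import Image
import numpy as np
srcImage = Image.open("src.jpg")
array = np.array(srcImage.convert('L'))
# black pixels -> 1, white pixels -> 0; an empty delimiter keeps one character per pixel
np.savetxt("binarized.txt", (array < 128).astype(int), fmt="%d", delimiter="")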

crop image in skimage?

I'm using skimage to crop a rectangle from a given image. I have (x1,y1,x2,y2) as the rectangle coordinates, and I have loaded the image:
image = skimage.io.imread(filename)
cropped = image(x1,y1,x2,y2)
However, this is the wrong way to crop the image. How would I do it correctly in skimage?
This is a simple syntax error. In Matlab you can use parentheses to extract a pixel or an image region, but in Python a numpy.ndarray is sliced with square brackets; besides, this code cuts the rectangle the wrong way.
The right way to cut is with the : slicing operator.
Thus,
from skimage import io
image = io.imread(filename)
cropped = image[y1:y2, x1:x2] # rows (y) come first, then columns (x)
One could use the skimage.util.crop() function too, as shown in the following code:
import numpy as np
from skimage.io import imread
from skimage.util import crop
import matplotlib.pylab as plt
A = imread('lena.jpg')
# crop_width{sequence, int}: Number of values to remove from the edges of each axis.
# ((before_1, after_1), … (before_N, after_N)) specifies unique crop widths at the
# start and end of each axis. ((before, after),) specifies a fixed start and end
# crop for every axis. (n,) or n for integer n is a shortcut for before = after = n
# for all axes.
B = crop(A, ((50, 100), (50, 50), (0,0)), copy=False)
print(A.shape, B.shape)
# (220, 220, 3) (70, 120, 3)
plt.figure(figsize=(20,10))
plt.subplot(121), plt.imshow(A), plt.axis('off')
plt.subplot(122), plt.imshow(B), plt.axis('off')
plt.show()
with the following output (with original and cropped image):
You can crop an image with skimage just by slicing the image array, like below:
cropped_image = image[y1:y2, x1:x2]
Example code:
from skimage import io
import matplotlib.pyplot as plt
image = io.imread(image_path)
cropped_image = image[y1:y2, x1:x2]
plt.imshow(cropped_image)
You can also use the Image module of the PIL library:
from PIL import Image
im = Image.open("image.png")
im = im.crop((0, 50, 777, 686)) # box is (left, upper, right, lower)
im.show()
