I am using the putalpha function in my project, but I have a problem.
When I don't use putalpha:
[image: result without putalpha]
When I use putalpha:
[image: result with putalpha]
How can I solve this problem?
Code:
def add_logo(pos, size=5, rotation=0, alpha=255):
    mainim = Image.open("resim.png").convert("RGB")
    logoim = Image.open("pawpink.png").convert("RGBA")
    logoim = logoim.rotate(rotation, expand=1)
    logoim.putalpha(alpha)
    # Calculate size
    width, height = mainim.size
    width = width / size
    oran = (logoim.size[0] / logoim.size[1])
    height = (width * (oran ** -1))
    logoim = logoim.resize((int(width), int(height)))
    mainim.paste(logoim, box=pozisyon_getir_resim(pos), mask=logoim)
    return mainim
Images:
cat.png
logo.png
I found this excellent article, Watermark with PIL (Python recipe), and was able to get your program to work.
Here is my version (complete, tested):
import PIL.Image
import PIL.ImageEnhance


def pozisyon_getir_resim(pos):
    return (pos, pos)


def reduce_opacity(im, opacity):
    """Returns an image with reduced opacity."""
    assert opacity >= 0 and opacity <= 1
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    else:
        im = im.copy()
    alpha = im.split()[3]
    alpha = PIL.ImageEnhance.Brightness(alpha).enhance(opacity)
    im.putalpha(alpha)
    return im


def add_logo(pos, size=5, rotation=0, alpha=255):
    mainim = PIL.Image.open("cat.png").convert("RGB")
    logoim = PIL.Image.open("logo.png").convert("RGBA")
    logoim = logoim.rotate(rotation, expand=1)
    logoim = reduce_opacity(logoim, alpha / 255.0)
    # Calculate size
    width, height = mainim.size
    width = width / size
    oran = (logoim.size[0] / logoim.size[1])
    height = (width * (oran ** -1))
    logoim = logoim.resize((int(width), int(height)))
    if mainim.mode != 'RGBA':
        mainim = mainim.convert('RGBA')
    layer = PIL.Image.new('RGBA', mainim.size, (0, 0, 0, 0))
    layer.paste(logoim, pozisyon_getir_resim(pos))
    return PIL.Image.composite(layer, mainim, layer)


mainim = add_logo(32, 5, 0, 127)
mainim.save('cat_with_logo.png', 'PNG')
Result:
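The key difference is that reduce_opacity scales the logo's existing alpha channel (via ImageEnhance.Brightness) instead of replacing it with a constant, so pixels that were fully transparent in the logo stay transparent. A minimal sketch of the same idea using point(), assuming logoim is the RGBA logo and alpha is the 0-255 value from add_logo:

# Scale the existing alpha band instead of overwriting it with a constant value
r, g, b, a = logoim.split()
a = a.point(lambda v: v * alpha // 255)  # 0 stays fully transparent, the rest is dimmed
logoim.putalpha(a)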
Related
I'm very new to programming Python (this is my third month). I'm making a desktop program that is supposed to show appointments and sayings. I already have a function, process, that opens a window in full screen and displays the slide show.
But I want to place the slide show in a tkinter window so that I can add other labels next to the show.
This is my code. I want to call the function in a tkinter window so that I can assign it to a button.
import cv2
import numpy as np
import glob
import os
import random
import tkinter as tk


class Image:
    def __init__(self, filename, time=200, size=800):
        self.size = size
        self.time = time
        self.shifted = 1.0
        self.img = cv2.imread(filename)
        self.height, self.width, _ = self.img.shape
        if self.width < self.height:
            self.height = int(self.height*size/self.width)
            self.width = size
            self.img = cv2.resize(self.img, (self.width, self.height))
            self.shift = self.height - size
            self.shift_height = True
        else:
            self.width = int(self.width*size/self.height)
            self.height = size
            self.shift = self.width - size
            self.img = cv2.resize(self.img, (self.width, self.height))
            self.shift_height = False
        self.delta_shift = self.shift/self.time

    def reset(self):
        if random.randint(0, 1) == 0:
            self.shifted = 0.0
            self.delta_shift = abs(self.delta_shift)
        else:
            self.shifted = self.shift
            self.delta_shift = -abs(self.delta_shift)

    def get_frame(self):
        if self.shift_height:
            roi = self.img[int(self.shifted):int(self.shifted) + self.size, :, :]
        else:
            roi = self.img[:, int(self.shifted):int(self.shifted) + self.size, :]
        self.shifted += self.delta_shift
        if self.shifted > self.shift:
            self.shifted = self.shift
        if self.shifted < 0:
            self.shifted = 0
        return roi


def process():
    text = f'xXxxxxxXXXXXxxxxxXx'
    coordinates = (650, 1100)
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 2
    color = (255, 0, 255)
    thickness = 3
    filenames = glob.glob(os.path.join(path, "*"))
    cnt = 0
    images = []
    for filename in filenames:
        img = Image(filename)
        images.append(img)
        if cnt > len(images):
            break
        cnt += 1
    prev_image = images[random.randrange(0, len(images))]
    prev_image.reset()
    while True:
        while True:
            img = images[random.randrange(0, len(images))]
            if img != prev_image:
                break
        img.reset()
        for i in range(100):
            alpha = i/100
            beta = 1.0 - alpha
            dst = cv2.addWeighted(img.get_frame(), alpha, prev_image.get_frame(), beta, 0.0)
            dst = cv2.putText(dst, text, coordinates, font, fontScale, color, thickness, cv2.LINE_AA)
            cv2.imshow('Slideshow', dst)
            if cv2.waitKey(10) == ord('q'):
                cv2.destroyWindow('Slideshow')
                return
        prev_image = img
        for _ in range(100):
            cv2.imshow('Slideshow', img.get_frame())
            if cv2.waitKey(10) == ord('q'):
                cv2.destroyWindow('Slideshow')
                return


def start():
    cnt = 0
    images = []
    path = 'pictures'
    filenames = glob.glob(os.path.join(path, "*"))
    showWindow = tk.Tk()
    showWindow.attributes('-fullscreen', True)
    showWindow.mainloop()
I tried to display the text on the pictures with OpenCV, but I had problems keeping the text in place during the transitions. That's why I want to display the whole thing in a Tkinter window, so I can do the layout with grid, because I don't just want to display a single image but a slide show.
Does the method below (**) not work here, or do I just not understand it? Could someone help me?
(**) =
# Import the tkinter library
from tkinter import *
import numpy as np
import cv2
from PIL import Image, ImageTk

# Create an instance of tkinter frame
show_Window = Tk()
show_Window.geometry("700x550")

# Load the image
img = cv2.imread('tutorialspoint.png')

# Rearrange colors
blue, green, red = cv2.split(img)
img = cv2.merge((red, green, blue))
im = Image.fromarray(img)
imgtk = ImageTk.PhotoImage(image=im)

# Create a Label to display the image
Label(show_Window, image=imgtk).pack()
show_Window.mainloop()
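For what it's worth, the usual way to turn the snippet above into a slide show is to keep a single Label and swap its image on a timer with after(), rather than blocking in a while loop. This is only a sketch under that assumption; the file list is hypothetical and it is not the poster's code:

import tkinter as tk
import cv2
from PIL import Image, ImageTk

filenames = ["pictures/a.jpg", "pictures/b.jpg"]  # hypothetical slide list

window = tk.Tk()
label = tk.Label(window)
label.grid(row=0, column=0)          # grid layout, so other labels can sit next to it
index = 0

def show_next():
    global index
    frame = cv2.imread(filenames[index])
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    photo = ImageTk.PhotoImage(Image.fromarray(frame))
    label.configure(image=photo)
    label.image = photo              # keep a reference so it is not garbage collected
    index = (index + 1) % len(filenames)
    window.after(2000, show_next)    # schedule the next slide in 2 seconds

show_next()
window.mainloop()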
Question summary: My expected result is to place the watermark diagonally and, regardless of the width and height of the background image, have it always fit within the limits, with dimensions that maintain the quality of the logo.
In order not to lose the quality of the image that I use as a watermark, I had to readjust its size based on the width of the image where I am going to paste it, but as you can see in the image below the code, when the background image's height is too small, the watermark exceeds the limits of the image.
If I need to adjust the watermark according to the width of the other image to maintain quality, how should I adjust both the height and width so that the watermark fits perfectly inside the other image regardless of its height or width?
from PIL import Image


def watermark_with_transparency(input_image_path,
                                output_image_path,
                                watermark_image_path):
    TRANSPARENCY = 20
    angle = 30
    base_image = Image.open(input_image_path)
    w_img, h_img = base_image.size
    basewidth = w_img
    watermark = Image.open(watermark_image_path)
    wpercent = (basewidth / float(watermark.size[0]))
    hsize = int((float(watermark.size[1]) * float(wpercent)))
    watermark = watermark.resize((basewidth, hsize), Image.ANTIALIAS)
    watermark = watermark.rotate(angle, expand=1)
    w_logo, h_logo = watermark.size
    center_y = int(h_img / 2)
    center_x = int(w_img / 2)
    top_y = center_y - int(h_logo / 2)
    left_x = center_x - int(w_logo / 2)
    if watermark.mode != 'RGBA':
        alpha = Image.new('L', (w_img, h_img), 255)
        watermark.putalpha(alpha)
    paste_mask = watermark.split()[3].point(lambda i: i * TRANSPARENCY / 100.)
    base_image.paste(watermark, (left_x, top_y), mask=paste_mask)
    base_image.show()
    base_image.save(output_image_path)


if __name__ == '__main__':
    watermark_with_transparency(
        'jogos_de_hoje_na_tv.png',
        'watermark_create.png',
        'logo_com_transparencia.png'
    )
Current result:
I tried adding the same adjustment based on the height, like this:
basehight = h_img
hpercent = (basehight/float(watermark.size[0]))
wsize = int((float(watermark.size[0])*float(hpercent)))
watermark = watermark.resize((wsize,hsize), Image.ANTIALIAS)
But the result was a watermark that was far too tall and lost all quality in the resize.
I changed your code like this:
def watermark_with_transparency(input_image_path,
                                output_image_path,
                                watermark_image_path):
    TRANSPARENCY = 20
    angle = 30
    base_image = Image.open(input_image_path)
    base_image.show()
    w_img, h_img = base_image.size
    basewidth = w_img
    watermark = Image.open(watermark_image_path)
    watermark = watermark.rotate(angle, expand=True)
    wpercent = (basewidth / float(watermark.size[0]))
    hpercent = h_img / float(watermark.size[1])
    if wpercent < hpercent:
        hsize = int((float(watermark.size[1]) * float(wpercent)))
        watermark = watermark.resize((basewidth, hsize), Image.ANTIALIAS)
    else:
        wsize = int((float(watermark.size[0]) * float(hpercent)))
        watermark = watermark.resize((wsize, h_img), Image.ANTIALIAS)
    w_logo, h_logo = watermark.size
    center_y = int(h_img / 2)
    center_x = int(w_img / 2)
    top_y = center_y - int(h_logo / 2)
    left_x = center_x - int(w_logo / 2)
    if watermark.mode != 'RGBA':
        alpha = Image.new('L', (w_img, h_img), 255)
        watermark.putalpha(alpha)
    paste_mask = watermark.split()[3].point(lambda i: i * TRANSPARENCY / 100.)
    base_image.paste(watermark, (left_x, top_y), mask=paste_mask)
    base_image.show()
    base_image.save(output_image_path)
The idea is that when you resize your watermark you have to check which scale factor (the one based on the width or the one based on the height) is the smaller, and use that one. In the first version of your code, the resizing did not take into account that the new height could be larger than the input image's height.
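The same check can be expressed more compactly by taking the smaller of the two scale factors; a minimal sketch, reusing the names watermark, w_img and h_img from the function above:

wm_w, wm_h = watermark.size
scale = min(w_img / float(wm_w), h_img / float(wm_h))
# Image.ANTIALIAS as above; newer Pillow versions spell this Image.LANCZOS
watermark = watermark.resize((int(wm_w * scale), int(wm_h * scale)), Image.ANTIALIAS)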
I tried the resize functions in OpenCV and PIL respectively, and I found that the result from PIL is much better, even though according to the documentation the interpolation method is similar (e.g. bicubic).
Could someone provide any insight?
My complete code is below. I just want to resize the original image and pad it by pasting (for YOLO input).
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from PIL import Image


def letterbox_image_np(image, size):
    iw, ih = image.shape[1], image.shape[0]
    w, h = size
    scale = min(w / iw, h / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)
    # resize image according to (416,416) & orig size
    image = cv.resize(image, dsize=(nw, nh), interpolation=cv.INTER_CUBIC)
    # plt.imshow(image), plt.show()
    new_image = 128 * np.ones((h, w, 3), dtype=np.uint8)
    # plt.imshow(new_image), plt.show()
    offset_h = (h - nh) // 2
    offset_w = (w - nw) // 2
    new_image[offset_h:offset_h + nh, offset_w:offset_w + nw] = image
    return new_image


def letterbox_image_pil(image, size):
    iw, ih = image.size  # as "Image" object
    w, h = size
    scale = min(w / iw, h / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)
    # resize image according to (416,416) & orig size
    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))
    # plt.imshow(new_image), plt.show()
    new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
    return new_image


if __name__ == '__main__':
    # np_frame
    imgNP = cv.imread(r"C:\ProgamData\global_dataset\img_vid\down.jpg")
    # pil_frame
    imgPIL = cv.cvtColor(imgNP, cv.COLOR_BGR2RGB)
    imgPIL = Image.fromarray(np.uint8(imgPIL))
    size = (416, 416)
    # methodNP
    newImg1 = letterbox_image_np(imgNP, size)
    newImg1 = cv.cvtColor(newImg1, cv.COLOR_BGR2RGB)
    # methodPIL
    newImg2 = letterbox_image_pil(imgPIL, size)
    plt.imshow(newImg1), plt.show()
    plt.imshow(newImg2), plt.show()
Example by OpenCV: [image]
Example by PIL: [image]
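One detail that may explain part of the difference: OpenCV's resize documentation recommends cv.INTER_AREA for shrinking an image, while cv.INTER_CUBIC is mainly intended for enlarging. Below is a sketch of the NumPy letterbox with only the interpolation flag changed, as something to try rather than a definitive answer:

import cv2 as cv
import numpy as np

def letterbox_image_np_area(image, size):
    # Same letterboxing as letterbox_image_np above, but uses INTER_AREA,
    # which OpenCV recommends when shrinking an image.
    iw, ih = image.shape[1], image.shape[0]
    w, h = size
    scale = min(w / iw, h / ih)
    nw, nh = int(iw * scale), int(ih * scale)
    resized = cv.resize(image, (nw, nh), interpolation=cv.INTER_AREA)
    new_image = 128 * np.ones((h, w, 3), dtype=np.uint8)
    off_h, off_w = (h - nh) // 2, (w - nw) // 2
    new_image[off_h:off_h + nh, off_w:off_w + nw] = resized
    return new_image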
I'm getting a "module not found" error while executing my program.
import numpy as np
import cv2
from preprocessors import x_cord_contour, makeSquare, resize_to_pixel
I am quite sure that you are following along with the Deep Learning course by Rajeev D., and I further guess that you haven't downloaded the VM image as suggested in the video.
The functions x_cord_contour, makeSquare and resize_to_pixel are defined in a custom module. If you are following the course without the VM, just copy and paste the functions below into your code and remove the import statement.
import numpy as np
import cv2


def x_cord_contour(contour):
    # This function takes a contour from findContours
    # and outputs the x centroid coordinate
    M = cv2.moments(contour)
    return (int(M['m10'] / M['m00']))


def makeSquare(not_square):
    # This function takes an image and makes the dimensions square
    # It adds black pixels as the padding where needed
    BLACK = [0, 0, 0]
    img_dim = not_square.shape
    height = img_dim[0]
    width = img_dim[1]
    # print("Height = ", height, "Width = ", width)
    if (height == width):
        square = not_square
        return square
    else:
        doublesize = cv2.resize(not_square, (2 * width, 2 * height), interpolation=cv2.INTER_CUBIC)
        height = height * 2
        width = width * 2
        # print("New Height = ", height, "New Width = ", width)
        if (height > width):
            pad = int((height - width) / 2)
            # print("Padding = ", pad)
            doublesize_square = cv2.copyMakeBorder(doublesize, 0, 0, pad, pad, cv2.BORDER_CONSTANT, value=BLACK)
        else:
            pad = int((width - height) / 2)
            # print("Padding = ", pad)
            doublesize_square = cv2.copyMakeBorder(doublesize, pad, pad, 0, 0,
                                                   cv2.BORDER_CONSTANT, value=BLACK)
        doublesize_square_dim = doublesize_square.shape
        # print("Sq Height = ", doublesize_square_dim[0], "Sq Width = ", doublesize_square_dim[1])
        return doublesize_square


def resize_to_pixel(dimensions, image):
    # This function re-sizes an image to the specified dimensions
    buffer_pix = 4
    dimensions = dimensions - buffer_pix
    squared = image
    r = float(dimensions) / squared.shape[1]
    dim = (dimensions, int(squared.shape[0] * r))
    resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    img_dim2 = resized.shape
    height_r = img_dim2[0]
    width_r = img_dim2[1]
    BLACK = [0, 0, 0]
    if (height_r > width_r):
        resized = cv2.copyMakeBorder(resized, 0, 0, 0, 1, cv2.BORDER_CONSTANT, value=BLACK)
    if (height_r < width_r):
        resized = cv2.copyMakeBorder(resized, 1, 0, 0, 0, cv2.BORDER_CONSTANT, value=BLACK)
    p = 2
    ReSizedImg = cv2.copyMakeBorder(resized, p, p, p, p, cv2.BORDER_CONSTANT, value=BLACK)
    img_dim = ReSizedImg.shape
    height = img_dim[0]
    width = img_dim[1]
    # print("Padded Height = ", height, "Width = ", width)
    return ReSizedImg
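For context, here is a minimal, hypothetical usage sketch of these helpers on a thresholded image of digit blobs (the file name and threshold values are made up; it assumes the OpenCV 4.x findContours signature and that each contour has non-zero area):

import cv2

# Hypothetical input: an image containing dark digits on a light background
image = cv2.imread('digits.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)

contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Sort the contours left-to-right by their x centroid
contours = sorted(contours, key=x_cord_contour)

for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    roi = thresh[y:y + h, x:x + w]
    squared = makeSquare(roi)              # pad the crop to a square
    final = resize_to_pixel(20, squared)   # scale down and add a small border
    cv2.imshow('digit', final)
    cv2.waitKey(0)
cv2.destroyAllWindows()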
Quite simply, I'm learning how to edit photos with OpenCV/numpy.
My question is: why is the second function using the image created by the first?
I run two functions: one that colors columns in black and white, and a second that colors rows in black and white.
The first function runs fine, but the second one uses the image created by the first, so I end up with both rows and columns in black and white.
import cv2
import numpy as np
from matplotlib import pyplot as plt

img_source = "brad.jpg"


def read_image(image_source):
    #global img, width, height
    img = cv2.imread(image_source, 1)
    height, width = img.shape[:2]
    print("Image size: x ", width, " y ", height)
    return img, width, height


def black_and_white_cols(image_source):
    width_adjustment = 100
    total_cols = round(width / width_adjustment, 0)
    edited_image = image_source
    bw_image = cv2.imread(img_source, 0)
    # The next line is to convert to the right interface
    # https://stackoverflow.com/questions/11067962/is-it-possible-to-have-black-and-white-and-color-image-on-same-window-by-using-o
    bw_image_b = cv2.cvtColor(bw_image, cv2.COLOR_GRAY2BGR)
    for x in range(1, int(total_cols), 2):
        top_row = 0
        bottom_row = height
        left_col = x * width_adjustment
        right_col = (x * width_adjustment) + width_adjustment
        bw_part = bw_image_b[top_row:bottom_row, left_col:right_col]
        edited_image[top_row:bottom_row, left_col:right_col] = bw_part
    show_image(edited_image)
    return edited_image


def black_and_white_rows(image_source):
    width_adjustment = 100
    edited_image = image_source
    total_rows = round(height / width_adjustment, 0)
    bw_image = cv2.imread(img_source, 0)
    bw_image_b = cv2.cvtColor(bw_image, cv2.COLOR_GRAY2BGR)
    for x in range(1, int(total_rows), 2):
        top_row = x * width_adjustment
        bottom_row = (x * width_adjustment) + width_adjustment
        left_col = 0
        right_col = width
        bw_part = bw_image_b[top_row:bottom_row, left_col:right_col]
        edited_image[top_row:bottom_row, left_col:right_col] = bw_part
    show_image(edited_image)


def show_image(image_source):
    cv2.imshow('This is your image', image_source)
    cv2.waitKey(0)
    cv2.destroyAllWindows()


if __name__ == "__main__":
    img, width, height = read_image(img_source)
    new_image = black_and_white_cols(img)
    new_image_2 = black_and_white_rows(img)
This is the image after new_image = black_and_white_cols(img) runs.
And here's after new_image_2 = ... runs.
Why does the second image keep the black and white columns? I'm calling it using the very original img_source image, via read_image. Why is it using the column-edited image?
As noted in the comments, when you do edited_image = image_source, you only copy the reference to the image array; you don't clone the array itself. You can do
edited_image = image_source.copy()
which copies image_source into a new array, so later edits do not touch the original.
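A small, self-contained demonstration of the difference, using made-up arrays rather than the poster's image:

import numpy as np

a = np.zeros((2, 2), dtype=np.uint8)
view = a            # just another name for the same array
view[0, 0] = 255
print(a[0, 0])      # 255 -- the "original" changed too

b = np.zeros((2, 2), dtype=np.uint8)
clone = b.copy()    # independent copy of the data
clone[0, 0] = 255
print(b[0, 0])      # 0 -- the original is untouched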