Can someone tell me how to rotate only part of an image like this:
How do I find the coordinates / center of this image?
I can rotate the whole picture using this:
from PIL import Image

def rotate_image():
    img = Image.open("nime1.png")
    img.rotate(45).save("plus45.png")
    img.rotate(-45).save("minus45.png")
    img.rotate(90).save("90.png")
    img.transpose(Image.ROTATE_90).save("90_trans.png")
    img.rotate(180).save("180.png")

if __name__ == '__main__':
    rotate_image()
You can crop an area of the picture into a new variable. In this case, I crop a 120x120 pixel box out of the original image, rotate it by 90 degrees, and paste it back onto the original.
from PIL import Image
img = Image.open('./image.jpg')
sub_image = img.crop(box=(200,0,320,120)).rotate(90)
img.paste(sub_image, box=(200,0))
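To keep or inspect the result you would then save or show the modified image, e.g. (the output filename here is just an example):

img.save('image_with_rotated_box.jpg')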
So I thought about this a bit more and crafted a function that applies a circular mask to the cropped image before rotation. This allows rotation by an arbitrary angle without odd corner artifacts.
import numpy
from PIL import Image

def circle_rotate(image, x, y, radius, degree):
    img_arr = numpy.asarray(image)
    box = (x - radius, y - radius, x + radius + 1, y + radius + 1)
    crop = image.crop(box=box)
    crop_arr = numpy.asarray(crop)
    # build the circle mask
    mask = numpy.zeros((2 * radius + 1, 2 * radius + 1))
    for i in range(crop_arr.shape[0]):
        for j in range(crop_arr.shape[1]):
            if (i - radius) ** 2 + (j - radius) ** 2 <= radius ** 2:
                mask[i, j] = 1
    # create the new circular image: RGB from the crop, alpha from the mask
    sub_img_arr = numpy.empty(crop_arr.shape[:2] + (4,), dtype='uint8')
    sub_img_arr[:, :, :3] = crop_arr[:, :, :3]
    sub_img_arr[:, :, 3] = mask * 255
    sub_img = Image.fromarray(sub_img_arr, "RGBA").rotate(degree)
    i2 = image.copy()
    i2.paste(sub_img, box[:2], sub_img.convert('RGBA'))
    return i2
i2 = circle_rotate(img, 260, 60, 60, 45)
i2
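The bare i2 on the last line only displays the result inside a notebook; in a plain script you would show or save it explicitly, for example:

i2.show()
i2.save("circle_rotated.png")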
You can solve this problem as follows. Say you have img = Image.open("nime1.png"):
Create a copy of the image using img2 = img.copy()
Crop img2 at the desired location using img2.crop(). You can read how to do this here
Rotate the crop and paste it back onto img at the appropriate location using img.paste() (see the sketch below)
Notes:
To find the center coordinate, you can divide the width and height by 2 :)
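A minimal sketch of those steps, assuming a hypothetical 100x100 box around the center and a 45-degree rotation:

from PIL import Image

img = Image.open("nime1.png")
cx, cy = img.width // 2, img.height // 2    # center coordinates
box = (cx - 50, cy - 50, cx + 50, cy + 50)  # example region to rotate
region = img.copy().crop(box).rotate(45)    # copy, crop, rotate
img.paste(region, box[:2])                  # paste back at the same spot
img.save("partially_rotated.png")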
Have a look at the image; it will give you a better idea of what I want to achieve. I want to rotate the image and fill the black parts just like in the required image.
import cv2
import numpy as np

# Read the image
img = cv2.imread("input.png")
# Get the image size
h, w = img.shape[:2]
# Define the rotation matrix
M = cv2.getRotationMatrix2D((w / 2, h / 2), 30, 1)
# Rotate the image
rotated = cv2.warpAffine(img, M, (w, h))
# Mask of the black regions introduced by the rotation
mask = np.zeros(rotated.shape[:2], dtype=np.uint8)
mask[np.where((rotated == [0, 0, 0]).all(axis=2))] = 255
img_show(mask)  # img_show is my own display helper
With this code I am able to get a mask of the black regions. Now I want to replace these black regions with the image content, as shown in image 1. Is there a better way to achieve this?
Use the borderMode parameter of warpAffine.
You want to pass the BORDER_WRAP value.
Here's the result. This does exactly what you described with your first picture.
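A minimal sketch of that idea, reusing img, h, and w from the question's code and the same 30-degree rotation:

import cv2

M = cv2.getRotationMatrix2D((w / 2, h / 2), 30, 1)
rotated = cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_WRAP)
cv2.imwrite("rotated_wrapped.png", rotated)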
Here is an approach: first create a larger image consisting of 3x3 tiles of your original image. When you rotate this large image and then cut out only its center, you have your desired result.
import cv2
import numpy as np

# Read the image
img = cv2.imread("input.png")
# Get the size of the original image
h, w = img.shape[:2]
# Make a large image containing 3 copies of the original image in each direction
large_img = np.tile(img, [3, 3, 1])
cv2.imshow("large_img", large_img)
# Define the rotation matrix; rotate around the center of the large image
M = cv2.getRotationMatrix2D((w * 3 / 2, h * 3 / 2), 30, 1)
# Rotate the image
rotated = cv2.warpAffine(large_img, M, (w * 3, h * 3))
# Crop only the center tile (rows are the height axis, columns the width axis)
cropped_image = rotated[h:h * 2, w:w * 2, :]
cv2.imshow("cropped_image", cropped_image)
cv2.waitKey(0)
I am using cvlib for detecting objects and I want to be able to save the cropped image based on the bbox coordinates.
I have this in my code:
import io

import cv2
import cvlib as cv
import numpy as np
from cvlib.object_detection import draw_bbox

def detect_object(img):
    # Open image
    image_stream = io.BytesIO(img)
    image_stream.seek(0)
    file_bytes = np.asarray(bytearray(image_stream.read()), dtype=np.uint8)
    frame = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
    # Detection
    bbox, label, conf = cv.detect_common_objects(frame)
    output_image = draw_bbox(frame, bbox, label, conf)
    return output_image, bbox, label, conf
and when I print bbox I get:
[3, -23, 1231, 731]
So, I want to use these coordinates to crop the original image and save only the detected object defined by these coordinates.
Something like this:
crop = output_image[bbox[2]:bbox[1], bbox[0]:bbox[3],:]
cv2.imwrite("crop.png", crop)
But when I do this, I realise that the crop doesn't contain the desired object; the coordinates seem wrong.
How can I fix it? Why am I getting negative coordinates?
My Image is 1280x720 and the desired object occupies approximately the entire image.
I solved it using the comment from @Christoph Rackwitz as a reference, i.e. clamping the offsets:
image_height, image_width, image_channels = output_image.shape
box_xmin, box_ymin, box_xmax, box_ymax = bbox
if box_xmin < 0:
box_xmin = 0
if box_ymin < 0:
box_ymin = 0
if box_xmax > image_width:
box_xmax = image_width
if box_ymax > image_height:
box_ymax = image_height
crop = output_image[box_ymin:box_ymax, box_xmin:box_xmax,:]
cv2.imwrite("crop.jpg", crop)
I am working on a panorama with Python OpenCV. Can someone show me how to get rid of the black lines in my final images? I am thinking that maybe I should first check for the color, i.e. (0, 0, 0), before copying a pixel to the atlas image, but I am not quite sure how to do that.
def warpTwoImages(img1, img2, H):
    '''warp img2 to img1 with homography H'''
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    pts1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
    pts2 = np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)
    pts2_ = cv2.perspectiveTransform(pts2, H)
    pts = np.concatenate((pts1, pts2_), axis=0)
    [xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)
    [xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)
    t = [-xmin, -ymin]
    Ht = np.array([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]])  # translate
    result = cv2.warpPerspective(img2, Ht.dot(H), (xmax - xmin, ymax - ymin))
    result[t[1]:h1 + t[1], t[0]:w1 + t[0]] = img1
    return result
This answer depends on the warpPerspective function working with RGBA.
You can try to use the alpha channel of each image.
Before warping, convert each image to RGBA (see the code below), where the alpha channel is 0 for the black lines and 255 for all other pixels.
import cv2
import numpy as np
# Read img
img = cv2.imread('i.jpg')
# Create mask from all the black lines
mask = np.zeros((img.shape[0],img.shape[1]),np.uint8)
cv2.inRange(img,(0,0,0),(1,1,1),mask)
mask[mask==0]=1
mask[mask==255]=0
mask = mask*255
b_channel, g_channel, r_channel = cv2.split(img)
# Create a new image with 4 channels; the fourth channel, alpha, gives the opacity of each pixel
newImage = cv2.merge((b_channel, g_channel, r_channel, mask))
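A rough sketch of how the 4-channel image could then be used (H, out_w, out_h, and panorama here are placeholders from your own stitching pipeline; the idea is to warp newImage and copy only the pixels whose alpha is non-zero into the atlas):

warped = cv2.warpPerspective(newImage, H, (out_w, out_h))
alpha = warped[:, :, 3]
panorama[alpha > 0] = warped[alpha > 0][:, :3]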
I have an image I load with:
im = cv2.imread(filename)
I want to keep data that is in the center of the image. I created a circle as a mask of the area I want to keep.
I created the circle with:
height,width,depth = im.shape
circle = np.zeros((height,width))
cv2.circle(circle, (width // 2, height // 2), 280, 1, thickness=-1)
How can I mask out the data outside of the circle from the original image?
masked_data = im * circle
does not work.
Use cv2.bitwise_and and pass the circle as mask.
im = cv2.imread(filename)
height,width,depth = im.shape
circle_img = np.zeros((height,width), np.uint8)
cv2.circle(circle_img, (width // 2, height // 2), 280, 1, thickness=-1)
masked_data = cv2.bitwise_and(im, im, mask=circle_img)
cv2.imshow("masked", masked_data)
cv2.waitKey(0)
circle is just a 2D array with 1.0s and 0.0s. NumPy needs help to understand what you want to do with the third dimension of im, so you must give circle an extra axis; then your line will work:
masked_data = im * circle[..., np.newaxis]
But note that this masking simply sets the color to (0, 0, 0) for pixels outside the circle, given that your image lacks an alpha channel.
However, you have another potential problem: circle will be of the default data type (which will probably be float64). That's not good for your image, so you should change the line where you create circle to
circle = np.zeros((height, width), dtype=im.dtype)
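Putting the two corrections together, a minimal sketch using the same names as the question (filename as in your code):

import cv2
import numpy as np

im = cv2.imread(filename)
height, width, depth = im.shape
circle = np.zeros((height, width), dtype=im.dtype)
cv2.circle(circle, (width // 2, height // 2), 280, 1, thickness=-1)
masked_data = im * circle[..., np.newaxis]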
Using NumPy assignment to an indexed array:
im[circle == 0] = [0, 0, 0]
In this case, if you want a circular image, you must write a new algorithm: first you need access to the coordinates of the pixels, then you can check which pixels lie outside the circle and replace them with some value (or NULL, if your image format accepts it).
Here is an example:
import cv2
import numpy as np
im = cv2.imread('sss.png')
def facechop(im):
height,width,depth = im.shape
#circle = np.zeros((height,width))
#print circle
x=width/2
y=height/2
circle=cv2.circle(im,(width/2,height/2),180,1,thickness=1)
#newcameramtx, roi=cv2.getOptimalNewCameraMatrix(im,10,(w,h),1,(w,h))
cv2.rectangle(im,(x-180,y-180),(x+180,y+180),(0,0,255),2)
crop_img = im[y-180:y+180,x-180:x+180]
lastim=np.equal(crop_img,circle)
#dd=np.logical_and(crop_img,circle)
for i in range(len(last_im)) :
if last_im[i].all()==False:
crop_img[i]=[0,0,0]
cv2.imshow('im',crop_img)
if __name__ == '__main__':
facechop(im)
while(True):
key = cv2.waitKey(20)
if key in [27, ord('Q'), ord('q')]:
break
I have a large number of images of a fixed size (say 500*500). I want to write a python script which will resize them to a fixed size (say 800*800) but will keep the original image at the center and fill the excess area with a fixed color (say black).
I am using PIL. I can resize the image using the resize function now, but that changes the aspect ratio. Is there any way to do this?
You can create a new image with the desired new size, and paste the old image in the center, then saving it. If you want, you can overwrite the original image (are you sure? ;o)
from PIL import Image

old_im = Image.open('someimage.jpg')
old_size = old_im.size
new_size = (800, 800)
new_im = Image.new("RGB", new_size)  # luckily, this is already black!
box = tuple((n - o) // 2 for n, o in zip(new_size, old_size))
new_im.paste(old_im, box)
new_im.show()
# new_im.save('someimage.jpg')
You can also set the color of the new border with a third argument of Image.new() (for example: Image.new("RGB", new_size, "White"))
Yes, there is.
Try something like this:
from PIL import Image, ImageOps
ImageOps.expand(Image.open('original-image.png'),border=300,fill='black').save('imaged-with-border.png')
You can write the same in several lines:
from PIL import Image, ImageOps
img = Image.open('original-image.png')
img_with_border = ImageOps.expand(img,border=300,fill='black')
img_with_border.save('imaged-with-border.png')
And you say that you have a list of images, so you need a loop to process all of them:
from PIL import Image, ImageOps

for i in list_of_images:
    img = Image.open(i)
    img_with_border = ImageOps.expand(img, border=300, fill='black')
    img_with_border.save('bordered-%s' % i)
Alternatively, if you are using OpenCV, they have a function called copyMakeBorder that allows you to add padding to any of the sides of an image. Beyond solid colors, they've also got some cool options for fancy borders like reflecting or extending the image.
import cv2
img = cv2.imread('image.jpg')
color = [101, 52, 152] # 'cause purple!
# border widths; I set them all to 150
top, bottom, left, right = [150]*4
img_with_border = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
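For the fancier borders mentioned above, you could simply swap the border flag, e.g. a reflected border (a sketch reusing img and the widths from above):

img_reflected = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_REFLECT)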
Sources: OpenCV border tutorial and
OpenCV 3.1.0 Docs for copyMakeBorder
PIL's crop method can actually handle this for you by using numbers that are outside the bounding box of the original image, though it's not explicitly stated in the documentation. Negative numbers for left and top will add black pixels to those edges, while numbers greater than the original width and height for right and bottom will do the same on those edges.
This code accounts for odd pixel sizes:
from PIL import Image

with Image.open('/path/to/image.gif') as im:
    old_size = im.size
    new_size = (800, 800)
    if new_size > old_size:
        # Set number of pixels to expand to the left, top, right,
        # and bottom, making sure to account for even or odd numbers
        if old_size[0] % 2 == 0:
            add_left = add_right = (new_size[0] - old_size[0]) // 2
        else:
            add_left = (new_size[0] - old_size[0]) // 2
            add_right = ((new_size[0] - old_size[0]) // 2) + 1
        if old_size[1] % 2 == 0:
            add_top = add_bottom = (new_size[1] - old_size[1]) // 2
        else:
            add_top = (new_size[1] - old_size[1]) // 2
            add_bottom = ((new_size[1] - old_size[1]) // 2) + 1
        left = 0 - add_left
        top = 0 - add_top
        right = old_size[0] + add_right
        bottom = old_size[1] + add_bottom
        # By default, the added pixels are black
        im = im.crop((left, top, right, bottom))
Instead of the 4-tuple, you could use a 2-tuple to add the same number of pixels on the left/right and top/bottom, or a single value to add the same number of pixels to all sides.
It is important to consider old dimension, new dimension and their difference here. If the difference is odd (not even), you will need to specify slightly different values for left, top, right and bottom borders.
Assume the old dimensions are ow, oh and the new ones are nw, nh.
So, this would be the answer:
from PIL import Image, ImageOps

img = Image.open('original-image.png')
ow, oh = img.size    # old dimensions
nw, nh = 800, 800    # new dimensions (example from the question)
deltaw = nw - ow
deltah = nh - oh
ltrb_border = (deltaw // 2, deltah // 2, deltaw - (deltaw // 2), deltah - (deltah // 2))
img_with_border = ImageOps.expand(img, border=ltrb_border, fill='black')
img_with_border.save('imaged-with-border.png')
You can load the image with scipy.misc.imread as a numpy array. Then create an array with the desired background with numpy.zeros((height, width, channels)) and paste the image at the desired location:
import numpy as np
import scipy.misc

im = scipy.misc.imread('foo.jpg', mode='RGB')
height, width, channels = im.shape
# make a canvas with the new (target) size
new_height, new_width = 800, 800
im_bg = np.zeros((new_height, new_width, channels), dtype=np.uint8)
im_bg = (im_bg + 1) * 255  # e.g., make it white
# Your work: Compute where it should be
pad_left = ...
pad_top = ...
im_bg[pad_top:pad_top + height,
      pad_left:pad_left + width,
      :] = im
# im_bg is now the image with the background.
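If, as in the question, the original image should sit in the center of the canvas, one possible way to compute those offsets (using the names above) is:

pad_left = (new_width - width) // 2
pad_top = (new_height - height) // 2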
import tkinter as tk
from PIL import Image, ImageTk

ximg = Image.open(qpath)             # qpath: path to the image (defined elsewhere)
xwid, xhgt = func_ResizeImage(ximg)  # func_ResizeImage: own helper returning the target size
qpanel_3 = tk.Frame(Body, width=xwid + 10, height=xhgt + 10, bg='white', bd=5)  # Body: parent widget
ximg = ximg.resize((xwid, xhgt), Image.ANTIALIAS)  # Image.LANCZOS in newer Pillow
ximg = ImageTk.PhotoImage(ximg)
panel = tk.Label(qpanel_3, image=ximg)
panel.image = ximg
panel.grid(row=2)
from PIL import Image
from PIL import ImageOps
img = Image.open("dem.jpg").convert("RGB")
This part will add black borders at the sides (10% of width)
img_side = ImageOps.expand(img, border=(int(0.1*img.size[0]),0,int(0.1*img.size[0]),0), fill=(0,0,0))
img_side.save("sunset-sides.jpg")
This part will add black borders at the bottom & top (10% of height)
img_updown = ImageOps.expand(img, border=(0,int(0.1*img.size[1]),0,int(0.1*img.size[1])), fill=(0,0,0))
img_updown.save("sunset-top_bottom.jpg")
This part will add black borders at the bottom,top & sides (10% of height-width)
img_updown_side = ImageOps.expand(img, border=(int(0.1*img.size[0]),int(0.1*img.size[1]),int(0.1*img.size[0]),int(0.1*img.size[1])), fill=(0,0,0))
img_updown_side.save("sunset-all_sides.jpg")
img.close()
img_side.close()
img_updown.close()
img_updown_side.close()