I'm trying to divide an image into n equal parts and then compare each block to define an "equilibrium" in illustrations.
For example, in the image shown below the bottom tends to be similar to the top.
I've written this, but I got stuck and don't know what to do. Any help?
import numpy as np
from PIL import Image
import image_slicer
from sklearn.metrics.pairwise import cosine_similarity

tiles = image_slicer.slice('img/eq1.jpg', 2, save=False)
vectors = []
for tile in tiles:
    image = tile.image.convert('RGB')
    # average the RGB channels of every pixel (grayscale values per row),
    # then keep only the first row of the tile as its feature vector
    rows = np.array(image).tolist()
    colors = [np.mean(row, -1) for row in rows][0]
    vectors.append(colors)
    image.show()

print(cosine_similarity(vectors))
Okay, I kind of solved it by writing this:
import numpy as np
from PIL import Image
import image_slicer
from sklearn.metrics.pairwise import manhattan_distances

tiles = image_slicer.slice('img/ad.jpg', 4, save=False)
vectors = []
for tile in tiles:
    image = tile.image.convert('RGB')
    # same per-tile feature vector as above
    rows = np.array(image).tolist()
    colors = [np.mean(row, -1) for row in rows][0]
    vectors.append(colors)
    # image.show()

print(np.around(manhattan_distances(vectors)))
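For a quick sanity check of the "equilibrium" idea without image_slicer, here is a minimal sketch that splits the image into a top and a bottom half with NumPy and compares their mean colors (the file name and the 0-255 scale are assumptions):

import numpy as np
from PIL import Image

# hypothetical input file; replace with your own illustration
img = np.array(Image.open('img/eq1.jpg').convert('RGB'), dtype=float)

h = img.shape[0]
top, bottom = img[:h // 2], img[h // 2:]

# one 3-vector (mean R, G, B) per half
top_mean = top.reshape(-1, 3).mean(axis=0)
bottom_mean = bottom.reshape(-1, 3).mean(axis=0)

# a small distance means the two halves are roughly "balanced" in average color
print(np.linalg.norm(top_mean - bottom_mean))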
I cannot figure out how to draw a pixel in ipycanvas. I am drawing rectangles instead of pixels, and this makes drawing very slow.
I draw each rectangle using:
canvas.fill_rect
Code to display the image in ipycanvas:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import ipycanvas
from ipycanvas import Canvas
import requests
from io import BytesIO

url = r"https://wallpapercave.com/dwp1x/wp1816238.jpg"
response = requests.get(url)
img = Image.open(BytesIO(response.content))
array = img.tobytes()

canvas = Canvas(width=img.width, height=img.height)
with ipycanvas.hold_canvas():
    for i in range(int(len(array) / 3)):
        r = array[i * 3 + 0]  # red
        g = array[i * 3 + 1]  # green
        b = array[i * 3 + 2]  # blue
        canvas.fill_style = f"#{r:02x}{g:02x}{b:02x}"  # set the fill color
        canvas.fill_rect(i % img.width, i // img.width, 1, 1)  # draw a 1x1 rectangle
canvas
Output:
I am drawing the image pixel by pixel because I want to apply filters to images.
How do I draw pixels in ipycanvas?
Not sure if this will help, but since you're talking about filtering I assume you mean things like convolutions. NumPy and SciPy provide various ways of applying these and work well with images from Pillow.
For example:
import requests
from io import BytesIO
from PIL import Image
import numpy as np
from scipy import signal

image_req = requests.get("https://wallpapercave.com/dwp1x/wp1816238.jpg")
image_req.raise_for_status()
image = Image.open(BytesIO(image_req.content))

# create a Gaussian blur kernel of a given standard deviation
sd = 3
filt = np.outer(*2 * [signal.windows.gaussian(int(sd * 5) | 1, sd)])
filt /= filt.sum()

# interpret the image as a 3d array
arr = np.array(image)

# apply the filter to each channel independently; this loop runs in ~0.1 seconds
for chan in range(3):
    arr[:, :, chan] = signal.oaconvolve(arr[:, :, chan], filt, mode='same')

# turn the array back into an image for display in a notebook
Image.fromarray(arr)
This produces an image like:
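On the original question of getting pixels onto the canvas quickly: rather than one fill_rect per pixel, ipycanvas can draw a whole NumPy array at once with put_image_data. A minimal sketch, assuming an (H, W, 3) uint8 array such as the filtered arr above (here a random placeholder is used):

import numpy as np
from ipycanvas import Canvas, hold_canvas

# placeholder image; in practice this would be the filtered `arr` from above
arr = np.random.randint(0, 256, size=(200, 300, 3), dtype=np.uint8)

canvas = Canvas(width=arr.shape[1], height=arr.shape[0])
with hold_canvas():
    # draw the whole array in one call instead of one fill_rect per pixel
    canvas.put_image_data(arr, 0, 0)
canvas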
## This is how I loaded the list of images
import os
import glob
import cv2
import numpy as np
from skimage import color

## this loads the data; im_gry ends up with shape 11x480x600
list_images = []
for img in glob.glob('../proj/*.jpg'):
    ims = cv2.imread(img)
    list_images.append(ims)

im_arr = np.array(list_images)
im_gry = color.rgb2gray(im_arr)
## this code calculates the mean over all images,
## but it returns a 2d array of shape 480x600;
## I need to keep it 3d or matrix-like
m_df = im_gry.mean(axis=0)

## I need to change the 3d array (1x480x600) into a 1d list
## for further processing,
## but this does not work because
## m_df.shape[2] is missing
image_shape = m_df[0].shape
frames_reshaped = m_df.reshape([m_df.shape[0] * m_df.shape[1], m_df.shape[2]])
frames_reshaped.shape
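A minimal sketch of one way around this, assuming the goal is to keep the mean as a (1, 480, 600) array and also get a flat 1d version of it (the array sizes here are placeholders):

import numpy as np

# placeholder stack standing in for im_gry (11 grayscale images of 480x600)
im_gry = np.random.rand(11, 480, 600)

# keepdims=True keeps the result 3d: shape (1, 480, 600)
m_df = im_gry.mean(axis=0, keepdims=True)

# flatten to a 1d vector of length 480*600 for further processing
frames_reshaped = m_df.reshape(-1)
print(m_df.shape, frames_reshaped.shape)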
I need to invert the colors of an image in Python using PIL. The problem is that I only have to invert the colors of the right half of the image, and I don't know how to do it. Here is an example of how the image should look.
And here is the code I made, but it inverts the colors of the whole image.
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import PIL.ImageOps
image_file = Image.open("Abbildung1.jpg")
image_file.load()
image_data = np.asarray(image_file, dtype=np.uint8)
inverted_image = PIL.ImageOps.invert(image_file)
inverted_image.save("neuesBild.jpg")
You can use numpy to split the image into two parts, apply the transformation to one of them, and finally combine them.
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import PIL.ImageOps
image_file = Image.open("some_image.jpeg")
image_file.load()
image_data = np.asarray(image_file, dtype=np.uint8)
width = image_data.shape[1]
left_half = image_data[:,0:width//2, :]
right_half = image_data[:,width//2:, :]
inverted_image_right = np.asarray(PIL.ImageOps.invert(Image.fromarray(right_half)))
total_image = np.hstack((left_half, inverted_image_right))
inverted_image = Image.fromarray(total_image)
inverted_image.save("invertion_half.jpeg")
That's it:
from PIL import Image, ImageOps

img = Image.open('img.png').convert('RGB')
# crop the right half, invert it, and paste it back in place
right = img.crop((img.width // 2, 0, img.width, img.height))
img.paste(ImageOps.invert(right), box=(img.width // 2, 0))
We cropped the right half, inverted it, and pasted the cropped-and-inverted image back.
Then you can check:
img.show()
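As an aside, the same effect can be had with plain NumPy slicing, assuming an 8-bit RGB image (the file name is a placeholder):

import numpy as np
from PIL import Image

arr = np.array(Image.open('img.png').convert('RGB'))
w = arr.shape[1]
# invert only the right half: 255 - value flips each 8-bit channel
arr[:, w // 2:] = 255 - arr[:, w // 2:]
Image.fromarray(arr).show()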
I am trying to implement adaptive histogram equalization in Python. I take an image, split it into smaller regions, and then apply traditional histogram equalization to each of them. I then combine the smaller images into one and obtain the final resultant image. The final image appears very blocky and has a different contrast level in each region. Is there a way to maintain a uniform contrast across the regions so that the result looks like a single image instead of smaller images stitched together?
import cv2
import numpy as np
from matplotlib import pyplot as plt
import scipy.misc
import image_slicer
from image_slicer import join
from PIL import Image

img = 'watch.png'
num_tiles = 25
tiles = image_slicer.slice(img, num_tiles)

for tile in tiles:
    img = scipy.misc.imread(tile.filename)
    hist, bins = np.histogram(img.flatten(), 256, [0, 256])
    cdf = hist.cumsum()
    cdf_normalized = cdf * hist.max() / cdf.max()
    plt.plot(cdf_normalized, color='g')
    plt.hist(img.flatten(), 256, [0, 256], color='g')
    plt.xlim([0, 256])
    plt.legend(('cdf', 'histogram'), loc='upper left')
    # histogram equalization: map each pixel through the normalized cdf
    cdf_m = np.ma.masked_equal(cdf, 0)
    cdf_o = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
    cdf = np.ma.filled(cdf_o, 0).astype('uint8')
    img3 = cdf[img]
    cv2.imwrite(tile.filename, img3)
    tile.image = Image.open(tile.filename)

image = join(tiles)
image.save('watch-join.png')
I reviewed the actual algorithm and came up with the following implementation. I am sure there is a better way to do this. Any suggestions are appreciated.
import numpy as np
import cv2

img = cv2.imread('watch.png', 0)
print(img)
img_size = img.shape
print(img_size)

img_mod = np.zeros((600, 800))

# rank-based equalization over a sliding 30x30 window
for i in range(0, img_size[0] - 30):
    for j in range(0, img_size[1] - 30):
        kernel = img[i:i + 30, j:j + 30]
        for k in range(0, 30):
            for l in range(0, 30):
                element = kernel[k, l]
                rank = 0
                for m in range(0, 30):
                    for n in range(0, 30):
                        if kernel[k, l] > kernel[m, n]:
                            rank = rank + 1
                img_mod[i, j] = (rank * 255) / 900

im = np.array(img_mod, dtype=np.uint8)
cv2.imwrite('target.png', im)
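For what it's worth, OpenCV also ships a contrast-limited adaptive histogram equalization (CLAHE) routine that avoids the blocky look by clipping per-tile histograms and interpolating between tiles. A minimal sketch on a grayscale image (the clip limit and tile grid values are just example settings):

import cv2

# load the image as 8-bit grayscale
img = cv2.imread('watch.png', 0)

# equalize per tile with a clipped histogram, then interpolate between tiles
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
equalized = clahe.apply(img)

cv2.imwrite('watch-clahe.png', equalized)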
I have read the image data from cifar-10-batches-python:
import os
import numpy as np
from scipy.misc import imresize

# read data (the CIFAR-10 batch files are pickled, hence allow_pickle/encoding)
data_dir = "F:\\dataSet\\cifar-10-batches-py"
testdata_dir = "F:\\dataSet\\cifar-10-batches-py\\test_batch"

da = np.load(testdata_dir, allow_pickle=True, encoding='latin1')
testdata = da['data']
testlabel = np.array(da['labels'])

train = np.empty((50000, 3072))
label = np.empty((50000,))
for i in range(1, 2):
    fname = 'data_batch_' + str(i)
    path1 = os.path.join(data_dir, fname)
    data = np.load(path1, allow_pickle=True, encoding='latin1')
    train[10000 * (i - 1):10000 * i, :] = data['data']
    label[10000 * (i - 1):10000 * i, ] = data['labels']

def intlabel(label):
    for i in range(label.shape[0]):
        label[i, ] = int(float(label[i, ]))
    return label

def intdata(data):
    n = data.shape[0]
    for i in range(n):
        for j in range(3072):
            data[i, j] = int(float(data[i, j]))
    return data

label, train = intlabel(label), intdata(train)
train, label = np.array(train), np.array(label)
train = train.reshape(train.shape[0], 3, 32, 32)
train = train.astype('float32')
Then I don't know how to resize the data.
I used the imresize function to resize the images, but the result was not good.
You can use OpenCV to pre-process the images:
import cv2
img = cv2.imread('IMAGE_LOCATION')
img_fin = cv2.resize(img, (227, 227))
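To apply that to the whole CIFAR array from the question, one approach is to move the channel axis last and resize each image in a loop. A minimal sketch with a placeholder batch (the (227, 227) target size matches the answer above):

import cv2
import numpy as np

# placeholder standing in for the `train` array of shape (N, 3, 32, 32)
train = np.random.rand(8, 3, 32, 32).astype('float32')

# cv2.resize expects (H, W, C), so move the channel axis to the end
train_hwc = train.transpose(0, 2, 3, 1)

resized = np.empty((train_hwc.shape[0], 227, 227, 3), dtype='float32')
for i, im in enumerate(train_hwc):
    # bilinear interpolation by default
    resized[i] = cv2.resize(im, (227, 227))

print(resized.shape)  # (8, 227, 227, 3)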