I am trying to create an algorithm that scrambles the pixels of an image in a way that lets me bring the image back to how it was before, but I do not know how to do this.
I'm using Python and PIL, but I can use other libraries.
Example: [original image] to [scrambled image] and back to [original image].
Thank you.
This should do it. There's no error handling, it doesn't follow PEP 8, it uses slow PIL operations, and it doesn't use an argument-parsing library. I'm sure there are other bad things about it too.
It works by seeding Python's random number generator with an invariant of the image under scrambling: the hash of the image size. Since the size doesn't change, a random sequence built on it will be the same for all images that share that size. That sequence is used as a one-to-one mapping of pixel positions, so it's reversible.
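To make the reversibility concrete, here's a tiny standalone sketch (not part of the script below): reseeding with the same value reproduces the same shuffle, so the permutation can be inverted.

import random

size = (4, 3)                    # stands in for img.size
random.seed(hash(size))          # seed used for scrambling...
perm = list(range(12))
random.shuffle(perm)

random.seed(hash(size))          # ...and again for unscrambling
perm_again = list(range(12))
random.shuffle(perm_again)
assert perm == perm_again        # identical sequence, so it can be undone

data = list("ABCDEFGHIJKL")
scrambled = [data[i] for i in perm]
restored = [None] * len(data)
for out_pos, src in enumerate(perm):
    restored[src] = scrambled[out_pos]   # invert the permutation
assert restored == data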
The script may be invoked twice from a shell to create two images, "scrambled.png" and "unscrambled.png". "Qfhe3.png" is the source image.
python scramble.py scramble "./Qfhe3.png"
python scramble.py unscramble "./scrambled.png"
# scramble.py
from PIL import Image
import sys
import os
import random

def openImage():
    return Image.open(sys.argv[2])

def operation():
    return sys.argv[1]

def seed(img):
    random.seed(hash(img.size))

def getPixels(img):
    w, h = img.size
    pxs = []
    for x in range(w):
        for y in range(h):
            pxs.append(img.getpixel((x, y)))
    return pxs

def scrambledIndex(pxs):
    idx = list(range(len(pxs)))
    random.shuffle(idx)
    return idx

def scramblePixels(img):
    seed(img)
    pxs = getPixels(img)
    idx = scrambledIndex(pxs)
    out = []
    for i in idx:
        out.append(pxs[i])
    return out

def unScramblePixels(img):
    seed(img)
    pxs = getPixels(img)
    idx = scrambledIndex(pxs)
    out = list(range(len(pxs)))
    cur = 0
    for i in idx:
        out[i] = pxs[cur]
        cur += 1
    return out

def storePixels(name, size, pxs):
    outImg = Image.new("RGB", size)
    w, h = size
    pxIter = iter(pxs)
    for x in range(w):
        for y in range(h):
            outImg.putpixel((x, y), next(pxIter))
    outImg.save(name)

def main():
    img = openImage()
    if operation() == "scramble":
        pxs = scramblePixels(img)
        storePixels("scrambled.png", img.size, pxs)
    elif operation() == "unscramble":
        pxs = unScramblePixels(img)
        storePixels("unscrambled.png", img.size, pxs)
    else:
        sys.exit("Unsupported operation: " + operation())

if __name__ == "__main__":
    main()
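A quick way to check the round trip (a small sketch, assuming Pillow is installed and both commands above have been run) is to diff the source image against unscrambled.png:

from PIL import Image, ImageChops

original = Image.open("./Qfhe3.png").convert("RGB")
restored = Image.open("./unscrambled.png").convert("RGB")
diff = ImageChops.difference(original, restored)
print(diff.getbbox())   # None means the two images are pixel-identical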
So I'm trying to resize an image and maintain its ratio so that it perfectly fits in 1980x1080 in the moviepy library.
Currently, I'm doing this with a function like this:
def FitClip(size):
    # size is basically clip.size
    clipRes = size
    # print(size)
    v = ''
    if clipRes[0] >= clipRes[1]:
        toresize = 1980
        v = 'h'
    else:
        toresize = 1080
        v = 'v'
    return [toresize, v]
and I'm calling it like this:
def generate_clip_var(clip_name, start_time):
    clip_audio = AudioFileClip(f"out/{clip_name}.mp3").set_start(start_time + 2)
    clip_video = ImageClip(f"out/{clip_name}.jpg").set_duration(1).set_start(start_time)
    if FitClip(clip_video.size)[1] == 'v':
        clip_video = ImageClip(f"out/{clip_name}.jpg").set_duration(clip_audio.duration + 1).set_position("center").set_audio(clip_audio).resize(height=FitClip(clip_video.size)[0]).set_start(start_time)
    else:
        clip_video = ImageClip(f"out/{clip_name}.jpg").set_duration(clip_audio.duration + 1).set_position("center").set_audio(clip_audio).resize(width=FitClip(clip_video.size)[0]).set_start(start_time)
    return [clip_audio, clip_video]
My problem is that whenever the image is too small or too big, it just goes out of bounds.
Help!
You could try the moviepy native function resize():
from moviepy.video.fx.resize import resize
def generate_clip_var(clip_name, start_time):
    clip_audio = AudioFileClip(f"out/{clip_name}.mp3").set_start(start_time + 2)
    clip_video = ImageClip(f"out/{clip_name}.jpg").set_duration(1).set_start(start_time)
    # Resize the clip_video object to fit within a 1980x1080 frame while maintaining its aspect ratio
    clip_video = resize(clip_video, width=1980, height=1080)
    # Set the duration and audio of the resized clip_video object
    clip_video = clip_video.set_duration(clip_audio.duration + 1).set_position("center").set_audio(clip_audio).set_start(start_time)
    return [clip_audio, clip_video]
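If you find that passing both width and height distorts the image in your version of moviepy, here is a minimal ratio-preserving sketch (the helper name and the 1980x1080 defaults are mine, not moviepy API):

def fit_within(clip, max_w=1980, max_h=1080):
    # scale factor so the clip just fits inside the target frame, ratio preserved
    scale = min(max_w / clip.w, max_h / clip.h)
    return clip.resize(scale)   # moviepy's resize accepts a single scale factor

You could then call clip_video = fit_within(clip_video) in place of the resize call above.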
I am trying to read all the touching pixels with the same color in an image.
For that I use recursive functions. When I check one pixel, I look to the right, left, top and bottom to see if the neighbouring pixel is the same color. If it is, I add it to an array; otherwise I don't.
The code is as follow:
import tkinter as tk
from PIL import Image
import sys

sys.setrecursionlimit(200000)

## WINDOWS
# to launch in debug mode
imgToDraw = Image.open('assets-test\\smile-face.png')
# to launch normally
# imgToDraw = Image.open('..\\assets-test\\smile-face.png')
## LINUX
# imgToDraw = Image.open('../assets-test/smile-face.png')

imgPixels = imgToDraw.load()
imgWidth = imgToDraw.size[0]
imgHeight = imgToDraw.size[1]

# an element is a part of the image: a bunch of pixels with approximately the same color,
# where each pixel touches at least one other pixel of the same element
elements = []
isPixelChecked = [[False for y in range(imgWidth)] for x in range(imgHeight)]

# max tolerable difference between two colors to consider them the same
# the higher the value is, the more colors will be considered the same
COLOR_TOLERANCE = 10
reccursionCount = 0

class Element:
    def __init__(self, color):
        self.pixels = []
        self.color = color

    def addPixel(self, pixel):
        self.pixels.append(pixel)

class Pixel:
    def __init__(self, x, y, color):
        self.x = x          # x position of the pixel
        self.y = y          # y position of the pixel
        self.color = color  # color is a tuple (r,g,b)

def cutImageInElements():
    global element
    completeElement(element.pixels)

def completeElement(elemPixels):
    global reccursionCount
    global isPixelChecked
    reccursionCount += 1
    nbPixels = len(elemPixels)
    xIndex = elemPixels[nbPixels - 1].x
    yIndex = elemPixels[nbPixels - 1].y
    xRightIdx = elemPixels[nbPixels - 1].x + 1
    xLeftIdx = elemPixels[nbPixels - 1].x - 1
    yBottomIdx = elemPixels[nbPixels - 1].y + 1
    yTopIdx = elemPixels[nbPixels - 1].y - 1
    isPixelChecked[xIndex][yIndex] = True
    if (xRightIdx < imgWidth) and isPixelChecked[xRightIdx][yIndex] == False:
        if isColorAlmostSame(imgPixels[elemPixels[0].x, elemPixels[0].y], imgPixels[xRightIdx, yIndex]):
            pixelAppended = Pixel(xRightIdx, yIndex, imgPixels[xRightIdx, yIndex])
            elemPixels.append(pixelAppended)
            completeElement(elemPixels)
    if (xLeftIdx >= 0) and isPixelChecked[xLeftIdx][yIndex] == False:
        if isColorAlmostSame(imgPixels[elemPixels[0].x, elemPixels[0].y], imgPixels[xLeftIdx, yIndex]):
            pixelAppended = Pixel(xLeftIdx, yIndex, imgPixels[xLeftIdx, yIndex])
            elemPixels.append(pixelAppended)
            completeElement(elemPixels)
    if (yBottomIdx < imgHeight) and isPixelChecked[xIndex][yBottomIdx] == False:
        if isColorAlmostSame(imgPixels[elemPixels[0].x, elemPixels[0].y], imgPixels[xIndex, yBottomIdx]):
            pixelAppended = Pixel(xIndex, yBottomIdx, imgPixels[xIndex, yBottomIdx])
            elemPixels.append(pixelAppended)
            completeElement(elemPixels)
    if (yTopIdx >= 0) and isPixelChecked[xIndex][yTopIdx] == False:
        if isColorAlmostSame(imgPixels[elemPixels[0].x, elemPixels[0].y], imgPixels[xIndex, yTopIdx]):
            pixelAppended = Pixel(xIndex, yTopIdx, imgPixels[xIndex, yTopIdx])
            elemPixels.append(pixelAppended)
            completeElement(elemPixels)

def isColorAlmostSame(pixel1, pixel2):
    redDiff = abs(pixel1[0] - pixel2[0])
    greenDiff = abs(pixel1[1] - pixel2[1])
    blueDiff = abs(pixel1[2] - pixel2[2])
    if redDiff < COLOR_TOLERANCE and greenDiff < COLOR_TOLERANCE and blueDiff < COLOR_TOLERANCE:
        return True
    else:
        return False

def printPixelsArr(pixelsArr):
    for x in range(0, len(pixelsArr)):
        print(pixelsArr[x].x, pixelsArr[x].y, pixelsArr[x].color)

if __name__ == '__main__':
    pixel = Pixel(0, 0, imgPixels[0, 0])
    element = Element(pixel.color)
    element.addPixel(pixel)
    cutImageInElements()
    print("NbReccursive call: ", reccursionCount)
This code works for small images of size 100x100, but crashes with an image of 400x400 with the error "terminated by signal SIGSEGV (Address boundary error)" when I launch the program on WSL2. When I run the program from cmd or PowerShell it just crashes, but with no error code/message.
I cannot understand why it works with some image sizes and not others. I can only think that the memory runs out or something, but in the task manager the program uses almost no memory.
Not sure why that's failing, but that much recursion in Python isn't a great idea. I'd suggest reading about tail recursion that other languages use to make some recursive algorithms consume constant stack space. Note that your algorithm is not tail recursive, so this optimisation wouldn't help even if Python supported it.
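If you want to stay close to your original approach but avoid recursion entirely, here's a minimal sketch of the same neighbour search driven by an explicit stack (so no recursion limit applies); it assumes your imgPixels, imgWidth, imgHeight and isColorAlmostSame from the question:

def collect_element(start_x, start_y):
    start_color = imgPixels[start_x, start_y]
    seen = [[False] * imgHeight for _ in range(imgWidth)]
    stack = [(start_x, start_y)]
    pixels = []
    while stack:
        x, y = stack.pop()
        if x < 0 or y < 0 or x >= imgWidth or y >= imgHeight or seen[x][y]:
            continue
        if not isColorAlmostSame(start_color, imgPixels[x, y]):
            continue
        seen[x][y] = True
        pixels.append((x, y, imgPixels[x, y]))
        # push the four neighbours instead of recursing into them
        stack.extend([(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)])
    return pixels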
I hacked together the following flood fill implementation. It uses Numpy so that it's only 10x slower than Pillow's ImageDraw.floodfill.
import numpy as np

def floodfill(im, row, col, threshold):
    similar = np.mean(np.abs(im - im[row, col]), 2) < threshold
    mask = np.zeros_like(similar)
    mask[row, col] = 1
    m2 = mask.copy()
    while True:
        m2[:, :] = mask
        m2[1:, :] |= mask[:-1]
        m2[:-1, :] |= mask[1:]
        m2[:, 1:] |= mask[:, :-1]
        m2[:, :-1] |= mask[:, 1:]
        m2 &= similar
        if np.all(m2 == mask):
            return mask
        mask[:, :] = m2
As an example of using this, you could do:
import requests
from io import BytesIO
from PIL import Image

res = requests.get("https://picsum.photos/300")
res.raise_for_status()
src = Image.open(BytesIO(res.content))
mask = floodfill(np.array(src, int), 10, 10, 40)
where the random image I got and the output mask are:
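To inspect the mask yourself, a small sketch (assuming Pillow and the mask returned above) that turns the boolean array into a black-and-white image:

from PIL import Image
import numpy as np

mask_img = Image.fromarray(mask.astype(np.uint8) * 255)  # True -> white
mask_img.save("mask.png")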
I am currently experimenting with the pytest module to create unit tests for a project I'm working on. I'm trying to test the 'add_point' method which draws an ellipse based on a set of pixels. What I want to do is inspect 'draw' to ensure that the ellipse has been created successfully. Unfortunately I don't know how to go about this, so any help will be appreciated. Here's my code so far:
(A) TheSlicePreviewMaker.py
import os, Image, ImageDraw, ImageFont
from json_importer import json_importer

class SlicePreviewer(object):
    def __init__(self):
        self.screen_size = (470, 470)
        self.background_colour = (86, 0, 255)
        self.platform_fill_colour = (100, 100, 100)
        self.platform_outline_colour = (0, 0, 0)
        self.platform_window = (0, 0, 469, 469)
        self.point_colour = (0, 0, 255)
        self.config_object = json_importer("ConfigFile.txt")
        self.image = None

    def initialise_image(self):
        self.image = Image.new('RGB', self.screen_size, self.background_colour)
        draw = ImageDraw.Draw(self.image)
        draw.rectangle(self.platform_window, outline=self.platform_outline_colour, fill=self.platform_fill_colour)
        del draw

    def add_point(self, px, py):
        x1 = px - 1
        y1 = py - 1
        x2 = px + 1
        y2 = py + 1
        draw = ImageDraw.Draw(self.image)
        draw.ellipse((x1, y1, x2, y2), outline=self.point_colour, fill=self.point_colour)
        return draw  # del draw

    def save_image(self, file_name):
        self.image.save(file_name, "BMP")
(B) test_TheSlicePreviewMaker.py
from TheSlicePreviewMaker import SlicePreviewer

slice_preview = SlicePreviewer()

class TestSlicePreviewer:
    def test_init(self):
        '''check that the config file object has been created on init'''
        assert slice_preview.config_object != None

    def test_initialise_image(self):
        '''verify if the image has been successfully initialised'''
        assert slice_preview.image.mode == 'RGB'

    def test_add_point(self):
        '''has the point been drawn successfully?'''
        draw = slice_preview.add_point(196, 273)
        assert something

import pytest
if __name__ == '__main__':
    pytest.main("--capture=sys -v")
Side note: I've run TheSlicePreviewMaker.py separately to check the bitmap file it produces, so I know that the code works. What I want to achieve is to unit test this so that I don't have to go check the bitmap each time.
One approach is to manually inspect the generated image and, if it looks OK to you, save it next to the test and use an image-diffing algorithm (for example ImageChops.difference) to obtain a threshold value that you can use to make sure future test runs are still drawing the same image.
For example:
# contents of conftest.py
from PIL import ImageChops, ImageDraw, Image
from functools import reduce  # reduce lives in functools on Python 3
import pytest
import os
import py.path
import math
import operator

def rms_diff(im1, im2):
    """Calculate the root-mean-square difference between two images

    Taken from: http://snipplr.com/view/757/compare-two-pil-images-in-python/
    """
    h1 = im1.histogram()
    h2 = im2.histogram()

    def mean_sqr(a, b):
        if not a:
            a = 0.0
        if not b:
            b = 0.0
        return (a - b) ** 2

    return math.sqrt(reduce(operator.add, map(mean_sqr, h1, h2)) / (im1.size[0] * im1.size[1]))

class ImageDiff:
    """Fixture used to make sure code that generates images continues to do so
    by checking the difference of the generated image against known good versions.
    """

    def __init__(self, request):
        self.directory = py.path.local(request.node.fspath.dirname) / request.node.fspath.purebasename
        self.expected_name = (request.node.name + '.png')
        self.expected_filename = self.directory / self.expected_name

    def check(self, im, max_threshold=0.0):
        __tracebackhide__ = True
        local = py.path.local(os.getcwd()) / self.expected_name
        if not self.expected_filename.check(file=1):
            msg = '\nExpecting image at %s, but it does not exist.\n'
            msg += '-> Generating here: %s'
            im.save(str(local))
            pytest.fail(msg % (self.expected_filename, local))
        else:
            expected = Image.open(str(self.expected_filename))
            rms_value = rms_diff(im, expected)
            if rms_value > max_threshold:
                im.save(str(local))
                msg = '\nrms_value %s > max_threshold of %s.\n'
                msg += 'Obtained image saved at %s'
                pytest.fail(msg % (rms_value, max_threshold, str(local)))

@pytest.fixture
def image_diff(request):
    return ImageDiff(request)
Now you can use the image_diff fixture in your tests. For example:
def create_image():
    """dummy code that generates an image, simulating some actual code"""
    im = Image.new('RGB', (100, 100), (0, 0, 0))
    draw = ImageDraw.Draw(im)
    draw.ellipse((10, 10, 90, 90), outline=(0, 0, 255),
                 fill=(255, 255, 255))
    return im

def test_generated_image(image_diff):
    im = create_image()
    image_diff.check(im)
The first time you run this test, it will fail with this output:
================================== FAILURES ===================================
____________________________ test_generated_image _____________________________
image_diff = <test_foo.ImageDiff instance at 0x029ED530>
def test_generated_image(image_diff):
im = create_image()
> image_diff.check(im)
E Failed:
E Expecting image at X:\temp\sandbox\img-diff\test_foo\test_generated_image.png, but it does not exist.
E -> Generating here: X:\temp\sandbox\img-diff\test_generated_image.png
You can then manually check the image and, if everything is OK, move it to a directory with the same name as the test file, using the name of the test as the file name plus a ".png" extension. From now on, whenever the test runs, it will check that the image is similar within an acceptable threshold.
Suppose you change the code and produce a slightly different image, the test will now fail like this:
================================== FAILURES ===================================
____________________________ test_generated_image _____________________________
image_diff = <test_foo.ImageDiff instance at 0x02A4B788>
def test_generated_image(image_diff):
im = create_image()
> image_diff.check(im)
E Failed:
E rms_value 2.52 > max_threshold of 0.0.
E Obtained image saved at X:\temp\sandbox\img-diff\test_generated_image.png
test_foo.py:63: Failed
========================== 1 failed in 0.03 seconds ===========================
The code needs some polishing but should be a good start. You can find a version of this code here.
Cheers,
I'm trying to build a very simple tracker for 2D objects using the Python wrapper for OpenCV (cv2).
I've only noticed 3 functions:
KalmanFilter (constructor)
.predict()
.correct(measurement)
My idea is to write some code to check that the Kalman filter is working, like this:
kf = cv2.KalmanFilter(...)
# set initial position
kf.predict()
corrected_position = kf.correct([measurement_x, measurement_y])
I've found some examples using the cv wrapper but not the cv2...
Thanks in advance!
If you're using OpenCV 2.4, then it's bad news: the KalmanFilter is unusable, since you cannot set the transition (or any other) matrix.
For OpenCV 3.0 it works correctly, like this:
import cv2, numpy as np

meas = []
pred = []
frame = np.zeros((400, 400, 3), np.uint8)  # drawing canvas
mp = np.array((2, 1), np.float32)          # measurement
tp = np.zeros((2, 1), np.float32)          # tracked / prediction

def onmouse(k, x, y, s, p):
    global mp, meas
    mp = np.array([[np.float32(x)], [np.float32(y)]])
    meas.append((x, y))

def paint():
    global frame, meas, pred
    for i in range(len(meas) - 1):
        cv2.line(frame, meas[i], meas[i + 1], (0, 100, 0))
    for i in range(len(pred) - 1):
        cv2.line(frame, pred[i], pred[i + 1], (0, 0, 200))

def reset():
    global meas, pred, frame
    meas = []
    pred = []
    frame = np.zeros((400, 400, 3), np.uint8)

cv2.namedWindow("kalman")
cv2.setMouseCallback("kalman", onmouse)
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03
# kalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 0.00003

while True:
    kalman.correct(mp)
    tp = kalman.predict()
    pred.append((int(tp[0]), int(tp[1])))
    paint()
    cv2.imshow("kalman", frame)
    k = cv2.waitKey(30) & 0xFF
    if k == 27:
        break
    if k == 32:
        reset()
I am trying to teach myself Python from the net. I have no previous programming experience. I need help writing a function that removes red from an image using the 'image' module in Python. The code I tried to write is below... do help.
import image

img = image.Image("luther.jpg")
newimg = image.EmptyImage(img.getWidth(), img.getHeight())
win = image.ImageWin()

def no_red():
    for col in range(img.getWidth()):
        for row in range(img.getHeight()):
            p = img.getPixel(col, row)
            newred = 0
            newgreen = p.getGreen()
            newblue = p.getBlue()
            newpixel = image.Pixel(newred, newgreen, newblue)
    return newimg.setPixel(col, row, newpixel)

print(newimg.getPixel(45, 52))
win.exitonclick()
What am I doing wrong? Any guidance will be helpful :)
Regards
You have to call newimg.setPixel inside the for loops, otherwise it'll only replace one pixel all the way at the end of the image:
def no_red():
    for col in range(img.getWidth()):
        for row in range(img.getHeight()):
            p = img.getPixel(col, row)
            newred = 0
            newgreen = p.getGreen()
            newblue = p.getBlue()
            newpixel = image.Pixel(newred, newgreen, newblue)
            newimg.setPixel(col, row, newpixel)
You also aren't calling no_red anywhere. I recommend writing something like this instead:
def no_red(image):
    new_image = ...  # make a copy of image
    # original code that removes red subpixels
    return new_image
And then calling:
new_image = no_red(original_image)
for i in range(img.getWidth()):
    for j in range(img.getHeight()):
        old_pixel = img.getPixel(i, j)
        new_pixel = image.Pixel(0, old_pixel.getGreen(), old_pixel.getBlue())
        new_image.setPixel(i, j, new_pixel)
This is the simplest way to do it in my opinion
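Putting the pieces together, a complete version might look like this; it's a sketch assuming the same Runestone-style image module used in the question (image.Image, image.EmptyImage, image.Pixel, getPixel/setPixel):

import image

def no_red(img):
    # build a new image of the same size with the red channel zeroed
    new_image = image.EmptyImage(img.getWidth(), img.getHeight())
    for col in range(img.getWidth()):
        for row in range(img.getHeight()):
            p = img.getPixel(col, row)
            new_pixel = image.Pixel(0, p.getGreen(), p.getBlue())
            new_image.setPixel(col, row, new_pixel)
    return new_image

img = image.Image("luther.jpg")
newimg = no_red(img)
print(newimg.getPixel(45, 52))   # should report red == 0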
If you don't want to get stuck with the loops, you may find the following lines more useful. They remove red from the image test.png.
from PIL import Image

def removeColour():
    img = Image.open('test.png').convert('RGB')
    source = img.split()
    R, G, B = 0, 1, 2
    out = source[R].point(lambda i: i * 0)
    source[R].paste(out, None, None)
    img = Image.merge(img.mode, source)
    img.save('testNoRed.png')
    img.show()
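A slightly shorter variant of the same idea (a sketch, assuming Pillow) skips the paste step and merges a zeroed red band directly:

from PIL import Image

img = Image.open('test.png').convert('RGB')
r, g, b = img.split()
no_red = Image.merge('RGB', (r.point(lambda i: 0), g, b))  # zero the red band
no_red.save('testNoRed.png')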