I'm trying to write a flood fill algorithm and I keep getting a recursion error. The algorithm seems to recurse infinitely and I can't pinpoint why. I've looked all over the internet and can't find a solution; according to most sources my program looks correct, yet something is clearly wrong. This is the edited version of the code, and the error is still "maximum recursion depth exceeded".
Can I get some help?
from PIL import Image, ImageTk
from random import *

w = 75
h = w
flood = Image.new("RGB", (w, h), (0, 0, 0))
x = 0
y = 0
count = 0
colorlist = []
i = 0

# Fill the image with random black/white noise
while x < w - 1:
    y = 0
    while y < h - 1:
        r = random()
        if r < .25:
            flood.putpixel((x, y), (0, 0, 0))
        else:
            flood.putpixel((x, y), (255, 255, 255))
        y += 1
    x += 1

x = 0
y = 0

# Draw a black border around the image
while x < w - 1:
    y = 0
    while y < h - 1:
        r = random()
        if x == 0 or y == 0 or x == w - 1 or y == h - 1:
            flood.putpixel((x, y), (0, 0, 0))
        y += 1
    x += 1

def floodfill(x, y, d, e, f, g, h, i, image, count):
    count += 1
    (a, b, c) = image.getpixel((x, y))
    if (a, b, c) == (255, 255, 255):
        (j, k, l) = image.getpixel((x - 1, y))
        (m, n, o) = image.getpixel((x + 1, y))
        (p, q, r) = image.getpixel((x, y - 1))
        (s, t, u) = image.getpixel((x, y + 1))
    if count > 990:
        return
    if (a, b, c) == (255, 255, 255):
        image.putpixel((x, y), (g, h, i))
        floodfill(x - 1, y, d, e, f, g, h, i, image, count)
        floodfill(x + 1, y, d, e, f, g, h, i, image, count)
        floodfill(x, y - 1, d, e, f, g, h, i, image, count)
        floodfill(x, y + 1, d, e, f, g, h, i, image, count)

floodfill(2, 2, 0, 0, 0, 255, 0, 0, flood, 0)
flood.save("flood.png")
print("done")
Python has a tendency to throw a maximum recursion depth exceeded error, even if the algorithm doesn't recurse infinitely and would eventually halt on its own. There are two solutions to this: increase the recursion limit, or switch to an iterative algorithm.
You can raise the recursion limit with sys.setrecursionlimit. Choose a number higher than the worst-case recursion depth of your algorithm; in your case, that is the number of pixels in the image, width * height.
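For instance (a minimal sketch, assuming the w and h from your script; note that very high limits can still crash the interpreter outright with a C stack overflow, which is one more reason to prefer the iterative version below):

    import sys

    # Hedged sketch: allow roughly one frame per pixel, plus some headroom
    # for the frames already on the stack.
    sys.setrecursionlimit(w * h + 100)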
Changing your algorithm into an iterative one is fairly simple, since it doesn't really matter in what order you paint the pixels, as long as you get them all at least once. A set is very well suited to holding unique non-ordered data, so let's use that to store the pixels we need to paint.
def floodFill(x, y, d, e, f, g, h, i, image):
    toFill = set()
    toFill.add((x, y))
    while toFill:                       # sets have no .empty(); an empty set is falsy
        (x, y) = toFill.pop()
        (a, b, c) = image.getpixel((x, y))
        if not (a, b, c) == (255, 255, 255):
            continue
        image.putpixel((x, y), (g, h, i))
        toFill.add((x - 1, y))
        toFill.add((x + 1, y))
        toFill.add((x, y - 1))
        toFill.add((x, y + 1))
    image.save("flood.png")
If you do use the iterative method, be sure to put bound checking in it. Otherwise, it might run forever! Or at least until your memory is filled by one gigantic toFill set.
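A minimal sketch of that check, assuming the same w and h as in your script, is to discard any coordinate that falls outside the image right after popping it:

    # inside the while loop, immediately after (x, y) = toFill.pop():
    if x < 0 or y < 0 or x >= w or y >= h:
        continue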
Instead of recursion, why not flood-fill iteratively, in a depth-first manner with an explicit stack? Recursion uses an implicit stack anyway, so you've nothing to lose.
And yes, as pointed out in the comments, you should be checking for x and y being out of bounds.
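As a rough sketch of both points (an explicit stack plus a bounds check), using the same white-means-unfilled convention as your image; this is only an illustration, not tested against your script:

    def floodfill_iterative(x, y, g, h, i, image, width, height):
        # Explicit stack instead of the call stack: same depth-first order,
        # but no recursion limit to worry about.
        stack = [(x, y)]
        while stack:
            x, y = stack.pop()
            if x < 0 or y < 0 or x >= width or y >= height:
                continue                                  # bounds check
            if image.getpixel((x, y)) != (255, 255, 255):
                continue                                  # already filled, or a wall
            image.putpixel((x, y), (g, h, i))
            stack.extend([(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)])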
This has not been tested, but it is based mostly on the code you provided. It should work and shows an alternative way of implementing the flood fill algorithm. The function could be made more efficient.
import PIL.Image
import random
import collections

WHITE = 255, 255, 255
BLACK = 0, 0, 0
RED = 255, 0, 0

def main(width, height):
    flood = PIL.Image.new('RGB', (width, height), BLACK)
    # Create randomly generated walls
    for x in range(width):
        for y in range(height):
            flood.putpixel((x, y), BLACK if random.random() < 0.15 else WHITE)
    # Create borders
    for x in range(width):
        for y in range(height):
            if x in {0, width - 1} or y in {0, height - 1}:
                flood.putpixel((x, y), BLACK)
    floodfill(50, 25, RED, flood)
    # Save image
    flood.save('flood.png')

def floodfill(x, y, color, image):
    # if the starting color is different from the desired color
    #     create a queue of pixels that need to be changed
    #     while there are pixels that need their color changed
    #         change the color of the pixel to what is desired
    #         for each pixel surrounding the current pixel
    #             if the new pixel has the same color as the starting pixel
    #                 record that its color needs to be changed
    source = image.getpixel((x, y))
    if source != color:
        pixels = collections.deque([(x, y)])
        while pixels:
            x, y = place = pixels.popleft()
            if image.getpixel(place) != source:
                continue            # already recolored via another path
            image.putpixel(place, color)
            # examine the four orthogonal neighbours
            for new_place in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                if image.getpixel(new_place) == source:
                    pixels.append(new_place)

if __name__ == '__main__':
    main(100, 50)
I have an image such as this one, which is only black and white:
I would like to obtain only the flooded area of the image, with its border, using cv2.floodFill, like so (pardon my Paint skills):
Here's my current code:
# Copy the image.
im_floodfill = cv2.resize(actual_map_image, (500, 500)).copy()
# Floodfill from point (X, Y)
cv2.floodFill(im_floodfill, None, (X, Y), (255, 255, 255))
# Display images.
cv2.imshow("Floodfilled Image", im_floodfill)
cv2.waitKey(0)
The output I get is equal to the original image. How can I get only the flooded area with borders?
EDIT: I want to floodfill from any white point inside the "arena", like the red dot (X,Y) in the image. I wish to have only the outer border of the small circles inside the arena and the inner border of the outside walls.
EDIT2: I'm halfway there with this:
# Resize for test purposes
actual_map_image = cv2.resize(actual_map_image, (1000, 1000))
actual_map_image = cv2.cvtColor(actual_map_image, cv2.COLOR_BGR2GRAY)
h, w = actual_map_image.shape[:2]
flood_mask = np.zeros((h+2, w+2), dtype=np.uint8)
connectivity = 8
flood_fill_flags = (connectivity | cv2.FLOODFILL_FIXED_RANGE | cv2.FLOODFILL_MASK_ONLY | 255 << 8)
# Copy the image.
im_floodfill = actual_map_image.copy()
# Floodfill from point inside arena, not inside a black dot
cv2.floodFill(im_floodfill, flood_mask, (h/2 + 20, w/2 + 20), 255, None, None, flood_fill_flags)
borders = []
for i in range(len(actual_map_image)):
borders.append([B-A for A,B in zip(actual_map_image[i], flood_mask[i])])
borders = np.asarray(borders)
borders = cv2.bitwise_not(borders)
# Display images.
cv2.imshow("Original Image", cv2.resize(actual_map_image, (500, 500)))
cv2.imshow("Floodfilled Image", cv2.resize(flood_mask, (500, 500)))
cv2.imshow("Borders", cv2.resize(borders, (500, 500)))
cv2.waitKey(0)
I get this:
However, I feel like this is the wrong way of getting the borders, and they are incomplete.
I think the easiest, and fastest, way to do this is to flood-fill the arena with mid-grey. Then extract just the grey pixels and find their edges. That looks like this, but bear in mind more than half the lines are comments and debug statements :-)
#!/usr/bin/env python3

import cv2
import numpy as np

# Load image as greyscale to use 1/3 of the memory and processing time
im = cv2.imread('arena.png', cv2.IMREAD_GRAYSCALE)

# Floodfill arena area with value 128, i.e. mid-grey
floodval = 128
cv2.floodFill(im, None, (150, 370), floodval)
# DEBUG cv2.imwrite('result-1.png', im)

# Extract filled area alone
arena = ((im == floodval) * 255).astype(np.uint8)
# DEBUG cv2.imwrite('result-2.png', arena)

# Find edges and save
edges = cv2.Canny(arena, 100, 200)
# DEBUG cv2.imwrite('result-3.png', edges)
Here are the 3 steps of debug output showing you the sequence of processing:
result-1.png looks like this:
result-2.png looks like this:
result-3.png looks like this:
By the way, you don't have to write any Python code to do this, as you can just do it in the Terminal with ImageMagick which is included in most Linux distros and is available for macOS and Windows. The method used here corresponds exactly to the method I used in Python above:
magick arena.png -colorspace gray \
-fill gray -draw "color 370,150 floodfill" \
-fill white +opaque gray -canny 0x1+10%+30% result.png
How about dilating and XOR-ing?
kernel = np.ones((3,3), np.uint8)
dilated = cv2.dilate(actual_map_image, kernel, iterations = 1)
borders = cv2.bitwise_xor(dilated, actual_map_image)
That will give you only the borders. I'm not clear whether you want just the circle borders or also the interior borders; either way, you should be able to remove the borders you don't want based on size.
You can remove the exterior border with a size threshold; define a function like this:
def size_threshold(bw, minimum, maximum):
    retval, labels, stats, centroids = cv2.connectedComponentsWithStats(bw)
    for val in np.where((stats[:, 4] < minimum) + (stats[:, 4] > maximum))[0]:
        labels[labels == val] = 0
    return (labels > 0).astype(np.uint8) * 255
result = size_threshold(borders, 0, 500)
Replace 500 with a number larger than the borders you want to keep and smaller than the border you want to lose.
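Putting the two snippets together, a rough end-to-end sketch (assuming actual_map_image is already a single-channel binary uint8 image; stats[:, 4] above is the component area, i.e. cv2.CC_STAT_AREA):

    import cv2
    import numpy as np

    kernel = np.ones((3, 3), np.uint8)
    dilated = cv2.dilate(actual_map_image, kernel, iterations=1)
    borders = cv2.bitwise_xor(dilated, actual_map_image)

    # keep only border components smaller than ~500 px, dropping the big outer wall
    inner_borders = size_threshold(borders, 0, 500)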
I had to create my own flood fill implementation to get what I wanted. I based it on this one.
def fill(data, start_coords, fill_value, border_value, connectivity=8):
    """
    Flood fill algorithm

    Parameters
    ----------
    data : (M, N) ndarray of uint8 type
        Image with flood to be filled. Modified inplace.
    start_coords : tuple
        Length-2 tuple of ints defining (row, col) start coordinates.
    fill_value : int
        Value the flooded area will take after the fill.
    border_value : int
        Value of the color to paint the borders of the filled area with.
    connectivity : 4 or 8
        Connectivity which we use for the flood fill algorithm (4-way or 8-way).

    Returns
    -------
    filled_data : ndarray
        The data with the filled area.
    borders : ndarray
        The borders of the filled area painted with border_value color.
    """
    assert connectivity in [4, 8]

    filled_data = data.copy()
    xsize, ysize = filled_data.shape
    orig_value = filled_data[start_coords[0], start_coords[1]]

    stack = set(((start_coords[0], start_coords[1]),))
    if fill_value == orig_value:
        raise ValueError("Filling region with same value already present is unsupported. Did you already fill this region?")

    border_points = []

    while stack:
        x, y = stack.pop()

        if filled_data[x, y] == orig_value:
            filled_data[x, y] = fill_value
            if x > 0:
                stack.add((x - 1, y))
            if x < (xsize - 1):
                stack.add((x + 1, y))
            if y > 0:
                stack.add((x, y - 1))
            if y < (ysize - 1):
                stack.add((x, y + 1))
            if connectivity == 8:
                if x > 0 and y > 0:
                    stack.add((x - 1, y - 1))
                if x > 0 and y < (ysize - 1):
                    stack.add((x - 1, y + 1))
                if x < (xsize - 1) and y > 0:
                    stack.add((x + 1, y - 1))
                if x < (xsize - 1) and y < (ysize - 1):
                    stack.add((x + 1, y + 1))
        else:
            if filled_data[x, y] != fill_value:
                border_points.append([x, y])

    # Fill all image with white
    borders = filled_data.copy()
    borders.fill(255)

    # Paint borders
    for x, y in border_points:
        borders[x, y] = border_value

    return filled_data, borders
The only thing I did was adding the else condition. If the point does not have a value equal to orig_value or fill_value, then it is a border, so I append it to a list that contains the points of all borders. Then I only paint the borders.
I was able to get the following images with this code:
# Resize for test purposes
actual_map_image = cv2.resize(actual_map_image, (500, 500))
actual_map_image = cv2.cvtColor(actual_map_image, cv2.COLOR_BGR2GRAY)
h, w = actual_map_image.shape[:2]
filled_data, borders = fill(actual_map_image, [h/2 + 20, w/2 + 20], 127, 0, connectivity=8)
cv2.imshow("Original Image", actual_map_image)
cv2.imshow("Filled Image", filled_data)
cv2.imshow("Borders", borders)
The one on the right was what I was aiming for. Thank you all!
I'm trying to set up a path-finder to which I pass a maze (a 2D array of 1s/0s, with 1 being an obstacle) plus a start and end point, and which returns the optimal path.
I have code from the following as my base, with the 'main' function modified as shown below.
https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2
def main():
    maze = [[0,1,0,0,...],[0,0,0,0...],[...]...,[...]] #Example 2D List
    start = (4, 33)
    end = (200, 200)
    path = astar(maze, start, end)
    print(path)

    #Create blank image for openCV
    img = np.zeros((221,221,3), np.uint8)
    x, y = 0, 0
    red = [0, 0, 255]

    #Draw obstacles
    for row in maze:
        y += 1
        x = 0
        for value in row:
            x += 1
            if value == 1: img[y, x] = red

    #Draw path
    for x, y in path:
        img[y, x] = (255, 0, 0)

    cv2.imshow("Image", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Full maze used to make the map is here: https://pastebin.com/wT6dGQnj
This is a simplified case of a larger project, so the list has been hard-coded here.
Below is the output, which seems to be incorrect as the path crosses multiple obstacles:
Results
I think the problem is in how you're drawing the walls, not the path. You start with y=1 and also seem to switch the x and y there. I used the 10x10 maze in the original code, and things looked correct once I changed the wall-drawing part as follows:
n = 10
img = np.zeros((n,n,3), np.uint8)
x, y = 0, 0
red = [0, 0, 255]

# Draw the walls in red. This is the part I changed.
for i in range(n):
    for j in range(n):
        if maze[i][j] == 1:
            img[i,j] = red

# Draw the path in blue
for x, y in path:
    img[x, y] = (255, 0, 0)
There's probably a more efficient way to map maze to img than the two nested for loops I used here.
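For example, a hedged NumPy sketch (assuming maze and path are exactly as in the question) that does the same thing without explicit loops:

    maze_arr = np.array(maze)
    img[maze_arr == 1] = red                  # paint every wall cell red in one shot

    path_arr = np.array(path)                 # path is a list of (row, col) tuples
    img[path_arr[:, 0], path_arr[:, 1]] = (255, 0, 0)   # paint the path blue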
What I'm trying to do in this example is wrap an image around a circle, like below.
To wrap the image I simply calculated the x,y coordinates using trig.
The problem is that the calculated X and Y positions are rounded to make them integers. This causes the blank pixels seen in the wrapped image above. The x,y positions have to be integers because they are positions in lists.
I've done this again in the following code, but without any images, to make things easier to see. All I've done is create two arrays of binary values, one black and the other white, then wrap one onto the other.
The output of the code is shown below.
import math as m
from PIL import Image # only used for showing output as image

width = 254.0
height = 24.0
Ro = 40.0

img = [[1 for x in range(int(width))] for y in range(int(height))]
cir = [[0 for x in range(int(Ro * 2))] for y in range(int(Ro * 2))]

def shom_im(img): # for showing data as image
    list_image = [item for sublist in img for item in sublist]
    new_image = Image.new("1", (len(img[0]), len(img)))
    new_image.putdata(list_image)
    new_image.show()

increment = m.radians(360 / width)
rad = Ro - 0.5

for i, row in enumerate(img):
    hyp = rad - i
    for j, column in enumerate(row):
        alpha = j * increment
        x = m.cos(alpha) * hyp + rad
        y = m.sin(alpha) * hyp + rad
        # put value from original image to its position in new image
        cir[int(round(y))][int(round(x))] = img[i][j]

shom_im(cir)
I later found out about the midpoint circle algorithm, but I got worse results with that:
from PIL import Image # only used for showing output as image

width, height = 254, 24
ro = 40

img = [[(0, 0, 0, 1) for x in range(int(width))]
       for y in range(int(height))]
cir = [[(0, 0, 0, 255) for x in range(int(ro * 2))] for y in range(int(ro * 2))]

def shom_im(img): # for showing data as image
    list_image = [item for sublist in img for item in sublist]
    new_image = Image.new("RGBA", (len(img[0]), len(img)))
    new_image.putdata(list_image)
    new_image.show()

def putpixel(x0, y0):
    global cir
    cir[y0][x0] = (255, 255, 255, 255)

def drawcircle(x0, y0, radius):
    x = radius
    y = 0
    err = 0

    while (x >= y):
        putpixel(x0 + x, y0 + y)
        putpixel(x0 + y, y0 + x)
        putpixel(x0 - y, y0 + x)
        putpixel(x0 - x, y0 + y)
        putpixel(x0 - x, y0 - y)
        putpixel(x0 - y, y0 - x)
        putpixel(x0 + y, y0 - x)
        putpixel(x0 + x, y0 - y)
        y += 1
        err += 1 + 2 * y
        if (2 * (err - x) + 1 > 0):
            x -= 1
            err += 1 - 2 * x

for i, row in enumerate(img):
    rad = ro - i
    drawcircle(int(ro - 1), int(ro - 1), rad)

shom_im(cir)
Can anybody suggest a way to eliminate the blank pixels?
You are having problems filling up your circle because you are approaching this the wrong way around – quite literally.
When mapping from a source to a target, you need to iterate over the target and map each of its pixels back into the source image. Then there is no chance at all that you miss a pixel, and, equally, you will never draw (nor look up) a pixel more than once.
The following is a bit rough-and-ready; it only serves as a concept example. I first wrote some code to draw a filled circle, top to bottom. Then I added some more code to remove the center part (and added a variable Ri, for "inner radius"). This leads to a solid ring, where all pixels are only drawn once: top to bottom, left to right.
How exactly you draw the ring is not actually important! I used trig at first because I thought of re-using the angle bit, but it can be done with Pythagoras' theorem as well, and even with Bresenham's circle routine. All you need to keep in mind is that you iterate over the target rows and columns, not the source. This provides actual x,y coordinates that you can feed into the remapping procedure.
With the above done and working, I wrote the trig functions to translate from the coordinates I would put a pixel at into the original image. For this, I created a test image containing some text:
and a good thing that was, too, as in the first attempt I got the text twice (once left, once right) and mirrored – that needed a few minor tweaks. Also note the background grid. I added that to check if the 'top' and 'bottom' lines – the outermost and innermost circles – got drawn correctly.
Running my code with this image and Ro,Ri at 100 and 50, I get this result:
You can see that the trig functions make it start at the rightmost point, move clockwise, and have the top of the image pointing outwards. All can be trivially adjusted, but this way it mimics the orientation that you want your image drawn.
This is the result with your iris-image, using 33 for the inner radius:
and here is a nice animation, showing the stability of the mapping:
Finally, then, my code is:
import math as m
from PIL import Image

Ro = 100.0
Ri = 50.0

# img = [[1 for x in range(int(width))] for y in range(int(height))]
cir = [[0 for x in range(int(Ro * 2))] for y in range(int(Ro * 2))]

# image = Image.open('0vWEI.png')
image = Image.open('this-is-a-test.png')
# data = image.convert('RGB')
pixels = image.load()
width, height = image.size

def shom_im(img): # for showing data as image
    list_image = [item for sublist in img for item in sublist]
    new_image = Image.new("RGB", (len(img[0]), len(img)))
    new_image.putdata(list_image)
    new_image.save("result1.png","PNG")
    new_image.show()

for i in range(int(Ro)):
    # outer_radius = Ro*m.cos(m.asin(i/Ro))
    outer_radius = m.sqrt(Ro*Ro - i*i)
    for j in range(-int(outer_radius), int(outer_radius)):
        if i < Ri:
            # inner_radius = Ri*m.cos(m.asin(i/Ri))
            inner_radius = m.sqrt(Ri*Ri - i*i)
        else:
            inner_radius = -1
        if j < -inner_radius or j > inner_radius:
            # this is the destination
            # solid:
            # cir[int(Ro-i)][int(Ro+j)] = (255,255,255)
            # cir[int(Ro+i)][int(Ro+j)] = (255,255,255)

            # textured:
            x = Ro+j
            y = Ro-i
            # calculate source
            angle = m.atan2(y-Ro, x-Ro)/2
            distance = m.sqrt((y-Ro)*(y-Ro) + (x-Ro)*(x-Ro))
            distance = m.floor((distance-Ri+1)*(height-1)/(Ro-Ri))
            # if distance >= height:
            #     distance = height-1
            cir[int(y)][int(x)] = pixels[int(width*angle/m.pi) % width, height-distance-1]

            y = Ro+i
            # calculate source
            angle = m.atan2(y-Ro, x-Ro)/2
            distance = m.sqrt((y-Ro)*(y-Ro) + (x-Ro)*(x-Ro))
            distance = m.floor((distance-Ri+1)*(height-1)/(Ro-Ri))
            # if distance >= height:
            #     distance = height-1
            cir[int(y)][int(x)] = pixels[int(width*angle/m.pi) % width, height-distance-1]

shom_im(cir)
The commented-out lines draw a solid white ring. Note the various tweaks here and there to get the best result. For instance, the distance is measured from the center of the ring, and so returns a low value for close to the center and the largest values for the outside of the circle. Mapping that directly back onto the target image would display the text with its top "inwards", pointing to the inner hole. So I inverted this mapping with height - distance - 1, where the -1 is to make it map from 0 to height again.
A similar fix is in the calculation of distance itself; without the tweaks Ri+1 and height-1 either the innermost or the outermost row would not get drawn, indicating that the calculation is just one pixel off (which was exactly the purpose of that grid).
I think what you need is a noise filter. There are many implementations; I think a Gaussian filter would give a good result. You can find a list of filters here. If the result gets blurred too much:
- keep your first calculated image
- calculate the filtered image
- copy the fixed pixels from the filtered image into your first calculated image (a rough sketch of this follows below)
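A minimal sketch of that selective copy, assuming SciPy is available and that 0 marks a pixel that was never written (as in the first code block of the question):

    import numpy as np
    from scipy.ndimage import gaussian_filter   # assumption: SciPy is installed

    cir_arr = np.array(cir, dtype=float)        # the wrapped image computed above
    blurred = gaussian_filter(cir_arr, sigma=1) # the filtered image

    # copy only the pixels that were never written; ideally this mask would be
    # restricted to the ring area, as the hand-written filter below does
    missing = cir_arr == 0
    cir_arr[missing] = blurred[missing]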
Here is a crude average filter written by hand:
cir_R = int(Ro*2)                  # outer circle 2*r
inner_r = int(Ro - 0.5 - len(img)) # inner circle r

for i in range(1, cir_R - 1):
    for j in range(1, cir_R - 1):
        if cir[i][j] == 0:         # missing pixel
            dx = int(i - Ro)
            dy = int(j - Ro)
            pix_r2 = dx*dx + dy*dy # distance to center
            if pix_r2 <= Ro*Ro and pix_r2 >= inner_r*inner_r:
                cir[i][j] = (cir[i-1][j] + cir[i+1][j] + cir[i][j-1] +
                             cir[i][j+1]) / 4

shom_im(cir)
and the result:
This basically scans between the two radii, checks for missing pixels, and replaces each one with the average of its 4 adjacent pixels. In this black-and-white case that is all white.
Hope it helps!
Hi, I'm trying to do OCR on this sample of numbers: https://drive.google.com/folderview?id=0B68PDhV5SW8BTjd0T0FqTG94cG8&usp=sharing
I built a database of numbers by taking a screenshot of number 1, 2, 3, and so on.
Later, to recognize the numbers, I take a screenshot and compare it with the screenshots in my database.
The code works perfectly, but I have one problem: the numbers can range from 0.00 to 999.99, so I would need screenshots of all of those values, which I am not able to create, so I think I need to find another solution.
I think that if I can split the screenshots at the '.' (100.99 = 100 and 99), I would only need 999 samples in my database.
Do you think that would be a good solution?
News!!!
I kept searching and finally found a solution with pytesseract.
A few things: I need to resize the images to a height of at least 25 pixels to get 100% good results.
If I save the image in PNG format it doesn't work, but with JPG it works perfectly.
If I open the PNG images with Paint and save them without changing anything, the code works perfectly with those PNG images. I cannot understand this.
I really need to work with PNG because I need the code to be fast.
Any idea how to solve this issue with the PNG format?
import pytesseract
from PIL import Image
x = pytesseract.image_to_string(Image.open('101.jpg'))
y = float(x)
print y
I searched for code about image segmentation, finding contours and connected components.
I found the code below to find the regions of the numbers and the dot.
It finds 1 region for the numbers 0, 1, 6, 8 and for the dot; for the other numbers it finds 2 regions.
I was not able to change the code to work with my images (white numbers on a black background), so I changed my image colours instead, but I still could not edit the code to fix the problem with the regions.
I appreciate your help.
I think I may not need to change the code at all: if I can save every region to a different image, in order, I can do something like this:
i = 0
while i < len(regionfound):
    if height(region[i]) == 13:    # max height
        # compare region with the database images of the numbers 0, 1, 6 and 8
    elif height(region[i]) == 2:
        # region is the dot
    elif height(region[i]) == ...:
        # ...
    i += 1
import sys
from PIL import Image, ImageDraw

class Region():

    def __init__(self, x, y):
        self._pixels = [(x, y)]
        self._min_x = x
        self._max_x = x
        self._min_y = y
        self._max_y = y

    def add(self, x, y):
        self._pixels.append((x, y))
        self._min_x = min(self._min_x, x)
        self._max_x = max(self._max_x, x)
        self._min_y = min(self._min_y, y)
        self._max_y = max(self._max_y, y)

    def box(self):
        return [(self._min_x, self._min_y), (self._max_x, self._max_y)]

def find_regions(im):
    width, height = im.size
    regions = {}
    pixel_region = [[0 for y in range(height)] for x in range(width)]
    equivalences = {}
    n_regions = 0
    #first pass. find regions.
    for x in xrange(width):
        for y in xrange(height):
            #look for a black pixel
            if im.getpixel((x, y)) == (0, 0, 0, 255): #BLACK NUMBERS FOR WHITE NUMBER USE (255, 255, 255, 255)
                # get the region number from north or west
                # or create new region
                region_n = pixel_region[x-1][y] if x > 0 else 0
                region_w = pixel_region[x][y-1] if y > 0 else 0

                max_region = max(region_n, region_w)

                if max_region > 0:
                    #a neighbour already has a region
                    #new region is the smallest > 0
                    new_region = min(filter(lambda i: i > 0, (region_n, region_w)))
                    #update equivalences
                    if max_region > new_region:
                        if max_region in equivalences:
                            equivalences[max_region].add(new_region)
                        else:
                            equivalences[max_region] = set((new_region, ))
                else:
                    n_regions += 1
                    new_region = n_regions

                pixel_region[x][y] = new_region

    #Scan image again, assigning all equivalent regions the same region value.
    for x in xrange(width):
        for y in xrange(height):
            r = pixel_region[x][y]
            if r > 0:
                while r in equivalences:
                    r = min(equivalences[r])
                if not r in regions:
                    regions[r] = Region(x, y)
                else:
                    regions[r].add(x, y)

    return list(regions.itervalues())

def main():
    im = Image.open(r"0.png")
    regions = find_regions(im)

    draw = ImageDraw.Draw(im)
    for r in regions:
        draw.rectangle(r.box(), outline=(255, 0, 0))
    del draw

    #im.show()
    output = file("output.png", "wb")
    im.save(output)
    output.close()

if __name__ == "__main__":
    main()
Is there any way to use ImageMagick (or something similar, anything that will work!) to find areas of an image where the pixels beside each other are a similar color?
Thanks in advance,
Photoshop, GIMP and plenty of other image processors will do this for you. Programmatically, here is some Python code that accomplishes it:
from PIL import Image, ImageDraw

def inImage(im, px):
    x, y = px
    return x < im.size[0] and y < im.size[1] and x > 0 and y > 0

def meetsThreshold(im, px, st, threshold):
    color = im.getpixel(px)
    similar = im.getpixel(st)
    for cPortion, sPortion in zip(color, similar):
        if abs(cPortion - sPortion) > threshold:
            return False
    return True

def floodFill(im, fillaroundme, fillWith, meetsThresholdFunction):
    imflooded = im.copy()
    imflooded.putpixel(fillaroundme, fillWith)
    processed = []
    toProcess = [fillaroundme]
    while len(toProcess) > 0:
        edge = toProcess.pop()
        processed.append(edge)
        x, y = edge
        for checkMe in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if inImage(im, checkMe) and meetsThresholdFunction(im, edge, checkMe):
                imflooded.putpixel(checkMe, fillWith)
                if checkMe not in toProcess and checkMe not in processed:
                    toProcess.append(checkMe)
    return imflooded

im = Image.open(r"tardis.jpg")
filled = floodFill(im, (120, 220), (255, 0, 0), lambda im, px, st: meetsThreshold(im, px, st, 10))
filled.show()
I got the tardis.jpg from here