Parallel processing image analyzer function in Python

I have created a function, imgs_to_df() (which relies on img_to_vec()), that takes a list of URLs pointing to JPG images (e.g. https://live.staticflickr.com/65535/48123413937_54bb53e98b_o.jpg), resizes each image, and converts them into a dataframe of RGB values, where each row is a different image and each column is the R, G, or B value of a pixel of the (resized) image.
However, the function is very slow, especially once it gets into lists of hundreds or thousands of links, so I need a way to parallelize or otherwise make the process much, much faster. I'd also like a way to easily match the URLs back to the RGB vectors when I'm done.
I am very new to parallel processing, and everything I have read so far has only confused me more.
from PIL import Image
from io import BytesIO
import requests
import numpy as np
import pandas as pd

def img_to_vec(jpg_url, resize=True, new_width=300, new_height=300):
    """ Takes a URL of an image, resizes it (optional), and converts it to a
    vector representing RGB values.

    Parameters
    ----------
    jpg_url: String. A URL that points to a JPG image.
    resize: Boolean. Default True. Whether image should be resized before calculating RGB.
    new_width: Int. Default 300. New width to convert image to before calculating RGB.
    new_height: Int. Default 300. New height to convert image to before calculating RGB.

    Returns
    -------
    rgb_vec: Vector of size 3*new_width*new_height for the RGB values in each pixel of the image.
    """
    response = requests.get(jpg_url)            # Fetch the image over HTTP
    img = Image.open(BytesIO(response.content)) # Decode the response bytes into a PIL image
    if resize:
        img = img.resize((new_width, new_height))
    rgb_img = np.array(img)   # Create matrix of RGB values
    rgb_vec = rgb_img.ravel() # Flatten 3D matrix of RGB values to a vector
    return rgb_vec
# Consider parallel processing here
def imgs_to_df(jpg_urls, common_width=300, common_height=300):
    """ Takes a list of jpg_urls and converts it to a dataframe of RGB values.

    Parameters
    ----------
    jpg_urls: A list of jpg_urls to be resized and converted to a dataframe of RGB values.
    common_width: Int. Default 300. New width to convert all images to before calculating RGB.
    common_height: Int. Default 300. New height to convert all images to before calculating RGB.

    Returns
    -------
    rgb_df: Pandas dataframe with len(jpg_urls) rows and common_width*common_height*3
            columns. Each row is a unique jpeg image, and each column is an R/G/B value
            of a particular pixel of the resized image.
    """
    assert common_width > 0 and common_height > 0, 'Error: invalid new_width or new_height dimensions'
    for url_idx in range(len(jpg_urls)):
        if url_idx % 100 == 0:
            print('Converting url number {urlnum} of {urltotal} to RGB '.format(urlnum=url_idx, urltotal=len(jpg_urls)))
        try:
            img_i = img_to_vec(jpg_urls[url_idx])
            if url_idx == 0:
                vecs = img_i
            else:
                try:
                    vecs = np.vstack((vecs, img_i))
                except:
                    vecs = np.vstack((vecs, np.array([-1]*common_width*common_height*3)))
                    print('Warning: Error in converting {error_url} to RGB'.format(error_url=jpg_urls[url_idx]))
        except:
            vecs = np.vstack((vecs, np.array([-1]*common_width*common_height*3)))
            print('Warning: Error in converting {error_url} to RGB'.format(error_url=jpg_urls[url_idx]))
    rgb_df = pd.DataFrame(vecs)
    return rgb_df

You can use a ThreadPool, as your task is I/O-bound.
I'm using concurrent.futures. Your function needs to be rewritten so that it takes a single URL and turns it into a df.
I added two snippets: one simply uses loops, and the other uses threading. The second one is much, much faster.
from PIL import Image
from io import BytesIO
import requests
import numpy as np
import pandas as pd

def img_to_vec(jpg_url, resize=True, new_width=300, new_height=300):
    """ Takes a URL of an image, resizes it (optional), and converts it to a
    vector representing RGB values.

    Parameters
    ----------
    jpg_url: String. A URL that points to a JPG image.
    resize: Boolean. Default True. Whether image should be resized before calculating RGB.
    new_width: Int. Default 300. New width to convert image to before calculating RGB.
    new_height: Int. Default 300. New height to convert image to before calculating RGB.

    Returns
    -------
    rgb_vec: Vector of size 3*new_width*new_height for the RGB values in each pixel of the image.
    """
    response = requests.get(jpg_url)            # Fetch the image over HTTP
    img = Image.open(BytesIO(response.content)) # Decode the response bytes into a PIL image
    if resize:
        img = img.resize((new_width, new_height))
    rgb_img = np.array(img)   # Create matrix of RGB values
    rgb_vec = rgb_img.ravel() # Flatten 3D matrix of RGB values to a vector
    return rgb_vec

def imgs_to_df(jpg_url, common_width=300, common_height=300):
    assert common_width > 0 and common_height > 0, 'Error: invalid new_width or new_height dimensions'
    try:
        img_i = img_to_vec(jpg_url)
        vecs = img_i
        try:
            vecs = np.vstack((vecs, img_i))
        except:
            vecs = np.vstack((vecs, np.array([-1]*common_width*common_height*3)))
            print('Warning: Error in converting {error_url} to RGB'.format(error_url=jpg_url))
    except:
        print('failed')
    rgb_df = pd.DataFrame(vecs)
    return rgb_df
img_urls = ['https://upload.wikimedia.org/wikipedia/commons/thumb/a/a5/Flower_poster_2.jpg/1200px-Flower_poster_2.jpg', 'https://www.tiltedtulipflorist.com/assets/1/14/DimFeatured/159229xL_HR_fd_3_6_17.jpg?114702&value=217',
            'https://upload.wikimedia.org/wikipedia/commons/thumb/a/a5/Flower_poster_2.jpg/1200px-Flower_poster_2.jpg', 'https://upload.wikimedia.org/wikipedia/commons/thumb/a/a5/Flower_poster_2.jpg/1200px-Flower_poster_2.jpg']

# approach with a plain loop
import time
t1 = time.time()
dfs = []
for iu in img_urls:
    df = imgs_to_df(iu)
    dfs.append(df)
t2 = time.time()
print(t2-t1)
print(dfs)

# approach with multi-threading
import concurrent.futures
t1 = time.time()
with concurrent.futures.ThreadPoolExecutor() as executor:
    dfs = [df for df in executor.map(imgs_to_df, img_urls)]
t2 = time.time()
print(t2-t1)
print(dfs)
Out:
3.540484666824341
[ 0 1 2 3 ... 269996 269997 269998 269999
0 240 240 237 251 ... 247 243 243 243
1 240 240 237 251 ... 247 243 243 243
[2 rows x 270000 columns], 0 1 2 3 ... 269996 269997 269998 269999
0 255 255 255 255 ... 93 155 119 97
1 255 255 255 255 ... 93 155 119 97
[2 rows x 270000 columns], 0 1 2 3 ... 269996 269997 269998 269999
0 240 240 237 251 ... 247 243 243 243
1 240 240 237 251 ... 247 243 243 243
[2 rows x 270000 columns], 0 1 2 3 ... 269996 269997 269998 269999
0 240 240 237 251 ... 247 243 243 243
1 240 240 237 251 ... 247 243 243 243
[2 rows x 270000 columns]]
1.2170848846435547
[ 0 1 2 3 ... 269996 269997 269998 269999
0 240 240 237 251 ... 247 243 243 243
1 240 240 237 251 ... 247 243 243 243
[2 rows x 270000 columns], 0 1 2 3 ... 269996 269997 269998 269999
0 255 255 255 255 ... 93 155 119 97
1 255 255 255 255 ... 93 155 119 97
[2 rows x 270000 columns], 0 1 2 3 ... 269996 269997 269998 269999
0 240 240 237 251 ... 247 243 243 243
1 240 240 237 251 ... 247 243 243 243
[2 rows x 270000 columns], 0 1 2 3 ... 269996 269997 269998 269999
0 240 240 237 251 ... 247 243 243 243
1 240 240 237 251 ... 247 243 243 243
[2 rows x 270000 columns]]
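To address the second part of the question (matching URLs back to their RGB vectors): executor.map returns results in the same order as the inputs, so you can label the rows with the URLs directly. A minimal sketch, reusing the img_to_vec() function above:

# Minimal sketch: executor.map preserves input order, so the i-th result
# belongs to the i-th URL. Assumes img_to_vec() as defined above.
import concurrent.futures
import pandas as pd

def urls_to_indexed_df(jpg_urls):
    with concurrent.futures.ThreadPoolExecutor() as executor:
        vecs = list(executor.map(img_to_vec, jpg_urls))  # one RGB vector per URL, in order
    rgb_df = pd.DataFrame(vecs)
    rgb_df.index = jpg_urls  # each row is now labeled by its source URL
    return rgb_df

# rgb_df.loc[some_url] then recovers the RGB vector for that URL.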

Related

Filtering a labeled image by particle area

I have a labeled image of detected particles and a dataframe with the corresponding area of each labeled particle. What I want to do is filter out every particle in the image with an area smaller than a specified value.
I got it working with the example below, but I know there must be a smarter and, especially, faster way - for example, skipping the loop by comparing the image with the array directly.
Thanks for your help!
Example:
labels = df["label"][df.area > 5000].to_numpy()
mask = np.zeros(labeled_image.shape)
for label in labels:
mask[labeled_image == label] = 1
Dataframe:
label centroid-0 centroid-1 area
0 1 15 3681 191
1 2 13 1345 390
2 3 43 3746 885
3 4 32 3616 817
4 5 20 4250 137
... ... ... ...
3827 3828 4149 1620 130
3828 3829 4151 852 62
3829 3830 4155 330 236
3830 3831 4157 530 377
3831 3832 4159 3975 81
You can use isin to check equality against several labels at once. The resulting boolean array can be used directly as the mask after casting to the required type (e.g. int):
labels = df.loc[df.area.gt(5000), 'label']
mask = np.isin(labeled_image, labels).astype(int)
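A quick way to sanity-check the vectorized version on a tiny made-up labeled image (the labels and areas here are illustrative only, not the asker's data):

import numpy as np
import pandas as pd

# Tiny illustrative example (made-up labels/areas)
labeled_image = np.array([[1, 1, 2],
                          [3, 3, 3],
                          [0, 0, 2]])
df = pd.DataFrame({'label': [1, 2, 3], 'area': [6000, 100, 9000]})

labels = df.loc[df.area.gt(5000), 'label']        # labels 1 and 3 survive
mask = np.isin(labeled_image, labels).astype(int) # 1 where a surviving label sits
print(mask)
# [[1 1 0]
#  [1 1 1]
#  [0 0 0]]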

How to obtain the best result from pytesseract?

I'm trying to read text from an image, using OpenCV and Pytesseract, but with poor results.
The image I'm interested in reading the text is: https://www.lubecreostorepratolapeligna.it/gb/img/logo.png
This is the code I am using:
import cv2
import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'D:\Program Files\pytesseract\tesseract.exe'
image = cv2.imread(path_to_image)

# converting image into grayscale image
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grey image', gray_image)
cv2.waitKey(0)

# converting it to a binary image by thresholding;
# this step is required if you have a colored image, because if you skip it
# tesseract won't be able to detect the text correctly and will give an incorrect result
threshold_img = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# display image
cv2.imshow('threshold image', threshold_img)
# Maintain output window until user presses a key
cv2.waitKey(0)
# Destroying present windows on screen
cv2.destroyAllWindows()

# now feeding image to tesseract
text = pytesseract.image_to_string(threshold_img)
print(text)
The result of the execution is: ["cu", " ", "LUBE", " ", "STORE", "PRATOLA PELIGNA"]
But the result should be these 7 words: ["cucine", "LUBE", "CREO", "kitchens", "STORE", "PRATOLA", "PELIGNA"]
Is there anyone who could help me solve this problem?
Edit, 17.12.2020: With preprocessing it now recognizes everything but the "O" in CREO. See the stages in ocr8.py. Then ocr9.py demonstrates (not yet automated) finding the lines of text from the coordinates returned by pytesseract.image_to_boxes(), estimating the approximate size of the letters and the inter-symbol distance, then extrapolating one step ahead and searching for a single character (--psm 8).
It turned out that Tesseract had actually recognized the "O" in CREO, but it read it as ♀, probably confused by the little "k" below it etc.
Since it is a rare and "strange"/unexpected symbol, it can be corrected - replaced automatically (see the function Correct()).
There is a technical detail: Tesseract returns the ANSI/ASCII symbol 12 (0x0C), while the code in my editor was in Unicode/UTF-8 - 9792. So I coded it inside as chr(12).
The latest version: ocr9.py
You mentioned that PRATOLA and PELIGNA have to be given separately - just split by " ":
splitted = text.split(" ")
RECOGNIZED
CUCINE
LUBE
STORE
PRATOLA PELIGNA
CRE [+O with correction and extrapolation of the line]
KITCHENS
...
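The Correct() function mentioned above isn't shown in the answer; a minimal sketch of the idea - mapping known misreads back to the intended character - could look like this (only the chr(12) -> "O" case comes from the text above; the table structure is an assumed generalization):

# Hypothetical sketch of the Correct() idea described above.
def Correct(text):
    replacements = {chr(12): "O"}  # Tesseract misread the "O" in CREO as symbol 12 (0x0C)
    for bad, good in replacements.items():
        text = text.replace(bad, good)
    return text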
C 39 211 47 221 0
U 62 211 69 221 0
C 84 211 92 221 0
I 107 211 108 221 0
N 123 211 131 221 0
E 146 211 153 221 0
L 39 108 59 166 0
U 63 107 93 166 0
B 98 108 128 166 0
E 133 108 152 166 0
S 440 134 468 173 0
T 470 135 499 173 0
O 500 134 539 174 0
R 544 135 575 173 0
E 580 135 608 173 0
P 287 76 315 114 0
R 319 76 350 114 0
A 352 76 390 114 0
T 387 76 417 114 0
O 417 75 456 115 0
L 461 76 487 114 0
A 489 76 526 114 0
P 543 76 572 114 0
E 576 76 604 114 0
L 609 76 634 114 0
I 639 76 643 114 0
G 649 75 683 115 0
N 690 76 722 114 0
A 726 76 764 114 0
C 21 30 55 65 0
R 62 31 93 64 0
E 99 31 127 64 0
K 47 19 52 25 0
I 61 19 62 25 0
T 71 19 76 25 0
C 84 19 89 25 0
H 96 19 109 25 0
E 113 19 117 25 0
N 127 19 132 25 0
S 141 19 145 22 0
These are the character boxes returned by pytesseract.image_to_boxes().
Initial message:
I guess that for the area where "cucine" is, an adaptive threshold may segment it better, or maybe applying some edge detection first.
"kitchens" seems very small - what about trying to enlarge that area?
For CREO, I guess it's confused by the big and small sizes of the adjacent captions.
For the "O" in CREO, you may apply dilate in order to close the gap in the "O".
Edit: I played a bit (without Tesseract yet) and it needs more work. My goal was to make the letters more contrasting; some of these processing steps may need to be applied selectively, only on "cucine", perhaps applying the recognition in two passes. When getting partial words like "cu", apply adaptive thresholding etc. (below) and OCR on a top rectangle around "CU...".
Binary threshold:
Adaptive threshold, median blur (to clean noise) and invert:
Dilate connects small gaps, but it also destroys detail.
import cv2
import numpy as np
import pytesseract

#pytesseract.pytesseract.tesseract_cmd = r'D:\Program Files\pytesseract\tesseract.exe'
path_to_image = "logo.png"
#path_to_image = "logo1.png"

image = cv2.imread(path_to_image)
h, w, _ = image.shape
w = int(w * 3); h = int(h * 3)
image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)  # Resize 3x

# converting image into grayscale image
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('grey image', gray_image)
cv2.waitKey(0)

# converting it to a binary image by thresholding;
# this step is required for a colored image, since otherwise
# tesseract won't be able to detect the text correctly
#threshold_img = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
threshold_img = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                      cv2.THRESH_BINARY, 13, 3)
cv2.imshow('threshold image', threshold_img)
cv2.waitKey(0)

#threshold_img = cv2.GaussianBlur(threshold_img, (3,3), 0)
threshold_img = cv2.medianBlur(threshold_img, 5)
cv2.imshow('medianBlur', threshold_img)
cv2.waitKey(0)

threshold_img = cv2.bitwise_not(threshold_img)
cv2.imshow('Invert', threshold_img)
cv2.waitKey(0)

#kernel = np.ones((1, 1), np.uint8)
#threshold_img = cv2.dilate(threshold_img, kernel)
#cv2.imshow('Dilate', threshold_img)
#cv2.waitKey(0)

cv2.imshow('threshold image', threshold_img)
# Maintain output window until user presses a key
cv2.waitKey(0)
# Destroying present windows on screen
cv2.destroyAllWindows()

# now feeding image to tesseract
text = pytesseract.image_to_string(threshold_img)
print(text)
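The single-character search mentioned in the edit (extrapolating one box ahead and re-running Tesseract with --psm 8) could look roughly like this; the crop rectangle below is a placeholder extrapolated from the "E 99 31 127 64" box above, not a value taken from ocr9.py:

# Rough sketch of the single-character re-OCR step described above.
# image_to_boxes() coordinates have their origin at the bottom-left,
# so y values must be flipped before slicing the numpy array.
h_img = threshold_img.shape[0]
x0, x1 = 133, 167                 # assumed: one letter-width right of the "E" in CRE
y0, y1 = h_img - 64, h_img - 31   # the E box height, flipped to top-left origin
crop = threshold_img[y0:y1, x0:x1]
single_char = pytesseract.image_to_string(crop, config='--psm 8')
print(single_char)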

Why is Python automatically removing negative values from image array?

I am trying to apply the convolve method below on the cameraman image. The kernel applied to the image is a 3x3 filter populated with -1/9. I print the values of the cameraman image before applying the convolve method, and all I get are positive values. Next, when I apply the 3x3 negative kernel to the image, I still get positive values when I print the values of the cameraman image after convolution.
The convolving function:
def convolve2d(image, kernel):
    # This function takes an image and a kernel
    # and returns the convolution of them
    # Args:
    #   image: a numpy array of size [image_height, image_width].
    #   kernel: a numpy array of size [kernel_height, kernel_width].
    # Returns:
    #   a numpy array of size [image_height, image_width] (convolution output).
    output = np.zeros_like(image)  # convolution output
    # Add zero padding to the input image
    padding = int(len(kernel)/2)
    image_padded = np.pad(image, ((padding, padding), (padding, padding)), 'constant')
    for x in range(image.shape[1]):  # Loop over every pixel of the image
        for y in range(image.shape[0]):
            # element-wise multiplication of the kernel and the image
            output[y, x] = (kernel * image_padded[y:y+3, x:x+3]).sum()
    return output
And here is the filter I am applying to the image:
filter2= [[-1/9,-1/9,-1/9],[-1/9,-1/9,-1/9],[-1/9,-1/9,-1/9]]
Finally, these are the initial values of the image, and the values after convolution, respectively:
[[156 159 158 ... 151 152 152]
[160 154 157 ... 154 155 153]
[156 159 158 ... 151 152 152]
...
[114 132 123 ... 135 137 114]
[121 126 130 ... 133 130 113]
[121 126 130 ... 133 130 113]]
After convolution:
[[187 152 152 ... 154 155 188]
[152 99 99 ... 104 104 155]
[152 99 100 ... 103 103 154]
...
[175 133 131 ... 127 130 174]
[174 132 124 ... 125 130 175]
[202 173 164 ... 172 173 202]]
This is how I call the convolve2d method:
convolved_camManImage= convolve2d(camManImage,filter2)
This might be caused by how numpy dtypes work. As numpy.zeros_like's help says:
Return an array of zeros with the same shape and type as a given
array.
Thus your output is probably of dtype uint8, which uses modular arithmetic (values wrap around modulo 256). To check if this is the case, add print(output.dtype) immediately after the output = np.zeros_like(image) line.
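If that is the cause, one possible fix (a sketch, not from the original answer) is to allocate the output with a signed or floating dtype so the negative convolution results survive:

import numpy as np

# uint8 wraps: -5 cast to uint8 becomes 251 (== -5 mod 256)
print(np.array([-5]).astype(np.uint8))  # [251]

# In convolve2d, allocate the output with a float dtype instead of
# inheriting uint8 from the input image via np.zeros_like(image):
# output = np.zeros(image.shape, dtype=np.float64)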

Speed up while loop matching pattern in array

I have the following data array, with 2 million entries:
[20965 1239 296 231 -1 -1 20976 1239 299 314 147 337
255 348 -1 -1 20978 1239 136 103 241 154 27 293
-1 -1 20984 1239 39 161 180 184 -1 -1 20990 1239
291 31 405 50 569 357 -1 -1 20997 1239 502 25
176 215 360 281 -1 -1 21004 1239 -1 -1 21010 1239
286 104 248 252 -1 -1 21017 1239 162 38 331 240
368 363 321 412 -1 -1 21024 1239 428 323 -1 -1
21030 1239 -1 -1 21037 1239 325 28 353 102 477 189
366 251 143 452 ... etc.
This array contains x, y coordinates of photons on a CCD chip. I want to go through the array and add up all these photon events in a matrix with dimensions equal to the CCD chip.
The formatting is as follows: number number x0 y0 x1 y1 -1 -1. I don't care much about the two number entries; the x0 y0 etc. is what I want to get out. The -1 entries are a delimiter indicating a new frame; after these there are always the two 'number' entries again.
I have made this code, which does work:
i = 2
pixels = np.int32(data_height)*np.int32(data_width)
data = np.zeros(pixels).reshape(data_height, data_width)
while i < len(rdata):
    x = rdata[i]
    y = rdata[i+1]
    if x != -1 and y != -1:
        data[y,x] = data[y,x] + 1
        i = i + 2
    elif x == -1 and y == -1:
        i = i + 4
    else:
        print "something is wrong"
        print i
        print x
        print y
rdata is my original array. data is the resulting matrix, which starts out with only zeroes. The while loop starts at the first x coordinate, at index 2, and whenever it finds two consecutive -1 entries it skips four entries.
The script works fine, but it takes 7 seconds to run. How can I speed it up? I am a beginner with Python, and from Learn Python the Hard Way I know that while loops should be avoided, but rewriting it as a for loop is even slower!
for i in range(2, len(rdata), 2):
    x = rdata[i]
    y = rdata[i+1]
    if x != -1 and y != -1:
        px = rdata[i-2]
        py = rdata[i-1]
        if px != -1 and py != -1:
            data[y,x] = data[y,x] + 1
Maybe someone can think of a faster method, something along the lines of np.argwhere(rdata == -1) and use this output to extract the locations of the x and y coordinates?
Update: thanks for all the answers!
I used askewchan's method to conserve the frame information; however, as my data file is 300000 frames long, I get a memory error when I try to generate a numpy array with dimensions (300000, 640, 480). I got around this by making a generator:
def bindata(splits, h, w, data):
    f0 = 0
    for i, f in enumerate(splits):
        flat_rdata = np.ravel_multi_index(tuple(data[f0:f].T)[::-1], (h, w))
        dataslice = np.bincount(flat_rdata, minlength=h*w).reshape(h, w)
        f0 = f
        yield dataslice
I then make a TIFF from the array using a modified version of Gohlke's tifffile.py. It works fine, but I need to figure out a way to compress the data, as the TIFF file grows past 4 GB (at which point the script crashes). My arrays are very sparse: 640*480, all zeros with a few dozen ones per frame, and the original data file is 4 MB, so some compression should be possible.
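Since the frames are that sparse, one option (a sketch, not from the original thread) is to store each frame as a SciPy sparse matrix instead of a dense array, keeping only the nonzero photon counts in memory:

# Sketch: keep each frame as a scipy.sparse matrix; only the few dozen
# nonzero photon counts per frame are actually stored.
from scipy import sparse

sparse_frames = [sparse.csr_matrix(frame) for frame in bindata(splits, h, w, data)]

# Densify a single frame on demand:
frame0 = sparse_frames[0].toarray()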
Sounds like all you want is to do some boolean indexing magic to get rid of the invalid frame stuff, and then of course add the pixels up.
rdata = rdata.reshape(-1, 2)
mask = (rdata != -1).all(1)
# remove every x, y pair that is after a pair with a -1.
mask[1:][mask[:-1] == False] = False
# remove first x, y pair
mask[0] = False
rdata = rdata[mask]
# Now need to use bincount, [::-1], since you use data[y,x]:
flat_rdata = np.ravel_multi_index(tuple(rdata.T)[::-1], (data_height, data_width))
res = np.bincount(flat_rdata, minlength=data_height * data_width)
res = res.reshape(data_height, data_width)
Use this to remove the -1s and numbers:
rdata = np.array("20965 1239 296 231 -1 -1 20976 1239 299 314 147 337 255 348 -1 -1 20978 1239 136 103 241 154 27 293 -1 -1 20984 1239 39 161 180 184 -1 -1 20990 1239 291 31 405 50 569 357 -1 -1 20997 1239 502 25 176 215 360 281 -1 -1 21004 1239 -1 -1 21010 1239 286 104 248 252 -1 -1 21017 1239 162 38 331 240 368 363 321 412 -1 -1 21024 1239 428 323 -1 -1 21030 1239 -1 -1 21037 1239 325 28 353 102 477 189 366 251 143 452".split(), dtype=int)
rdata = rdata.reshape(-1,2)
splits = np.where(np.all(rdata==-1, axis=1))[0]
nonxy = np.hstack((splits,splits+1))
data = np.delete(rdata, nonxy, axis=0)[1:]
Now, using part of @seberg's method to convert the x-y lists into arrays, you can make a 3D array where each 'layer' is a frame:
nf = splits.size + 1  # number of frames
splits -= 1 + 2*np.arange(nf-1)  # account for missing `-1`s and `number`s
datastack = np.zeros((nf,h,w))
f0 = 0  # f0 = start of the frame
for i,f in enumerate(splits):  # f = end of the frame
    flat_data = np.ravel_multi_index(tuple(data[f0:f].T)[::-1], (h, w))
    datastack[i] = np.bincount(flat_data, minlength=h*w).reshape(h, w)
    f0 = f
Now, datastack[i] is a 2D array showing the ith frame of your data.
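And since the original goal was a single accumulated image of all photon events, the per-frame stack collapses back to that with one sum (a usage note, assuming the datastack above):

# Total photon-count image over all frames - the matrix the question asked for
total = datastack.sum(axis=0)  # shape (h, w)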
If x0, y0, x1, y1 != -1, could you not do something like filter(lambda a: a != -1, rdata) and then not bother with the ifs? That could speed your code up.

PPM file data cannot be recognized

I'm writing a simple picture editor. It uses PPM files. From what I can tell, my code should work. However, I get this error:
Traceback (most recent call last):
  File "/home/zach/Downloads/piceditor (1).py", line 84, in <module>
    main()
  File "/home/zach/Downloads/piceditor (1).py", line 69, in main
    image = Image(Point(100,100), filename)
  File "/home/zach/Downloads/graphics.py", line 770, in __init__
    self.img = tk.PhotoImage(file=pixmap[0], master=_root)
  File "/usr/lib/python3.1/tkinter/__init__.py", line 3272, in __init__
    Image.__init__(self, 'photo', name, cnf, master, **kw)
  File "/usr/lib/python3.1/tkinter/__init__.py", line 3228, in __init__
    self.tk.call(('image', 'create', imgtype, name,) + options)
_tkinter.TclError: couldn't recognize data in image file "pig.ppm"
My code looks like this
def main():
    print("Image Editor")
    print()
    filename = input("name of image file: ")
    print()
    with open(filename) as f:  # the with block closes the file automatically
        formatind = f.readline()
        width, height = [int(x) for x in f.readline().split()]
        colordepth = f.readline()
        array = []
        for line in f:
            array.append([int(x) for x in line.split()])
    win = GraphWin("Image Editor!", width, height)
    image = Image(Point(100,100), filename)
    Display(image, array, width, height, win)
    win.getMouse()
    win.close()

main()
And my Display function looks like this
def Display(image, array, width, height, win):
for i in range(width):
for j in range(0, height, 3):
colors = color_rgb(array[i][j], array[i][j+1], array[i][j+2])
image.setPixel(i, j, colors)
image.draw(win)
return
This is the ppm file i'm using
P3
6 8
255
249 249 249 255 255 255 250 250 250 255 255 255 250 250 250 250 250 250 254 255 255 251 255 255
249 251 255 253 249 255 255 248 255 255 234 255 255 242 255 255 245 253 255 246 243 255 253 241
255 255 237 255 255 237 252 255 241 249 255 246 249 255 253 254 255 255 255 252 255 255 248 241
255 251 239 254 247 241 252 254 253 252 255 255 251 255 255 242 242 242 255 255 255 241 241 241
0 0 0 0 0 0 4 4 4 20 20 20 236 236 236 252 252 252 254 255 253 248 255 250
0 0 0 0 0 0 4 4 4 20 20 20 236 236 236 252 252 252 254 255 253 248 255 250
I cannot for the life of me figure out why it won't recognize the data in the file.
Any help would be great. Thanks!
Why don't you use the PIL library? Its documentation says it can work with PPM files, though I am not familiar with handling PPM files in PIL myself.
Example: opening a PPM file and creating an object from it that can then be used to edit the image.
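A minimal sketch of that suggestion (recent Pillow versions read both ASCII and binary PPM; the filename is the one from the question):

# Minimal sketch: load a PPM with PIL/Pillow and read/modify pixels.
from PIL import Image

im = Image.open("pig.ppm")   # Pillow understands the PPM format directly
print(im.size, im.mode)      # e.g. (6, 8) RGB for the file above
pixels = im.load()           # pixel access object for editing
pixels[0, 0] = (255, 0, 0)   # set the top-left pixel to red
im.save("pig_edited.ppm")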
