Image dimensions swapped after sending through websocket - python

I'm making an image viewer interface for a camera I have. The backend is written in Python and works like this:
Acquire image as numpy array.
Convert numpy array to jpeg.
Convert jpeg to base64 string.
Send string over websocket.
def image_to_bytes(image):
    print('into: ', image.shape)
    buf = cv2.imencode('.jpg', image)[1]
    dec = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    print('outa: ', dec.shape)
    return base64.b64encode(buf).decode('utf-8')

async def send_image(websocket: WebSocket):
    cam = Camera()
    for im in cam:
        w, h = im.shape[:2]
        resized = cv2.resize(im, (w // 4, h // 4), interpolation=cv2.INTER_LINEAR)
        await websocket.send_bytes(image_to_bytes(resized))
However, when the JavaScript frontend receives the image, the dimensions are swapped, which distorts the image.
socket.onmessage = function(event) {
    let im = new Image();
    const buf = event.data;
    im.src = 'data:image/jpeg;base64,' + buf;
    im.onload = function() {
        context.drawImage(im, 0, 0);
        console.log('w=' + im.width + ', h=' + im.height);
    };
};
I know that the dimensions are swapped because I checked the dimensions before encoding the image, then decoded it again to make sure the encoding process didn't swap width and height. Finally I checked the dimensions on the JS side, and width and height are reversed.
Any idea why the dimensions are getting swapped?

From the documentation of OpenCV:
The shape of an image is accessed by img.shape. It returns a tuple of the number of rows, columns, and channels (if the image is color):
Hence instead of w, h = im.shape[:2] you need h, w = im.shape[:2].
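A minimal corrected version of the backend loop (a sketch; Camera and image_to_bytes are as in the question). Note that cv2.resize also takes its target size as (width, height), so the unpacking order matters twice:

async def send_image(websocket: WebSocket):
    cam = Camera()
    for im in cam:
        # shape is (rows, cols[, channels]), i.e. (height, width[, channels])
        h, w = im.shape[:2]
        # cv2.resize expects the destination size as (width, height)
        resized = cv2.resize(im, (w // 4, h // 4), interpolation=cv2.INTER_LINEAR)
        await websocket.send_bytes(image_to_bytes(resized))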

Numpy array to and from ModernGL buffer (open and save with cv2)

I want to:
Open the texture from an image via cv2 instead of via ModernGL's load_texture_2d method.
Save the resulting image (in the write method) via cv2 rather than Pillow.
I currently have the following code:
from pathlib import Path
from array import array

import cv2
import numpy as np
from PIL import Image
import moderngl
import moderngl_window

class ImageProcessing(moderngl_window.WindowConfig):
    window_size = 3840 // 2, 2160 // 2
    resource_dir = Path(__file__).parent.resolve()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.image_processing = ImageTransformer(self.ctx, self.window_size)
        # Not working:
        # img = cv2.imread("test6.png")
        # self.texture = img.astype('f4')
        self.texture = self.load_texture_2d("test6.png")

    def render(self, time, frame_time):
        # View in Window
        self.image_processing.render(self.texture, target=self.ctx.screen)
        # Headless
        self.image_processing.render(self.texture)
        self.image_processing.write("output.png")

class ImageTransformer:
    def __init__(self, ctx, size, program=None):
        self.ctx = ctx
        self.size = size
        self.program = None
        self.fbo = self.ctx.framebuffer(
            color_attachments=[self.ctx.texture(self.size, 4)]
        )
        # Create some default program if needed
        if not program:
            self.program = self.ctx.program(
                vertex_shader="""
                #version 330
                in vec2 in_position;
                in vec2 in_uv;
                out vec2 uv;
                void main() {
                    gl_Position = vec4(in_position, 0.0, 1.0);
                    uv = in_uv;
                }
                """,
                fragment_shader="""
                #version 330
                uniform sampler2D image;
                in vec2 uv;
                out vec4 out_color;
                void main() {
                    vec4 color = texture(image, uv);
                    // do something with color here
                    out_color = vec4(color.r, 0, 0, color.a);
                }
                """,
            )
        # Fullscreen quad in NDC
        self.vertices = self.ctx.buffer(
            array(
                'f',
                [
                    # Triangle strip creating a fullscreen quad
                    # x, y, u, v
                    -1,  1, 0, 1,  # upper left
                    -1, -1, 0, 0,  # lower left
                     1,  1, 1, 1,  # upper right
                     1, -1, 1, 0,  # lower right
                ]
            )
        )
        self.quad = self.ctx.vertex_array(
            self.program,
            [
                (self.vertices, '2f 2f', 'in_position', 'in_uv'),
            ]
        )

    def render(self, texture, target=None):
        if target:
            target.use()
        else:
            self.fbo.use()
        texture.use(0)
        self.quad.render(mode=moderngl.TRIANGLE_STRIP)

    def write(self, name):
        # This doesn't work:
        raw = self.fbo.read(components=4, dtype='f4')
        buf = np.frombuffer(raw, dtype='f4')
        cv2.imwrite("OUTPUT_IMAGE.png", buf)
        # But this does:
        ## image = Image.frombytes("RGBA", self.fbo.size, self.fbo.read())
        ## image = image.transpose(Image.FLIP_TOP_BOTTOM)
        ## image.save(name, format="png")

if __name__ == "__main__":
    ImageProcessing.run()
Currently, when the code is run as-is, no image is saved whatsoever. The window just hangs and nothing happens. I am not sure if I have something wrong in my code or if the datatypes are wrong.
The Pillow code (if you uncomment it) saves the image correctly, but please note: while I could convert the Pillow image to a numpy array, I would prefer not to in my use case.
Clarification: The window loads and shows the image result just fine, but doesn't save correctly in the write method.
There is some code missing in your application.
The method load_texture_2d creates a moderngl.Texture object: it loads the file, creates the texture object, and uploads the texture image from the CPU to the GPU.
cv2.imread just loads the image file into a NumPy array; it doesn't create a moderngl.Texture object.
You have to generate a moderngl.Texture object from the NumPy array:
img = cv2.imread("test6.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # optional
img = np.flip(img, 0).copy(order='C') # optional
self.texture = self.ctx.texture(img.shape[1::-1], img.shape[2], img)
Before writing the buffer into an image, you must reshape the NumPy array according to the image format. For instance:
raw = self.fbo.read(components=4, dtype='f1')
buf = np.frombuffer(raw, dtype='uint8').reshape((*self.fbo.size[1::-1], 4))
cv2.imwrite("OUTPUT_IMAGE.png", buf)
Can you tell, in the write() method, what buf.shape is? I think it's a 1-D array at that point, probably height * width * 4 elements long.
imwrite() needs it shaped right. Try this before imwrite():
buf.shape = (self.size[1], self.size[0], 4)
That should reshape the data as (height, width, 4), and then imwrite() should accept it.
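Putting the two answers together, a minimal write() sketch (this assumes an 8-bit output is acceptable; the vertical flip and channel swap mirror what the working Pillow path does):

def write(self, name):
    # Read the framebuffer as 8-bit data rather than 'f4' floats
    raw = self.fbo.read(components=4, dtype='f1')
    # Reshape the flat buffer to (height, width, channels)
    buf = np.frombuffer(raw, dtype='uint8').reshape(self.fbo.size[1], self.fbo.size[0], 4)
    # The framebuffer is RGBA; OpenCV expects BGRA
    buf = cv2.cvtColor(buf, cv2.COLOR_RGBA2BGRA)
    # OpenGL rows run bottom-up; flip so the saved image is upright
    buf = cv2.flip(buf, 0)
    cv2.imwrite(name, buf)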

'NoneType' object has no attribute 'clip' error in cv2_imshow()

I was trying to create a program which could detect my facial expression (from the webcam).
However, while displaying my face, I get the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-47-e0549b59dd89> in <module>()
47 print("\n\n")
48
---> 49 cv2_imshow(frame)
50 if cv2.waitKey(1) & 0xFF == ord('q'):
51 break
/usr/local/lib/python3.6/dist-packages/google/colab/patches/__init__.py in cv2_imshow(a)
20 image.
21 """
---> 22 a = a.clip(0, 255).astype('uint8')
23 # cv2 stores colors as BGR; convert to RGB
24 if a.ndim == 3:
AttributeError: 'NoneType' object has no attribute 'clip'
I am using Python 3.6 on Google Colab.
I am using cv2_imshow() from Google patches, since Colab does not support cv2.imshow()
Here is my code:
from google.colab.patches import cv2_imshow
from keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import numpy as np

face_classifier = cv2.CascadeClassifier('/content/drive/My Drive/Colab Notebooks/haarcascade_frontalface_default.xml')
classifier = load_model('/content/drive/My Drive/Colab Notebooks/fer_68acc.h5')
class_labels = ['Angry','Happy','Neutral','Sad','Surprise']

cap = cv2.VideoCapture(0)

while True:
    # Grab a single frame of video
    ret, frame = cap.read()
    labels = []
    gray = cv2.imread(frame, cv2.IMREAD_GRAYSCALE)
    faces = face_classifier.detectMultiScale(gray,1.3,5)

    for (x,y,w,h) in faces:
        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h,x:x+w]
        roi_gray = cv2.resize(roi_gray,(48,48),interpolation=cv2.INTER_AREA)

        if np.sum([roi_gray])!=0:
            roi = roi_gray.astype('float')/255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi,axis=0)

            # make a prediction on the ROI, then lookup the class
            preds = classifier.predict(roi)[0]
            print("\nprediction = ",preds)
            label=class_labels[preds.argmax()]
            print("\nprediction max = ",preds.argmax())
            print("\nlabel = ",label)
            label_position = (x,y)
            cv2.putText(frame,label,label_position,cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,0),3)
        else:
            cv2.putText(frame,'No Face Found',(20,60),cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,0),3)
        print("\n\n")

    cv2_imshow(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Could someone please help?
Unfortunately, I can not run this on my local machine, so it would be helpful if someone gave a solution which can be run on Google Colab.
Thanks
Does the following give you a non-zero size:
print(frame.shape)
If not, then the image is not loading properly. NoneType means there is nothing stored in the variable called frame. Note that cv2.imread expects a file path, not an array, so cv2.imread(frame, ...) returns None; also, cv2.VideoCapture(0) cannot reach your local webcam from a Colab runtime, so cap.read() will not return valid frames there.
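Assuming the camera frame does arrive, a minimal sketch of the fix (my illustration, not from the original answer): cap.read() already returns a NumPy array, so convert it with cv2.cvtColor instead of re-reading it with cv2.imread, and guard against a failed read:

ret, frame = cap.read()
if not ret or frame is None:
    break  # the camera returned nothing; don't pass None downstream
# frame is already an array, so convert it rather than calling cv2.imread on it
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray, 1.3, 5)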
I also faced the same problem. I wanted to detect some objects using YOLOv4 with my webcam, and then I found The AI Guy.
He uses a Camera Capture code snippet that runs JavaScript to access the computer's webcam. The snippet takes a webcam photo, which is then passed into the YOLOv4 model for object detection.
Below is a helper function to take the webcam picture using JavaScript and then run YOLOv4.
Note: the three single quotes (''') after js = Javascript( may look like a comment on Stack Overflow, but in a Colab code cell they are ordinary code (a Python triple-quoted string).
Taking photos by webcam:
def take_photo(filename='photo.jpg', quality=0.8):
    js = Javascript('''
        async function takePhoto(quality) {
            const div = document.createElement('div');
            const capture = document.createElement('button');
            capture.textContent = 'Capture';
            div.appendChild(capture);

            const video = document.createElement('video');
            video.style.display = 'block';
            const stream = await navigator.mediaDevices.getUserMedia({video: true});

            document.body.appendChild(div);
            div.appendChild(video);
            video.srcObject = stream;
            await video.play();

            // Resize the output to fit the video element.
            google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);

            // Wait for Capture to be clicked.
            await new Promise((resolve) => capture.onclick = resolve);

            const canvas = document.createElement('canvas');
            canvas.width = video.videoWidth;
            canvas.height = video.videoHeight;
            canvas.getContext('2d').drawImage(video, 0, 0);
            stream.getVideoTracks()[0].stop();
            div.remove();
            return canvas.toDataURL('image/jpeg', quality);
        }
    ''')
    display(js)

    # get photo data
    data = eval_js('takePhoto({})'.format(quality))
    # get OpenCV format image
    img = js_to_image(data)
    # call our darknet helper on webcam image
    detections, width_ratio, height_ratio = darknet_helper(img, width, height)
    # loop through detections and draw them on webcam image
    for label, confidence, bbox in detections:
        left, top, right, bottom = bbox2points(bbox)
        left, top, right, bottom = int(left * width_ratio), int(top * height_ratio), int(right * width_ratio), int(bottom * height_ratio)
        cv2.rectangle(img, (left, top), (right, bottom), class_colors[label], 2)
        cv2.putText(img, "{} [{:.2f}]".format(label, float(confidence)),
                    (left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    class_colors[label], 2)
    # save image
    cv2.imwrite(filename, img)
    return filename

try:
    filename = take_photo('photo.jpg')
    print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    display(Image(filename))
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))
Below is another helper function to start up the video stream using similar JavaScript as was used for images. The video stream frames are fed as input to YOLOv4.
# JavaScript to properly create our live video stream using our webcam as input
def video_stream():
    js = Javascript('''
        var video;
        var div = null;
        var stream;
        var captureCanvas;
        var imgElement;
        var labelElement;
        var pendingResolve = null;
        var shutdown = false;

        function removeDom() {
            stream.getVideoTracks()[0].stop();
            video.remove();
            div.remove();
            video = null;
            div = null;
            stream = null;
            imgElement = null;
            captureCanvas = null;
            labelElement = null;
        }

        function onAnimationFrame() {
            if (!shutdown) {
                window.requestAnimationFrame(onAnimationFrame);
            }
            if (pendingResolve) {
                var result = "";
                if (!shutdown) {
                    captureCanvas.getContext('2d').drawImage(video, 0, 0, 640, 480);
                    result = captureCanvas.toDataURL('image/jpeg', 0.8);
                }
                var lp = pendingResolve;
                pendingResolve = null;
                lp(result);
            }
        }

        async function createDom() {
            if (div !== null) {
                return stream;
            }
            div = document.createElement('div');
            div.style.border = '2px solid black';
            div.style.padding = '3px';
            div.style.width = '100%';
            div.style.maxWidth = '600px';
            document.body.appendChild(div);

            const modelOut = document.createElement('div');
            modelOut.innerHTML = "<span>Status:</span>";
            labelElement = document.createElement('span');
            labelElement.innerText = 'No data';
            labelElement.style.fontWeight = 'bold';
            modelOut.appendChild(labelElement);
            div.appendChild(modelOut);

            video = document.createElement('video');
            video.style.display = 'block';
            video.width = div.clientWidth - 6;
            video.setAttribute('playsinline', '');
            video.onclick = () => { shutdown = true; };
            stream = await navigator.mediaDevices.getUserMedia(
                {video: { facingMode: "environment"}});
            div.appendChild(video);

            imgElement = document.createElement('img');
            imgElement.style.position = 'absolute';
            imgElement.style.zIndex = 1;
            imgElement.onclick = () => { shutdown = true; };
            div.appendChild(imgElement);

            const instruction = document.createElement('div');
            instruction.innerHTML =
                '<span style="color: red; font-weight: bold;">' +
                'When finished, click here or on the video to stop this demo</span>';
            div.appendChild(instruction);
            instruction.onclick = () => { shutdown = true; };

            video.srcObject = stream;
            await video.play();

            captureCanvas = document.createElement('canvas');
            captureCanvas.width = 640; //video.videoWidth;
            captureCanvas.height = 480; //video.videoHeight;
            window.requestAnimationFrame(onAnimationFrame);

            return stream;
        }

        async function stream_frame(label, imgData) {
            if (shutdown) {
                removeDom();
                shutdown = false;
                return '';
            }

            var preCreate = Date.now();
            stream = await createDom();

            var preShow = Date.now();
            if (label != "") {
                labelElement.innerHTML = label;
            }
            if (imgData != "") {
                var videoRect = video.getClientRects()[0];
                imgElement.style.top = videoRect.top + "px";
                imgElement.style.left = videoRect.left + "px";
                imgElement.style.width = videoRect.width + "px";
                imgElement.style.height = videoRect.height + "px";
                imgElement.src = imgData;
            }

            var preCapture = Date.now();
            var result = await new Promise(function(resolve, reject) {
                pendingResolve = resolve;
            });
            shutdown = false;

            return {'create': preShow - preCreate,
                    'show': preCapture - preShow,
                    'capture': Date.now() - preCapture,
                    'img': result};
        }
    ''')
    display(js)

def video_frame(label, bbox):
    data = eval_js('stream_frame("{}", "{}")'.format(label, bbox))
    return data

# start streaming video from webcam
video_stream()
# label for video
label_html = 'Capturing...'
# initialize bounding box to empty
bbox = ''
count = 0
while True:
    js_reply = video_frame(label_html, bbox)
    if not js_reply:
        break

    # convert JS response to OpenCV Image
    frame = js_to_image(js_reply["img"])

    # create transparent overlay for bounding box
    bbox_array = np.zeros([480,640,4], dtype=np.uint8)

    # call our darknet helper on video frame
    detections, width_ratio, height_ratio = darknet_helper(frame, width, height)

    # loop through detections and draw them on transparent overlay image
    for label, confidence, bbox in detections:
        left, top, right, bottom = bbox2points(bbox)
        left, top, right, bottom = int(left * width_ratio), int(top * height_ratio), int(right * width_ratio), int(bottom * height_ratio)
        bbox_array = cv2.rectangle(bbox_array, (left, top), (right, bottom), class_colors[label], 2)
        bbox_array = cv2.putText(bbox_array, "{} [{:.2f}]".format(label, float(confidence)),
                                 (left, top - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                 class_colors[label], 2)

    bbox_array[:,:,3] = (bbox_array.max(axis=2) > 0).astype(int) * 255
    # convert overlay of bbox into bytes
    bbox_bytes = bbox_to_bytes(bbox_array)
    # update bbox so next frame gets new overlay
    bbox = bbox_bytes
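Note that both snippets call helpers from the same notebook that are not shown here (js_to_image, bbox_to_bytes, darknet_helper, bbox2points). As a rough sketch of what js_to_image does, assuming it simply decodes the base64 data URL returned by the JavaScript into an OpenCV BGR image:

import base64
import numpy as np
import cv2

def js_to_image(js_reply):
    # Strip the 'data:image/jpeg;base64,' prefix and decode the payload
    image_bytes = base64.b64decode(js_reply.split(',')[1])
    # View the JPEG bytes as a NumPy buffer and decode them to a BGR array
    jpg_as_np = np.frombuffer(image_bytes, dtype=np.uint8)
    return cv2.imdecode(jpg_as_np, flags=cv2.IMREAD_COLOR)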
If you don't get any benefit from the above code, then please check my Colab, where I run this code successfully.
Colab link
Use the two steps below with the opencv-python package in Google Colab:
Import cv2 and cv2_imshow from google.colab.patches as below:
import cv2
from google.colab.patches import cv2_imshow
Read the image using cv2 and display it using cv2_imshow as below:
img = cv2.imread('Folder_Name/Img.jpg')
cv2_imshow(img)

Grayscale Image crop and conversion to QPixmap [duplicate]

Suppose I am taking an image from the webcam using opencv.
_, img = self.cap.read() # numpy.ndarray (480, 640, 3)
Then I create a QImage qimg using img:
qimg = QImage(
    data=img,
    width=img.shape[1],
    height=img.shape[0],
    bytesPerLine=img.strides[0],
    format=QImage.Format_Indexed8)
But it gives an error saying that:
TypeError: 'data' is an unknown keyword argument
But said in this documentation, the constructor should have an argument named data.
I am using anaconda environment to run this project.
opencv version = 3.1.4
pyqt version = 5.9.2
numpy version = 1.15.0
What the documentation indicates is that data is required as a positional parameter, not that the keyword is called data. The following method converts a numpy/OpenCV image to a QImage:
from PyQt5.QtGui import QImage, qRgb
import numpy as np
import cv2

gray_color_table = [qRgb(i, i, i) for i in range(256)]

def NumpyToQImage(im):
    qim = QImage()
    if im is None:
        return qim
    if im.dtype == np.uint8:
        if len(im.shape) == 2:
            qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_Indexed8)
            qim.setColorTable(gray_color_table)
        elif len(im.shape) == 3:
            if im.shape[2] == 3:
                qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGB888)
            elif im.shape[2] == 4:
                qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_ARGB32)
    return qim

img = cv2.imread('/path/of/image')
qimg = NumpyToQImage(img)
assert(not qimg.isNull())
or you can use the qimage2ndarray library
Cropping the image with index slicing only modifies the shape and strides, not the underlying data, so the row stride no longer matches what QImage expects; the solution is to make a copy:
img = cv2.imread('/path/of/image')
img = np.copy(img[200:500, 300:500, :]) # copy image
qimg = NumpyToQImage(img)
assert(not qimg.isNull())
I suspect it is erroring out with TypeError: 'data' is an unknown keyword argument because that is the first argument that it encounters.
The linked class reference is for PyQt4, for PyQt5 it links to C++ documentation at https://doc.qt.io/qt-5/qimage.html, but the similarities are clear.
PyQt4:
QImage.__init__ (self, bytes data, int width, int height, int bytesPerLine, Format format)
Constructs an image with the given width, height and format, that uses an existing memory buffer, data. The width and height must be specified in pixels. bytesPerLine specifies the number of bytes per line (stride).
PyQt5 (C++):
QImage(const uchar *data, int width, int height, int bytesPerLine, QImage::Format format, QImageCleanupFunction cleanupFunction = nullptr, void *cleanupInfo = nullptr)
Constructs an image with the given width, height and format, that uses an existing memory buffer, data. The width and height must be specified in pixels. bytesPerLine specifies the number of bytes per line (stride).
Per the examples at https://www.programcreek.com/python/example/106694/PyQt5.QtGui.QImage, you might try
qimg = QImage(img, img.shape[1], img.shape[0], img.strides[0], QImage.Format_Indexed8)
(without the data=, width=, etc)
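Since the title asks about QPixmap: once you have a valid QImage, the conversion is a single call (a small usage sketch; displaying it in a QLabel is just one option):

from PyQt5.QtGui import QPixmap

# Convert the QImage produced by NumpyToQImage into a QPixmap,
# e.g. for display in a QLabel via label.setPixmap(pixmap)
pixmap = QPixmap.fromImage(qimg)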

Converting RGBA images in a folder and saving them to another folder in '.pgm' format

I have a group of RGBA images saved in a folder; my goal is to convert these images into another folder in .pgm format. Below is the code:
path1 = 'file/path/where/images/are/stored'
path2 = 'file/path/where/pgm/images/will/be/saved'

listing = os.listdir(path1)
for file in listing:
    # Transforms an RGBA image into an RGB-only image
    image_rgb = Image.open(file).convert('RGB')
    # Color separation stains to detect microscopic cells
    ihc_hed = rgb2hed(image_rgb)
    # Transforms the image into a numpy array of the uint8 type
    cv_img = ihc_hed.astype(np.uint8)
    # create color boundaries detecting black and blue stains
    lower = np.array([0,0,0], dtype = "uint8")
    upper = np.array([0,0,255], dtype = "uint8")
    # calculates the pixels within the specified boundaries and creates a mask
    mask = cv2.inRange(cv_img, lower, upper)
    img = Image.fromarray(mask,'L')
    img.save(path2+file,'pgm')
However, I get an error stating KeyError: 'PGM'; it seems that the 'pgm' format is not among the supported formats.
Thanks for the advice :)
As far as I can see, scikit-image uses the Python Imaging Library (PIL) for saving image files, and PIL does not support PGM.
Refer to http://effbot.org/imagingbook/decoder.htm for how to write your own file decoder for PIL. The extract below is Python 2-era code, quoted as-is:
Extract:
import Image, ImageFile
import string

class SpamImageFile(ImageFile.ImageFile):

    format = "SPAM"
    format_description = "Spam raster image"

    def _open(self):
        # check header
        header = self.fp.read(128)
        if header[:4] != "SPAM":
            raise SyntaxError, "not a SPAM file"
        header = string.split(header)

        # size in pixels (width, height)
        self.size = int(header[1]), int(header[2])

        # mode setting
        bits = int(header[3])
        if bits == 1:
            self.mode = "1"
        elif bits == 8:
            self.mode = "L"
        elif bits == 24:
            self.mode = "RGB"
        else:
            raise SyntaxError, "unknown number of bits"

        # data descriptor
        self.tile = [
            ("raw", (0, 0) + self.size, 128, (self.mode, 0, 1))
        ]

Image.register_open("SPAM", SpamImageFile)
Image.register_extension("SPAM", ".spam")
Image.register_extension("SPAM", ".spa") # dos version
