Remove image background and extract the object in image - python

I know that there are many threads here about this issue, but I'm not able to solve my problem with those answers. I tried many times with different codes to remove the background of an image like this:
to this:
with the following code:
img2 = Image.open("foo.jpg")
c_red, c_green, c_blue = cv2.split(img2)
img2 = cv2.merge((c_red, c_green, c_blue, mask.astype('float32') / 255.0))
img.paste(img2,(0,0))
or with this code:
img2 = Image.open("foo.jpg")
img2 = img2.convert("RGBA")
datas = img2.getdata()
newData = []
for item in datas:
    if item[0] == 255 and item[1] == 255 and item[2] == 255:
        newData.append((255, 255, 255, 0))
    else:
        newData.append(item)
img2.putdata(newData)
or:
threshold=100
dist=5
img2 = Image.open("foo.jpg")
img2 = img2.convert("RGBA")
arr=np.array(np.asarray(img2))
r,g,b,a=np.rollaxis(arr,axis=-1)
mask = ((r > threshold)
        & (g > threshold)
        & (b > threshold)
        & (np.abs(r - g) < dist)
        & (np.abs(r - b) < dist)
        & (np.abs(g - b) < dist)
        )
arr[mask,3]=0
img2=Image.fromarray(arr,mode='RGBA')
But none of them works. What I want to do is make the background (whatever its color) transparent and crop the image down to the object's bounding box, as shown above. Any help is appreciated.

Here's one way to do it using OpenCV. The idea is to obtain a binary image, then use cv2.boundingRect to obtain the bounding rectangle coordinates. We can crop the image using NumPy slicing and then add an alpha channel. Here are the results:
Input image
Binary image and region to extract
Extracted ROI
Code
import cv2
import numpy as np
# Load image, convert to grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Obtain bounding rectangle and extract ROI
x,y,w,h = cv2.boundingRect(thresh)
cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
ROI = original[y:y+h, x:x+w]
# Add alpha channel
b,g,r = cv2.split(ROI)
alpha = np.ones(b.shape, dtype=b.dtype) * 50
ROI = cv2.merge([b,g,r,alpha])
cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.imshow('ROI', ROI)
cv2.waitKey()
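Note that the constant alpha of 50 above makes the whole ROI uniformly semi-transparent. If the goal is a fully transparent background instead, a variation (just a sketch, assuming the Otsu mask cleanly separates the object as white on black) is to reuse the thresholded mask as the alpha channel:
# Sketch: use the binary mask as the alpha channel (255 = opaque object, 0 = transparent background)
mask_roi = thresh[y:y+h, x:x+w]
b, g, r = cv2.split(original[y:y+h, x:x+w])
transparent = cv2.merge([b, g, r, mask_roi])
cv2.imwrite('roi_transparent.png', transparent)  # PNG preserves the alpha channel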

Related

Preprocess images using OpenCV for pytesseract OCR

I want to use OCR (pytesseract) to recognize the text located in images like these:
I have thousands of these arrows. Until now, the procedure is as follows: I first resize the image (for another process). Then I crop the image to get rid of most of the arrow. Next, I draw a white rectangle as a frame to remove further noise but still keep some distance between the text and the image borders for better text recognition. I resize the image again to ensure a capital-letter height of ~30 px (https://groups.google.com/forum/#!msg/tesseract-ocr/Wdh_JJwnw94/24JHDYQbBQAJ). Finally, I binarize the image with a threshold of 150.
Full code:
import cv2
image_file = '001.jpg'
# load the input image and grab the image dimensions
image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
(h_1, w_1) = image.shape[:2]
# resize the image and grab the new image dimensions
image = cv2.resize(image, (int(w_1*320/h_1), 320))
(h_1, w_1) = image.shape
# crop image
image_2 = image[70:h_1-70, 20:w_1-20]
# get image_2 height, width
(h_2, w_2) = image_2.shape
# draw white rectangle as a frame around the number -> remove noise
cv2.rectangle(image_2, (0, 0), (w_2, h_2), (255, 255, 255), 40)
# resize image, that capital letters are ~ 30 px in height
image_2 = cv2.resize(image_2, (int(w_2*50/h_2), 50))
# image binarization
ret, image_2 = cv2.threshold(image_2, 150, 255, cv2.THRESH_BINARY)
# save image to file
cv2.imwrite('processed_' + image_file, image_2)
# tesseract part can be commented out
import pytesseract
config_7 = ("-c tessedit_char_whitelist=0123456789AB --oem 1 --psm 7")
text = pytesseract.image_to_string(image_2, config=config_7)
print("OCR TEXT: " + "{}\n".format(text))
The problem is that the text located in the arrow is never centered. Sometimes I remove part of the text with the method described above (e.g. in image 50A).
Is there a method in image processing to get rid of the arrow in a more elegant way? For instance using contour detection and deletion? I am more interested in the OpenCV part than the tesseract part to recognize the text.
Any help is appreciated.
If you look at the pictures, you will see that there is a white arrow in the image, which is also the biggest contour (especially if you draw a black border on the image). If you make a blank mask, draw the arrow (the biggest contour on the image) onto it, and then erode it a little bit, you can perform a per-element bitwise conjunction of the actual image and the eroded mask. If this is not clear, look at the code and comments below; it is actually pretty simple.
# imports
import cv2
import numpy as np
img = cv2.imread("number.png") # read image
# you can resize the image here if you like - it should still work for both sizes
h, w = img.shape[:2] # get the actual images height and width
img = cv2.resize(img, (int(w*320/h), 320))
h, w = img.shape[:2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # transform to grayscale
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1] # perform Otsu threshold
cv2.rectangle(thresh, (0, 0), (w, h), (0, 0, 0), 2)
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0] # search for contours
max_cnt = max(contours, key=cv2.contourArea) # select biggest one
mask = np.zeros((h, w), dtype=np.uint8) # create a black mask
cv2.drawContours(mask, [max_cnt], -1, (255, 255, 255), -1) # draw biggest contour on the mask
kernel = np.ones((15, 15), dtype=np.uint8) # make a kernel with appropriate values - in both cases (resized and original) 15 is ok
erosion = cv2.erode(mask, kernel, iterations=1) # erode the mask with given kernel
reverse = cv2.bitwise_not(img.copy()) # invert the actual image: 0 becomes 255 and 255 becomes 0
img = cv2.bitwise_and(reverse, reverse, mask=erosion) # per-element bit-wise conjunction of the actual image and eroded mask (erosion)
img = cv2.bitwise_not(img) # invert the image again
# save image to file and display
cv2.imwrite("res.png", img)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
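As a side note, the three bitwise operations at the end can be collapsed into a single masked assignment (a sketch, assuming img and erosion as defined in the code above, applied just before the inversion step):
# Sketch: paint every pixel outside the eroded mask white in one step
img[erosion == 0] = (255, 255, 255)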
You can try a simple Python script:
import cv2
import numpy as np
# Read the image as grayscale and threshold so dark pixels (the text) become white
img = cv2.imread('mmubS.png', cv2.IMREAD_GRAYSCALE)
thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY_INV)[1]
im_flood_fill = thresh.copy()
h, w = thresh.shape[:2]
# Draw a white frame so everything around the text connects to the image border
im_flood_fill = cv2.rectangle(im_flood_fill, (0, 0), (w - 1, h - 1), 255, 2)
# Flood-fill from the corner: everything connected to the border turns black,
# leaving only the enclosed text white
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(im_flood_fill, mask, (0, 0), 0)
# Invert so the text is black on a white background
im_flood_fill = cv2.bitwise_not(im_flood_fill)
cv2.imshow('clear text', im_flood_fill)
cv2.imwrite('text.png', im_flood_fill)
Result:

opencv, python, how to read grouped text in boxes

I would like to read the text from the image in the groups that are shown on the image.
I have managed to remove the first contour (as described below), but the issue is that when I try to read the text, some of it is missing. I expect this is because of other contours that remain on the image, but when I try to remove them, I lose the grouping or part of the text...
for i in range(len(contours)):
    if 800 < cv2.contourArea(contours[i]) < 2000:
        x, y, width, height = cv2.boundingRect(contours[i])
        roi = img[y:y + height, x:x + width]
        roi_h = roi.shape[0]
        roi_w = roi.shape[1]
        resize_roi = cv2.resize(roi, (int(roi_w*6), int(roi_h*6)), interpolation=cv2.INTER_LINEAR)
        afterd = cv2.cvtColor(resize_roi, cv2.COLOR_BGR2GRAY)
        retim, threshm = cv2.threshold(afterd, 210, 225, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        contoursm, hierarchym = cv2.findContours(threshm, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        mask = np.ones(resize_roi.shape[:2], dtype="uint8") * 255
        for m in range(len(contoursm)):
            if 10000 < cv2.contourArea(contoursm[m]) < 33000:
                cv2.drawContours(mask, contoursm, m, 0, 7)
        afterd = cv2.bitwise_not(afterd)
        afterd = cv2.bitwise_and(afterd, afterd, mask=mask)
        afterd = cv2.bitwise_not(afterd)
        print(pytesseract.image_to_string(afterd, lang='eng', config='--psm 3'))
Instead of dealing with all the boxes, I suggest deleting them by finding connected components, and filling the large clusters with background color.
You may use the following stages:
Convert the image to grayscale, apply a threshold, and invert polarity.
Delete all clusters having more than 100 pixels (assuming letters are smaller).
Dilate thresh to unite the text areas into single "blocks".
Find contours on the dilated thresh image.
Find bounding rectangles, and apply OCR to each rectangle.
Here is the complete code sample:
import numpy as np
import cv2
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' # I am using Windows
img = cv2.imread('img.png') # Read input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Convert to Grayscale.
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU) # Convert to binary and invert polarity
nlabel,labels,stats,centroids = cv2.connectedComponentsWithStats(thresh, connectivity=8)
thresh_size = 100
# Delete all lines by filling large clusters with zeros.
for i in range(1, nlabel):
    if stats[i, cv2.CC_STAT_AREA] > thresh_size:
        thresh[labels == i] = 0
# Dilate thresh for uniting text areas to single blocks.
dilated_thresh = cv2.dilate(thresh, np.ones((5,5)))
# Find contours on dilated thresh
contours, hierarchy = cv2.findContours(dilated_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# Iterate contours, find bounding rectangles
for c in contours:
    # Get bounding rectangle
    x, y, w, h = cv2.boundingRect(c)
    # Draw green rectangle for testing
    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), thickness=1)
    # Get the slice with the text (slice with margins).
    afterd = thresh[y-3:y+h+3, x-3:x+w+3]
    # Show afterd as image for testing
    # cv2.imshow('afterd', afterd)
    # cv2.waitKey(100)
    # The OCR works only when image is enlarged and black text?
    resized_afterd = cv2.resize(afterd, (afterd.shape[1]*5, afterd.shape[0]*5), interpolation=cv2.INTER_LANCZOS4)
    print(pytesseract.image_to_string(255 - resized_afterd, lang='eng', config='--psm 3'))
cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result strings after OCR:
DF6DF645
RFFTW
2345
2277
AABBA
DF1267
ABCET5456
Input image with green boxes around the text:
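One caveat about the slice with margins (thresh[y-3:y+h+3, x-3:x+w+3]): when a bounding rectangle touches the image border, the subtraction can produce negative indices, which NumPy interprets as counting from the end. A defensive variant (a sketch) clamps the coordinates first:
# Sketch: clamp the margins so the slice never goes out of bounds
y0, y1 = max(y - 3, 0), min(y + h + 3, thresh.shape[0])
x0, x1 = max(x - 3, 0), min(x + w + 3, thresh.shape[1])
afterd = thresh[y0:y1, x0:x1]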
Update:
Grouping contours:
For grouping contours you may use the hierarchy result of cv2.findContours with cv2.RETR_TREE.
See Contours Hierarchy documentation.
You may use the parent-child relationship for grouping contours.
Here is an incomplete sample code for using the hierarchy:
img = cv2.imread('img.png') # Read input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Convert to Grayscale.
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU) # Convert to binary and invert polarity
nlabel,labels,stats,centroids = cv2.connectedComponentsWithStats(thresh, connectivity=8)
thresh_boxes = np.zeros_like(thresh)
thresh_size = 100
# Delete all lines by filling large clusters with zeros.
# Make new image that contains only boxes - without text
for i in range(1, nlabel):
    if stats[i, cv2.CC_STAT_AREA] > thresh_size:
        thresh[labels == i] = 0
        thresh_boxes[labels == i] = 255
# Find contours on thresh_boxes, use cv2.RETR_TREE to build tree with hierarchy
contours, hierarchy = cv2.findContours(thresh_boxes, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# Iterate contours, and hierarchy
for i, c in enumerate(contours):
    h = hierarchy[0, i, :]
    h_child = h[2]
    # if contour has no child (last level)
    if h_child == -1:
        h_parent = h[3]
        x, y, w, h = cv2.boundingRect(c)
        cv2.putText(img, str(h_parent), (x+w//2-4, y+h//2+8), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 0, 255), thickness=2)
cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
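To go one step further and actually group the boxes, the parent index can serve as a dictionary key (an incomplete sketch in the same spirit, reusing contours and hierarchy from above):
# Sketch: collect bounding rectangles of last-level contours under their parent index
from collections import defaultdict
groups = defaultdict(list)
for i, c in enumerate(contours):
    if hierarchy[0, i, 2] == -1:  # contour has no child (last level)
        groups[hierarchy[0, i, 3]].append(cv2.boundingRect(c))  # key = parent index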

Drawing bounding boxes with Pytesseract / OpenCV

I'm using pytesseract (0.3.2) with openCV (4.1.2) to identify digits in images. While image_to_string is working, image_to_data and image_to_boxes are not. I need to be able to draw the bounding boxes on the images and this has stumped me. I've tried different images, older versions of pytesseract, etc. I'm using Windows and Jupyter Notebooks.
import cv2
import numpy as np
import pytesseract
from PIL import Image
#erosion
def erode(image):
    kernel = np.ones((5,5), np.uint8)
    return cv2.erode(image, kernel, iterations=1)
#grayscale
def get_grayscale(image):
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#thresholding
def thresholding(image):
    #return cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
    return cv2.threshold(image, 200, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
img = cv2.imread('my_image.jpg')
pytesseract.pytesseract.tesseract_cmd = r'C:\mypath\tesseract.exe'
gray = get_grayscale(img)
thresh = thresholding(gray)
eroded = erode(thresh)
custom_config = r'-c tessedit_char_whitelist=0123456789 --psm 6'
print(pytesseract.image_to_string(eroded, config=custom_config))
cv2.imwrite("test.jpg", eroded)
#these return nothing
print(pytesseract.image_to_boxes(Image.open('test.jpg')))
print(pytesseract.image_to_data(Image.open('test.jpg')))
Instead of using image_to_boxes, an alternative approach is to simply find contours with cv2.findContours, obtain the bounding rectangle coordinates with cv2.boundingRect, and draw the bounding box with cv2.rectangle.
Using this sample input image
Drawn boxes
Result from OCR
1234567890
Code
import cv2
import pytesseract
import numpy as np
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Load image, grayscale, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Draw bounding boxes
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 2)
# OCR
data = pytesseract.image_to_string(255 - thresh, lang='eng',config='--psm 6')
print(data)
cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.waitKey()
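If the recognized digits need to be matched to their boxes in reading order, the contours can first be sorted left to right by their bounding-box x coordinate (a small sketch reusing cnts from above):
# Sketch: sort contours left-to-right so box order matches reading order
cnts = sorted(cnts, key=lambda c: cv2.boundingRect(c)[0])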
Please try the following code:
from pytesseract import Output
import pytesseract
import cv2
image = cv2.imread("my_image.jpg")
# OpenCV stores images in BGR order by default, while Tesseract and pytesseract
# assume RGB, so swap the channel ordering from BGR to RGB first:
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
pytesseract.pytesseract.tesseract_cmd = r'C:\mypath\tesseract.exe'
custom_config = r'-c tessedit_char_whitelist=0123456789 --psm 6'
results = pytesseract.image_to_data(rgb, output_type=Output.DICT,lang='eng',config=custom_config)
boxresults = pytesseract.image_to_boxes(rgb,output_type=Output.DICT,lang='eng',config=custom_config)
print(results)
print(boxresults)
for i in range(0, len(results["text"])):
    # extract the bounding box coordinates of the text region from the current result
    tmp_tl_x = results["left"][i]
    tmp_tl_y = results["top"][i]
    tmp_br_x = tmp_tl_x + results["width"][i]
    tmp_br_y = tmp_tl_y + results["height"][i]
    tmp_level = results["level"][i]
    conf = results["conf"][i]
    text = results["text"][i]
    if tmp_level == 5:
        cv2.putText(image, text, (tmp_tl_x, tmp_tl_y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        cv2.rectangle(image, (tmp_tl_x, tmp_tl_y), (tmp_br_x, tmp_br_y), (0, 0, 255), 1)
for j in range(0, len(boxresults["left"])):
    left = boxresults["left"][j]
    bottom = boxresults["bottom"][j]
    right = boxresults["right"][j]
    top = boxresults["top"][j]
    cv2.rectangle(image, (left, top), (right, bottom), (255, 0, 0), 1)
cv2.imshow("image",image)
cv2.waitKey(0)
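One thing to keep in mind with image_to_boxes: Tesseract measures box coordinates from the bottom-left corner of the image, while OpenCV's origin is the top-left, so the blue rectangles above may appear vertically flipped. A hedged fix is to subtract the y values from the image height:
# Sketch: flip the y coordinates, since Tesseract measures from the bottom edge
h_img = image.shape[0]
for j in range(len(boxresults["left"])):
    left = boxresults["left"][j]
    right = boxresults["right"][j]
    top = h_img - boxresults["top"][j]
    bottom = h_img - boxresults["bottom"][j]
    cv2.rectangle(image, (left, top), (right, bottom), (255, 0, 0), 1)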

Recognize single characters on a page with Tesseract

This image returns an empty string; basically, I am trying to make a bot for the WoW game, but I am really new to this OCR thing. I cannot make Tesseract read this image; I want an unordered list of the characters and, if possible, the coordinates of each square containing them. Is there any way to do this?
Thank you for your time!
here is my code:
from PIL import Image
import cv2
from pytesseract import image_to_string
column = Image.open('photo.png')
gray = column.convert('L')
blackwhite = gray.point(lambda x: 255 if x < 200 else 0, '1')
blackwhite.save("code_bw.jpg")
print(image_to_string(cv2.imread("code_bw.jpg")))
You need to do some preprocessing to isolate the text characters. A simple approach is to apply Otsu's threshold to obtain a binary image, then find contours and filter using the aspect ratio and contour area. This gives us the bounding box coordinates of the text, which we can draw onto a mask. We bitwise-and the mask with the input image to get our cleaned image, then throw it into OCR. Here's the result:
Detected text characters
Result
Result from OCR
A
A R
P
Code
import cv2
import pytesseract
import numpy as np
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Load image, grayscale, Otsu's threshold
image = cv2.imread('1.jpg')
original = image.copy()
mask = np.zeros(image.shape, dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Find contours and filter using aspect ratio and area
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    area = cv2.contourArea(c)
    x, y, w, h = cv2.boundingRect(c)
    ar = w / float(h)
    if area > 1000 and ar > .85 and ar < 1.2:
        cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 2)
        cv2.rectangle(mask, (x, y), (x + w, y + h), (255, 255, 255), -1)
        ROI = original[y:y+h, x:x+w]
# Bitwise-and to isolate characters
result = cv2.bitwise_and(original, mask)
result[mask==0] = 255
# OCR
data = pytesseract.image_to_string(result, lang='eng',config='--psm 6')
print(data)
cv2.imshow('image', image)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.waitKey()
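Since the question also asks for the coordinates of each square, a small addition (a sketch reusing the same filtering criteria as the loop above) collects the bounding boxes into a list:
# Sketch: collect the bounding box of every detected square as (x, y, w, h)
boxes = []
for c in cnts:
    area = cv2.contourArea(c)
    x, y, w, h = cv2.boundingRect(c)
    if area > 1000 and 0.85 < w / float(h) < 1.2:
        boxes.append((x, y, w, h))
print(boxes)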

Find area with content and get its bounding rect

I'm using OpenCV 4 - Python 3 - to find a specific area in a black & white image.
This area is not a 100% filled shape. It may have some gaps between the white lines.
This is the base image from where I start processing:
This is the rectangle I expect (made with Photoshop):
Results I got with Hough transform lines (not accurate):
So basically, I start from the first image and I expect to find what you see in the second one.
Any idea of how to get the rectangle of the second image?
I'd like to present an approach which might be computationally less expensive than the solution in fmw42's answer, using only NumPy's nonzero function. Basically, all non-zero indices for both axes are found, and then the minima and maxima are obtained. Since we have binary images here, this approach works pretty well.
Let's have a look at the following code:
import cv2
import numpy as np
# Read image as grayscale; threshold to get rid of artifacts
_, img = cv2.threshold(cv2.imread('images/LXSsV.png', cv2.IMREAD_GRAYSCALE), 0, 255, cv2.THRESH_BINARY)
# Get indices of all non-zero elements
nz = np.nonzero(img)
# Find minimum and maximum x and y indices
y_min = np.min(nz[0])
y_max = np.max(nz[0])
x_min = np.min(nz[1])
x_max = np.max(nz[1])
# Create some output
output = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.rectangle(output, (x_min, y_min), (x_max, y_max), (0, 0, 255), 2)
# Show results
cv2.imshow('img', img)
cv2.imshow('output', output)
cv2.waitKey(0)
cv2.destroyAllWindows()
I borrowed the cropped image from fmw42's answer as input, and my output should be the same (or very similar):
Hope that (also) helps!
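Equivalently (a sketch, assuming the same binary img), OpenCV's own helpers give essentially the same rectangle in two calls:
# Sketch: same bounding box via OpenCV helpers
coords = cv2.findNonZero(img)          # Nx1x2 array of (x, y) points
x, y, w, h = cv2.boundingRect(coords)  # tight rectangle around all non-zero pixels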
In Python/OpenCV, you can use morphology to connect all the white parts of your image and then get the outer contour. Note I have modified your image to remove the parts at the top and bottom from your screen snap.
import cv2
import numpy as np
# read image
img = cv2.imread('blackbox.png')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
_,thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY)
# apply close to connect the white areas
kernel = np.ones((75,75), np.uint8)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get contours (presumably just one around the outside)
result = img.copy()
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    x, y, w, h = cv2.boundingRect(cntr)
    cv2.rectangle(result, (x, y), (x+w, y+h), (0, 0, 255), 2)
# show thresh and result
cv2.imshow("thresh", thresh)
cv2.imshow("Bounding Box", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save resulting images
cv2.imwrite('blackbox_thresh.png',thresh)
cv2.imwrite('blackbox_result.png',result)
Input:
Image after morphology:
Result:
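As a rule of thumb, the closing kernel has to be at least as large as the widest gap it should bridge, which is why a 75x75 kernel is used here. If that is too slow, a hedged alternative is several iterations of a smaller kernel, which covers roughly the same footprint more cheaply (in OpenCV, closing with iterations=3 applies three dilations followed by three erosions):
# Sketch: three iterations of a 25x25 kernel approximate one ~75x75 closing
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)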
Here's a slight modification to fmw42's answer. The idea of connecting the desired regions into a single contour is very similar; however, you can find the bounding rectangle directly since there's only one object. Using the same cropped input image, here's the result.
We can optionally extract the ROI too:
import cv2
# Grayscale, threshold, and dilate
image = cv2.imread('3.png')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Connect into a single contour and find rect
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=1)
x,y,w,h = cv2.boundingRect(dilate)
ROI = original[y:y+h,x:x+w]
cv2.rectangle(image, (x, y), (x+w, y+h), (36, 255, 12), 2)
cv2.imshow('image', image)
cv2.imshow('ROI', ROI)
cv2.waitKey()
