Problem statement:
Given a fundus image, I need to suppress the blood vessels so that they do not appear in the image seen by the classifier.
There were a few thoughts, but the easiest approach was to segment out a rough outline of the vasculature. This is achieved.
Next is to get the colour of the surrounding area and use it to blend over the white regions found, then merge the result back into the original image.
Any suggestions for identifying the colour, blending it in, and then merging it back into the original image so that the vessels are not visible? One thought was to take the contour of each white area, find the corresponding colour just outside it, and use that to blend it back.
Any alternate approach is also welcome.
EyePACS image - the vessels are visible; I want to recolour them to match their surroundings so that they are not visible.
Here is the code:
import cv2
import numpy as np

def apply_threshold_with_denoising(image):
    image = cv2.adaptiveThreshold(image, 250, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
    # dst must be passed (as None) before the filter strength h
    image = cv2.fastNlMeansDenoising(image, None, 1.5, 5, 5)
    return image
def delete_small_components(image, size):
    _, blackAndWhite = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(blackAndWhite, None, None, None, 8, cv2.CV_32S)
    sizes = stats[1:, -1]  # get CC_STAT_AREA component
    image = np.zeros(labels.shape, np.uint8)
    for i in range(0, nlabels - 1):
        if sizes[i] >= size:  # filter small dotted regions (was hard-coded to 150, ignoring the size argument)
            image[labels == i + 1] = 255
    return cv2.bitwise_not(image)
def kernel(num1, num2):
    return np.ones((num1, num2), np.uint8)
def resize(img):
    scale_percent = 50  # percent of original size
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
def get_large_vessels(image):
    struct_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 4))
    opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, struct_kernel, iterations=1)
    cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        area = cv2.contourArea(c)
        if area < 250:
            cv2.drawContours(opening, [c], -1, (0, 0, 0), -1)
    return opening
def get_small_vessels(both, large):
    large = cv2.dilate(large, kernel(3, 3), iterations=5)
    subtract = cv2.subtract(both, large)
    return subtract
def remove_background(image, mask):
    image = cv2.bitwise_and(cv2.bitwise_not(image), cv2.bitwise_not(image), mask=mask)
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel(2, 2))
    return image
img = cv2.imread('2001_left.jpeg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
l_b = np.array([0, 0, 30])
u_b = np.array([255, 255, 255])
mask = cv2.inRange(hsv, l_b, u_b)
mask = cv2.erode(mask, kernel(2, 2), iterations=5)
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = apply_threshold_with_denoising(grayscale)
kernel22 = cv2.dilate(threshold, kernel(2, 2), iterations=2)
remove_small1kernel22 = delete_small_components(kernel22, 5)
dilation = cv2.dilate(threshold, kernel(2, 1), iterations=2)
remove_small1 = delete_small_components(dilation, 150)
dilation = cv2.dilate(threshold, kernel(1, 2), iterations=2)
remove_small2 = delete_small_components(dilation, 150)
merge = cv2.addWeighted(remove_small1, 0.5, remove_small2, 0.5, 0)
threshold_merge = apply_threshold_with_denoising(merge)
remove_small3 = delete_small_components(threshold_merge, 150)
large_vessels = get_large_vessels(remove_background(remove_small3, mask))
cv2.imshow('Large blood vasculature', large_vessels)
small_vessels = get_small_vessels(remove_background(remove_small3, mask), large_vessels)
cv2.imshow('small vasculature', small_vessels)
cv2.waitKey(0)
I tried the above code.
It produces the vessel maps, both small and large.
I need a way to use this map to recolour the vessels with a mean colour value taken from just outside each contour, so that they blend in when put back.
Is there a way to just identify each segment, colour it, and then merge it back?
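A minimal sketch of two possible fills, assuming img, large_vessels and small_vessels from the code above are the same size and that non-zero pixels mark vessels (untested on the EyePACS data; the 3/5/7-pixel sizes are guesses to tune):
import cv2
import numpy as np

# Option 1: inpainting. Treat the vessel map as a mask and let
# cv2.inpaint fill the masked pixels from the surrounding retina colour.
vessel_mask = cv2.bitwise_or(large_vessels, small_vessels)
# Dilate a little so the fill also covers the vessel edges,
# otherwise a faint outline tends to remain.
vessel_mask = cv2.dilate(vessel_mask, np.ones((3, 3), np.uint8), iterations=1)
vessel_free = cv2.inpaint(img, vessel_mask, 5, cv2.INPAINT_TELEA)  # cv2.INPAINT_NS is an alternative

# Option 2: the per-contour idea from the question. For each white blob,
# sample the mean colour of a thin ring just outside it and paint the
# blob with that colour. Cruder than inpainting, but simple.
result = img.copy()
contours = cv2.findContours(vessel_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for c in contours:
    blob = np.zeros(vessel_mask.shape, np.uint8)
    cv2.drawContours(blob, [c], -1, 255, -1)
    ring = cv2.dilate(blob, np.ones((7, 7), np.uint8)) - blob
    ring[vessel_mask > 0] = 0  # keep only non-vessel pixels in the ring
    if cv2.countNonZero(ring) > 0:
        result[blob == 255] = cv2.mean(img, mask=ring)[:3]

cv2.imshow('inpainted', vessel_free)
cv2.imshow('mean-colour fill', result)
cv2.waitKey(0)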
I'm trying to get the watermelon from the image. I have tried HSV segmentation and GrabCut, but they don't give me the output I wanted. How can I get only the watermelon? Any method will do, except for neural networks, since I'm new to image processing.
import cv2 as cv
import numpy as np

image = cv.imread('watermelon.png')  # input image (name assumed from the answer below)
hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)

# hsv segmentation
lb = (20, 50, 0)
ub = (100, 255, 255)
mask_hsv = cv.inRange(hsv, lb, ub)
image_copy = image.copy()
morph = cv.erode(mask_hsv, np.ones((3, 3), np.uint8), iterations=4)  # a bare (3, 3) tuple is not a valid kernel
output = cv.bitwise_and(image_copy, image_copy, mask=morph)
After the HSV segmentation, I find the largest contour in the image.
draw = image.copy()
contours, h = cv.findContours(morph, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
max_area = 0
cnt = None
if len(contours) != 0:  # contours is a list, so compare its length, not the list itself
    for contour in contours:
        area = cv.contourArea(contour)
        if max_area < area:
            max_area = area
            cnt = contour
else:
    print('No contours found!')
Then I find the boundingRect of the biggest contour and feed it to the grabCut function.
x, y, w, h = cv.boundingRect(cnt)  # boundingRect returns (x, y, w, h), not (x, y, h, w)
rect = (x, y, w, h)
output_rect = image.copy()
mask = np.ones(image.shape[:2], dtype=np.uint8) * cv.GC_PR_BGD
bgdModel = np.zeros((1, 65), dtype=np.float64)
fgdModel = np.zeros((1, 65), dtype=np.float64)
# performs grabCut, initialised from the bounding rectangle
cv.grabCut(output_rect, mask, rect, bgdModel, fgdModel, 100, cv.GC_INIT_WITH_RECT)
# map (probable) background labels to 0 and everything else to 1
mask = np.where((mask == cv.GC_BGD) | (mask == cv.GC_PR_BGD), 0, 1).astype('uint8')
mask *= 255
# applying the generated mask to the image
output_image = cv.bitwise_and(output_rect, output_rect, mask=mask)
# change black pixels to white
black_pixels = np.where(
    (output_image[:, :, 0] == 0) & (output_image[:, :, 1] == 0) & (output_image[:, :, 2] == 0)
)
output_image[black_pixels] = [255, 255, 255]
Original Image
Biggest contour found
Output after grabcut
I got the correct output by drawing your detected contours on a blank image, then applying erosion, and finally running HoughCircles. Feel free to change the hyper-parameters to obtain better results.
Code:
import cv2
import numpy as np

image = cv2.imread("watermelon.png")
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
image_copy = image.copy()
lower_boundary = (20, 50, 0)
upper_boundary = (100, 255, 255)
mask_hsv = cv2.inRange(image_hsv, lower_boundary, upper_boundary)
blank_im = np.zeros((image.shape[0], image.shape[1], 1), dtype=np.uint8)
morph = cv2.erode(mask_hsv, np.ones((3, 3), np.uint8), iterations=4)  # use a real kernel, not a bare tuple
output = cv2.bitwise_and(image, image, mask=morph)
# Finding Contours
contours, _ = cv2.findContours(morph, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if len(contours) != 0:
    # Rather than using a long for loop, this is how you can find the max value with a key function
    contour = max(contours, key=cv2.contourArea)
else:
    print("No contours found!")
# Draw Contours on Blank Image (np.zeros() with same shape as original image)
cv2.drawContours(blank_im, [contour], -1, (255, 255, 255), 10)
# Eroding the Blank Image with Drawn Contours
blank_im = cv2.erode(blank_im, np.ones((3, 3), np.uint8), iterations=4)
# Find The Possible Circles (Feel Free to Change Hyperparameters)
circles = cv2.HoughCircles(blank_im, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=0, maxRadius=0)
if circles is not None:  # HoughCircles returns None when nothing is found
    circles = np.uint16(np.around(circles))
    # Draw each circle in green and its center in red
    for i in circles[0, :]:
        cv2.circle(image, (i[0], i[1]), i[2], (0, 255, 0), 2)
        cv2.circle(image, (i[0], i[1]), 2, (0, 0, 255), 3)
# Display Images
cv2.imshow('Original Image', image_copy)
cv2.imshow('Drawn Contours On Blank Image', blank_im)
cv2.imshow('Detected Circle', image)
cv2.waitKey(0)
Output:
I have this image:
And I process it in this way, in order to pass it to Tesseract in the best way possible:
import os
import cv2
import numpy as np
from typing import Tuple

TEMP_FOLDER = 'temp/'  # assumed; must exist before the first imwrite
os.makedirs(TEMP_FOLDER, exist_ok=True)

img = cv2.imread('KQ49Y.png', cv2.IMREAD_GRAYSCALE)  # the input image shown above
sharpened = unsharp_mask(img, amount=1.5)
cv2.imwrite(TEMP_FOLDER + 'sharpened.png', sharpened)
thr = cv2.threshold(sharpened, 220, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# im = cv2.resize(thr, None, fx=2, fy=2, interpolation=cv2.INTER_AREA)
cv2.imwrite(TEMP_FOLDER + 'inverted.png', thr)
inverted = cv2.imread(TEMP_FOLDER + 'inverted.png')
filtered_inverted = remove_black_boundaries(inverted)
filtered_inverted = cv2.resize(filtered_inverted, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
# kernel = np.ones((2, 2), np.uint8)
# filtered_inverted = cv2.dilate(filtered_inverted, kernel)
cv2.imwrite(TEMP_FOLDER + 'filtered.png', filtered_inverted)
median = cv2.medianBlur(filtered_inverted, 5)
# median = cv2.cvtColor(median, cv2.COLOR_RGB2GRAY)
# median = cv2.threshold(median, 127, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite(TEMP_FOLDER + 'median.png', median)
The function unsharp_mask is defined as:
def unsharp_mask(image: np.ndarray, kernel_size: Tuple[int, int] = (5, 5),
                 sigma: float = 1.0, amount: float = 1.0, threshold: float = 0) -> np.ndarray:
    """Return a sharpened version of the image, using an unsharp mask."""
    blurred = cv2.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        # cast before subtracting so uint8 arithmetic cannot wrap around
        low_contrast_mask = np.absolute(image.astype(np.int16) - blurred.astype(np.int16)) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
And the function remove_black_boundaries (in this case it is useless, since there are no black boundaries in the image) is defined as:
def remove_black_boundaries(img: np.ndarray) -> np.ndarray:
    hh, ww = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    thresh = cv2.erode(thresh, np.ones((3, 3), np.uint8))
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    cnt = max(contours, key=cv2.contourArea)
    # draw white contour on black background as mask
    mask = np.zeros((hh, ww), dtype=np.uint8)
    cv2.drawContours(mask, [cnt], 0, (255, 255, 255), cv2.FILLED)
    # invert mask so shapes are white on black background
    mask_inv = 255 - mask
    # create new (white) background
    bckgnd = np.full_like(img, (255, 255, 255))
    # apply mask to image
    image_masked = cv2.bitwise_and(img, img, mask=mask)
    # apply inverse mask to background
    bckgnd_masked = cv2.bitwise_and(bckgnd, bckgnd, mask=mask_inv)
    # add together
    result = cv2.add(image_masked, bckgnd_masked)
    return result
So I get the sharpened image as:
And I get the inverted (and the filtered) image as:
So, the image passed to Tesseract is:
But what I get from Tesseract is Conteggio: 2900, without the first line. I also tried resizing the image, but I got the same output. Any idea how I can improve the image sent to Tesseract?
You haven't shown your actual pytesseract code, but without any (pre)processing, I get the correct result solely by switching to page segmentation method 6, which is:
Assume a single uniform block of text.
import cv2
import pytesseract
img = cv2.imread('KQ49Y.png', cv2.IMREAD_GRAYSCALE)
text = pytesseract.image_to_string(img, config='--psm 6')
print(text.replace('\f', ''))
# Facebook
# Conteggio: 2900
----------------------------------------
System information
----------------------------------------
Platform: Windows-10-10.0.19042-SP0
Python: 3.9.6
PyCharm: 2021.2
OpenCV: 4.5.3
pytesseract: 5.0.0-alpha.20201127
----------------------------------------
I am trying to remove the shadow of the pipe in the following image.
I use the following code to isolate the pipe with the shadow, but I cannot find a way to remove the shadow.
img = cv2.imread("myimage.png",cv2.IMREAD_UNCHANGED)
blurred = cv2.GaussianBlur(img, (5, 5), 0)
gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY_INV)[1]
cnts, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img_X = img.shape[1] / 2
img_Y = img.shape[0]
img_cnts = None
img_distance = None
for c in cnts:
area = cv2.contourArea(c)
if area < 500:
continue
M = cv2.moments(c)
cnt_X = int(M["m10"] / M["m00"])
cnt_Y = int(M["m01"] / M["m00"])
cnt_distance = math.sqrt(sum((px - qx) ** 2.0 for px, qx in zip([cnt_X, cnt_Y], [img_X, img_Y])))
if img_distance == None or img_distance > cnt_distance:
img_cnts = c
img_distance = cnt_distance
mask = np.zeros_like(img) # Create mask where white is what we want, black otherwise
cv2.drawContours(mask, [img_cnts], -1, (255,255,255), -1) # Draw filled contour in mask
out = np.zeros_like(img) # Extract out the object and place into output image
out[mask == 255] = img[mask == 255]
This is the result of the previous code, which is pretty close to what I need.
I tried to use cv2.adaptiveThreshold and cv2.createBackgroundSubtractorMOG, without luck.
Well, if you apply adaptive-threshold along with a bitwise-not operation, the result will be:
Of course, with different blockSize and C parameters you will get different results.
Where: (source)
blockSize: determines the size of the neighbourhood area
C: constant that is subtracted from the mean or weighted sum of the neighbourhood pixels.
Code:
import cv2
img = cv2.imread("pdUqL.png")
gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thr = cv2.adaptiveThreshold(gry, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                            cv2.THRESH_BINARY_INV, blockSize=33, C=31)
bnt = cv2.bitwise_not(thr)
cv2.imshow("out", bnt)
cv2.waitKey(0)
There is no good way to remove the shadow; you need to fix your lighting. Use "softboxes" (indirect lighting) and multiple lights from multiple angles, and whenever you have mirror-like surfaces, arrange the lights so they don't reflect off them into the camera.
Need your help. I'm writing a Python script to recognize text in a shape. This shape can be captured from RTSP (an IP camera) at any angle.
For an example, see the attached file. My code is below, but the coordinates to crop the rotated shape are set manually:
import cv2
import numpy as np
def main():
    fn = cv2.VideoCapture("rtsp://admin:Admin123-#172.16.10.254")
    flag, img = fn.read()
    cnt = np.array([
        [[64, 49]],
        [[122, 11]],
        [[391, 326]],
        [[308, 373]]
    ])
    print("shape of cnt: {}".format(cnt.shape))
    rect = cv2.minAreaRect(cnt)
    print("rect: {}".format(rect))
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    print("bounding box: {}".format(box))
    cv2.drawContours(img, [box], 0, (0, 255, 0), 2)
    img_crop, img_rot = crop_rect(img, rect)
    print("size of original img: {}".format(img.shape))
    print("size of rotated img: {}".format(img_rot.shape))
    print("size of cropped img: {}".format(img_crop.shape))
    new_size = (int(img_rot.shape[1] / 2), int(img_rot.shape[0] / 2))
    img_rot_resized = cv2.resize(img_rot, new_size)
    new_size = (int(img.shape[1] / 2), int(img.shape[0] / 2))
    img_resized = cv2.resize(img, new_size)
    cv2.imshow("original contour", img_resized)
    cv2.imshow("rotated image", img_rot_resized)
    cv2.imshow("cropped_box", img_crop)
    # cv2.imwrite("crop_img1.jpg", img_crop)
    cv2.waitKey(0)
def crop_rect(img, rect):
    # get the parameters of the rotated rectangle
    center = rect[0]
    size = rect[1]
    angle = rect[2]
    center, size = tuple(map(int, center)), tuple(map(int, size))
    # get row and col num in img
    height, width = img.shape[0], img.shape[1]
    print("width: {}, height: {}".format(width, height))
    M = cv2.getRotationMatrix2D(center, angle, 1)
    img_rot = cv2.warpAffine(img, M, (width, height))
    img_crop = cv2.getRectSubPix(img_rot, size, center)
    return img_crop, img_rot
if __name__ == "__main__":
main()
example pic
You may start with the example in the following post.
The code sample detects the license plate, and it also detects your "shape" with text.
After detecting the "shape" with the text, you may use the following stages:
Apply a threshold to the cropped area.
Find contours, and keep the contour with the maximum area.
Build a mask, and mask the area outside the contour (as in the license plate example).
Use minAreaRect (as fmw42 commented), and get the angle of the rectangle.
Rotate the cropped area (by angle+90 degrees).
Apply OCR using pytesseract.image_to_string.
Here is the complete code:
import cv2
import numpy as np
import imutils
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' # I am using Windows
# Read the input image
img = cv2.imread('Admin123.jpg')
# Reused code:
# https://stackoverflow.com/questions/60977964/pytesseract-not-recognizing-text-as-expected/60979089#60979089
################################################################################
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert to grey scale
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200) #Perform Edge detection
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None
# loop over our contours
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.018 * peri, True)
    # if our approximated contour has four points, then
    # we can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break
# Masking the part other than the "shape"
mask = np.zeros(gray.shape, np.uint8)
new_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1)
new_image = cv2.bitwise_and(img, img, mask=mask)
# Now crop
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
cropped = gray[topx:bottomx+1, topy:bottomy+1]
################################################################################
# Apply threshold the cropped area
_, thresh = cv2.threshold(cropped, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Find contours
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
# Get contour with maximum area
c = max(cnts, key=cv2.contourArea)
# Build a mask (same as the code above)
mask = np.zeros(cropped.shape, np.uint8)
new_cropped = cv2.drawContours(mask, [c], 0, 255, -1)
new_cropped = cv2.bitwise_and(cropped, cropped, mask=mask)
# Draw green rectangle for testing
test = cv2.cvtColor(new_cropped, cv2.COLOR_GRAY2BGR)
cv2.drawContours(test, [c], -1, (0, 255, 0), thickness=2)
# Use minAreaRect as fmw42 commented
rect = cv2.minAreaRect(c)
angle = rect[2] # Get angle of the rectangle
# Rotate the cropped rectangle.
rotated_cropped = imutils.rotate(new_cropped, angle + 90)
# Read the text in the "shape"
text = pytesseract.image_to_string(rotated_cropped, config='--psm 3')
print("Extracted text is:\n\n", text)
# Show images for testing:
cv2.imshow('cropped', cropped)
cv2.imshow('thresh', thresh)
cv2.imshow('test', test)
cv2.imshow('rotated_cropped', rotated_cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()
OCR output result:
AB12345
DEPARTMENT OF
INFORMATION
COMMUNICATION
TECHNOLOGY
cropped:
thresh:
test:
rotated_cropped:
I have lots of scanned images of handwritten digits inside rectangles (small ones).
Please help me crop each region containing digits and save the pieces, giving the same name to each row.
import cv2
img = cv2.imread('Data\Scan_20170612_4.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200)
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
i = 0
for c in contours:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.09 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
        cv2.imwrite('cropped\\' + str(i) + '_img.jpg', img)
        i += 1
Here is my version:
import cv2
import numpy as np
fileName = ['9','8','7','6','5','4','3','2','1','0']
img = cv2.imread('Data\Scan_20170612_17.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
kernel = np.ones((5,5),np.uint8)
erosion = cv2.erode(gray,kernel,iterations = 2)
kernel = np.ones((4,4),np.uint8)
dilation = cv2.dilate(erosion,kernel,iterations = 2)
edged = cv2.Canny(dilation, 30, 200)
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
rects = [cv2.boundingRect(cnt) for cnt in contours]
rects = sorted(rects,key=lambda x:x[1],reverse=True)
i = -1
j = 1
y_old = 5000
x_old = 5000
for rect in rects:
    x, y, w, h = rect
    area = w * h
    if area > 47000 and area < 70000:
        if (y_old - y) > 200:
            i += 1
            y_old = y
        if abs(x_old - x) > 300:
            x_old = x
            x, y, w, h = rect
            out = img[y+10:y+h-10, x+10:x+w-10]
            cv2.imwrite('cropped\\' + fileName[i] + '_' + str(j) + '.jpg', out)
            j += 1
That's an easy thing if you try. Here's my output (the image and one small bit of it).
What did I do?
Resized the image first, because it was too big on my screen.
Eroded and dilated to remove small dots and thicken the lines.
Thresholded the image.
Flood-filled, beginning at the right point.
Inverted the flood fill.
Found contours and drew them one at a time, keeping those approximately in the area range of the rectangles. For my resized (500x500) image I put the contour area in the range 500 to 2500 (trial and error, anyway).
Found the bounding rectangle and cropped that mask from the main image.
Then saved each piece with a proper name, which I didn't do here.
Maybe there's a simpler way, but I liked this. The code below is a bit clumsy, but it works.
Here's how the mask looks when you find the contours one at a time.
Code:
import cv2
import numpy as np

# Run the code with the image name, keep pressing the space bar
# Change the kernel, iterations, contour area, and position accordingly
# These values work for the present image
img = cv2.imread("your_image.jpg", 0)
h, w = img.shape[:2]
kernel = np.ones((15, 15), np.uint8)
e = cv2.erode(img, kernel, iterations=2)
d = cv2.dilate(e, kernel, iterations=1)
ret, th = cv2.threshold(d, 150, 255, cv2.THRESH_BINARY_INV)
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(th, mask, (200, 200), 255)  # seed position = (200, 200)
out = cv2.bitwise_not(th)
out = cv2.dilate(out, kernel, iterations=3)
cnt, h = cv2.findContours(out, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(len(cnt)):
    area = cv2.contourArea(cnt[i])
    if area > 10000 and area < 100000:
        mask = np.zeros_like(img)
        cv2.drawContours(mask, cnt, i, 255, -1)
        x, y, w, h = cv2.boundingRect(cnt[i])
        crop = img[y:y + h, x:x + w]
        cv2.imshow("snip", crop)
        if cv2.waitKey(0) == 27:
            break
cv2.destroyAllWindows()
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
You are using cv2.RETR_LIST to find contours in the image. For your image, to get better output, use cv2.RETR_EXTERNAL. Before using that, first remove the black border line from the image.
cv2.RETR_LIST gives you a list of all the contours in the image
cv2.RETR_EXTERNAL gives you only the external or outer contours, not the internal ones
Change the line to
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Contours Hierarchy
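As a quick sanity check (a sketch, assuming edged is the Canny output from the question's code), you can compare how many contours each retrieval mode returns:
# With digits inside boxes, RETR_LIST also returns the inner edge of every
# rectangle, while RETR_EXTERNAL returns only the outermost outlines.
_, all_contours, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
_, outer_contours, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print(len(all_contours), 'contours with RETR_LIST')
print(len(outer_contours), 'contours with RETR_EXTERNAL')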