In the above image, how can I get a clean line block — removing the blocks above and below the middle long broadband strip? I have tried projection, but it failed.
def hProject(binary):
    """Horizontal projection: count the 255-valued pixels in each row.

    binary: 2-D uint8 array whose foreground pixels equal 255.
    Returns a list of length binary.shape[0] with the per-row counts.
    """
    # Vectorized row count replaces the original O(h*w) Python double loop;
    # the full-size `hprojection` buffer was allocated but never used, so it
    # is dropped.
    return (binary == 255).sum(axis=1).tolist()
def creat_T_rectangle(h, w, mode='x_up'):
    """Build a T-shaped binary template from (h, w) blocks.

    The template is a 2h x 3w (x modes) or 3h x 2w (y modes) array of ones
    with two corner blocks zeroed, so the remaining ones form a "T" whose
    stem points in the given direction.

    Returns (template, (y1, y2, x1, x2)) where the tuple locates the stem
    block inside the template.
    """
    assert mode in ['x_up', 'x_down', 'y_left', 'y_right']
    if mode == 'x_up':
        up_t = np.ones((h * 2, w * 3), np.uint8)
        up_t[:h, :w] = 0       # clear top-left block
        up_t[:h, 2 * w:] = 0   # clear top-right block
        return up_t, (0, h, w, 2 * w)  # y1, y2, x1, x2
    elif mode == 'y_left':
        left_t = np.ones((h * 3, w * 2), np.uint8)
        left_t[:h, :w] = 0       # clear top-left block
        left_t[2 * h:, :w] = 0   # clear bottom-left block
        return left_t, (h, 2 * h, 0, w)
    elif mode == 'x_down':
        down_t = np.ones((h * 2, w * 3), np.uint8)
        down_t[h:2 * h, :w] = 0      # clear bottom-left block
        down_t[h:2 * h, 2 * w:] = 0  # clear bottom-right block
        return down_t, (h, 2 * h, w, 2 * w)
    else:
        # mode == 'y_right'; the assert above already rules everything else
        # out, so the original trailing `raise NotImplementedError` branch
        # was unreachable and has been removed.
        right_t = np.ones((h * 3, w * 2), np.uint8)
        right_t[:h, w:2 * w] = 0  # clear top-right block
        right_t[2 * h:, w:] = 0   # clear bottom-right block
        return right_t, (h, 2 * h, w, 2 * w)
def remove_around_rectangle(markers, bh, bw):
    '''
    markers:binary image, bh, bw = 5, 5 ...
    '''
    # Pass 1 (horizontal): slide 2bh x 3bw windows over the mask and match
    # them against the up/down T templates; where a full T of 255s is found,
    # blank the stem block so only the central strip survives.
    up_t, up_rect = creat_T_rectangle(bh, bw, mode='x_up')
    down_t, down_rect = creat_T_rectangle(bh, bw, mode='x_down')
    one_nums = up_t.sum()
    for row in range(bh, markers.shape[0] - bh):
        for col in range(bw, markers.shape[1] - 2 * bw, bw):
            window = markers[row - bh:row + bh, col - bw:col + 2 * bw]
            up_hits = (window * up_t).sum() // 255
            down_hits = (window * down_t).sum() // 255
            if down_hits == one_nums and up_hits == one_nums:
                markers[down_rect[0] + row - bh:down_rect[1] + row - bh,
                        down_rect[2] + col - bw:down_rect[3] + col - bw] = 0
            else:
                if up_hits == one_nums:
                    markers[up_rect[0] + row - bh:up_rect[1] + row - bh,
                            up_rect[2] + col - bw:up_rect[3] + col - bw] = 0
                if down_hits == one_nums:
                    markers[down_rect[0] + row - bh:down_rect[1] + row - bh,
                            down_rect[2] + col - bw:down_rect[3] + col - bw] = 0
    # Pass 2 (vertical): same idea with 3bh x 2bw windows and the
    # left/right templates.
    left_t, left_rect = creat_T_rectangle(bh, bw, mode='y_left')
    one_nums = left_t.sum()
    right_t, right_rect = creat_T_rectangle(bh, bw, mode='y_right')
    for row in range(bh, markers.shape[0] - 2 * bh):
        for col in range(bw, markers.shape[1] - bw, bw):
            window = markers[row - bh:row + 2 * bh, col - bw:col + bw]
            left_hits = (window * left_t).sum() // 255
            right_hits = (window * right_t).sum() // 255
            if left_hits == one_nums == right_hits:
                markers[left_rect[0] + row - bh:left_rect[1] + row - bh,
                        left_rect[2] + col - bw:left_rect[3] + col - bw] = 0
            else:
                if right_hits == one_nums:
                    markers[right_rect[0] + row - bh:right_rect[1] + row - bh,
                            right_rect[2] + col - bw:right_rect[3] + col - bw] = 0
                if left_hits == one_nums:
                    markers[left_rect[0] + row - bh:left_rect[1] + row - bh,
                            left_rect[2] + col - bw:left_rect[3] + col - bw] = 0
    return markers
The above is my code, but it is very slow.
Related
import cv2
import numpy as np
import time
from tensorflow.keras.models import load_model
sign_model = load_model('best_model.h5')
def detect_lines(image):
    """Run probabilistic Hough on a binary image and return raw segments.

    Returns the array from cv2.HoughLinesP (one [x1, y1, x2, y2] row per
    segment), or None when nothing is detected.
    """
    # tuning min_threshold, minLineLength, maxLineGap is a trial-and-error
    # process by hand.  (This note was a bare prose line in the paste and
    # broke the syntax; restored as a comment.)
    rho = 1  # distance precision in pixels, i.e. 1 pixel
    angle = np.pi / 180  # angular precision in radians, i.e. 1 degree
    min_threshold = 10  # minimal number of votes
    lines = cv2.HoughLinesP(image, rho, angle, min_threshold, np.array([]),
                            minLineLength=8, maxLineGap=4)
    return lines
def mean_lines(frame, lines):
    """Fuse the Hough segments into one left and one right lane line.

    frame: image whose shape defines the drawing canvas.
    lines: output of detect_lines (may be None).
    Returns (overlay, current_pix): a black image with the two fitted lane
    lines drawn on it, and the x midpoint between the lane tops (falls back
    to 128 when fitting fails).
    """
    a = np.zeros_like(frame)
    try:
        left_line_x = []
        left_line_y = []
        right_line_x = []
        right_line_y = []
        for line in lines:
            for x1, y1, x2, y2 in line:
                slope = (y2 - y1) / (x2 - x1)  # calculating the slope
                if abs(slope) < 0.5:  # only consider sufficiently steep segments
                    continue
                if slope <= 0:  # negative slope -> left group
                    left_line_x.extend([x1, x2])
                    left_line_y.extend([y1, y2])
                else:  # otherwise -> right group
                    right_line_x.extend([x1, x2])
                    right_line_y.extend([y1, y2])
        min_y = int(frame.shape[0] * (3 / 5))  # just below the horizon
        max_y = int(frame.shape[0])  # the bottom of the image
        # Fit x = f(y) so near-vertical lane lines are well-conditioned.
        poly_left = np.poly1d(np.polyfit(
            left_line_y,
            left_line_x,
            deg=1
        ))
        left_x_start = int(poly_left(max_y))
        left_x_end = int(poly_left(min_y))
        poly_right = np.poly1d(np.polyfit(
            right_line_y,
            right_line_x,
            deg=1
        ))
        right_x_start = int(poly_right(max_y))
        right_x_end = int(poly_right(min_y))
        cv2.line(a, (left_x_start, max_y), (left_x_end, min_y), [255, 255, 0], 5)
        cv2.line(a, (right_x_start, max_y), (right_x_end, min_y), [255, 255, 0], 5)
        current_pix = (left_x_end + right_x_end) / 2
    except Exception:
        # Was a bare `except:`.  Expected failures: lines is None (not
        # iterable), an empty group makes polyfit raise, or a vertical
        # segment divides by zero.  Fall back to the image centre.
        current_pix = 128
    return a, current_pix
def region_of_interest(image):
    """Keep only the road-shaped trapezoid of a single-channel mask.

    Returns the image with everything outside the polygon (and everything
    above row 170) zeroed.
    """
    (height, width) = image.shape
    mask = np.zeros_like(image)
    # NOTE(review): the pasted code was missing the closing `]],` of this
    # array literal (it ended with a bare `np.int32)`); restored here.
    polygon = np.array([[
        (0, height),
        (0, 180),
        (80, 130),
        (256 - 80, 130),
        (width, 180),
        (width, height),
    ]], np.int32)
    cv2.fillPoly(mask, polygon, 255)
    # BUG FIX: the original did `image * (mask)`; the mask is filled with
    # 255, so the uint8 multiplication overflows and garbles the pixels.
    # bitwise_and keeps the pixels inside the polygon unchanged.
    masked_image = cv2.bitwise_and(image, mask)
    masked_image[:170, :] = 0
    return masked_image
def horiz_lines(mask):
    """Return True when a roughly horizontal segment crosses the stop-line ROI."""
    roi = mask[160:180, 96:160]  # fixed window just in front of the car
    try:
        lines = detect_lines(roi)
        lines = lines.reshape(-1, 2, 2)  # AttributeError when lines is None
        slope = (lines[:, 1, 1] - lines[:, 0, 1]) / (lines[:, 1, 0] - lines[:, 0, 0])
        if (lines[np.where(abs(slope) < 0.2)]).shape[0] != 0:
            # at least one segment with |slope| < 0.2 counts as horizontal
            detected = True
        else:
            detected = False
    except Exception:
        # Was a bare `except:`; detect_lines returns None when nothing is
        # found, which is the expected failure here.
        detected = False
    return detected
def turn_where(mask):
    """Mean x position of the horizontal segments found in the turn ROI."""
    roi = mask[100:190, :]
    cv2.imshow('turn where', roi)
    segments = detect_lines(roi).reshape(-1, 2, 2)
    dy = segments[:, 1, 1] - segments[:, 0, 1]
    dx = segments[:, 1, 0] - segments[:, 0, 0]
    slope = dy / dx
    horizontal = segments[np.where(abs(slope) < 0.2)]
    mean_pix = np.mean(horizontal[:, :, 0])
    return mean_pix
def detect_side(side_mask):
    """Mean column index of the non-zero pixels in rows 150..189."""
    strip = side_mask[150:190, :]
    coords = np.where(strip > 0)          # (row indices, column indices)
    side_pix = np.mean(coords, axis=1)[1]  # [1] -> mean column
    return side_pix
def detect_sign(frame, hsv_frame):
    """Classify the traffic sign visible in the frame.

    frame: BGR image; hsv_frame: the same frame converted to HSV.
    Returns 'left', 'straight', 'right', or 'nothing'.
    """
    types = ['left', 'straight', 'right']
    # Threshold the sign colour range in HSV.
    mask = cv2.inRange(hsv_frame, np.array([100, 160, 90]), np.array([160, 220, 220]))
    mask[:30, :] = 0  # ignore the top strip of the image
    try:
        points, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        sorted_points = sorted(points, key=len)
        if cv2.contourArea(sorted_points[-1]) > 30:  # largest blob big enough?
            x, y, w, h = cv2.boundingRect(sorted_points[-1])
            # only accept signs fully inside the frame borders
            if (x > 5) and (x + w < 251) and (y > 5) and (y + h < 251):
                sign = frame[y:y + h, x:x + w]
                sign = cv2.resize(sign, (25, 25)) / 255  # normalise for the CNN
                frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
                return types[np.argmax(sign_model.predict(sign.reshape(1, 25, 25, 3)))]
            else:
                return 'nothing'
        else:
            return 'nothing'
    except Exception:
        # Was a bare `except:`; expected failures are "no contours at all"
        # (IndexError) or a degenerate crop.  Treat them all as "no sign".
        return 'nothing'
def red_sign_state(red_mask):
    """Return True when the largest red blob in red_mask is big enough."""
    points, _ = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    sorted_points = sorted(points, key=len)
    try:
        # sorted_points[-1] raises IndexError when no contour was found;
        # the original bare `except:` hid every other error as well.
        red_area = cv2.contourArea(sorted_points[-1])
        if red_area > 50:
            print('red sign detected!')
            return True
        else:
            return False
    except IndexError:
        return False
def stop_the_car(car):
    """Straighten the wheels and brake (full reverse) until the car stops."""
    car.setSteering(0)
    moving = car.getSpeed()
    while moving:
        car.setSpeed(-100)   # full reverse acts as the brake
        car.getData()
        moving = car.getSpeed()
    car.setSpeed(0)
    return True
def turn_the_car(car, s, t):
    """Hold steering angle `s` at speed 15 for `t` seconds."""
    deadline = time.time() + t
    while time.time() < deadline:
        car.getData()
        car.setSteering(s)
        car.setSpeed(15)
def go_back(car, t):
    """Drive backwards at speed 15 for `t` seconds, then stop."""
    deadline = time.time() + t
    while time.time() < deadline:
        car.getData()
        car.setSpeed(-15)
    car.setSpeed(0)
`
I am new to Python and I am trying to convert and modify the following code from Matlab to python:
function A = alg_a(RGBimage, n)
% ALG_A  SMQT-like intensity redistribution over n brightness slices.
%   Sorts all brightness values (V channel for colour, the grey image
%   otherwise), splits them into n equal-count slices and linearly rescales
%   each slice into its own 1/n-wide band of the output range.
[row, column, d] = size(RGBimage);
if (d==3)
    HSVimage = rgb2hsv(RGBimage);
    V = HSVimage(:,:,3);             % brightness channel, already in [0,1]
else
    V =double(RGBimage)/255;         % normalise grey image to [0,1]
end
V=V(:);                              % flatten column-major
[Vsorted, ix] = sort(V);             % ascending; ix lets us undo the sort
s = (row*column)/n;                  % pixels per slice (may be fractional)
i=0;
h=[];
while (i < n)
    i=i+1;
    % i-th slice, 1-based inclusive indexing
    z = Vsorted(((floor(s*(i-1))+1)):floor(s*i));
    Vstart = (s*(i-1))/(row*column);  % output band lower bound
    Vstop = (s*i)/(row*column);       % output band upper bound
    r=z-z(1);                         % shift slice so it starts at zero
    f = (1/n)/(r(size(r,1)));         % scale factor; Inf when slice is constant
    g = r*f;
    if(isnan(g(1)))                   % 0*Inf = NaN -> constant slice case
        g = r + Vstop;
    else
        g = g + Vstart;
    end
    h=vertcat(h,g);                   % accumulate the remapped slices
end
m(ix)=h;                              % scatter back to original pixel order
m=m(:);
if(d==3)
    HSVimage(:,:,3) = reshape(m,row,column);   % write V back, convert to RGB
    A=hsv2rgb(HSVimage);
else
    A=reshape(m,row,column);
end
return;
end
This function implements the SMQT image preprocessing algorithm, and my problem is that I don't really understand how it works, which is why I'm stuck here:
import cv2
import numpy as np
import math

# Python port of the Matlab SMQT-like routine alg_a(RGBimage, n).
# Fixes versus the first translation attempt:
#  * V must be normalised to [0,1]: Matlab's rgb2hsv returns V in [0,1],
#    while OpenCV's HSV V channel is 0..255;
#  * sort ascending — `argsort()[::-1]` sorted descending;
#  * Matlab's 1-based slice (floor(s*(i-1))+1 : floor(s*i)) maps to the
#    0-based Python slice [floor(s*(i-1)) : floor(s*i)] — the extra +1
#    silently dropped one pixel per slice;
#  * M must be pre-allocated before the fancy assignment M[Ix] = h
#    (assigning into an empty np.array([]) raises);
#  * np.reshape takes the shape as a tuple, and both flatten and reshape
#    need order='F' to mirror Matlab's column-major layout;
#  * the remapped values must be written back into the V channel before
#    converting to BGR (the original converted the untouched input);
#  * cv2.imwrite needs a file extension to pick an encoder.
img = cv2.imread("16.bmp")
n = 8
row, column, dim = img.shape
if dim == 3:
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.float64)
    V = hsv_img[:, :, 2] / 255.0
else:
    V = img / 255.0
V = V.flatten(order='F')
Ix = V.argsort()                    # ascending, like Matlab's sort
V_sorted = V[Ix]
s = row * column / n                # pixels per slice (may be fractional)
h = np.empty(0)
for i in range(1, n + 1):
    z = V_sorted[math.floor(s * (i - 1)):math.floor(s * i)]
    V_start = (s * (i - 1)) / (row * column)  # output band lower bound
    V_stop = (s * i) / (row * column)         # output band upper bound
    r = z - z[0]
    with np.errstate(divide='ignore', invalid='ignore'):
        g = r * ((1 / n) / r[-1])   # r[-1]==0 -> inf scale -> g[0] is NaN
    if math.isnan(g[0]):            # constant slice: just offset it
        g = r + V_stop
    else:
        g = g + V_start
    h = np.concatenate((h, g))
M = np.empty(row * column)
M[Ix] = h                           # scatter back to original pixel order
if dim == 3:
    hsv_img[:, :, 2] = np.reshape(M, (row, column), order='F') * 255.0
    img_res = cv2.cvtColor(hsv_img.astype(np.uint8), cv2.COLOR_HSV2BGR)
else:
    img_res = np.reshape(M, (row, column), order='F') * 255.0
cv2.imwrite('16_smqt.bmp', img_res)
It seems that I wrote a code similar in functionality, but it does not work, and I haven't any idea why. The Matlab code was taken by me from the article, so I have no doubt that it works. Please help me find inaccuracies in my translation from Matlab to python.
I Create function to log transform an image in python. Below is my code..
# Compute the log transform only
def logTransform(c, f):
    """Return c * ln(1 + f): the log intensity transform of value f with scale c."""
    return c * m.log(float(1 + f))
def ImgLogarithmic(img_input, coldepth):
    """Apply a log intensity transform to a PIL image, per channel.

    img_input: PIL image; coldepth: source colour depth (1, 8 or 24 bits).
    Returns a new image converted back to the requested depth.
    """
    # logarithmic transform: scale c so that input 255 maps to output 255
    inputMax = 255
    outputMax = 255
    c = outputMax / m.log(inputMax + 1)
    if coldepth != 24:
        img_input = img_input.convert('RGB')
    # BUG FIX: Image.new takes (width, height) — the original passed
    # (size[1], size[0]), i.e. swapped dimensions, so every non-square
    # image crashed or was mis-sampled in the pixel loop below.
    img_output = Image.new('RGB', img_input.size)
    pixels = img_output.load()
    for i in range(img_output.size[0]):       # x over the width
        for j in range(img_output.size[1]):   # y over the height
            f = img_input.getpixel((i, j))
            r = round(logTransform(c, f[0]))
            g = round(logTransform(c, f[1]))
            b = round(logTransform(c, f[2]))
            pixels[i, j] = (r, g, b)
    if coldepth == 1:
        img_output = img_output.convert("1")
    elif coldepth == 8:
        img_output = img_output.convert("L")
    else:
        img_output = img_output.convert("RGB")
    return img_output
I create that function to call log transform. But, my code is not working. I think my algorithm is right. Anyone can help? Thank you
I have a typical captcha image which contain only digits.
Ex.
i want to extract 78614 from this image.
I tried a few libraries & code samples using OCR in Python, but it's returning 0.
Sample Code-1
# Attempt 1: hand the raw PNG bytes to the third-party captcha_solver
# package's 'browser' backend and print whatever it recognises.
from captcha_solver import CaptchaSolver
solver = CaptchaSolver('browser')
with open('captcha.png', 'rb') as inp:
    raw_data = inp.read()
print(solver.solve_captcha(raw_data))
Sample Code-2
from PIL import Image
def p(img, letter):
    """Slide `letter` horizontally across `img` and return the best match.

    Compares the red channel only, scoring each horizontal offset by the sum
    of absolute differences.  Returns (best_score, best_offset); when there
    is no offset to try, returns the sentinel (1000000, 0).
    """
    A = img.load()
    B = letter.load()
    mx = 1000000
    max_x = 0
    x = 0
    for x in range(img.size[0] - letter.size[0]):
        _sum = sum(
            abs(A[x + i, j][0] - B[i, j][0])
            for i in range(letter.size[0])
            for j in range(letter.size[1])
        )
        if _sum < mx:
            mx = _sum
            max_x = x
    return mx, max_x
def ocr(im, threshold=200, alphabet="0123456789abcdef"):
    """Template-matching OCR for a fixed-layout captcha image.

    im: path to the captcha file; threshold: RGB level above which a pixel
    counts as background; alphabet: glyphs in the order they appear in the
    template strip.  Returns the recognised 5-character string.

    Fixed Python 2 leftovers: the `<>` operator, `print` statements and
    stray semicolons (the surrounding file already uses print()).
    """
    img = Image.open(im)
    img = img.convert("RGB")
    box = (8, 8, 58, 18)
    img = img.crop(box)
    pixdata = img.load()
    letters = Image.open(im)
    ledata = letters.load()
    # Clean the background noise: if color != near-white, set to black.
    for y in range(img.size[1]):
        for x in range(img.size[0]):
            if (pixdata[x, y][0] > threshold) \
                    and (pixdata[x, y][1] > threshold) \
                    and (pixdata[x, y][2] > threshold):
                pixdata[x, y] = (255, 255, 255, 255)
            else:
                pixdata[x, y] = (0, 0, 0, 255)
    counter = 0
    old_x = -1
    letterlist = []
    # Split the template strip on fully-black columns; each run between two
    # black columns is one glyph.
    for x in range(letters.size[0]):
        black = True
        for y in range(letters.size[1]):
            if ledata[x, y][0] != 0:  # was the Python 2 `<>` operator
                black = False
                break
        if black:  # original had a redundant nested `if True:` here
            box = (old_x + 1, 0, x, 10)
            letter = letters.crop(box)
            t = p(img, letter)
            print(counter, x, t)  # was a Python 2 print statement
            letterlist.append((t[0], alphabet[counter], t[1]))
            old_x = x
            counter += 1
    box = (old_x + 1, 0, 140, 10)
    letter = letters.crop(box)
    t = p(img, letter)
    letterlist.append((t[0], alphabet[counter], t[1]))
    t = sorted(letterlist)
    t = t[0:5]  # 5-letter captcha: keep the five best scores
    final = sorted(t, key=lambda e: e[2])  # order by left-to-right position
    answer = ""
    for l in final:
        answer = answer + l[1]
    return answer
print(ocr('captcha.png'))
Has anyone had the opportunity to get/extract text from such typical captcha?
You can use machine learning (neural networks) models to solve captchas and it will almost always outperform free OCR or any other method.
Here is a good starting point: https://medium.com/@ageitgey/how-to-break-a-captcha-system-in-15-minutes-with-machine-learning-dbebb035a710
I've a problem with my software in Python. It's a big while cicle where I took a intel realsense (USB camera) stream. Using opencv I make a couple of findContours and I send the results of contours to another software.
The problem is that there is a memory consuption. In fact the RAM usage increase every 2-3 seconds by 0.1%.
I don't know what to do...
This is the code (sorry if it's not beautifull but I'm testing a lot of things)
import numpy as np
import random
import socket
import cv2
import time
import math
import pickle
import httplib, urllib
from xml.etree import ElementTree as ET
import logging
logging.basicConfig(level=logging.INFO)
# Optional dependency: the Intel RealSense bindings are only needed when an
# Intel camera is configured.
try:
    import pyrealsense as pyrs
except ImportError:  # was a bare `except:`, which also hid unrelated errors
    print("No pyralsense Module installed!")
# Mouse callback: the left-button press records the start corner (ix, iy)
# of the user-drawn rectangle and the release records the end corner
# (fx, fy).  (Translated from Italian: "funzione per registrare gli eventi
# del mouse" — function to record mouse events.)
def drawArea(event,x,y, flag, param):
    # rectangle corners shared with the main loop
    global fx,fy,ix,iy
    if event == cv2.EVENT_LBUTTONDOWN:
        ix,iy = x,y
    elif event == cv2.EVENT_LBUTTONUP:
        fx,fy = x,y
def RepresentsInt(s):
    """Return True when `s` can be parsed as an int (e.g. "42"), else False."""
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        # TypeError added: the XML lookup that feeds this can hand us None,
        # which int() previously propagated as an uncaught TypeError.
        return False
quit = False
read = False
# Read the initial configuration (camera type, output port, video source)
# from default.xml; `read` flips to True after one successful pass.
# NOTE(review): indentation reconstructed from a flattened paste — verify
# the nesting against the original script.
while read == False:
    file = open('default.xml', 'r')
    tree = ET.parse(file)
    root = tree.getroot()
    for child in root:
        if child.tag == "intel":
            intel = int(child[0].text)    # 1 = Intel RealSense camera
        elif child.tag == "output":
            portOut = int(child[2].text)  # UDP port used for output
        elif child.tag =="source":
            video_source = child.text     # camera index or stream path
    file.close()
    root.clear()
    # user-drawn rectangle corners; -1 means "not drawn yet"
    ix,iy = -1,-1
    fx,fy = -1,-1
    timeNP = 10
    last = time.time()
    smoothing = 0.9
    fps_smooth = 30
    #video_source = video_source.split(",")
    read = True
    # a purely numeric source string means a local camera index
    if RepresentsInt(video_source):
        video_source = int(video_source)
# Start the RealSense service and open the configured device.
if intel == 1:
    pyrs.start()
    dev = pyrs.Device(video_source)
master = 1
# UDP socket listening for the master's broadcast on port 3333.
address = ('', 3333)
broadSockListe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
broadSockListe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
broadSockListe.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
broadSockListe.bind(('',3333))
# Main service loop: the outer `while True` handles registration with the
# master and per-run setup, then the inner `while True` grabs frames,
# filters them and streams the detected contours over UDP.
# NOTE(review): all indentation below is reconstructed from a flattened
# paste — verify the nesting against the original script.
while True:
    # Not yet registered with the master: wait for its broadcast, then
    # register this camera unit via two HTTP POSTs.
    if master == 0:
        datas, address = broadSockListe.recvfrom(1024)
        if str(datas) == "8000":
            separator = ":"
            seq = (address[0],"8081")
            masterAddr = separator.join(seq)
            # best-effort detection of our own non-loopback IP address
            IP = str([l for l in (
                [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [
                [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
                [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
            params = separator.join(("addUnit",IP,str(portOut),"camera","generalList.xml"))
            params = urllib.urlencode({"Python":params})
            headers = {}
            conn = httplib.HTTPConnection(masterAddr)
            conn.request("POST",masterAddr ,params, headers)
            params = separator.join(("masterIP",address[0],str(portOut)+"/","default.xml"))
            params = urllib.urlencode({"Python":params})
            headers = {}
            myip = IP + ":8081"
            conn = httplib.HTTPConnection(myip)
            # POST to my own server (translated from Italian)
            conn.request("POST", myip, params, headers)
            broadSockListe.close()
            # set master to 1: registration done, the main program can start
            master = 1
    read = False
    while read == False:
        '''# leggo le varie impostazioni dal file default
        file = open('default.xml','r+')
        tree = ET.parse(file)
        root = tree.getroot()
        for child in root:
        if child.tag == "modifica" and child.text == "1":
        child.text = "0"
        tree.write('default.xml')
        root.clear()
        file.close()'''
        read = True
    # Per-run state: motion history, step size ("spostamento" = shift) and
    # the UDP output endpoints.
    prev,prevprev,dirX,dirY = 0,0,0,0
    spostamento = 15
    UDP_IP = ["", ""]
    UDP_PORT = ["", ""]
    UDP_IP[0] = "127.0.0.1"
    UDP_PORT[0] = 3030
    IP_left = "127.0.0.1"
    IP_right = "127.0.0.1"
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("",portOut))
    message = ""
    sep = "-"
    font = cv2.FONT_HERSHEY_SIMPLEX
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # rettangoli = [x,y,width,height,angle,box, area, contours]
    rettangoli = []
    cnt = 0
    letto = 0
    while True:
        now = time.time()
        # re-read the tuning parameters from default.xml at most every 2 s
        if letto < now - 2 or letto == 0 or now < letto:
            letto = now
            print(now)
            read = False
            while read == False:
                file = open('default.xml', 'r')
                tree = ET.parse(file)
                root = tree.getroot()
                for child in root:
                    if child.tag == "output":
                        UDP_IP[1] = child[0].text
                        UDP_PORT[1] = int(child[1].text)
                    if child.tag == "effects":
                        # morphology pipeline: amounts and slot positions
                        erode = int(child[0].text)
                        erodePos = int(child[1].text)
                        erode2 = int(child[2].text)
                        erodePos2 = int(child[3].text)
                        dilate1 = int(child[4].text)
                        dilatePos1= int(child[5].text)
                        dilate2 = int(child[6].text)
                        dilatePos2 = int(child[7].text)
                        blur = int(child[8].text)
                        blurPos = int(child[9].text)
                    if child.tag == "intel":
                        # HSV threshold ranges for the depth colour map
                        val1Min = int(child[1].text)
                        val1Max = int(child[2].text)
                        val2Min = int(child[3].text)
                        val2Max = int(child[4].text)
                        val3Min = int(child[5].text)
                        val3Max = int(child[6].text)
                    if child.tag == "modifica":
                        if child.text == "1":
                            break
                    # blob size limits for collisions (translated from
                    # Italian: "definisco dimensioni per collisioni")
                    if child.tag == "size":
                        blobSize= int(child[0].text)
                        dimBordoBlob= int(child[1].text)
                    if child.tag == "visualizza":
                        visualizza= child.text
                    if child.tag == "feedback":
                        SFB = int(child.text)
                root.clear()
                file.close()
                read = True
        # Grab one colour + depth frame from the RealSense device.
        dev.wait_for_frame()
        c = dev.colour
        c = cv2.cvtColor(c, cv2.COLOR_RGB2BGR)
        d = dev.depth * dev.depth_scale * -60
        d = d[5:485, 25:635]
        d = cv2.applyColorMap(d.astype(np.uint8), cv2.COLORMAP_HSV)
        c = cv2.resize(c, (320 ,240), interpolation=cv2.INTER_AREA)
        d = cv2.resize(d, (320,240), interpolation=cv2.INTER_AREA)
        # convert the colours to HSV so they can be filtered (translated
        # from Italian: "trasformo i colori in HSV per filtrarli")
        frame = cv2.cvtColor(d, cv2.COLOR_BGR2HSV)
        lower_red = np.array([val1Min, val2Min, val3Min])
        upper_red = np.array([val1Max, val2Max, val3Max])
        frame = cv2.inRange(frame, lower_red, upper_red)
        dimensions = frame.shape
        widthStream = dimensions[1]
        heightStream = dimensions[0]
        # NOTE(review): a fresh roomFrame is allocated on every iteration;
        # reusing a single pre-allocated buffer is the first thing to try
        # against the steady RAM growth reported in the question.
        roomFrame = np.zeros(( heightStream,widthStream, 3), np.uint8)
        roomFrame[:] = (0, 0, 0)
        fgmask = frame
        halfheight = int(heightStream / 2)
        halfwidth = int(widthStream / 2)
        # apply the configured erode/dilate/blur steps in slots 0..14
        for i in range(0, 15):
            if erode >= 1 and erodePos == i:
                fgmask = cv2.erode(fgmask, kernel, iterations=erode)
            if dilate1 >= 1 and dilatePos1 == i:
                fgmask = cv2.dilate(fgmask, kernel, iterations=dilate1)
            if erode2 >= 1 and erodePos2 == i:
                fgmask = cv2.erode(fgmask, kernel, iterations=erode2)
            if dilate2 >= 1 and dilatePos2 == i:
                fgmask = cv2.dilate(fgmask, kernel, iterations=dilate2)
            if blur == 1 and blurPos == 1:
                fgmask = cv2.GaussianBlur(fgmask, (5, 5), 0)
        # normalise the user-drawn rectangle so (ix, iy) is the top-left
        if ix > fx:
            temp = fx
            fx = ix
            ix = temp
        if iy > fy:
            temp = fy
            fy = iy
            iy = temp
        if cnt == 0:
            # no rectangle drawn yet: use (almost) the whole frame
            ix,iy = 1,1
            fx,fy = widthStream-1,heightStream-1
        # First contour pass (OpenCV 3.x three-value return): fill the
        # accepted blobs solid so they merge for the second pass.
        fgmask, contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        rettangoli = []
        for cont in contours:
            rect = cv2.minAreaRect(cont)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            width = rect[1][0]
            height = rect[1][1]
            angle = rect[2]
            if width > height:
                angle = 180 + angle
            else:
                angle = 270 + angle
            x, y, w, h = cv2.boundingRect(cont)
            centerX = int(w / 2 + x)
            centerY = int(h / 2 + y)
            M = cv2.moments(cont)
            area = int(M['m00'])
            if area > blobSize:
                if ix < centerX < fx and iy < centerY < fy:
                    cv2.drawContours(fgmask, [cont], 0, (100, 100, 100), dimBordoBlob)
                    cv2.drawContours(fgmask, [cont], 0, (255, 255, 255), -1)
                    rettangoli.append([centerX, centerY, w, h, angle, box, area, cont])
        indice = 0
        # second contour pass on the merged mask, coarser approximation
        fgmask, contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
        if intel == 1:
            fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2RGB)
        rettangoli = []
        for cont in contours:
            rect = cv2.minAreaRect(cont)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            width = rect[1][0]
            height = rect[1][1]
            angle = rect[2]
            if width > height:
                angle = 180 + angle
            else:
                angle = 270 + angle
            x, y, w, h = cv2.boundingRect(cont)
            centerX = int(w / 2 + x)
            centerY = int(h / 2 + y)
            M = cv2.moments(cont)
            indice += 1
            if M['m00'] > blobSize:
                if ix < centerX < fx and iy < centerY < fy:
                    rettangoli.append([centerX, centerY, w, h, angle, box, int(M['m00']), cont])
                    cv2.drawContours(roomFrame, [cont], 0, (255, 255, 255), -1)
        # serialise the accepted contours and send them to every endpoint
        for rett in rettangoli:
            seq = (message,np.array_str(rett[7]))
            message = sep.join(seq)
        temp = 0
        while temp < len(UDP_IP):
            sock.sendto(bytes(message), (UDP_IP[temp], UDP_PORT[temp]))
            temp += 1
        message = ""
        if SFB == 1:
            cv2.imshow("Camera Intel", roomFrame)
        # 'r' restarts the run (back to the outer loop), 'q' quits
        if cv2.waitKey(1) & 0xFF == ord('r'):
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            quit = True
            break
    # save the last colour frame and mask when the inner loop exits
    name = "color.jpeg"
    cv2.imwrite(name, c)
    name = "bn.jpeg"
    cv2.imwrite(name, roomFrame)
# NOTE(review): `cap` is never defined anywhere above — this release call
# looks like a leftover from a non-Intel webcam code path; confirm.
if intel == 0:
    cap.release()
cv2.destroyAllWindows()
You are creating new objects in your while loop. Take now for example, you create a variable and then you assign a new object to it that only lives in that loop. If you declare the variables before your loop the same object will be overwritten instead of re-created.
By just declaring the variables ahead of time with name = None you will be able to make sure you reuse these variables.
I hope this works for you.