I am trying to create a Python script that removes a watermark from an image. To do that, I have decided to use the cv2.inpaint method. These are the steps I follow to remove the watermark:
Get the image from the user
Ask the user to shade on the watermark
Create a mask out of the user's shading
Inpaint the image with the mask created in order to remove the watermark (a minimal sketch of this call follows below)
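Step 5 boils down to a single call to cv2.inpaint. For reference, here is a minimal, non-interactive sketch of that call, assuming a hypothetical pre-drawn mask file mask.png in which the watermark pixels are white and everything else is black:

import cv2

img = cv2.imread('image.jpg')                        # hypothetical file names
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)
# Make sure the mask is strictly binary: non-zero pixels mark the region to fill
_, mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)

# Radius 3 with the Telea algorithm, as in the full code below; INPAINT_NS is the alternative
restored_telea = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
restored_ns = cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)

cv2.imshow('Telea', restored_telea)
cv2.imshow('Navier-Stokes', restored_ns)
cv2.waitKey(0)
cv2.destroyAllWindows()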
This is my code:
import cv2
import numpy as np

drawing = False  # true if mouse is pressed
pt1_x, pt1_y = None, None

def nothing(x):
    pass

# Mouse callback: draw the user's shading in red on the image and in white on the mask
def line_drawing(event, x, y, flags, param):
    global pt1_x, pt1_y, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        pt1_x, pt1_y = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            cv2.line(img, (pt1_x, pt1_y), (x, y), color=(0, 0, 255), thickness=cv2.getTrackbarPos('Thickness', 'Damaged Image'))
            cv2.line(mask, (pt1_x, pt1_y), (x, y), color=(255, 255, 255), thickness=cv2.getTrackbarPos('Thickness', 'Damaged Image'))
            pt1_x, pt1_y = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        cv2.line(img, (pt1_x, pt1_y), (x, y), color=(0, 0, 255), thickness=cv2.getTrackbarPos('Thickness', 'Damaged Image'))
        cv2.line(mask, (pt1_x, pt1_y), (x, y), color=(255, 255, 255), thickness=cv2.getTrackbarPos('Thickness', 'Damaged Image'))

# Load the image and resize it to a height of 600 while keeping the aspect ratio
img = cv2.imread('D:\\Watermark.jpg')
ratio = img.shape[1] / img.shape[0]
height = 600
width = height * ratio
img = cv2.resize(img, (int(width), height))
mask = np.zeros(img.shape[:-1], dtype=np.uint8)
#mask = cv2.integral(mask)

cv2.namedWindow('Mask')
cv2.namedWindow('Damaged Image')
cv2.setMouseCallback('Damaged Image', line_drawing)
cv2.createTrackbar('Thickness', 'Damaged Image', 10, 50, nothing)

while(1):
    cv2.imshow('Damaged Image', img)
    cv2.imshow('Mask', mask)
    if cv2.waitKey(1) & 0xFF == 27:
        break
    elif cv2.waitKey(1) & 0xFF == ord('a'):
        pass
    elif cv2.waitKey(1) & 0xFF == ord('s'):
        # Press 's' to inpaint the image with the drawn mask
        restored = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)
        cv2.namedWindow('Restored Image')
        cv2.imshow('Restored Image', restored)

cv2.destroyAllWindows()
This is the image from which I am trying to remove the watermark:
This is the mask:
This is the output:
Why is the output so poor? And what can I do to improve the quality of the output? Any help would be appreciated. Thanks!
I am trying to write some code where I save some images from the webcam in Python using OpenCV, then open each image and, using a bounding box, select a region of interest (ROI) that is cropped out and saved to a new folder. For this I am using cv2.setMouseCallback and drawing a rectangle on each image. My code works, but the cropped images that get stored look very strange.
import cv2

# Opens the inbuilt camera of laptop to capture video.
cap = cv2.VideoCapture(0)
i = 0
training_images = []

def draw_bounding_box(click, x, y, flag_param, parameters):
    global x_pt, y_pt, drawing, top_left_point, bottom_right_point, image
    if click == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        print("drawing=" + str(drawing))
        x_pt, y_pt = x, y
        print("x_pt=" + str(x_pt))
    elif click == cv2.EVENT_MOUSEMOVE:
        if drawing:
            top_left_point, bottom_right_point = (x_pt, y_pt), (x, y)
            image[y_pt:y, x_pt:x] = 255 - image[y_pt:y, x_pt:x]
            cv2.rectangle(image, top_left_point, bottom_right_point, (0, 255, 0), 2)
    elif click == cv2.EVENT_LBUTTONUP:
        drawing = False
        top_left_point, bottom_right_point = (x_pt, y_pt), (x, y)
        copy[y_pt:y, x_pt:x] = 255 - copy[y_pt:y, x_pt:x]
        cv2.rectangle(image, top_left_point, bottom_right_point, (0, 255, 0), 2)
        bounding_box = (x_pt, y_pt, x - x_pt, y - y_pt)
        cropped_im = image[y_pt:y, x_pt:x]
        training_images.append(cropped_im)
        if len(cropped_im) > 0:
            cv2.imshow("cropped", cropped_im)
            cv2.imwrite('C:/Downloads/testingPictures/Frame' + str(i) + '.jpg', cropped_im)
            cv2.waitKey(3000)
        else:
            print("could not save")

if __name__ == '__main__':
    drawing = False
    while(cap.isOpened() and i < 10):
        ret, frame = cap.read()
        # This condition prevents from infinite looping
        # incase video ends.
        if ret == False:
            break
        # Save Frame by Frame into disk using imwrite method
        cv2.imwrite('C:/Downloads/testingPictures/Frame' + str(i) + '.jpg', frame)
        i += 1
        print(i)
    while(i >= 1):
        image = cv2.imread('C:/Users/shrut/Downloads/testingPictures/Frame' + str(i - 1) + '.jpg')
        copy = image.copy()
        i = i - 1
        s = len(training_images)
        cv2.namedWindow('Frame')
        cv2.imshow('Frame', copy)
        cv2.setMouseCallback('Frame', draw_bounding_box)
        s_curr = len(training_images)
        k = cv2.waitKey(5000) & 0xFF
        if k == 27:
            break
    cv2.destroyAllWindows()
    print(i)
    print(len(training_images))
This is what the saved "cropped images" look like
(https://i.stack.imgur.com/KUrEs.jpg)
It looks like multiple bounding boxes superimposed on the image. How can I avoid this and get just the selected area of the image instead?
This is my code; I've looked at some tutorials but can't find what I'm looking for.
I want to overlay the Frame.png image on my webcam feed. I tried to add the image directly, but that didn't work either. If possible, is there a way to add an image, not as an overlay, but so that it stays at a certain coordinate in the live webcam window?
import cv2
import numpy as np

def detect_and_save():
    alpha = 0.2
    beta = 1 - alpha
    cap = cv2.VideoCapture(0)
    sciframe = cv2.imread('Frame.png')
    classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    while True:
        ret, frame = cap.read()
        overlay = frame.copy()
        output = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = classifier.detectMultiScale(gray, 1.5, 5)
        cv2.putText(output, "HUD Test", (175, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 50, 50), 3)
        for face in faces:
            x, y, w, h = face
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 200, 0), -1)
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 0, 0), 1)
            cv2.rectangle(overlay, (x, y - 20), (x + w, y), (25, 20, 0), -1)
            cv2.addWeighted(overlay, alpha, output, beta, 0, output)
            cv2.putText(output, "Human", (x + 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)
        if not ret:
            continue
        cv2.imshow("HUD", output)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            cv2.imwrite('./images/CID_{}.png'.format(time.strftime('%d%m%y_%H_%M_%S')), output)
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    import time
    detect_and_save()
You can directly place one image on top of another at any coordinate quite easily in OpenCV.
cap = cv2.VideoCapture(0)

im_height = 50  # define your top image size here
im_width = 50
im = cv2.resize(cv2.imread("Frame.png"), (im_width, im_height))

while (True):
    ret, frame = cap.read()
    # Rows select the height, columns the width; 0:50, 0:50 is the top-left corner.
    # Select your region here, e.g. 200:250, to place it somewhere else.
    frame[0:im_height, 0:im_width] = im
    cv2.imshow("live camera", frame)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
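If a translucent overlay is wanted rather than a hard paste (the question also mentions overlaying), the same region can be blended with cv2.addWeighted instead of being overwritten. A minimal sketch, assuming the same Frame.png and an alpha value I chose purely for illustration:

import cv2

cap = cv2.VideoCapture(0)
im_height, im_width = 50, 50
im = cv2.resize(cv2.imread("Frame.png"), (im_width, im_height))
alpha = 0.4  # opacity of the pasted image (assumed value)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    roi = frame[0:im_height, 0:im_width]
    # Blend the small image with the region of interest instead of replacing it
    frame[0:im_height, 0:im_width] = cv2.addWeighted(im, alpha, roi, 1 - alpha, 0)
    cv2.imshow("live camera", frame)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()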
I have written code in Python to detect the color red with OpenCV. I am getting the proper output, but I want the user to be able to give an input specifying which color to detect. E.g., if I give blue as the user input, it should show only blue in the output. I also want to add a few attributes to the output, such as the time the object was detected and its location in the live video. I am new to Python and OpenCV, so any guidance would be great.
My existing code is below:
import cv2
import numpy as np

# Capture the input frame from webcam
def get_frame(cap, scaling_factor):
    # Capture the frame from video capture object
    ret, frame = cap.read()
    # Resize the input frame
    frame = cv2.resize(frame, None, fx=scaling_factor,
                       fy=scaling_factor, interpolation=cv2.INTER_AREA)
    return frame

if __name__=='__main__':
    cap = cv2.VideoCapture(0)
    scaling_factor = 0.5

    # Iterate until the user presses ESC key
    while True:
        frame = get_frame(cap, scaling_factor)
        # Convert to the HSV colorspace
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Define 'blue' range in HSV colorspace
        lower = np.array([60, 100, 100])
        upper = np.array([180, 255, 255])
        # Threshold the HSV image to get only blue color
        mask = cv2.inRange(hsv, lower, upper)
        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame, frame, mask=mask)
        res = cv2.medianBlur(res, 5)
        cv2.imshow('Original image', frame)
        cv2.imshow('Color Detector', res)
        # Check if the user pressed ESC key
        c = cv2.waitKey(5)
        if c == 27:
            break

    cv2.destroyAllWindows()
Your code seems to work correctly. Looking at the equations for converting from RGB to HSV, and bearing in mind that OpenCV's boundaries are 0-180 for the H channel and 0-255 for the S and V channels, we can generalize the code to work for all colors.
import cv2
import numpy as np

# Capture the input frame from webcam
def get_frame(cap, scaling_factor):
    # Capture the frame from video capture object
    ret, frame = cap.read()
    # Resize the input frame
    frame = cv2.resize(frame, None, fx=scaling_factor,
                       fy=scaling_factor, interpolation=cv2.INTER_AREA)
    return frame

# Define the color range in HSV colorspace
lower1 = np.array([0, 100, 100])
upper1 = np.array([60, 255, 255])
lower2 = np.array([120, 100, 100])
upper2 = np.array([180, 255, 255])

def color_range(degree):
    # Degree is between 0-360
    # OpenCV uses 0-180 range for H channel
    degree = int(degree / 2)
    global lower1, upper1, lower2, upper2
    if degree < 60:
        lower1 = np.array([int(0), 100, 100])
        upper1 = np.array([degree + 60, 255, 255])
        lower2 = np.array([degree + 120, 100, 100])
        upper2 = np.array([int(180), 255, 255])
    elif degree > 120:
        lower1 = np.array([degree - 60, 100, 100])
        upper1 = np.array([int(180), 255, 255])
        lower2 = np.array([int(0), 100, 100])
        upper2 = np.array([degree - 120, 255, 255])
    else:
        lower1 = np.array([degree - 60, 100, 100])
        upper1 = np.array([degree + 60, 255, 255])
        # ineffective variables
        lower2 = np.array([181, 100, 100])
        upper2 = np.array([181, 255, 255])

if __name__=='__main__':
    cap = cv2.VideoCapture(0)
    scaling_factor = 0.5

    # create trackbar for color change
    winname = 'Color Detector'
    cv2.namedWindow(winname)
    cv2.createTrackbar('Color', winname, 0, 360, color_range)

    # Iterate until the user presses ESC key
    while True:
        frame = get_frame(cap, scaling_factor)
        # Convert to the HSV colorspace
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Threshold the HSV image to get only the desired color
        mask1 = cv2.inRange(hsv, lower1, upper1)
        mask2 = cv2.inRange(hsv, lower2, upper2)
        mask = cv2.bitwise_or(mask1, mask2)
        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame, frame, mask=mask)
        cv2.imshow('Original image', frame)
        cv2.imshow(winname, res)
        # Check if the user pressed ESC key
        c = cv2.waitKey(1) & 0xFF
        if c == 27:
            break

    cv2.destroyAllWindows()
    cap.release()
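As a quick sanity check of the hue mapping (my own example, not part of the original answer): blue sits at roughly 240° on the 0-360 hue wheel, so calling the color_range function above with 240 halves it to an OpenCV hue of 120 and selects a single H window of 60-180:

color_range(240)         # blue is roughly 240 degrees on the 0-360 hue wheel
print(lower1, upper1)    # [ 60 100 100] [180 255 255] -> single H window 60-180
print(lower2, upper2)    # [181 100 100] [181 255 255] -> deliberately out of range

Setting the 'Color' trackbar to 240 in the running program has the same effect.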
I have a problem with my code: when I run it, the window opens and closes straight away, before I can see the result.
import cv2
import numpy
from os.path import join

imagem = cv2.imread('ImagensLupus/3.jpg')
imagemcinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)

class CascadeClassifier:
    def __init__(self, glasses=True):
        if glasses:
            self.eye_cascade = cv2.CascadeClassifier(join('haar', 'haarcascade_eye_tree_eyeglasses.xml'))
        else:
            self.eye_cascade = cv2.CascadeClassifier(join('haar', 'haarcascade_eye.xml'))

    def get_irises_location(self, imagemcinza):
        eyes = self.eye_cascade.detectMultiScale(imagemcinza, 1.3, 5)  # if not empty - eyes detected
        irises = []
        for (ex, ey, ew, eh) in eyes:
            iris_w = int(ex + float(ew / 2))
            iris_h = int(ey + float(eh / 2))
            irises.append([numpy.float32(iris_w), numpy.float32(iris_h)])
        return numpy.array(irises)

class EyerisDetector:
    def __init__(self, image_source, classifier, tracker):
        self.tracker = tracker
        self.classifier = classifier
        self.image_source = image_source
        self.irises = []
        self.blink_in_previous = False
        self.blinks = 0

    def run(self):
        k = cv2.waitKey(30) & 0xff
        while k != 27:  # ESC
            frame = self.image_source.get_current_frame()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if len(self.irises) >= 2:  # irises detected, track eyes
                track_result = self.tracker.track(old_gray, gray, self.irises, self.blinks, self.blink_in_previous)
                self.irises, self.blinks, self.blink_in_previous, lost_track = track_result
                if lost_track:
                    self.irises = self.classifier.get_irises_location(gray)
            else:  # cannot track for some reason -> find irises
                self.irises = self.classifier.get_irises_location(gray)
            old_gray = gray.copy()
        self.image_source.release()

cv2.waitKey()
cv2.imshow("Resultado", imagem)
Using cv2.waitKey() like that will display the intended result only for a fraction of a second.
If you want the result to stay on screen for as long as you like, pass the value 0. This will display the result until another event is triggered (like a keypress). You can find more on this link.
A typical use case would be:
#- program --
cv2.imshow('Image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
OR you could display the windows within a while loop until a certain key is pressed. In the following, windows are displayed until the Esc key is pressed:
while(1):
    cv2.imshow('image', img)
    if cv2.waitKey(0) & 0xFF == 27:  #--- press 'ESC' to break
        break

cv2.destroyAllWindows()
I have to track a window in a video and need to paste an image on that window. I have used CamShift to track the window, but it does not track it correctly. My window is brown in color, so I have given the following color range:
lower = np.array((110, 0, 0))
upper = np.array((130, 255, 255))
I have read many OpenCV documents but am not able to figure out which method to follow. I am using OpenCV 2.4.9 with Python.
Below is the code I tried. Please help me figure out the exact location of the window.
#!/usr/bin/env python
import numpy as np
import cv2

cap = cv2.VideoCapture("myvideo.mp4")

# take first frame of the video
ret, frame = cap.read()
#print frame
#print ret

# setup initial location of window
r, h, c, w = 157, 40, 337, 40
track_window = (c, r, w, h)

# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((110, 0, 0)), np.array((130, 255, 255)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [255], [0, 255])
cv2.imshow('img2', roi_hist)
#print roi_hist
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Setup the termination criteria, either 10 iterations or move by at least 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

i = 1
while(1):
    ret, frame = cap.read()
    if ret == True:
        i += 1
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 255], 1)
        # apply meanshift to get the new location
        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
        #print track_window
        # Draw it on image
        x, y, w, h = track_window
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('img2', frame)
        k = cv2.waitKey(200) & 0xff
        if k == 27:
            break
        else:
            # print "comes here2"
            cv2.imwrite(str(i) + "test.jpg", frame)
            #break
    else:
        break

cv2.destroyAllWindows()
cap.release()