I have been working on these lines and I want to draw them like in the picture below, but my approach got complicated. How do I make the lines symmetrical like in the picture?
This is what I tried so far, not much :/
from PIL import Image, ImageDraw
img = Image.new('RGB', (1000, 1000), (255, 255, 255))
draw = ImageDraw.Draw(img)
for y in range(-4000, 2000, 200):
    draw.line(((y, img.size[0]+img.size[0]),
               (img.size[1]+img.size[1]+img.size[1], y)), (0, 0, 0), 20)

for x in range(-2000, 1000, 100):
    draw.line(((x, -x), (x+img.size[0], -x+img.size[1]+img.size[1])), (0, 0, 0), 20)
I want to create lines like this...
I think you made the calculations too complicated, and that created the problem.
Symmetrical lines should use similar calculations, just with the opposite sign.
Both should run y from 0 to the height.
from PIL import Image, ImageDraw
img = Image.new('RGB', (1000, 1000), (255, 255, 255))
draw = ImageDraw.Draw(img)
w = img.size[0]
h = img.size[1]
offset = 400
step = 100
for x in range(-offset, w+offset, step):
    draw.line(((x, 0), (x+offset, h)), (0, 0, 0), 20)
    draw.line(((x, 0), (x-offset, h)), (0, 0, 0), 20)
img.show()
Gives
And for offset = image_width (offset = 1000) it gives rectangles
EDIT:
Here is a version which uses move_x and move_y to move the lines.
Because moving the lines exposes their ends, I use line_width to start and end each line outside the window.
from PIL import Image, ImageDraw
img = Image.new('RGB', (1000, 1000), (0, 200, 0))
draw = ImageDraw.Draw(img)
w = img.size[0]
h = img.size[1]
line_width = 10 #
move_x = 10
move_y = 50
offset = w # 400
step = 200
for x in range(-offset, w+offset, step):
    x1 = x + move_x - move_y - line_width
    x2 = x1 + offset + line_width*2
    x3 = x + move_x + move_y + line_width
    x4 = x3 - offset - line_width*2
    draw.line(((x1, 0-line_width), (x2, h+line_width)), (0, 0, 0), line_width)
    draw.line(((x3, 0-line_width), (x4, h+line_width)), (0, 0, 0), line_width)
img.show()
img.save('output.png')
I'm testing out a hand tracking volume control script. It runs, but when I bring my hand into frame it instantly crashes. I get this error message:
area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) // 100 KeyError: 2
If I comment out area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) // 100, the script runs as normal but I don't get the volume controlling function.
Here is the complete code:
import cv2
import time
import numpy as np
import HandTrackingModule as htm
import math
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
################################
wCam, hCam = 640, 480
################################
cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
pTime = 0
detector = htm.HandDetector(detectionCon=0.7, maxHands=1)
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(
    IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
# volume.GetMute()
# volume.GetMasterVolumeLevel()
volRange = volume.GetVolumeRange()
minVol = volRange[0]
maxVol = volRange[1]
vol = 0
volBar = 400
volPer = 0
area = 0
colorVol = (255, 0, 0)
while True:
    success, img = cap.read()

    # Find hands
    img = detector.findHands(img)
    lmList, bbox = detector.findPosition(img, draw=True)
    if len(lmList) != 0:

        # Filter based on size
        area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) // 100
        # print(area)
        if 250 < area < 1000:
            # print("Yes")

            # Find distance between index and thumb
            length, img, lineInfo = detector.findDistance(4, 8, img)
            # print(length)

            # Convert volume
            volBar = np.interp(length, [50, 200], [400, 150])
            volPer = np.interp(length, [50, 200], [0, 100])

            # Reduce resolution to make it smoother
            smoothness = 10
            volPer = smoothness * round(volPer / smoothness)

            # Check which fingers are up
            fingers = detector.fingersUp()
            print(fingers)

            # If the pinky is down, set the volume
            if not fingers[4]:
                volume.SetMasterVolumeLevelScalar(volPer / 100, None)
                cv2.circle(img, (lineInfo[4], lineInfo[5]), 15, (0, 225, 0), cv2.FILLED)
                colorVol = (225, 0, 0)
            else:
                colorVol = (0, 255, 0)

    # Drawings
    cv2.rectangle(img, (50, 150), (85, 400), (225, 0, 0), 3)
    cv2.rectangle(img, (50, int(volBar)), (85, 400), (225, 0, 0), cv2.FILLED)
    cv2.putText(img, f' {int(volPer)} %', (40, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (225, 0, 0), 3)
    cVol = int(volume.GetMasterVolumeLevelScalar() * 100)
    cv2.putText(img, f'Vol Set: {int(cVol)}', (400, 50), cv2.FONT_HERSHEY_COMPLEX, 1, colorVol, 3)

    # Frame rate
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, f'FPS: {int(fps)}', (40, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 3)

    cv2.imshow("image", img)
    cv2.waitKey(1)
I've looked everywhere but came up empty-handed.
If you know how to fix this, please let me know.
I assume HandTrackingModule is this, and it's been updated since you downloaded it, since it no longer has a HandDetector class. It does, however, have a FindHands class with a getPosition() method, which I assume is equivalent to your detector.findPosition().
The method returns a list of hands, so when you index into it you're selecting which hand you want, and indexing with 2 will fail unless there are at least 3 hands. If multiple hands are detected, the list will have length > 1.
Rename what you've currently called bbox to bboxes, since it's plural, then do:
areas = [
    (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) // 100
    for bbox in bboxes
]

for area in areas:
    print(area)
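Since you create the detector with maxHands=1, a minimal sketch of using only the first hand could look like this. This assumes your findPosition() really does return one bounding box per detected hand, which I can't verify against your local copy of the module:
lmList, bboxes = detector.findPosition(img, draw=True)
if len(lmList) != 0 and bboxes:
    bbox = bboxes[0]  # first (and, with maxHands=1, only) detected hand
    area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) // 100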
I want an X to be printed when I click on one of the possible squares.
I have made a list of all the coordinates of the tic-tac-toe grid. I've also added the centre points of each of the rectangles' coordinates. I'm trying to make it so that if I click within the area of one of the squares, an X shows up. The eventual aim is for a double-click to lodge the result.
import matplotlib.pyplot as plt
import pygame
import sys
pygame.font.init()
size = 320, 240
black = 0, 0, 0
white = 255,255,255
red = 255, 0, 0
x1y1 = [(100, 0), (100, 300)]
x2y2 = [(200, 0), (200, 300)]
x3y3 = [(0, 100), (300, 100)]
x4y4 = [(0, 200), (300, 200)]
def centroid(coord1, coord2):
    xx = 50
    yy = 50
    coords = []
    for a in range(0, 3):
        for b in range(0, 3):
            if a + 1 == int(coord1) and b + 1 == int(coord2):
                coords += tuple([xx + a*100, yy + b*100])
    return tuple(coords)
def fourCorners(a, b, length, width):
    center = (a, b)
    corner3 = (int(a + length/2), int(b + width/2))
    corner2 = (int(a + length/2), int(b - width/2))
    corner4 = (int(a - length/2), int(b + width/2))
    corner1 = (int(a - length/2), int(b - width/2))
    return [corner1, corner2, corner3, corner4]
def withinRect(a, b, corners):
    if len(corners) != 4:
        print('Pass a list parameter of length 4.')
    elif int(corners[0][0]) >= int(a) >= int(corners[1][0]) and int(corners[0][1]) >= int(b) >= int(corners[1][1]):
        return True
screen = pygame.display.set_mode((300,300))
screen.fill(white)
pygame.draw.line(screen, (0, 0, 255), x1y1[0], x1y1[1], 3)
pygame.draw.line(screen, (0, 0, 255), x2y2[0], x2y2[1], 3)
pygame.draw.line(screen, (0, 0, 255), x3y3[0], x3y3[1], 3)
pygame.draw.line(screen, (0, 0, 255), x4y4[0], x4y4[1], 3)
while True:
    ev = pygame.event.get()
    for event in ev:
        # handle get_pressed
        if event.type == pygame.MOUSEBUTTONUP:
            pos = pygame.mouse.get_pos()
            x, y = event.pos
            v = fourCorners(centroid(1, 1)[0], centroid(1, 1)[1], 100, 100)
            button = pygame.Rect(v[1][0], v[1][1], 100, 100)
            if (withinRect(x, y, v) == True and button.collidepoint(event.pos)):
                print('X')
                pygame.display.flip()
            else:
                break
    pygame.display.flip()
Traverse all 9 fields with 2 nested loops and define a pygame.Rect for the corresponding field:
for a in range(3):
    for b in range(3):
        button = pygame.Rect(a*100, b*100, 100, 100)
If you want you can define a margin, to limit the area to the center of a field:
margin = 4
button = pygame.Rect(a*100+margin, b*100+margin, 100-2*margin, 100-2*margin)
Use collidepoint() to check if the click was inside the area:
if button.collidepoint(pos):
    # [...]
Draw a line from the .bottomleft to the .topright and a line from the .topleft to the .bottomright, to form a cross:
pygame.draw.line(screen, (0, 0, 255), button.bottomleft, button.topright, 3)
pygame.draw.line(screen, (0, 0, 255), button.topleft, button.bottomright, 3)
ev = pygame.event.get()
for event in ev:
    if event.type == pygame.MOUSEBUTTONUP:
        pos = pygame.mouse.get_pos()

        margin = 4
        for a in range(3):
            for b in range(3):
                button = pygame.Rect(a*100+margin, b*100+margin, 100-2*margin, 100-2*margin)
                if button.collidepoint(pos):
                    pygame.draw.line(screen, (0, 0, 255), button.bottomleft, button.topright, 3)
                    pygame.draw.line(screen, (0, 0, 255), button.topleft, button.bottomright, 3)
I finally figured out how to animate my sprite, but now I have a new problem. When running the game at my desired FPS (60), the character animation is way too quick. The animation looks smooth at around 10 FPS, but the game looks choppy at that framerate. Is it possible for my game to run at 60 FPS while the animation runs at a separate FPS (e.g. 10)? Any help appreciated!
Images and Sound FX Download
My code:
import pygame
import random
import time
import os
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d, %d" %(0, 20)
pygame.init()
SIZE = W, H = 400, 700
screen = pygame.display.set_mode(SIZE)
clock = pygame.time.Clock()
# colours
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BACKGROUND = (94, 194, 222)
STRIPE = (60, 160, 190)
LANELINE = (255, 255, 255)
x1 = 30
x2 = 330
lane1 = 30
lane2 = 130
lane3 = 230
lane4 = 330
y = 530
width = 40
height = 64
toggle1 = 0
toggle2 = 0
target_x1 = 30
target_x2 = 330
vel_x = 10
def drawScene():
    screen.fill(BACKGROUND)
    pygame.draw.polygon(screen, STRIPE, ((200, 700), (300, 700), (400, 600), (400, 500)))
    pygame.draw.polygon(screen, STRIPE, ((0, 700), (100, 700), (400, 400), (400, 300)))
    pygame.draw.polygon(screen, STRIPE, ((0, 500), (0, 600), (400, 200), (400, 100)))
    pygame.draw.polygon(screen, STRIPE, ((0, 300), (0, 400), (400, 0), (300, 0)))
    pygame.draw.polygon(screen, STRIPE, ((0, 100), (0, 200), (200, 0), (100, 0)))
    pygame.draw.line(screen, LANELINE, (100, 0), (100, 700), 2)
    pygame.draw.line(screen, LANELINE, (200, 0), (200, 700), 4)
    pygame.draw.line(screen, LANELINE, (300, 0), (300, 700), 2)
mainsheet = pygame.image.load("dolphinSheet.png").convert()
sheetSize = mainsheet.get_size()
horiz_cells = 6
vert_cells = 1
cell_width = int(sheetSize[0] / horiz_cells)
cell_height = int(sheetSize[1] / vert_cells)
cellList = []
for vert in range(0, sheetSize[1], cell_height):
    for horz in range(0, sheetSize[0], cell_width):
        surface = pygame.Surface((cell_width, cell_height))
        surface.blit(mainsheet, (0, 0),
                     (horz, vert, cell_width, cell_height))
        colorkey = surface.get_at((0, 0))
        surface.set_colorkey(colorkey)
        cellList.append(surface)
cellPosition = 0
# main loop
while True:
    clock.tick(60)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_a:
                pygame.mixer.music.load('percussiveHit.mp3')
                pygame.mixer.music.play()
                toggle1 += 1
                if toggle1 % 2 == 1:
                    target_x1 += 100
                else:
                    target_x1 -= 100
            elif event.key == pygame.K_d:
                pygame.mixer.music.load('percussiveHit.mp3')
                pygame.mixer.music.play()
                toggle2 += 1
                if toggle2 % 2 == 1:
                    target_x2 -= 100
                else:
                    target_x2 += 100
    if x1 < target_x1:
        x1 = min(x1 + vel_x, target_x1)
    else:
        x1 = max(x1 - vel_x, target_x1)
    if x2 < target_x2:
        x2 = min(x2 + vel_x, target_x2)
    else:
        x2 = max(x2 - vel_x, target_x2)
    if cellPosition < len(cellList) - 1:
        cellPosition += 1
    else:
        cellPosition = 0
    drawScene()
    pygame.draw.rect(screen, RED, (x1, y, width, height))
    pygame.draw.rect(screen, RED, (x2, y, width, height))
    screen.blit(cellList[cellPosition], (x1 + 4, y - 1))
    screen.blit(cellList[cellPosition], (x2 + 4, y - 1))
    # players
    # screen.blit(playerImg, (x1 + 4, y - 5))
    # screen.blit(playerImg, (x2 + 4, y - 5))
    pygame.display.update()
pygame.quit()
Update the image based on a real-time millisecond delay, rather than every frame. Use pygame.time.get_ticks() (which returns the number of milliseconds since pygame.init() was called) to update the image only after a specific amount of time has passed.
For example:
MS_FRAME_TIME = 100   # How many milliseconds a frame is shown for
...
last_paint_at = 0     # Start condition to ensure first paint
...
while True:
    clock.tick(60)
    ticks = pygame.time.get_ticks()   # milliseconds since start

    # If enough milliseconds have elapsed since the last frame, update!
    if (ticks - last_paint_at > MS_FRAME_TIME):
        last_paint_at = ticks
        if (cellPosition < len(cellList) - 1):
            cellPosition += 1
        else:
            cellPosition = 0

    screen.blit(cellList[cellPosition], (x1 + 4, y - 1))
    screen.blit(cellList[cellPosition], (x2 + 4, y - 1))
If it fits your code, it may also be possible to simply look at the modulus of the time to select the animation frame.
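For example, a minimal sketch of that modulus idea, reusing the cellList and MS_FRAME_TIME names from above:
# Derive the frame index directly from the elapsed time:
# the index advances every MS_FRAME_TIME milliseconds and
# wraps around via the modulus of the number of frames.
ticks = pygame.time.get_ticks()
cellPosition = (ticks // MS_FRAME_TIME) % len(cellList)
screen.blit(cellList[cellPosition], (x1 + 4, y - 1))
screen.blit(cellList[cellPosition], (x2 + 4, y - 1))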
def detect_video(image):
    gray = image
    blob = cv2.dnn.blobFromImage(gray, 1.0, (300, 300), [104, 117, 123], False, False)
    net.setInput(blob)
    detections = net.forward()
    bboxes = []
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    frameWidth = image.shape[1]
    frameHeight = image.shape[0]
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.7:
            x1 = int(detections[0, 0, i, 3] * frameWidth)
            y1 = int(detections[0, 0, i, 4] * frameHeight)
            x2 = int(detections[0, 0, i, 5] * frameWidth)
            y2 = int(detections[0, 0, i, 6] * frameHeight)
            cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0), 3)
            try:
                image1 = gray[y1:(y2), x1:(x2)]
                img = cv2.resize(image1, (48, 48), interpolation=cv2.INTER_CUBIC) / 255.
                prediction = model1.predict_proba(img.reshape(1, 48, 48, 1))
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, str(emotions[prediction[0].argmax()]), (x1, y1+10), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                result = prediction
                if result is not None:
                    if result[0][6] < 0.6:
                        result[0][6] = result[0][6] - 0.12
                        result[0][:3] += 0.01
                        result[0][4:5] += 0.04
                    # write the different emotions and have a bar to indicate probabilities for each class
                    for index, emot in enumerate(emotion):
                        cv2.putText(image, emot, (10, index * 20 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
                        cv2.rectangle(image, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4), (255, 0, 0), -1)
                    emt = [prediction[0][0], prediction[0][1], prediction[0][2], prediction[0][3], prediction[0][4], prediction[0][5], prediction[0][6]]
                    indx = np.arange(len(emotion))
                    plt.bar(indx, emt, color='blue')
                    plt.xticks(indx, emotion)
                    plt.savefig("ab.png")
                    cv2.imshow("graph", cv2.imread("ab.png"))
                    plt.clf()
                    #cv2.waitKey(5)
                    #plt.show()
                    #return indx,emt
            except:
                #print("----->Problem during resize. Probably can't detect any face")
                continue
    return image
I have made my own model and trained it on the KDEF dataset. Now when I give a video as input, it detects the face in the video but it draws two bounding boxes. Can anyone help me find the mistake in the code? It runs successfully but creates two bounding boxes. The input the neural network accepts is 48*48.
First select the detection with the highest confidence, then draw it on the image.
detection_index = 0
max_confidence = 0
for i in range(detections.shape[2]):
    confidence = detections[0, 0, i, 2]
    if max_confidence < confidence:
        max_confidence = confidence
        detection_index = i

i = detection_index
x1 = int(detections[0, 0, i, 3] * frameWidth)
y1 = int(detections[0, 0, i, 4] * frameHeight)
x2 = int(detections[0, 0, i, 5] * frameWidth)
y2 = int(detections[0, 0, i, 6] * frameHeight)
cv2.rectangle(image, (x1, y1), (x2, y2), (255, 255, 0), 3)
try:
    image1 = gray[y1:(y2), x1:(x2)]
    img = cv2.resize(image1, (48, 48), interpolation=cv2.INTER_CUBIC) / 255.
    prediction = model1.predict_proba(img.reshape(1, 48, 48, 1))
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(image, str(emotions[prediction[0].argmax()]), (x1, y1 + 10), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    result = prediction
    if result is not None:
        if result[0][6] < 0.6:
            result[0][6] = result[0][6] - 0.12
            result[0][:3] += 0.01
            result[0][4:5] += 0.04
        # write the different emotions and have a bar to indicate probabilities for each class
        for index, emot in enumerate(emotion):
            cv2.putText(image, emot, (10, index * 20 + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            cv2.rectangle(image, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4),
                          (255, 0, 0), -1)
        emt = [prediction[0][0], prediction[0][1], prediction[0][2], prediction[0][3], prediction[0][4],
               prediction[0][5], prediction[0][6]]
        indx = np.arange(len(emotion))
        plt.bar(indx, emt, color='blue')
        plt.xticks(indx, emotion)
        plt.savefig("ab.png")
        cv2.imshow("graph", cv2.imread("ab.png"))
        plt.clf()
        # cv2.waitKey(5)
        # plt.show()
        # return indx,emt
except:
    # print("----->Problem during resize. Probably can't detect any face")
    pass  # 'continue' from the original only works inside a loop; nothing more to do here
return image
I have written code that draws circles over an image using mouse clicks. The radius and centre of the circles drawn are stored in a .txt file.
However, I want to add another component to the text file: the mean RGB value of the inside of the circles drawn. How do I go about doing that?
I've seen another question where the author recommends using a mask (RGB average of circles). Is it possible to do so without adding a mask?
import cv2
import numpy as np
import math
drawing = False
colours = [(255,0,0), (0,255,0), (0,0,255), (204,0,204), (0,155,255), (0,223,255), (238,220,130),
           (255,79,79), (161,0,244), (32,178,170), (170,178,32)]
def draw_circle(event, x, y, flags, param):
    global x1, y1, drawing, radius, num, img, img2, z, circleProps
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        x1, y1 = x, y
        radius = int(math.hypot(x - x1, y - y1))
        cv2.circle(img, (x1, y1), radius, (255, 0, 0), 1)
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing == True:
            a, b = x, y
            if a != x & b != y:
                img = img2.copy()
                radius = int(math.hypot(a - x1, b - y1))
                cv2.circle(img, (x1, y1), radius, (255, 0, 0), 1)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        num += 1
        radius = int(math.hypot(x - x1, y - y1))
        z = (z + 1) % (len(colours) - 1)
        cv2.circle(img, (x1, y1), radius, colours[z], 1)  # just need outer circle colour to be different
        print("rad:", radius)
        circleProps += "%d "%num + "%d "%x1 + "%d "%y1 + "%d \n"%radius
        cv2.circle(img, (x1, y1), 1, (0, 255, 0), -1)  # draws centre of circle
        #font = cv2.FONT_HERSHEY_SIMPLEX
        font = cv2.FONT_HERSHEY_PLAIN
        cv2.putText(img, str(num), (x1 + radius, y1 + radius), font, 1, (200, 255, 155), 1, cv2.LINE_AA)
        img2 = img.copy()
    elif event == cv2.EVENT_RBUTTONUP:
        pass
def handleKeyboard():
    if cv2.waitKey(20) == 38:
        print ('38')
if __name__ == "__main__":
num = 0
z = 0
windowName = 'Drawing'
circleProps ="Num | x1 | y1 | r \n"
img = cv2.imread('image.jpg', cv2.IMREAD_COLOR)
img2 = img.copy()
cv2.namedWindow(windowName)
cv2.setMouseCallback(windowName, draw_circle)
while (True):
cv2.imshow(windowName, img)
# handleKeyboard()
if cv2.waitKey(20) == 27:
text_file = open("Output.txt", "w") # create txt file
text_file.write(circleProps)
text_file.close()
break
elif cv2.waitKey(20) == 38:
print ('38')
cv2.destroyAllWindows()
EDIT:
I have just added these lines into the function and I am getting RGB values. However, I am not sure whether the values printed are right. Could someone clarify?
elif event == cv2.EVENT_LBUTTONUP:
    drawing = False
    num += 1
    radius = int(math.hypot(x - x1, y - y1))
    z = (z + 1) % (len(colours) - 1)
    circle_drawn = cv2.circle(img, (x1, y1), radius, colours[z], 1)  # lines added
    RGB = cv2.mean(circle_drawn)  # lines added
    print("RGB: ", RGB)  # lines added
    print("rad:", radius)
    circleProps += "%d "%num + "%d "%x1 + "%d "%y1 + "%d \n"%radius
    cv2.circle(img, (x1, y1), 1, (0, 255, 0), -1)  # draws centre of circle
    #font = cv2.FONT_HERSHEY_SIMPLEX
    font = cv2.FONT_HERSHEY_PLAIN
    cv2.putText(img, str(num), (x1 + radius, y1 + radius), font, 1, (200, 255, 155), 1, cv2.LINE_AA)
    img2 = img.copy()
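Note that cv2.circle() draws in place and returns the image itself, so cv2.mean(circle_drawn) averages every pixel of the whole image rather than just the pixels inside the circle. For comparison, a minimal sketch of the mask-based approach from the linked question, reusing the img, x1, y1 and radius variables and the numpy import already in the script:
mask = np.zeros(img.shape[:2], dtype=np.uint8)  # single-channel mask, same size as the image
cv2.circle(mask, (x1, y1), radius, 255, -1)     # filled circle marks the region of interest
mean_bgr = cv2.mean(img, mask=mask)             # per-channel mean over the masked pixels only
print("Mean BGR inside circle:", mean_bgr[:3])  # OpenCV channel order is B, G, R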