-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathquickDrawApp.py
More file actions
118 lines (99 loc) · 4.52 KB
/
quickDrawApp.py
File metadata and controls
118 lines (99 loc) · 4.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import numpy as np
import os
import cv2
from keras.models import load_model
from collections import deque
# Load the pre-trained QuickDraw classifier once at import time; every
# prediction in this script reuses this single model instance.
# NOTE(review): path is relative — assumes the script runs from the repo root.
model = load_model('WeightsOfQuickDraw.h5')
def main():
    """Track a blue pointer in the webcam feed, let the user draw on a
    virtual blackboard, classify the finished drawing with the QuickDraw
    model, and overlay the matching emoji on the live frame.

    Press 'q' in the video window to quit.
    """
    # HSV range that segments the blue pointer object.
    lower_blue = np.array([110, 50, 50])
    upper_blue = np.array([130, 255, 255])
    blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
    digit = np.zeros((200, 200, 3), dtype=np.uint8)
    emojis = get_emojis()
    cap = cv2.VideoCapture(0)
    pts = deque(maxlen=512)
    pred_class = 0
    try:
        while cap.isOpened():
            ret, img = cap.read()
            if not ret:
                # Camera produced no frame; stop instead of crashing on None.
                break
            img = cv2.flip(img, 1)
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            kernel = np.ones((5, 5), np.uint8)
            mask = cv2.inRange(hsv, lower_blue, upper_blue)
            mask = cv2.erode(mask, kernel, iterations=4)
            cv2.imshow("mask", mask)
            # [-2:] keeps this compatible with OpenCV 3 (3-tuple return)
            # and OpenCV 4 (2-tuple return).
            cnts, _hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)[-2:]
            center = None
            if len(cnts) >= 1:
                cnt = max(cnts, key=cv2.contourArea)
                if cv2.contourArea(cnt) > 200:
                    ((x, y), radius) = cv2.minEnclosingCircle(cnt)
                    cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 0), 2)
                    M = cv2.moments(cnt)
                    center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
                    # Draw the centroid marker only after `center` is computed;
                    # the original called cv2.circle while center was still None.
                    cv2.circle(img, center, 5, (0, 0, 255), -1)
                    pts.appendleft(center)
                    for i in range(1, len(pts)):
                        if pts[i - 1] is None or pts[i] is None:
                            continue
                        cv2.line(blackboard, pts[i - 1], pts[i], (255, 255, 255), 7)
                        cv2.line(img, pts[i - 1], pts[i], (0, 0, 255), 2)
            elif len(cnts) == 0:
                # Pointer left the frame: classify whatever has been drawn.
                # Original tested `len(pts) != []`, which is always True.
                if len(pts) != 0:
                    blackboard_gray = cv2.cvtColor(blackboard, cv2.COLOR_BGR2GRAY)
                    blur1 = cv2.medianBlur(blackboard_gray, 15)
                    blur1 = cv2.GaussianBlur(blur1, (5, 5), 0)
                    thresh1 = cv2.threshold(blur1, 0, 255,
                                            cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
                    # [-2] selects the contour list under both OpenCV 3 and 4;
                    # the original's [1] yields the hierarchy under OpenCV 4.
                    blackboard_cnts = cv2.findContours(thresh1.copy(), cv2.RETR_TREE,
                                                       cv2.CHAIN_APPROX_NONE)[-2]
                    if len(blackboard_cnts) >= 1:
                        cnt = max(blackboard_cnts, key=cv2.contourArea)
                        if cv2.contourArea(cnt) > 2000:
                            x, y, w, h = cv2.boundingRect(cnt)
                            digit = blackboard_gray[y:y + h, x:x + w]
                            pred_probab, pred_class = predict_model(model, digit)
                            print(pred_class, pred_probab)
                    # Reset the stroke buffer and the blackboard for the next drawing.
                    pts = deque(maxlen=512)
                    blackboard = np.zeros((480, 640, 3), dtype=np.uint8)
            img = overlay(img, emojis[pred_class], 400, 250, 100, 100)
            cv2.imshow("Frame", img)
            # waitKey pumps the HighGUI event loop (imshow renders nothing
            # without it) and gives the user a way to exit.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Release the camera and close the windows even on error.
        cap.release()
        cv2.destroyAllWindows()
def predict_model(model, image):
    """Classify a single grayscale drawing with the QuickDraw model.

    Parameters
    ----------
    model : object with a Keras-style ``predict`` method.
    image : 2-D grayscale array of any size; resized to 28x28 here
            (resize is skipped when the input is already 28x28).

    Returns
    -------
    (pred_probab, pred_class) : the highest class probability and the
    index of that class.
    """
    image_x, image_y = 28, 28
    if image.shape[:2] != (image_y, image_x):
        img = cv2.resize(image, (image_x, image_y))
    else:
        img = image
    img = np.array(img, dtype=np.float32)
    # Network expects a (batch, height, width, channels) tensor.
    processed = np.reshape(img, (-1, image_x, image_y, 1))
    pred_probab = model.predict(processed)[0]
    # np.argmax replaces the original O(n) list(...).index(max(...)) round trip.
    pred_class = int(np.argmax(pred_probab))
    return float(np.max(pred_probab)), pred_class
def get_emojis():
    """Load emoji images named 0.png, 1.png, ... from qd_emo/, ordered by
    class index so ``emojis[pred_class]`` maps a prediction to its image.
    """
    emojis_folder = 'qd_emo/'
    emojis = []
    # Count only .png files so stray entries (e.g. .DS_Store) don't inflate
    # the range and make cv2.imread return None for a missing index.
    count = len([f for f in os.listdir(emojis_folder) if f.endswith('.png')])
    for idx in range(count):
        # -1 == IMREAD_UNCHANGED: keeps the alpha channel that overlay() needs.
        emojis.append(cv2.imread(emojis_folder + str(idx) + '.png', -1))
    return emojis
def overlay(image, emoji, x, y, w, h):
    """Alpha-blend ``emoji`` (BGRA, resized to w x h) onto ``image`` at (x, y).

    Returns the (mutated) frame. If the target region is clipped by the frame
    border the blend is skipped and the frame is returned unchanged.
    """
    emoji = cv2.resize(emoji, (w, h))
    try:
        image[y:y + h, x:x + w] = blend_transparent(image[y:y + h, x:x + w], emoji)
    except (ValueError, cv2.error):
        # Shape mismatch when the slice is smaller than the emoji (near the
        # frame edge). Deliberately best-effort — but no longer a bare
        # except, which also swallowed KeyboardInterrupt/SystemExit.
        pass
    return image
def blend_transparent(face_img, overlay_t_img):
    """Alpha-blend a BGRA overlay onto an equally sized BGR background patch."""
    # Separate the BGR color planes from the alpha plane.
    fg_bgr = overlay_t_img[:, :, :3]
    alpha = overlay_t_img[:, :, 3:]
    inv_alpha = 255 - alpha
    # Expand both single-channel masks to three channels so they can act as
    # per-pixel weights for the BGR images.
    alpha_bgr = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR)
    inv_alpha_bgr = cv2.cvtColor(inv_alpha, cv2.COLOR_GRAY2BGR)
    # Normalize both sources by their masks, then recombine and rescale to uint8.
    weighted_bg = (face_img * (1 / 255.0)) * (inv_alpha_bgr * (1 / 255.0))
    weighted_fg = (fg_bgr * (1 / 255.0)) * (alpha_bgr * (1 / 255.0))
    return np.uint8(cv2.addWeighted(weighted_bg, 255.0, weighted_fg, 255.0, 0.0))
# Warm-up prediction: forces Keras/TensorFlow to build the inference graph once
# so the first real classification inside main() is not slowed by lazy init.
predict_model(model, np.zeros((50, 50, 1), dtype=np.uint8))

if __name__ == '__main__':
    main()