Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
34 commits
Select commit Hold shift + click to select a range
07b6afd
Lane detection with video (#1)
NguyenHuyNhan2003 Apr 3, 2024
6631aed
add test video
NguyenHuyNhan2003 Apr 3, 2024
81670e7
Lane detection with video (#2)
NguyenHuyNhan2003 Apr 3, 2024
72b129c
Merge branch 'develop' of https://github.com/superbadteam/LaneSenseGu…
NguyenHuyNhan2003 Apr 3, 2024
7831320
return a list of frames and coordinates
NguyenHuyNhan2003 Apr 12, 2024
5284b2d
return a list of frames and coordinates
NguyenHuyNhan2003 Apr 12, 2024
89145e1
Merge pull request #3 from superbadteam/edit_lane_detection
NguyenHuyNhan2003 Apr 12, 2024
a8b030a
init drowsiness detection
Apr 14, 2024
8d6e43b
[PBL5-1][PBL5-29] Generate dataset csv file (#4)
phamhongphuc1403 Apr 14, 2024
c5e4428
[PBL5-9][PBL5-32] Extract frames from video per second (#5)
huynamboz Apr 15, 2024
cecc684
Extract all frames between the specified timestamps
NguyenHuyNhan2003 May 11, 2024
1e84799
Merge branch 'develop' of https://github.com/superbadteam/LaneSenseGu…
NguyenHuyNhan2003 May 11, 2024
e212151
Merge pull request #7 from superbadteam/feat/edit_lane_detection
NguyenHuyNhan2003 May 11, 2024
ac2b918
feat: process image input (#9)
huynamboz May 12, 2024
6b68ed5
[PBL5-1][PBL5-36] Automatically generate image dataset (#12)
phamhongphuc1403 May 12, 2024
db69542
feat: handle input dataset (#11)
NguyenHuyNhan2003 May 13, 2024
4fc0678
feat: config model (#10)
huynamboz May 13, 2024
9e301ae
feat: train model (#13)
huynamboz May 13, 2024
83a83c3
[PBL5-9][PBL5-38] feat: training model (#14)
huynamboz May 21, 2024
3b09546
m
NguyenHuyNhan2003 May 21, 2024
67f217e
Merge pull request #15 from superbadteam/nhan/hough
NguyenHuyNhan2003 May 21, 2024
187fff4
feat: train model (#16)
NguyenHuyNhan2003 Jun 11, 2024
2b0851f
feat: read stream video from raspberry (#17)
huynamboz Jun 11, 2024
9630b2f
feat: read & forward stream (#18)
huynamboz Jun 11, 2024
ac93920
feat: train model v4 (#19)
NguyenHuyNhan2003 Jun 12, 2024
abd116c
feat: server (#20)
huynamboz Jun 14, 2024
9c7ce00
feat: ras server (#21)
huynamboz Jun 14, 2024
db0934b
feat: lane detected (#22)
NguyenHuyNhan2003 Jun 14, 2024
6500744
Merge branch 'feat/edit_lane_detection' of https://github.com/superba…
NguyenHuyNhan2003 Jun 15, 2024
5233d7b
M
NguyenHuyNhan2003 Jun 15, 2024
4d2c4d1
cm
NguyenHuyNhan2003 Jun 15, 2024
9f92c0f
mes
NguyenHuyNhan2003 Jun 15, 2024
b0a0793
latest changes
NguyenHuyNhan2003 Jun 15, 2024
2e64948
resolve conflict
NguyenHuyNhan2003 Jun 15, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,5 @@ lane_data
train_data

model_trained

trained_drowsiness_model

dlib
57 changes: 57 additions & 0 deletions TrainModel.ipynb

Large diffs are not rendered by default.

19 changes: 7 additions & 12 deletions data-transfer/readStream.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,11 +185,13 @@ def crop_output(image):
# lane_model = keras.models.load_model(r'C:\Users\nguye\OneDrive\Desktop\U\kì 6\PBL5\LaneSenseGuard\model_trained\model_trained_v4.h5')
lane_model = keras.models.load_model('././model_trained/model_trained_v4.h5')


dict = {'true': [1, 0], 'false': [0, 1]}
name_result = ['right', 'wrong']
frame_counter = 0
frame_per_predict = 24


cam2 = "http://192.168.137.9:8080/?action=stream"
# cam2 = "http://192.168.137.9:8080/?action=stream"

Expand All @@ -201,10 +203,8 @@ def crop_output(image):
import asyncio
async def send_and_receive():
global bytes, frame_counter

uri = "ws://192.168.137.9:12345"
uri2 = "ws://103.77.246.238:5001"
async with websockets.connect(uri) as websocket_1, websockets.connect(uri2) as websocket_2:
uri = "ws://192.168.145.37:12345"
async with websockets.connect(uri) as websocket:

while True:
bytes += stream.read(1024)
Expand All @@ -225,23 +225,18 @@ async def send_and_receive():
result = name_result[np.argmax(lane_model.predict(image.reshape(-1, 60, 160, 1)))]
print(result)

await websocket_1.send("lane:" + result)
response = await websocket_1.recv()
await websocket.send("lane:" + result)
response = await websocket.recv()
vertices = np.array([[(130, 390),(280, 305), (350, 305), (515,390)]], dtype=np.int32) # (480, 640, 3)
i = cv2.polylines(i, vertices, isClosed=True, color=(0, 255, 0), thickness=2)
# print(i.shape)
# Nén ảnh và gửi tới server WebSocket thứ hai
i = cv2.resize(i, (320, 240))
_, buffer = cv2.imencode('.jpg', i, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
await websocket_2.send(buffer.tobytes())

cv2.imshow('i', i)

if cv2.waitKey(1) == 27:
exit(0)
except Exception as ex:

print(ex)
# print(ex)

pass

Expand Down
234 changes: 219 additions & 15 deletions data-transfer/readStream2.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,223 @@
import cv2
import urllib.request
import numpy as np
import dlib
from imutils import face_utils
import os
import uuid
import tensorflow
from tensorflow import keras
from keras.models import load_model
# from dotenv import load_dotenv
from pathlib import Path

cam2 = "http://169.254.142.134:8000/stream.mjpg"
# cam2 = "http://169.254.142.134:8080/?action=stream"
stream = urllib.request.urlopen(cam2)
bytes = bytes()
while True:
bytes += stream.read(1024)
a = bytes.find(b'\xff\xd8')
b = bytes.find(b'\xff\xd9')
if a != -1 and b != -1:
jpg = bytes[a:b+2]
bytes = bytes[b+2:]
i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
cv2.imshow('i', i)
if cv2.waitKey(1) == 27:
exit(0)
# dotenv_path = Path('../../.env')
# load_dotenv(dotenv_path=dotenv_path)

# DATASET_IMAGE_PATH = os.getenv('DATASET_IMAGE_PATH')

# print(DATASET_IMAGE_PATH)

# dirname = os.path.dirname(__file__)

# Pre-trained dlib 68-point facial-landmark model; used by getEyes() to
# locate the eye landmarks inside a detected face rectangle.
predictor_path = "././drowsiness-detector/models/shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(predictor_path)

# Haar cascade used by detect() for the initial coarse face detection pass.
cascade_path = "././drowsiness-detector/models/haarcascade_frontalface_alt.xml"
face_cascade = cv2.CascadeClassifier(cascade_path)

# Blink/drowsiness classifier — currently disabled in this revision.
# drowsy_model = load_model('././drowsiness-detector/models/blinkModel.hdf5')
# drowsy_model.summary()

# detect the face rectangle
def detect(img, cascade = face_cascade , minimumFeatureSize=(20, 20)):
    """Run the Haar cascade face detector on *img*.

    Returns detections as (minX, minY, maxX, maxY) boxes, or an empty
    list when nothing is found.
    """
    if cascade.empty():
        raise (Exception("There was a problem loading your Haar Cascade xml file."))

    boxes = cascade.detectMultiScale(
        img,
        scaleFactor=1.3,
        minNeighbors=1,
        minSize=minimumFeatureSize,
    )

    # detectMultiScale yields an empty tuple on no hits; normalise to [].
    if len(boxes) == 0:
        return []

    # OpenCV reports (x, y, width, height); fold the size columns into
    # absolute (maxX, maxY) coordinates.
    boxes[:, 2:] += boxes[:, :2]
    return boxes

class RightEye:
    """Padded crop box around the right eye.

    *eye* is an (N, 2) array of landmark (x, y) points. The box is padded
    asymmetrically around the landmarks, and its height is derived from the
    width so the crop matches the 34x26 size used downstream (see the
    (34, 26) resize in saveImage).
    """

    def __init__(self, eye):
        xs = eye[:, 0]
        ys = eye[:, 1]
        self.x_min = xs.min() - 4    # small pad on the inner side
        self.x_max = xs.max() + 20   # larger pad on the outer side
        self.y_min = ys.min() - 22   # pad upward above the eye
        self.y_max = ys.max()
        self.width = self.x_max - self.x_min
        # Lock the 34:26 aspect ratio expected by the eye crop.
        self.height = int(self.width * (26 / 34))

class LeftEye:
    """Padded crop box around the left eye.

    Mirror image of RightEye: *eye* is an (N, 2) array of landmark (x, y)
    points; padding is larger on the outer (left) side, and the height is
    derived from the width to keep the 34x26 crop ratio (see the
    (34, 26) resize in saveImage).
    """

    def __init__(self, eye):
        xs = eye[:, 0]
        ys = eye[:, 1]
        self.x_min = xs.min() - 20   # larger pad on the outer side
        self.x_max = xs.max() + 4    # small pad on the inner side
        self.y_min = ys.min() - 22   # pad upward above the eye
        self.y_max = ys.max()
        self.width = self.x_max - self.x_min
        # Lock the 34:26 aspect ratio expected by the eye crop.
        self.height = int(self.width * (26 / 34))

def getFace(frame, gray):
    """Detect the first face in *gray* and return it as a dlib.rectangle.

    Returns None when no face is detected. *frame* is not used by the
    detection itself; it is kept in the signature for the (commented-out)
    debug drawing and for caller compatibility.
    """
    candidates = detect(gray, minimumFeatureSize=(80, 80))

    if len(candidates) == 0:
        return None

    # Only the first detection is used, even when several faces are found.
    first = candidates[0]

    left, top, right, bottom = (int(v) for v in first)
    return dlib.rectangle(left=left, top=top, right=right, bottom=bottom)

def getEyes(face, gray):
    """Locate both eyes inside *face* and wrap them in eye-box objects.

    Returns a (LeftEye, RightEye) pair.

    NOTE(review): the landmark index sets are crossed — the "left_eye"
    landmark range feeds the RightEye box and vice versa. Presumably this
    compensates for a mirrored camera image; confirm before "fixing".
    """
    landmarks = face_utils.shape_to_np(predictor(gray, face))

    # Landmark index ranges for each eye (see NOTE above on the crossing).
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    return LeftEye(landmarks[lStart:lEnd]), RightEye(landmarks[rStart:rEnd])

def saveImage(frame, eye, dataset_dir=None):
    """Crop the eye region out of *frame* and save it as a 34x26 JPEG.

    The crop is written to <dataset_dir>/gen_close/<uuid4>.jpg. When
    *dataset_dir* is None the module-level DATASET_IMAGE_PATH is used.

    NOTE(review): the dotenv loading that defines DATASET_IMAGE_PATH is
    commented out at the top of this file, so calling this function without
    an explicit *dataset_dir* currently raises NameError. The new optional
    parameter makes the function usable without re-enabling dotenv.
    """
    if dataset_dir is None:
        dataset_dir = DATASET_IMAGE_PATH  # NameError until env loading is restored

    subfolder_path = os.path.join(dataset_dir, "gen_close")
    os.makedirs(subfolder_path, exist_ok=True)

    # Crop the padded eye box, then normalise to the model's input size.
    roi = frame[eye.y_min : eye.y_min + eye.height, eye.x_min : eye.x_min + eye.width]
    roi = cv2.resize(roi, (34, 26))

    random_filename = str(uuid.uuid4()) + ".jpg"
    cv2.imwrite(os.path.join(subfolder_path, random_filename), roi)

def process_image(frame):
    """Find the face in *frame*, locate both eyes, and outline them.

    Draws a green rectangle around each eye box directly on *frame*
    (in-place mutation) and returns (leftEye, rightEye), or (None, None)
    when no face is detected.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    face = getFace(frame, gray)
    if face is None:
        return None, None

    leftEye, rightEye = getEyes(face, gray)

    # Outline both padded eye boxes on the preview frame.
    for box in (leftEye, rightEye):
        cv2.rectangle(
            frame,
            (box.x_min, box.y_min),
            (box.x_min + box.width, box.y_min + box.height),
            (0, 255, 0),
            2,
        )

    return leftEye, rightEye

def main():
    """Live webcam preview: detect the face, outline both eyes per frame.

    Press 'q' in the preview window to quit.
    """
    camera = cv2.VideoCapture(0)
    try:
        while True:
            ret, frame = camera.read()
            if not ret:
                # Bug fix: the original ignored `ret`; a failed grab yields
                # frame=None and cv2.cvtColor would crash. Stop cleanly.
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            face = getFace(frame, gray)
            if face is None:
                continue

            leftEye, rightEye = getEyes(face, gray)

            # Outline both padded eye boxes on the preview frame.
            for eye in (leftEye, rightEye):
                cv2.rectangle(
                    frame,
                    (eye.x_min, eye.y_min),
                    (eye.x_min + eye.width, eye.y_min + eye.height),
                    (0, 255, 0),
                    2,
                )

            # saveImage(frame, leftEye)
            # saveImage(frame, rightEye)

            cv2.imshow('dataset generator', frame)

            # if the `q` key was pressed, break from the loop
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Bug fix: release the capture device explicitly — the original
        # only did `del(camera)`, which does not guarantee the camera is
        # freed, and never released it on an exception.
        camera.release()
        cv2.destroyAllWindows()

if __name__ == '__main__':
    main()

# cam2 = "http://169.254.142.134:8000/stream.mjpg"
# # cam2 = "http://169.254.142.134:8080/?action=stream"
# stream = urllib.request.urlopen(cam2)
# bytes = bytes()

# import websockets
# import asyncio
# async def send_and_receive():
# global bytes, frame_counter
# uri = "ws://192.168.145.37:12345"
# async with websockets.connect(uri) as websocket:

# while True:
# bytes += stream.read(1024)
# a = bytes.find(b'\xff\xd8')
# b = bytes.find(b'\xff\xd9')
# if a != -1 and b != -1:
# jpg = bytes[a:b+2]
# bytes = bytes[b+2:]
# i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
# frame_counter += 1
# if frame_counter == frame_per_predict:
# frame_counter = 0
# try:
# # Get eyes ROI
# LeftEye_roi, RightEye_roi = process_image(i)
# if frame_counter == 0:
# # perform prediction

# await websocket.send("Left eye:" + LeftEye_roi + "Right :" + RightEye_roi)
# response = await websocket.recv()

# # print(i.shape)

# cv2.imshow('i', i)

# if cv2.waitKey(1) == 27:
# exit(0)
# except Exception as ex:
# # print(ex)

# pass


# asyncio.get_event_loop().run_until_complete(send_and_receive())

# py data-transfer/readStream2.py
1 change: 0 additions & 1 deletion data-transfer/server-ras.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import asyncio
import websockets

import json
import time
import RPi.GPIO as GPIO
Expand Down