diff --git a/Cursor_Movement_With_Eyeball_Using_OpenCV/.gitignore b/Cursor_Movement_With_Eyeball_Using_OpenCV/.gitignore new file mode 100644 index 0000000..1443a86 --- /dev/null +++ b/Cursor_Movement_With_Eyeball_Using_OpenCV/.gitignore @@ -0,0 +1,2 @@ +# Ignore virtual environment +venv/ \ No newline at end of file diff --git a/Cursor_Movement_With_Eyeball_Using_OpenCV/README.md b/Cursor_Movement_With_Eyeball_Using_OpenCV/README.md new file mode 100644 index 0000000..5a03f66 --- /dev/null +++ b/Cursor_Movement_With_Eyeball_Using_OpenCV/README.md @@ -0,0 +1,41 @@ +EyeBall Cursor Control +EyeBall Cursor Control is a Python application designed for hands-free control of the computer cursor using real-time eye tracking. It utilizes computer vision techniques to track eye movements through a webcam and translates them into mouse cursor movements on the screen. The application also includes blink detection to simulate mouse clicks or other predefined actions. + + +Features +Real-time Eye Tracking: Tracks the user's eye movements in real time using webcam input. +Cursor Movement: Moves the computer cursor based on the direction of the user's gaze. +Blink Detection: Detects user blinks to trigger mouse clicks or other actions. +Adjustable Sensitivity: Allows users to adjust sensitivity settings for precise cursor control. +Cross-Platform Compatibility: Works on multiple operating systems with Python and required dependencies. + +Step 1: Installation: +Clone the repository: +git clone https://github.com/Nikhitha_opensource/Cursor_Movement_With_Eyeball_Using_OpenCV.git + +cd Cursor_Movement_With_Eyeball_Using_OpenCV + +Step 2: Install dependencies: +pip install -r Requirements.txt + +Step 3: Run the application: +streamlit run app.py + +NOTE: For best results, run the application inside a virtual environment. + +Working: +1. Position your face in front of the webcam. + +2. Use your gaze to control the mouse cursor on the screen. 
+ +3.Blink to simulate mouse clicks or perform other predefined actions. + +Dependencies +opencv-python: For computer vision tasks. +mediapipe: For face mesh and landmark detection. +pyautogui: For controlling the mouse cursor. +streamlit: For creating the web interface. +streamlit-webrtc: For integrating webcam input in Streamlit. + +Contributing: +Contributions are welcome! Please fork the repository and submit a pull request with your improvements. \ No newline at end of file diff --git a/Cursor_Movement_With_Eyeball_Using_OpenCV/Requirements.txt b/Cursor_Movement_With_Eyeball_Using_OpenCV/Requirements.txt new file mode 100644 index 0000000..8fcfbc5 --- /dev/null +++ b/Cursor_Movement_With_Eyeball_Using_OpenCV/Requirements.txt @@ -0,0 +1,70 @@ +absl-py==2.1.0 +altair==5.3.0 +attrs==23.2.0 +blinker==1.8.2 +cachetools==5.3.3 +certifi==2024.6.2 +cffi==1.16.0 +charset-normalizer==3.3.2 +click==8.1.7 +colorama==0.4.6 +contourpy==1.2.1 +cycler==0.12.1 +flatbuffers==24.3.25 +fonttools==4.53.0 +gitdb==4.0.11 +GitPython==3.1.43 +idna==3.7 +jax==0.4.30 +jaxlib==0.4.30 +Jinja2==3.1.4 +jsonschema==4.22.0 +jsonschema-specifications==2023.12.1 +kiwisolver==1.4.5 +markdown-it-py==3.0.0 +MarkupSafe==2.1.5 +matplotlib==3.9.0 +mdurl==0.1.2 +mediapipe==0.10.14 +ml-dtypes==0.4.0 +MouseInfo==0.1.3 +numpy==2.0.0 +opencv-contrib-python==4.10.0.84 +opencv-python==4.10.0.84 +opencv-python-headless==4.10.0.84 +opt-einsum==3.3.0 +packaging==24.1 +pandas==2.2.2 +pillow==10.3.0 +protobuf==4.25.3 +pyarrow==16.1.0 +PyAutoGUI==0.9.54 +pycparser==2.22 +pydeck==0.9.1 +PyGetWindow==0.0.9 +Pygments==2.18.0 +PyMsgBox==1.0.9 +pyparsing==3.1.2 +pyperclip==1.9.0 +PyRect==0.2.0 +PyScreeze==0.1.30 +python-dateutil==2.9.0.post0 +pytweening==1.2.0 +pytz==2024.1 +referencing==0.35.1 +requests==2.32.3 +rich==13.7.1 +rpds-py==0.18.1 +scipy==1.13.1 +six==1.16.0 +smmap==5.0.1 +sounddevice==0.4.7 +streamlit==1.36.0 +tenacity==8.4.1 +toml==0.10.2 +toolz==0.12.1 +tornado==6.4.1 +typing_extensions==4.12.2 
+tzdata==2024.1 +urllib3==2.2.2 +watchdog==4.0.1 \ No newline at end of file diff --git a/Cursor_Movement_With_Eyeball_Using_OpenCV/app.py b/Cursor_Movement_With_Eyeball_Using_OpenCV/app.py new file mode 100644 index 0000000..044ef9f --- /dev/null +++ b/Cursor_Movement_With_Eyeball_Using_OpenCV/app.py @@ -0,0 +1,112 @@ +import cv2 +import mediapipe as mp +import pyautogui +import streamlit as st +from streamlit_webrtc import VideoTransformerBase, webrtc_streamer + +# Initialize FaceMesh +face_mesh = mp.solutions.face_mesh.FaceMesh(refine_landmarks=True) +screen_w, screen_h = pyautogui.size() + +class VideoTransformer(VideoTransformerBase): + def __init__(self): + self.face_mesh = mp.solutions.face_mesh.FaceMesh(refine_landmarks=True) + + def transform(self, frame): + frame = frame.to_ndarray(format="bgr24") + frame = cv2.flip(frame, 1) # Flip the frame horizontally + rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Convert frame to RGB + + # Process the frame to get the landmarks + output = self.face_mesh.process(rgb_frame) + landmarks_points = output.multi_face_landmarks + frame_h, frame_w, _ = frame.shape + + if landmarks_points: + landmarks = landmarks_points[0].landmark + for id, landmark in enumerate(landmarks[474:478]): + # Convert normalized landmark coordinates to pixel coordinates + x = int(landmark.x * frame_w) + y = int(landmark.y * frame_h) + cv2.circle(frame, (x, y), 3, (0, 255, 0)) + + if id == 1: + # Map normalized coordinates to screen coordinates + screen_x = int(landmark.x * screen_w) + screen_y = int(landmark.y * screen_h) + pyautogui.moveTo(screen_x, screen_y) + + # Define the landmarks for the left eye + left = [landmarks[145], landmarks[159]] + for landmark in left: + x = int(landmark.x * frame_w) + y = int(landmark.y * frame_h) + cv2.circle(frame, (x, y), 3, (0, 255, 255), -1) + + # Check for blink (vertical distance between two points is small) + if (left[0].y - left[1].y) < 0.020: + pyautogui.click() + pyautogui.sleep(1) + + return 
frame + +# Custom CSS for the webpage +st.markdown(""" + +""", unsafe_allow_html=True) + +# Streamlit interface +st.markdown('