From 8179560d0249874b023c483c49b30e019ef1249e Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 2 Mar 2026 18:14:40 +0000 Subject: [PATCH 1/4] Initial plan From 4230ea6661fc3519ba1c5ceb2293f403207fd605 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 2 Mar 2026 18:26:10 +0000 Subject: [PATCH 2/4] Implement Digital Bloom Python control system MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - config.ini: full config with WiFi, Devices, Vision, Personas, ML, Recording sections - vision_tracker.py: DeepFace + MediaPipe pose with Haar cascade fallback; EmotionData dataclass - osc_client.py: multi-device FlowerDevice/FlowerNetwork matching esp32_sylvie OSC protocol - persona_engine.py: 3-layer biomorphic engine (ML brain, state machine, motion render + jealousy network) - ml_trainer.py: sklearn RandomForest/SVM classifier for emotion→persona mapping - control_panel.py: Tkinter GUI with webcam preview, perception data, manual controls, ML training - main.py: application entry point wiring all components together - requirements.txt: updated with all dependencies - docs/: rewritten README, QUICKSTART, ARCHITECTURE, WIRING, TUNING Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- docs/ARCHITECTURE.md | 546 +++++----------------------- docs/QUICKSTART.md | 229 ++---------- docs/README.md | 347 +----------------- docs/TUNING.md | 338 ++++------------- docs/WIRING.md | 413 ++++----------------- python_controller/config.ini | 110 +++--- python_controller/control_panel.py | 334 +++++++++++++++++ python_controller/main.py | 306 +++------------- python_controller/ml_trainer.py | 131 +++++++ python_controller/osc_client.py | 158 ++++---- python_controller/persona_engine.py | 201 ++++++++++ python_controller/requirements.txt | 12 + python_controller/vision_tracker.py | 345 ++++++++++-------- 13 files changed, 1339 insertions(+), 2131 deletions(-) create mode 100644 python_controller/control_panel.py create mode 100644 python_controller/ml_trainer.py create mode 100644 python_controller/persona_engine.py diff --git a/docs/ARCHITECTURE.md b/docs/ARCHITECTURE.md index b3e6443..ddf6493 100644 --- a/docs/ARCHITECTURE.md +++ b/docs/ARCHITECTURE.md @@ -2,485 +2,131 @@ ## Overview -The Vision PID Control System consists of three main components: - -1. **Vision Processing** (Python + OpenCV) -2. **Control Algorithm** (PID Controller) -3. 
**Hardware Control** (ESP32 + OSC) - -## Component Architecture - -``` -┌─────────────────────────────────────────────────────────┐ -│ Computer System │ -│ │ -│ ┌────────────────────────────────────────────────┐ │ -│ │ Vision Processing Module │ │ -│ │ │ │ -│ │ ┌──────────┐ ┌────────────────────────┐ │ │ -│ │ │ Camera │───→│ OpenCV Processing │ │ │ -│ │ │ Input │ │ - Face Detection │ │ │ -│ │ └──────────┘ │ - Color Detection │ │ │ -│ │ │ - Error Calculation │ │ │ -│ │ └────────────┬───────────┘ │ │ -│ └─────────────────────────────────┼──────────────┘ │ -│ │ │ -│ │ (x_error, y_error) │ -│ ▼ │ -│ ┌─────────────────────────────────────────────────┐ │ -│ │ PID Control Module │ │ -│ │ │ │ -│ │ ┌──────────────┐ ┌──────────────┐ │ │ -│ │ │ PID Pan │ │ PID Tilt │ │ │ -│ │ │ Controller │ │ Controller │ │ │ -│ │ │ │ │ │ │ │ -│ │ │ Kp, Ki, Kd │ │ Kp, Ki, Kd │ │ │ -│ │ └──────┬───────┘ └──────┬───────┘ │ │ -│ │ │ │ │ │ -│ │ │ pan_correction │ tilt_correction│ │ -│ │ └────────┬───────────────┘ │ │ -│ └──────────────────┼──────────────────────────── │ │ -│ │ │ -│ │ (servo_angles, flower_state) │ -│ ▼ │ -│ ┌─────────────────────────────────────────────────┐ │ -│ │ OSC Communication Module │ │ -│ │ │ │ -│ │ ┌──────────────────────────────────────────┐ │ │ -│ │ │ UDP Client (python-osc) │ │ │ -│ │ │ - /flower/servo [pan, tilt] │ │ │ -│ │ │ - /flower/state [openness] │ │ │ -│ │ │ - /flower/motor [speed] │ │ │ -│ │ │ - /flower/mode [tracking] │ │ │ -│ │ └──────────────────┬───────────────────────┘ │ │ -│ └─────────────────────┼──────────────────────────┘ │ -└─────────────────────────┼──────────────────────────── ┘ - │ - │ WiFi/UDP - ▼ -┌─────────────────────────────────────────────────────────┐ -│ ESP32 System │ -│ │ -│ ┌─────────────────────────────────────────────────┐ │ -│ │ WiFi + OSC Server Module │ │ -│ │ │ │ -│ │ ┌──────────────────────────────────────────┐ │ │ -│ │ │ UDP Server (OSC Library) │ │ │ -│ │ │ Listens on port 8000 │ │ │ -│ │ │ Parses OSC messages │ │ │ -│ │ └──────────────────┬───────────────────────┘ │ │ -│ └─────────────────────┼──────────────────────────┘ │ -│ │ │ -│ │ (commands) │ -│ ▼ │ -│ ┌─────────────────────────────────────────────────┐ │ -│ │ Hardware Control Module │ │ -│ │ │ │ -│ │ ┌──────────────┐ ┌──────────────┐ │ │ -│ │ │ Servo Driver │ │ Motor Driver │ │ │ -│ │ │ │ │ │ │ │ -│ │ │ GPIO 18, 19 │ │ GPIO 25-27 │ │ │ -│ │ └──────┬───────┘ └──────┬───────┘ │ │ -│ └─────────┼──────────────────┼────────────────────┘ │ -│ │ │ │ -└─────────────┼──────────────────┼────────────────────────┘ - │ │ - ▼ ▼ - ┌──────────────┐ ┌──────────────┐ - │ Servos │ │ DC Motor │ - │ (Pan/Tilt) │ │ (Flower) │ - └──────────────┘ └──────────────┘ -``` - -## Data Flow - -### 1. Vision Processing Flow - -``` -Camera Frame (640x480 BGR) - │ - ▼ - Preprocessing - (flip, grayscale) - │ - ▼ -┌───────┴────────┐ -│ │ -▼ ▼ -Face Detection Color Detection -(Haar Cascade) (HSV Masking) -│ │ -└───────┬────────┘ - │ - ▼ - Target Center (x, y) - │ - ▼ - Calculate Error - error_x = target_x - center_x - error_y = target_y - center_y -``` - -### 2. PID Control Flow +Digital Bloom uses a three-layer biomorphic control architecture: ``` -Error Input - │ - ▼ -┌────────────────┐ -│ Calculate: │ -│ │ -│ P = Kp × error │ -│ I = Ki × ∫error│ -│ D = Kd × d/dt │ -└───────┬────────┘ - │ - ▼ - output = P + I + D - │ - ▼ - Clamp to limits - │ - ▼ - Correction Value -``` - -### 3. 
Communication Flow - -``` -Python (Client) ESP32 (Server) - │ │ - │ OSC /flower/servo │ - │ [90, 90] │ - ├───────────────────────────→│ - │ │ - │ Parse OSC - │ │ - │ Set Servo Angles - │ servo.write() - │ │ - │ (Next command...) │ - ├───────────────────────────→│ +┌────────────────────────────────────────────────────────────────┐ +│ PC / Python Controller │ +│ │ +│ Layer 1: ML Brain Emotion probabilities (7-dim) │ +│ (scikit-learn) + distance + pose openness │ +│ │ │ │ +│ ▼ │ │ +│ Persona Label ◄──────────┘ │ +│ (Empathy / Defensive / Predatory / Boredom / Surprise / Jealous)│ +│ │ │ +│ Layer 2: State Machine lookup PERSONA_PARAMS dict │ +│ + Jealousy Network: if flower A held Empathy >5s │ +│ → force flower B/C into 'Jealous' for 8s │ +│ │ │ +│ Layer 3: Motion Render EMA smoothing (alpha=0.3) │ +│ discrete params + Perlin-like jitter │ +│ │ │ +│ OSC / UDP ─────────────────────────────────────────┐ │ +└─────────────────────────────────────────────────────────────┼──┘ + │ + WiFi (ESP32 AP or existing LAN) │ + │ +┌─────────────────────────────────────────────────────────── ▼ ┐ +│ ESP32 Flowers (Sylvie / Sue) │ +│ │ +│ OSC Server (port 8888) → DC motors + RGB LEDs │ +│ OR servo + ultrasonic │ +└────────────────────────────────────────────────────────────────┘ ``` -## Module Details - -### Python Controller Modules - -#### `vision_tracker.py` -- **Purpose**: Object detection and tracking -- **Classes**: - - `VisionTracker`: Base class - - `FaceTracker`: Haar Cascade face detection - - `ColorTracker`: HSV color-based tracking -- **Output**: (x_error, y_error, detected) - -#### `pid_controller.py` -- **Purpose**: Error correction algorithm -- **Algorithm**: Proportional-Integral-Derivative control -- **Features**: - - Anti-windup - - Output limiting - - Sample time control -- **Output**: correction value - -#### `osc_client.py` -- **Purpose**: Network communication with ESP32 -- **Protocol**: OSC over UDP -- **Commands**: - - Servo control - - Flower state - - Motor speed - - Tracking mode - -#### `main.py` -- **Purpose**: Main application loop -- **Responsibilities**: - - Initialize all modules - - Run control loop - - Handle user input - - Display feedback - -### ESP32 Firmware Modules +## Layer 1 — ML Brain -#### WiFi Module -- Connect to network -- Obtain IP address via DHCP -- Maintain connection +**File**: `persona_engine.py` (`PersonaEngine.predict_persona`) -#### OSC Server Module -- Listen on UDP port 8000 -- Parse OSC messages -- Route to handlers +Input features (10-dim): +- 7 emotion probabilities from DeepFace: `angry, disgust, fear, happy, sad, surprise, neutral` +- `distance_estimate` (metres, derived from face bounding box area) +- `face_area` (normalised 0–1) +- `pose_openness` (MediaPipe wrist/shoulder spread, 0–1) -#### Servo Control Module -- ESP32Servo library -- PWM generation (50Hz) -- Position control (0-180°) +Classifier: `RandomForestClassifier` (default) or `SVC` (configurable in `config.ini`) -#### Motor Control Module -- H-bridge driver control -- PWM speed control -- Direction control +When no model is trained, a heuristic rule-set is used (see `_heuristic_persona`). 
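+
+Below is a minimal sketch of that prediction path, assembling the 10-dim vector and classifying it. The names (`build_features`, `EMOTIONS`) and the placeholder training arrays are illustrative, not the actual `persona_engine.py` API:
+
+```python
+import numpy as np
+from sklearn.ensemble import RandomForestClassifier
+
+EMOTIONS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
+
+def build_features(emotions, distance_m, face_area, pose_openness):
+    """Assemble the 10-dim vector: 7 emotion probabilities + 3 scalars."""
+    probs = [emotions.get(k, 0.0) for k in EMOTIONS]
+    return np.array(probs + [distance_m, face_area, pose_openness])
+
+# Placeholder data standing in for samples recorded via the control panel
+X_train = np.random.rand(20, 10)
+y_train = np.random.choice(['Empathy', 'Defensive', 'Predatory'], size=20)
+
+clf = RandomForestClassifier(n_estimators=100)
+clf.fit(X_train, y_train)
+
+emo = {'happy': 0.8, 'neutral': 0.2}
+x = build_features(emo, distance_m=1.2, face_area=0.05, pose_openness=0.7)
+print(clf.predict(x.reshape(1, -1))[0])
+```
+
+With real recorded samples in place of the placeholders, `clf.predict_proba` would additionally expose per-persona confidences.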
-## Timing and Performance +## Layer 2 — State Machine / Nervous System -### Python Controller +**File**: `persona_engine.py` (`PERSONA_PARAMS`, `PersonaEngine.update`) -| Component | Rate | Latency | -|-----------|------|---------| -| Camera Capture | 30 FPS | ~33ms | -| Vision Processing | 30 FPS | ~10-30ms | -| PID Update | 33 Hz | <1ms | -| OSC Send | As needed | <5ms | +Each persona maps to hardware parameters: -**Total Loop Time**: ~50-70ms (15-20 FPS effective) +| Persona | Openness | Jitter | Speed | LED Hue | +|-----------|----------|--------|-------|---------| +| Empathy | 1.0 | 0.0 | 0.4 | 120 (green) | +| Defensive | 0.1 | 0.2 | 0.2 | 240 (blue) | +| Predatory | 0.7 | 0.1 | 0.8 | 0 (red) | +| Boredom | 0.3 | 0.0 | 0.1 | 200 (cool blue) | +| Surprise | 1.0 | 1.0 | 1.0 | 60 (yellow) | +| Jealous | 0.6 | 0.5 | 0.6 | 0 (red) | -### ESP32 Firmware +**Jealousy Network**: If the primary device stays in `Empathy` for >5 seconds, all other devices are force-overridden to `Jealous` for 8 seconds. -| Component | Rate | Response | -|-----------|------|----------| -| WiFi Poll | Continuous | <1ms | -| OSC Parse | On packet | <1ms | -| Servo Update | Immediate | ~20ms (servo) | -| Motor Update | Immediate | <1ms (PWM) | +## Layer 3 — Physical Rendering -**Network Latency**: 5-20ms on local WiFi +**File**: `persona_engine.py` (`PersonaEngine._apply_persona`) -### End-to-End Latency +- **EMA smoothing**: `state = prev * (1 - α) + target * α` — prevents abrupt jumps +- **Jitter**: random noise added to openness proportional to `jitter` param +- Alpha configurable via `config.ini` `[Personas] ema_alpha` -``` -Camera → Processing → PID → OSC → Network → ESP32 → Servo - 33ms 20ms 1ms 5ms 10ms 1ms 20ms - -Total: ~90ms (acceptable for human tracking) -``` - -## State Management - -### Python Controller State - -```python -{ - 'tracking_active': bool, # Is tracking enabled? - 'pan_angle': float (0-180), # Current pan position - 'tilt_angle': float (0-180), # Current tilt position - 'flower_openness': float (0-1), # Flower open state - 'lost_target_time': float, # When target was lost -} -``` - -### ESP32 State - -```cpp -{ - currentPanAngle: int (0-180), - currentTiltAngle: int (0-180), - currentFlowerState: float (0.0-1.0), - currentMotorSpeed: int (-100 to 100), - trackingMode: bool -} -``` - -## Error Handling +## Vision Pipeline -### Python Controller +**File**: `vision_tracker.py` ``` -Try: - Capture frame - Process vision - Calculate PID - Send OSC -Except: - Camera error → Retry or exit - Network error → Log and continue - Keyboard interrupt → Clean shutdown -``` - -### ESP32 - -``` -If OSC parse error: - Log to Serial - Continue (ignore bad packet) - -If servo/motor error: - Log to Serial - Continue (may need hardware reset) -``` - -## Configuration Parameters - -### PID Tuning Parameters - -Located in `config.ini`: - -```ini -[PID_Pan] -kp = 0.15 # Responsiveness -ki = 0.01 # Steady-state error correction -kd = 0.05 # Oscillation damping -output_limit = 30 # Max angle change per update - -[PID_Tilt] -kp = 0.15 -ki = 0.01 -kd = 0.05 -output_limit = 30 +Webcam frame (BGR) + │ + ├──▶ MediaPipe Pose ──▶ pose_openness (0–1) + │ + └──▶ DeepFace.analyze ──▶ emotions dict, age, gender, face region + │ + ├──▶ face_area → distance_estimate + └──▶ k-means colour → dominant_color hex ``` -### Vision Parameters - -```ini -[Tracking] -tracker_type = face # or 'color' +DeepFace runs every 5 frames (configurable) to keep latency low. +MediaPipe runs every frame (lightweight). 
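+
+As a rough sketch of that every-Nth-frame pattern (the variable names are illustrative, not the `vision_tracker.py` API, and recent DeepFace releases return a list of per-face result dicts):
+
+```python
+import cv2
+from deepface import DeepFace  # assumes `pip install deepface`
+
+ANALYZE_EVERY = 5        # heavy DeepFace pass on every 5th frame, per above
+latest_emotions = None   # cached result reused on the frames in between
+
+cap = cv2.VideoCapture(0)
+for frame_idx in range(300):   # bounded for the sketch; a real loop runs until quit
+    ok, frame = cap.read()
+    if not ok:
+        break
+    if frame_idx % ANALYZE_EVERY == 0:
+        try:
+            results = DeepFace.analyze(frame, actions=['emotion'],
+                                       enforce_detection=False)
+            latest_emotions = results[0]['emotion']
+        except Exception:
+            pass               # keep the previous result on a transient failure
+    # ...the lightweight MediaPipe pose pass would run here every frame...
+cap.release()
+print(latest_emotions)
+```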
-[ColorTracking] -lower_hsv = 0, 120, 70 # Red color range -upper_hsv = 10, 255, 255 -``` +## Communication -### Network Parameters +Protocol: **OSC over UDP** (python-osc → arduino-osc library) -```ini -[Network] -esp32_ip = 192.168.1.100 -esp32_port = 8000 -``` +Actual esp32_sylvie commands: -## Scalability and Extensions +| OSC Address | Args | Description | +|-------------|------|-------------| +| `/auto` | `[0\|1]` | Switch auto/manual mode | +| `/motor1` | `[1\|-1\|0]` | DC motor A: open/close/stop | +| `/motor2` | `[1\|-1\|0]` | DC motor B | +| `/led1` | `[r, g, b]` | RGB LED 1 (0–255 each) | +| `/led2` | `[r, g, b]` | RGB LED 2 | +| `/preset` | `[1\|2\|3]` | Scene presets | -### Adding New Trackers +Default ESP32 AP: `192.168.4.1:8888` -Extend `VisionTracker` base class: +## File Structure -```python -class MyTracker(VisionTracker): - def get_tracking_error(self, frame): - # Implement custom tracking - return (x_error, y_error, detected) ``` +python_controller/ +├── main.py # Entry point +├── config.ini # All configuration +├── vision_tracker.py # DeepFace + MediaPipe +├── osc_client.py # Multi-device OSC network +├── persona_engine.py # 3-layer control engine +├── ml_trainer.py # sklearn classifier training +├── control_panel.py # Tkinter GUI +├── pid_controller.py # PID (retained for servo tuning) +└── requirements.txt -### Adding New Behaviors - -Modify `FlowerControlSystem.run()` in `main.py`: - -```python -if detected: - # Custom behavior here - if distance < threshold: - self.flower_openness = 1.0 -``` - -### Multiple Flowers - -Create multiple OSC clients: - -```python -flower1 = FlowerOSCClient("192.168.1.100", 8000) -flower2 = FlowerOSCClient("192.168.1.101", 8000) +esp32_firmware/ +├── eps32_sylvie/ # DC motor + LED firmware (main) +│ ├── esp32_sylvie.ino +│ └── sylvie.ino +└── esp32_sue/ # Servo + ultrasonic firmware + └── servo.ino ``` - -### Advanced Vision - -Replace trackers with ML models: -- MediaPipe for pose detection -- YOLO for object detection -- OpenPose for body tracking - -## Security Considerations - -### Network Security - -- OSC over UDP is unencrypted -- Use on trusted networks only -- Consider VPN for remote access -- Firewall rules to limit access - -### Physical Safety - -- Limit servo speed (output_limits) -- Limit servo range (0-180° clamping) -- Emergency stop mechanism -- Physical limit switches - -## Performance Optimization - -### Python - -1. **Reduce resolution**: 320x240 for faster processing -2. **Skip frames**: Process every Nth frame -3. **Optimize detection**: Reduce search region -4. **Multi-threading**: Separate capture and processing - -### ESP32 - -1. **Servo updates**: Only when angle changes -2. **Motor smoothing**: Ramp speed changes -3. **WiFi optimization**: Static IP (faster than DHCP) -4. **Minimize Serial**: Reduce debug output - -## Testing Strategy - -### Unit Tests - -- `test_pid.py`: PID algorithm correctness -- Individual module tests - -### Integration Tests - -- `test_osc.py`: Network communication -- `test_vision.py`: Camera and tracking - -### System Tests - -- Full system with manual control -- Live tracking performance -- Stress testing (rapid movements) - -## Debugging - -### Enable Verbose Logging - -Python: -```python -import logging -logging.basicConfig(level=logging.DEBUG) -``` - -ESP32: -```cpp -// In loop(), add: -Serial.print("Pan: "); -Serial.println(currentPanAngle); -``` - -### Common Debug Points - -1. **Vision**: Display detected regions -2. **PID**: Log error and output values -3. 
**Network**: Monitor packet transmission -4. **Servos**: Verify angle commands -5. **Timing**: Measure loop frequencies - -## Performance Metrics - -Track these for optimization: - -- **Frame Rate**: Target 20+ FPS -- **Detection Rate**: Target 90%+ when object visible -- **Response Time**: Target <100ms end-to-end -- **Tracking Accuracy**: Target ±5 pixels error -- **Servo Smoothness**: No visible jitter - -## Future Enhancements - -1. **Machine Learning**: Train custom object detector -2. **Multi-target**: Track multiple objects -3. **Predictive Control**: Anticipate movements -4. **Adaptive PID**: Auto-tune based on performance -5. **Web Interface**: Browser-based control panel -6. **Data Logging**: Record and analyze sessions -7. **Voice Control**: Add speech recognition -8. **Mobile App**: Android/iOS control diff --git a/docs/QUICKSTART.md b/docs/QUICKSTART.md index 7d77b41..cda26dc 100644 --- a/docs/QUICKSTART.md +++ b/docs/QUICKSTART.md @@ -1,226 +1,51 @@ # Quick Start Guide -Get your Vision PID Control System running in 15 minutes! - -## Prerequisites - -- Python 3.7+ installed -- USB webcam or built-in camera -- ESP32 development board with WiFi -- Arduino IDE (for ESP32 firmware) - -## Step 1: Install Python Dependencies (2 minutes) +## 1. Install Python dependencies ```bash cd python_controller pip install -r requirements.txt ``` -This installs: -- `opencv-python` - Computer vision library -- `numpy` - Numerical computing -- `python-osc` - OSC communication - -## Step 2: Setup ESP32 Hardware (5 minutes) - -### Minimal Test Setup - -For initial testing, you only need: -- ESP32 board -- 2× Servo motors -- USB power - -**Connections:** -``` -ESP32 GPIO 18 → Pan Servo Signal (Orange/Yellow wire) -ESP32 GPIO 19 → Tilt Servo Signal (Orange/Yellow wire) -ESP32 GND → Servo Ground (Brown/Black wires) -ESP32 5V → Servo Power (Red wires) -``` - -⚠️ **Important**: For testing only 1-2 servos can be powered from ESP32. For production, use external 5V supply! - -## Step 3: Flash ESP32 Firmware (5 minutes) - -1. **Install Arduino IDE** from https://www.arduino.cc/en/software - -2. **Add ESP32 Board Support:** - - Arduino IDE → Preferences - - Additional Board URLs: `https://dl.espressif.com/dl/package_esp32_index.json` - - Tools → Board → Boards Manager → Search "ESP32" → Install - -3. **Install Libraries:** - - Sketch → Include Library → Manage Libraries - - Search and install: - - `ESP32Servo` by Kevin Harrington - - `OSC` by Adrian Freed (search for "OSC ESP") +## 2. Flash ESP32 firmware -4. **Configure WiFi:** - - Open `esp32_firmware/flower_control.ino` - - Update lines 19-23: - ```cpp - const char* ssid = "YOUR_WIFI_NAME"; - const char* password = "YOUR_WIFI_PASSWORD"; - ``` - - **Alternative (More Secure):** - - Copy `esp32_firmware/wifi_credentials_template.h` to `wifi_config.h` - - Edit `wifi_config.h` with your credentials (this file is .gitignored) - - In `flower_control.ino`, replace lines 19-23 with: `#include "wifi_config.h"` +Open `esp32_firmware/eps32_sylvie/esp32_sylvie.ino` in Arduino IDE and upload to your ESP32 board. -5. **Upload:** - - Connect ESP32 via USB - - Tools → Board → ESP32 Dev Module - - Tools → Port → (Select your ESP32) - - Click Upload ↑ +Default WiFi hotspot: **SSID = `ESP32_Sylvie`**, **Password = `12345678`** -6. **Get IP Address:** - - Open Serial Monitor (Ctrl+Shift+M) - - Set baud rate to 115200 - - Press ESP32 reset button - - Note the IP address shown (e.g., `192.168.1.100`) +## 3. 
Connect PC to ESP32 hotspot -## Step 4: Test the System (3 minutes) +Connect your laptop to the `ESP32_Sylvie` WiFi network. +The ESP32 will be at `192.168.4.1`. -### Test 1: Hardware Test +## 4. Run the control panel ```bash cd python_controller -python test_osc.py 192.168.1.XXX +python main.py ``` -Replace `192.168.1.XXX` with your ESP32's IP address. +The Tkinter control panel opens. The webcam preview and perception data should update in real time. -You should see servos move through a test sequence. Watch the ESP32 Serial Monitor to confirm it's receiving commands. +## 5. Quick test — manual control -### Test 2: Vision Test - -```bash -python test_pid.py -``` - -This tests the PID controller (no hardware needed). - -### Test 3: Manual Control - -```bash -python example_manual_control.py 192.168.1.XXX -``` - -Control servos manually with keyboard commands: -- Type `left`, `right`, `up`, `down` to move servos -- Type `r` to reset -- Type `q` to quit - -## Step 5: Run Full System - -### Face Tracking - -```bash -python main.py --tracker face --ip 192.168.1.XXX -``` +1. In the **Manual Control** section, select `sylvie` from the Device dropdown. +2. Drag **Motor 1** slider to 1.0 and click **Send Manual** — the flower should open. +3. Set Motor 1 to -1.0 and send — the flower should close. -- Press **SPACE** to enable tracking -- Face the camera - servos should follow you! -- Press **O** to open flower, **C** to close -- Press **Q** to quit +## 6. Record training samples & train ML -### Color Tracking +1. Stand in front of the camera in different poses/expressions. +2. Select a **Persona Label** (e.g. `Empathy`) and click **⏺ Record Sample**. +3. Record at least 10 samples across different personas. +4. Click **🏋 Train Model**. +5. Click **▶ Auto Mode: ON** — the flowers now respond automatically. -```bash -python main.py --tracker color --ip 192.168.1.XXX -``` - -- Hold a **red object** in front of camera -- Press **SPACE** to enable tracking -- System tracks the colored object - -## Troubleshooting Quick Fixes - -### "Failed to open camera" -```bash -# Try different camera ID -python main.py --camera 1 --ip 192.168.1.XXX -``` - -### "OSC commands not working" -1. Check ESP32 Serial Monitor - is it connected to WiFi? -2. Ping ESP32: `ping 192.168.1.XXX` -3. Computer and ESP32 on same WiFi network? - -### "No face detected" -- Ensure good lighting -- Face should be 0.5-2 meters from camera -- Face camera directly (not at angle) - -### "Servos jittering" -Edit `config.ini`: -```ini -[PID_Pan] -kp = 0.1 # Reduce from 0.15 -kd = 0.08 # Increase from 0.05 -``` - -## Configuration - -Edit `python_controller/config.ini` to customize: - -```ini -[Network] -esp32_ip = 192.168.1.XXX # Your ESP32 IP - -[PID_Pan] -kp = 0.15 # Proportional gain (speed) -ki = 0.01 # Integral gain (steady-state) -kd = 0.05 # Derivative gain (smoothing) -``` - -## Next Steps - -1. **Add the Motor**: Follow [WIRING.md](WIRING.md) to connect DC motor for flower mechanism -2. **Tune PID**: Follow [TUNING.md](TUNING.md) for optimal tracking performance -3. **Build Mechanism**: Design and 3D-print your flower mechanism -4. 
**Customize**: Modify tracking behavior in `main.py` - -## Safety Notes - -⚠️ Start with low PID gains to prevent sudden movements -⚠️ Use proper power supply for production use (not ESP32 5V pin) -⚠️ Keep fingers away from moving servos and motors - -## Getting Help - -**Check the logs:** -- Python: Terminal output -- ESP32: Serial Monitor (115200 baud) - -**Common Issues:** -- See [docs/README.md](README.md) for detailed troubleshooting -- Check wiring matches [docs/WIRING.md](WIRING.md) -- Verify Python dependencies installed - -## Success Indicators - -You know it's working when: -- ✓ ESP32 Serial Monitor shows "WiFi connected!" -- ✓ Servos center on startup (90°) -- ✓ Camera window opens showing video feed -- ✓ Face/object detection draws green box -- ✓ Servos smoothly follow tracked target -- ✓ OSC messages appear in Serial Monitor - -## Full System Diagram - -``` -┌──────────┐ ┌──────────┐ -│ Computer │ │ ESP32 │ -│ + Camera │◄──WiFi─→│ + Servos │ -└──────────┘ └──────────┘ - │ │ - │ 1. Detects face │ - │ 2. Calculates PID │ - │ 3. Sends OSC ──────→│ 4. Moves servos - │ │ 5. Controls flower -``` +## Troubleshooting -Congratulations! Your Vision PID Control System is now running! 🎉 +| Symptom | Fix | +|---------|-----| +| No camera preview | Check `camera_id` in `config.ini` | +| Motor does not move | Verify PC is on `ESP32_Sylvie` WiFi; check IP 192.168.4.1 | +| DeepFace errors | Install: `pip install deepface` or set `emotion_backend = haar` in config.ini | +| MediaPipe errors | Install: `pip install mediapipe` or set `enable_pose = false` | diff --git a/docs/README.md b/docs/README.md index 30fea84..55461e3 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,340 +1,19 @@ -# Vision PID Control System for ESP32 Flower +# Digital Bloom — Documentation -A real-time vision tracking system that uses OpenCV to detect faces or colored objects, applies PID control to calculate corrections, and sends commands to an ESP32-controlled 3D-printed flower via OSC over UDP. +Interactive flower sculpture installation controlled by emotion recognition and pose estimation. 
-## Features +## Docs Index -- **Vision Tracking**: Face detection (Haar Cascade) or color tracking (HSV) -- **PID Control**: Smooth servo movements with tunable PID parameters -- **OSC Communication**: Real-time UDP commands to ESP32 -- **Interactive Control**: Keyboard controls for manual operation -- **Flower Behavior**: Opens when target detected, closes when target lost +| File | Description | +|------|-------------| +| [QUICKSTART.md](QUICKSTART.md) | Get running in 5 minutes | +| [ARCHITECTURE.md](ARCHITECTURE.md) | System design & data flow | +| [WIRING.md](WIRING.md) | ESP32 hardware connections | +| [TUNING.md](TUNING.md) | ML training & persona tuning | -## System Architecture +## Hardware Overview -``` -┌─────────────────────┐ -│ Camera Input │ -│ (OpenCV) │ -└──────────┬──────────┘ - │ - v -┌─────────────────────┐ -│ Vision Tracker │ -│ (Face/Color) │ -└──────────┬──────────┘ - │ Error (x, y) - v -┌─────────────────────┐ -│ PID Controller │ -│ (Pan & Tilt) │ -└──────────┬──────────┘ - │ Corrections - v -┌─────────────────────┐ -│ OSC Client │ -│ (UDP) │ -└──────────┬──────────┘ - │ Commands - v -┌─────────────────────┐ -│ ESP32 │ -│ (WiFi/OSC) │ -└──────────┬──────────┘ - │ - v -┌─────────────────────┐ -│ Servos & Motors │ -│ (Flower Control) │ -└─────────────────────┘ -``` +Two types of physical flower sculptures: -## Hardware Requirements - -### Computer Side -- Computer with USB webcam or built-in camera -- Python 3.7 or higher -- WiFi connection to same network as ESP32 - -### ESP32 Side -- ESP32 development board -- 2x Servo motors (for pan/tilt) -- 1x DC motor with motor driver (L298N or similar) -- Power supply (5V for servos, appropriate voltage for motor) -- 3D-printed flower mechanism - -## Installation - -### Python Controller - -1. Navigate to the `python_controller` directory: -```bash -cd python_controller -``` - -2. Install required packages: -```bash -pip install -r requirements.txt -``` - -### ESP32 Firmware - -1. Install Arduino IDE or PlatformIO -2. Install required libraries: - - ESP32Servo - - OSC (for ESP32) - - WiFi (included with ESP32 board) - -3. Open `esp32_firmware/flower_control.ino` -4. Update WiFi credentials: - ```cpp - const char* ssid = "YOUR_WIFI_SSID"; - const char* password = "YOUR_WIFI_PASSWORD"; - ``` - -5. Connect hardware according to wiring diagram (see below) -6. Upload to ESP32 -7. Note the IP address shown in Serial Monitor - -## Hardware Wiring - -### ESP32 Pin Connections - -``` -ESP32 GPIO 18 ──→ Pan Servo Signal -ESP32 GPIO 19 ──→ Tilt Servo Signal -ESP32 GPIO 25 ──→ Motor Driver IN1 -ESP32 GPIO 26 ──→ Motor Driver IN2 -ESP32 GPIO 27 ──→ Motor Driver ENABLE (PWM) - -ESP32 GND ──→ Common Ground -ESP32 5V ──→ Servo Power (if using ESP32 power) -``` - -### Motor Driver (L298N Example) - -``` -IN1 ──→ GPIO 25 -IN2 ──→ GPIO 26 -ENA ──→ GPIO 27 (PWM) -OUT1, OUT2 ──→ DC Motor -12V+ ──→ Motor power supply -GND ──→ Common ground -``` - -### Servos - -``` -Pan Servo: - - Signal ──→ GPIO 18 - - VCC ──→ 5V - - GND ──→ GND - -Tilt Servo: - - Signal ──→ GPIO 19 - - VCC ──→ 5V - - GND ──→ GND -``` - -## Usage - -### Basic Usage - -1. Ensure ESP32 is powered on and connected to WiFi -2. Update `config.ini` with your ESP32's IP address -3. 
Run the controller: - -```bash -python main.py --tracker face --ip 192.168.1.100 -``` - -### Command Line Options - -```bash -python main.py [OPTIONS] - -Options: - --tracker {face,color} Tracking mode (default: face) - --ip IP_ADDRESS ESP32 IP address (default: 192.168.1.100) - --port PORT ESP32 OSC port (default: 8000) - --camera CAMERA_ID Camera device ID (default: 0) -``` - -### Keyboard Controls - -| Key | Action | -|-----|--------| -| SPACE | Toggle tracking on/off | -| 'o' | Open flower manually | -| 'c' | Close flower manually | -| 'r' | Reset servos to center position | -| 'q' | Quit application | - -### Examples - -**Face tracking:** -```bash -python main.py --tracker face --ip 192.168.1.100 -``` - -**Color tracking (red object):** -```bash -python main.py --tracker color --ip 192.168.1.100 -``` - -## Configuration - -Edit `config.ini` to customize: - -- **Network**: ESP32 IP and port -- **Camera**: Resolution and device ID -- **PID Parameters**: Tune for your specific hardware -- **Color Tracking**: Adjust HSV range for different colors -- **Behavior**: Flower opening/closing speed and timing - -### PID Tuning - -The PID controller uses three parameters: -- **Kp (Proportional)**: Immediate response to error (default: 0.15) -- **Ki (Integral)**: Eliminates steady-state error (default: 0.01) -- **Kd (Derivative)**: Dampens oscillations (default: 0.05) - -**Tuning tips:** -1. Start with Kp only, adjust until system responds -2. Add Ki to eliminate steady-state offset -3. Add Kd to reduce oscillations -4. Adjust `output_limits` to control max servo speed - -### Color Tracking HSV Ranges - -Common colors in HSV: -- **Red**: (0, 120, 70) to (10, 255, 255) -- **Blue**: (100, 150, 0) to (140, 255, 255) -- **Green**: (40, 40, 40) to (80, 255, 255) -- **Yellow**: (20, 100, 100) to (30, 255, 255) - -Use a color picker tool to find custom HSV ranges. 
- -## OSC Message Protocol - -The system sends the following OSC messages: - -| Address | Arguments | Description | -|---------|-----------|-------------| -| `/flower/servo` | [pan_angle, tilt_angle] | Set servo positions (0-180°) | -| `/flower/state` | [openness] | Set flower openness (0.0-1.0) | -| `/flower/motor` | [speed] | Set motor speed (-100 to 100) | -| `/flower/mode` | [mode] | Set tracking mode (0=idle, 1=tracking) | - -## Troubleshooting - -### Camera not working -- Check camera permissions -- Try different camera ID: `--camera 1` -- Verify camera with: `python -c "import cv2; print(cv2.VideoCapture(0).isOpened())"` - -### Face detection not working -- Ensure good lighting -- Face should be front-facing -- Try adjusting distance from camera -- Check that Haar Cascade file is installed - -### Color tracking not working -- Adjust HSV range in `config.ini` -- Use bright, solid-colored objects -- Ensure good lighting conditions -- Object should be larger than 500 pixels - -### ESP32 not receiving commands -- Verify ESP32 IP address -- Check that computer and ESP32 are on same network -- Check firewall settings (allow UDP on port 8000) -- Monitor ESP32 Serial output for connection status - -### Servos jittering -- Reduce PID gains (especially Kp and Kd) -- Increase `sample_time` in PID controller -- Check servo power supply -- Add capacitor across servo power lines - -### Motor not responding -- Check motor driver connections -- Verify power supply voltage -- Test motor driver with simple Arduino sketch -- Check motor driver enable pin (GPIO 27) - -## Project Structure - -``` -DATT3700/ -├── python_controller/ -│ ├── main.py # Main application -│ ├── pid_controller.py # PID control algorithm -│ ├── vision_tracker.py # Face and color tracking -│ ├── osc_client.py # OSC communication -│ ├── requirements.txt # Python dependencies -│ └── config.ini # Configuration file -├── esp32_firmware/ -│ └── flower_control.ino # ESP32 Arduino sketch -└── docs/ - ├── README.md # This file - ├── WIRING.md # Wiring diagrams - └── TUNING.md # PID tuning guide -``` - -## Development - -### Adding Custom Trackers - -Create a new tracker class in `vision_tracker.py`: - -```python -class MyTracker(VisionTracker): - def __init__(self, frame_width, frame_height): - super().__init__(frame_width, frame_height) - # Initialize your tracker - - def get_tracking_error(self, frame): - # Implement your tracking logic - x_error = 0 - y_error = 0 - detected = False - return (x_error, y_error, detected) -``` - -### Modifying Flower Behavior - -Edit the `run()` method in `main.py` to customize: -- Opening/closing logic -- Response to target detection/loss -- Additional OSC commands - -## Performance - -- **Tracking Rate**: ~30 FPS (depends on camera and processing) -- **PID Update Rate**: ~33 Hz (30ms sample time) -- **OSC Latency**: <10ms on local network -- **Servo Response**: Depends on PID tuning - -## Safety Notes - -- Start with low PID gains to prevent violent movements -- Ensure adequate power supply for motors and servos -- Add limit switches if flower mechanism can jam -- Keep fingers clear of moving parts during testing -- Use appropriate current limiting for motors - -## License - -See LICENSE file in repository root. - -## Credits - -Built for DATT3700 course project using: -- OpenCV for computer vision -- python-osc for OSC protocol -- ESP32Servo library for servo control - -## Contributing - -This is a course project. For educational use and reference. 
+- **Sylvie** (`esp32_sylvie`): DC-motor driven petals + RGB LED, WiFi AP mode +- **Sue** (`esp32_sue`): Servo-driven petals + ultrasonic proximity sensor diff --git a/docs/TUNING.md b/docs/TUNING.md index 4a3f76f..324a7d8 100644 --- a/docs/TUNING.md +++ b/docs/TUNING.md @@ -1,306 +1,104 @@ -# PID Tuning Guide +# Tuning Guide -## Understanding PID Control +## ML Model Tuning -A PID controller combines three control strategies: +### Recording Training Data -### Proportional (Kp) -- Responds proportionally to the current error -- Higher Kp = faster response but can cause overshoot -- Formula: `P = Kp × error` +1. Open the control panel: `python main.py` +2. Stand in front of the webcam and deliberately express each emotion. +3. Select the matching **Persona Label** in the panel and click **⏺ Record Sample**. +4. Aim for **≥15 samples per persona** (at least 6 personas = 90+ total). +5. Include variety: different distances, lighting, partial faces. -### Integral (Ki) -- Accumulates error over time -- Eliminates steady-state error -- Too high Ki can cause oscillations -- Formula: `I = Ki × ∫(error)dt` +### Training -### Derivative (Kd) -- Responds to rate of error change -- Dampens oscillations and overshoot -- Sensitive to noise -- Formula: `D = Kd × d(error)/dt` +Click **🏋 Train Model**. The panel shows cross-validation accuracy. -### Total Output -``` -output = Kp×error + Ki×∫(error)dt + Kd×d(error)/dt -``` - -## Tuning Process - -### Step 1: Start with P-Only Control - -1. Set Ki = 0, Kd = 0 -2. Set Kp to a small value (e.g., 0.1) -3. Gradually increase Kp until system responds -4. Continue increasing until slight overshoot occurs -5. Reduce Kp by 20-30% - -**Result**: System responds but may have steady-state error - -### Step 2: Add Integral Control - -1. Keep Kp from Step 1 -2. Set Ki to a small value (e.g., 0.001) -3. Gradually increase Ki until steady-state error disappears -4. If oscillations occur, reduce Ki - -**Result**: No steady-state error, but may oscillate - -### Step 3: Add Derivative Control - -1. Keep Kp and Ki from Steps 1-2 -2. Set Kd to a small value (e.g., 0.01) -3. Gradually increase Kd to dampen oscillations -4. If system becomes sluggish, reduce Kd - -**Result**: Fast response, no steady-state error, minimal oscillation - -## Ziegler-Nichols Method - -If the above doesn't work, try Ziegler-Nichols tuning: - -1. Set Ki = 0, Kd = 0 -2. Increase Kp until system oscillates continuously -3. Note the ultimate gain (Ku) and oscillation period (Tu) -4. Apply formulas: - - Kp = 0.6 × Ku - - Ki = 2 × Kp / Tu - - Kd = Kp × Tu / 8 - -## Application-Specific Tuning - -### For the Flower Tracker: - -**Pan Control (Horizontal)** -- Start: Kp=0.1, Ki=0.005, Kd=0.03 -- Goal: Smooth horizontal tracking without jerky movements - -**Tilt Control (Vertical)** -- Start: Kp=0.1, Ki=0.005, Kd=0.03 -- Goal: Stable vertical positioning with minimal bounce - -### Tuning Tips for This Project: - -1. **Kp (0.1-0.3)**: - - Too low: Slow to respond, lags behind target - - Too high: Jerky movements, overshoots target - - Optimal: Smooth following with slight lag - -2. **Ki (0.001-0.05)**: - - Too low: Doesn't center on stationary target - - Too high: Oscillates around target - - Optimal: Slowly centers without oscillation - -3. 
**Kd (0.01-0.1)**: - - Too low: Overshoots and bounces - - Too high: Sluggish, resistant to quick movements - - Optimal: Smooth deceleration as it approaches target - -## Testing Procedure - -### Setup -```bash -python main.py --tracker face --ip YOUR_ESP32_IP -``` - -### Test 1: Step Response -1. Stand in front of camera at center -2. Press SPACE to enable tracking -3. Quickly move left/right by one body width -4. Observe servo response +Target accuracy ≥ 75%. If lower: +- Add more samples +- Reduce number of persona classes +- Switch to SVM in `config.ini`: `[ML] classifier = svm` -**Good Response:** -- Smooth movement toward new position -- Slight overshoot (<10°) -- Settles within 1-2 seconds +### Retraining -**Bad Response:** -- Excessive overshoot (>20°) -- Oscillates back and forth -- Very slow to reach position +Training data is saved to `training_data.json` automatically. +Model is saved to `ml_model.pkl`. -### Test 2: Continuous Tracking -1. Enable tracking -2. Slowly move left to right -3. Observe smoothness +To start fresh: delete both files, then re-record. -**Good Response:** -- Smooth, continuous following -- No jerky movements -- Minimal lag +--- -**Bad Response:** -- Choppy, stepwise movement -- Large lag behind movement -- Random oscillations +## Persona Parameters -### Test 3: Stationary Target -1. Enable tracking -2. Remain still at offset position (not centered) -3. Wait 5 seconds - -**Good Response:** -- Gradually moves to center on face -- Settles without oscillation -- Final error < 5 pixels - -**Bad Response:** -- Doesn't center (needs more Ki) -- Oscillates around target (too much Ki) -- Overshoots repeatedly - -## Configuration File Tuning - -Edit `config.ini`: - -```ini -[PID_Pan] -kp = 0.15 # Adjust this first -ki = 0.01 # Then this -kd = 0.05 # Finally this -output_limit = 30 - -[PID_Tilt] -kp = 0.15 -ki = 0.01 -kd = 0.05 -output_limit = 30 -``` - -## Common Issues and Solutions - -### Issue: System oscillates -**Solution**: Reduce Kp, increase Kd - -### Issue: Slow to respond -**Solution**: Increase Kp, reduce Kd - -### Issue: Doesn't center on target -**Solution**: Increase Ki - -### Issue: Overshoots target -**Solution**: Reduce Kp, increase Kd - -### Issue: Jerky movements -**Solution**: Reduce Kp, reduce output_limit - -### Issue: Drifts when stationary -**Solution**: Increase Ki (but watch for oscillation) - -## Advanced: Dynamic Tuning - -For different conditions, you may want different PID values: +Edit `PERSONA_PARAMS` in `persona_engine.py` to tune each persona's physical expression: ```python -# In main.py, add mode switching: -if detected and distance_to_target < 50: - # Close to target: gentle control - self.pid_pan.set_tunings(kp=0.1, ki=0.005, kd=0.08) -else: - # Far from target: aggressive control - self.pid_pan.set_tunings(kp=0.2, ki=0.02, kd=0.03) +PERSONA_PARAMS = { + 'Empathy': { + 'openness': 1.0, # 0=closed, 1=fully open + 'jitter': 0.0, # 0=smooth, 1=chaotic + 'speed': 0.4, # motion speed multiplier + 'led_hue': 120, # hue 0-360 (green=120) + 'led_sat': 0.8, # saturation 0-1 + 'led_bri': 0.8, # brightness 0-1 + }, + ... +} ``` -## Performance Metrics +--- -Track these metrics during tuning: +## EMA Smoothing -1. **Rise Time**: Time to reach 90% of target - - Target: < 1 second +Set `ema_alpha` in `config.ini` `[Personas]`: +- `0.1` = very slow/sluggish response +- `0.5` = medium +- `0.9` = fast/snappy (may look jerky) -2. **Overshoot**: Maximum deviation beyond target - - Target: < 10% of error distance +Default: `0.3` -3. 
**Settling Time**: Time to stay within 5% of target - - Target: < 2 seconds +--- -4. **Steady-State Error**: Final error when stationary - - Target: < 5 pixels +## Jealousy Network -## Logging for Analysis +`jealousy_trigger_seconds` (default 5.0): seconds of continuous `Empathy` on the primary flower before siblings become `Jealous`. -Add this to `main.py` for detailed tuning analysis: +Override duration is hardcoded at 8 seconds (`persona_engine.py` line `override_until = time.time() + 8.0`). -```python -import csv -import time - -# In FlowerControlSystem.__init__: -self.log_file = open('pid_log.csv', 'w', newline='') -self.log_writer = csv.writer(self.log_file) -self.log_writer.writerow(['time', 'x_error', 'y_error', - 'pan_output', 'tilt_output', - 'pan_angle', 'tilt_angle']) - -# In run() loop: -if self.tracking_active and detected: - self.log_writer.writerow([ - time.time(), - x_error, y_error, - pan_correction, tilt_correction, - self.pan_angle, self.tilt_angle - ]) -``` +--- -Analyze with: -```python -import pandas as pd -import matplotlib.pyplot as plt - -df = pd.read_csv('pid_log.csv') -plt.plot(df['time'], df['x_error']) -plt.plot(df['time'], df['pan_angle']) -plt.legend(['Error', 'Servo Angle']) -plt.show() -``` +## Vision Backend -## Hardware Considerations +### DeepFace (recommended) +`[Vision] emotion_backend = deepface` -### Servo Response Time -- Cheap servos: slower response, increase Kd -- Quality servos: faster response, reduce Kd +More accurate but ~200 ms per analysis. Runs every 5 frames. -### Power Supply -- Weak power: servos lag, reduce all gains -- Strong power: risk of damage, use output limits +### Haar Cascade (fast fallback) +`[Vision] emotion_backend = haar` -### Mechanical Load -- Heavy flower: increase Kp, reduce Kd -- Light flower: decrease Kp, increase Kd +Only detects face presence, no emotion. Useful for testing hardware without GPU. -## Safety Limits +### Pose Estimation +`[Vision] enable_pose = true/false` -Always keep these limits in place: +Requires MediaPipe. Adds wrist/shoulder spread as `pose_openness` feature. -```python -# In pid_controller.py -output_limits=(-30, 30) # Max 30° change per update - -# In main.py -self.pan_angle = max(0, min(180, self.pan_angle)) -self.tilt_angle = max(0, min(180, self.tilt_angle)) -``` +--- -This prevents: -- Violent servo movements -- Exceeding mechanical limits -- Damaging the flower mechanism +## Multiple Devices -## Final Checklist +Add devices to `config.ini`: -- [ ] Kp provides adequate response speed -- [ ] Ki eliminates steady-state error without oscillation -- [ ] Kd dampens overshoot without making system sluggish -- [ ] System tracks smoothly during slow movements -- [ ] System responds quickly to sudden changes -- [ ] No excessive oscillation or jitter -- [ ] Servos don't make unusual noises -- [ ] Power supply remains stable during operation -- [ ] Tracking works in various lighting conditions - -## References +```ini +[Devices] +device_list = sylvie,sue2 + +[Device_sue2] +ip = 192.168.4.3 +port = 8888 +type = dc_motor +description = Second flower unit +``` -- [PID Control Theory](https://en.wikipedia.org/wiki/PID_controller) -- [Ziegler-Nichols Method](https://en.wikipedia.org/wiki/Ziegler%E2%80%93Nichols_method) -- [Control Systems Engineering](https://www.mathworks.com/help/control/) +Each additional device joins the jealousy network automatically. 
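+
+A minimal sketch of reading that device list with Python's `configparser`, assuming `config.ini` sits in the working directory; the printout stands in for constructing the per-device OSC clients:
+
+```python
+import configparser
+
+config = configparser.ConfigParser()
+config.read('config.ini')
+
+# [Devices] device_list = sylvie,sue2  ->  one Device_<name> section each
+names = [n.strip() for n in config.get('Devices', 'device_list').split(',')]
+for name in names:
+    section = f'Device_{name}'
+    ip = config.get(section, 'ip')
+    port = config.getint(section, 'port')
+    dev_type = config.get(section, 'type')
+    print(f'{name}: {dev_type} flower at {ip}:{port}')
+```
+
+A listed device with no matching `Device_<name>` section raises `configparser.NoSectionError`, which gives an early, readable failure.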
diff --git a/docs/WIRING.md b/docs/WIRING.md index cfb5d97..11c03a0 100644 --- a/docs/WIRING.md +++ b/docs/WIRING.md @@ -1,350 +1,63 @@ -# Wiring Diagrams - -## Complete System Wiring - -``` -┌─────────────────────────────────────────────────────────────┐ -│ Power Supply │ -│ │ -│ ┌────────┐ ┌──────────┐ ┌──────────┐ │ -│ │ 5V 2A │────────→│ Servos │ │ ESP32 │ │ -│ │ Supply │ │ (2x) │ │ │ │ -│ └────────┘ └──────────┘ └──────────┘ │ -│ │ -│ ┌────────┐ ┌──────────┐ │ -│ │ 12V │────────→│ Motor │ │ -│ │ Supply │ │ Driver │ │ -│ └────────┘ └──────────┘ │ -│ │ -└─────────────────────────────────────────────────────────────┘ -``` - -## Detailed ESP32 Connections - -``` - ESP32 Development Board - ┌───────────────────┐ - │ │ - Pan Servo ────│ GPIO 18 │ - Tilt Servo ────│ GPIO 19 │ - │ │ - Motor IN1 ────│ GPIO 25 │ - Motor IN2 ────│ GPIO 26 │ - Motor ENA ────│ GPIO 27 (PWM) │ - │ │ - Common GND ────│ GND │ - 5V Servo ────│ 5V (optional) │ - │ │ - └───────────────────┘ -``` - -## Servo Wiring - -### Pan Servo (Horizontal Rotation) -``` -Pan Servo ESP32 -┌─────────┐ ┌──────┐ -│ Red │─────────────────→│ 5V │ -│ Brown │─────────────────→│ GND │ -│ Orange │─────────────────→│ IO18 │ -└─────────┘ └──────┘ -``` - -### Tilt Servo (Vertical Rotation) -``` -Tilt Servo ESP32 -┌─────────┐ ┌──────┐ -│ Red │─────────────────→│ 5V │ -│ Brown │─────────────────→│ GND │ -│ Orange │─────────────────→│ IO19 │ -└─────────┘ └──────┘ -``` - -**Note**: Servo color codes may vary: -- Red/Orange = Power (5V) -- Brown/Black = Ground -- Yellow/White/Orange = Signal - -## Motor Driver Wiring (L298N Example) - -``` - L298N Motor Driver - ┌──────────────────────┐ - │ │ -ESP32 │ IN1 ←──────────────│ GPIO 25 - │ IN2 ←──────────────│ GPIO 26 - │ ENA ←──────────────│ GPIO 27 (PWM) - │ │ - │ OUT1 ──────────────│ Motor + -Motor │ OUT2 ──────────────│ Motor - - │ │ - │ +12V ←─────────────│ 12V Supply - │ GND ────────────── │ Common GND - │ │ - │ +5V ──────────────→│ (Optional: Power ESP32) - │ │ - └──────────────────────┘ - -Notes: -- Remove ENA jumper if using PWM speed control -- 5V output can power ESP32 (but not through USB simultaneously) -- Use common ground for all components -``` - -## Alternative Motor Driver (TB6612FNG) - -Smaller, more efficient for low-power motors: - -``` - TB6612FNG Motor Driver - ┌──────────────────────┐ - │ │ -ESP32 │ AIN1 ←─────────────│ GPIO 25 - │ AIN2 ←─────────────│ GPIO 26 - │ PWMA ←─────────────│ GPIO 27 - │ │ - │ AO1 ───────────────│ Motor + -Motor │ AO2 ───────────────│ Motor - - │ │ - │ VM ←───────────────│ Motor Supply (12V) - │ VCC ←──────────────│ Logic Supply (3.3V from ESP32) - │ GND ───────────────│ Common GND - │ STBY ──────────────│ 3.3V (standby disable) - │ │ - └──────────────────────┘ -``` - -## Power Supply Recommendations - -### Option 1: Separate Supplies -- **ESP32**: USB power (5V 500mA) -- **Servos**: 5V 2A wall adapter -- **Motor**: 12V battery or wall adapter - -**Advantages**: Isolated, no noise interference -**Disadvantages**: Multiple power sources needed - -### Option 2: Single Supply with Regulator -- **Main Supply**: 12V 3A -- **Buck Converter**: 12V → 5V (for servos and ESP32) - -``` -12V Supply - │ - ├─→ Motor Driver (12V) - │ - └─→ Buck Converter (12V→5V) - │ - ├─→ Servos (5V) - └─→ ESP32 (5V via VIN pin) -``` - -### Option 3: Battery Powered -- **Battery**: 3S LiPo (11.1V) or 3x 18650 cells -- **BEC**: For 5V servo/ESP32 power - -## Complete Breadboard Layout - -``` - ┌────────────────────────────────────────┐ - │ Breadboard │ - │ │ - ESP32 │ ┌──────┐ │ - 
─────→│──│ IO18 │──────→ Pan Servo Signal │ - ─────→│──│ IO19 │──────→ Tilt Servo Signal │ - ─────→│──│ IO25 │──────→ Motor Driver IN1 │ - ─────→│──│ IO26 │──────→ Motor Driver IN2 │ - ─────→│──│ IO27 │──────→ Motor Driver ENA │ - │ │ │ │ - ─────→│──│ GND │──────→ Common Ground Rail │ - ─────→│──│ 5V │──────→ 5V Power Rail │ - │ └──────┘ │ - │ │ - │ Ground Rail ─────────────────────────│ - │ │ │ │ │ │ - │ │ │ │ └──→ Servo 1 GND │ - │ │ │ └──────→ Servo 2 GND │ - │ │ └──────────→ Motor Driver GND │ - │ └─────────────→ Power Supply GND │ - │ │ - │ 5V Rail ─────────────────────────────│ - │ │ │ │ - │ │ └──────────→ Servo 1 VCC │ - │ └─────────────→ Servo 2 VCC │ - │ │ - └────────────────────────────────────────┘ -``` - -## PCB Design (Advanced) - -For a permanent installation, consider a custom PCB: - -### Features: -- ESP32 module socket -- 2x Servo headers (3-pin) -- Motor driver integrated or socket -- Power input terminals -- LED status indicators -- Voltage regulators - -### Layout: -``` -┌─────────────────────────────────────┐ -│ [12V IN] [GND] [ESP32]│ -│ │ -│ [5V REG] [Status LEDs] │ -│ │ -│ [SERVO 1] [SERVO 2] │ -│ │ │ │ │ │ │ │ -│ │ -│ [MOTOR DRIVER] [Motor Out] │ -│ IN1 IN2 ENA + - │ -│ │ -└─────────────────────────────────────┘ -``` - -## Mechanical Assembly - -### Servo Mounting for Pan/Tilt - -``` - Pan Servo (Base) - │ - │ Servo Horn - │ - ▼ - ┌────────┐ - │ Tilt │◄── Tilt Servo - │ Bracket│ - └────────┘ - │ - │ - ▼ - [Camera or Flower] -``` - -### Flower Mechanism - -``` - Motor with Gear - │ - │ Drive Belt/Gear - │ - ▼ - ┌──────────┐ - │ Flower │ - │ Petals │◄── Linked to close/open - │ │ - └──────────┘ - │ - │ Mounted on - │ - [Pan/Tilt Servos] -``` - -## Cable Management - -### Recommended Wire Gauges: -- **Servo Signal**: 22-24 AWG -- **Servo Power**: 18-20 AWG -- **Motor Power**: 16-18 AWG -- **Logic Signals**: 22-24 AWG - -### Cable Lengths: -- Keep servo cables < 30cm for clean signals -- Keep motor power cables as short as possible -- Use twisted pairs for motor power to reduce EMI -- Separate power and signal cables when possible - -## Common Wiring Mistakes to Avoid - -1. ❌ **Sharing servo power with ESP32 5V pin** - - ESP32 can't supply enough current for servos - - Use external 5V supply for servos - -2. ❌ **No common ground** - - All components must share a common ground - - ESP32, motor driver, servos, power supplies - -3. ❌ **Motor driver logic voltage mismatch** - - Some drivers need 5V logic, ESP32 is 3.3V - - Use level shifters or compatible driver - -4. ❌ **Reversed motor polarity** - - Check motor direction, swap wires if needed - - Or invert in software - -5. 
❌ **No decoupling capacitors** - - Add 100µF capacitor across motor terminals - - Add 0.1µF capacitors near ESP32 power pins - -## Testing Checklist - -- [ ] All grounds connected together -- [ ] ESP32 powered and programming properly -- [ ] Servos centered at 90° on power-up -- [ ] Motor can turn both directions -- [ ] No unusual heat from any component -- [ ] WiFi connects successfully -- [ ] OSC commands received (check Serial Monitor) -- [ ] Servos respond to OSC commands -- [ ] Motor responds to OSC commands -- [ ] No interference between motor and servos - -## Safety Features (Recommended) - -``` - ┌─────────┐ - │ Fuse │──→ From 12V Supply - └─────────┘ - - ┌─────────┐ - │ E-Stop │──→ Motor Driver Enable - └─────────┘ - - ┌─────────┐ - │ Limit │──→ GPIO Input (stop on trigger) - │ Switch │ - └─────────┘ -``` - -## Troubleshooting Wiring Issues - -| Symptom | Possible Cause | Solution | -|---------|----------------|----------| -| Servo jitters | Insufficient power | Use dedicated 5V 2A supply | -| Motor doesn't spin | Wrong wiring | Check IN1/IN2 connections | -| ESP32 resets | Voltage drop | Add capacitors, separate supplies | -| No WiFi | Poor power | Use quality USB cable/supply | -| Erratic behavior | EMI from motor | Add capacitors, separate cables | - -## Datasheets and Resources - -### ESP32 -- [ESP32 Pinout](https://randomnerdtutorials.com/esp32-pinout-reference-gpios/) -- [ESP32 Datasheet](https://www.espressif.com/sites/default/files/documentation/esp32_datasheet_en.pdf) - -### Motor Drivers -- [L298N Datasheet](https://www.st.com/resource/en/datasheet/l298.pdf) -- [TB6612FNG Datasheet](https://www.sparkfun.com/datasheets/Robotics/TB6612FNG.pdf) - -### Servos -- [Servo Motor Basics](https://www.electronics-tutorials.ws/io/io_5.html) -- Standard hobby servos: 4.8-6V, 1-2A peak per servo - -## Bill of Materials (BOM) - -| Component | Quantity | Notes | -|-----------|----------|-------| -| ESP32 Dev Board | 1 | Any ESP32 with WiFi | -| Servo Motor (SG90) | 2 | Or similar 9g servo | -| DC Motor | 1 | 6-12V, appropriate for flower | -| Motor Driver | 1 | L298N or TB6612FNG | -| 5V 2A Power Supply | 1 | For servos | -| 12V Power Supply | 1 | For motor (if needed) | -| Jumper Wires | ~20 | Male-to-male and male-to-female | -| Breadboard | 1 | Full-size recommended | -| Capacitor 100µF | 1 | For motor noise suppression | -| Capacitor 0.1µF | 3 | For decoupling | - -**Total estimated cost**: $30-50 USD +# Hardware Wiring Guide + +## esp32_sylvie — DC Motor Flower + +### Components +- 1× ESP32 development board +- 2× DC motors (flower open/close mechanism) +- 2× RGB LEDs (status & ambient lighting) +- 1× H-bridge motor driver (e.g. 
L298N or similar) + +### Pin Assignments + +| Signal | GPIO | Notes | +|--------|------|-------| +| Motor A (+) | 25 | M1_A | +| Motor A (−) | 26 | M1_B | +| Motor B (+) | 18 | M2_A | +| Motor B (−) | 19 | M2_B | +| LED1 Red | 2 | 220Ω resistor in series | +| LED1 Green | 4 | 220Ω resistor in series | +| LED1 Blue | 5 | 220Ω resistor in series | +| LED2 Red | 12 | 220Ω resistor in series | +| LED2 Green | 13 | 220Ω resistor in series | +| LED2 Blue | 14 | 220Ω resistor in series | + +### WiFi / OSC +- **Mode**: Access Point (AP) +- **SSID**: `ESP32_Sylvie` +- **Password**: `12345678` +- **IP**: `192.168.4.1` +- **OSC Port**: `8888` + +--- + +## esp32_sue — Servo Flower + +### Components +- 1× ESP32 development board +- 1× Servo motor (petal mechanism, 60°–120° range) +- 1× HC-SR04 ultrasonic distance sensor + +### Pin Assignments + +| Signal | GPIO | Notes | +|--------|------|-------| +| Ultrasonic TRIG | 27 | | +| Ultrasonic ECHO | 33 | | +| Servo PWM | 14 | 50Hz, 500–2400µs pulse | + +### Physical Limits +- Closed angle: **60°** +- Open angle: **120°** +- Opens when distance ≤ 20 cm +- Closes when distance ≥ 40 cm + +--- + +## Power Supply Notes + +- ESP32: 3.3 V logic, 5 V USB or external +- DC motors: require separate 5–12 V supply via H-bridge +- Servo: 5 V (powered directly from ESP32 5V pin for low-torque servos) +- LEDs: connect 220 Ω–330 Ω resistor from GPIO to LED anode; cathode to GND diff --git a/python_controller/config.ini b/python_controller/config.ini index dd3f03a..4e25625 100644 --- a/python_controller/config.ini +++ b/python_controller/config.ini @@ -1,55 +1,61 @@ -# Configuration file for Vision PID Control System - -[Network] -# ESP32 IP address (update with your ESP32's IP) -esp32_ip = 192.168.1.100 -# OSC communication port -esp32_port = 8000 - -[Camera] -# Camera device ID (usually 0 for built-in webcam) +# Digital Bloom Control System Configuration + +[WiFi] +# mode: ap (connect to ESP32 hotspot) or sta (connect to existing network) +mode = ap +# When mode=ap: credentials of the ESP32 hotspot to connect to +ap_ssid = ESP32_Sylvie +ap_password = 12345678 +# When mode=sta: credentials for existing WiFi +sta_ssid = YOUR_WIFI_SSID +sta_password = YOUR_WIFI_PASSWORD + +[Devices] +# Comma-separated list of device names (must match sections below) +device_list = sylvie +# Repeat to add more: device_list = sylvie,sue + +[Device_sylvie] +ip = 192.168.4.1 +port = 8888 +type = dc_motor +description = DC motor flower with RGB LEDs + +[Device_sue] +ip = 192.168.4.2 +port = 8000 +type = servo +description = Servo petal flower with ultrasonic sensor + +[Vision] camera_id = 0 -# Frame dimensions frame_width = 640 frame_height = 480 - -[Tracking] -# Tracker type: 'face' or 'color' -tracker_type = face - -[ColorTracking] -# HSV color range for color tracking (default: red) -# Lower bound (Hue, Saturation, Value) -lower_hsv = 0, 120, 70 -# Upper bound (Hue, Saturation, Value) -upper_hsv = 10, 255, 255 - -[PID_Pan] -# PID parameters for pan (horizontal) control -kp = 0.15 -ki = 0.01 -kd = 0.05 -output_limit = 30 - -[PID_Tilt] -# PID parameters for tilt (vertical) control -kp = 0.15 -ki = 0.01 -kd = 0.05 -output_limit = 30 - -[Servo] -# Initial servo positions (degrees) -pan_center = 90 -tilt_center = 90 -# Servo limits -pan_min = 0 -pan_max = 180 -tilt_min = 0 -tilt_max = 180 - -[Behavior] -# Time to wait before closing flower when target is lost (seconds) -lost_target_timeout = 2.0 -# Flower open/close speed (0.0-1.0 per frame) -flower_speed = 0.05 +# emotion backend: deepface or 
haar (haar is faster, deepface more accurate)
+emotion_backend = deepface
+# deepface model: VGG-Face, Facenet, OpenFace, DeepFace, DeepID, ArcFace, Dlib, SFace
+deepface_model = VGG-Face
+# enable pose estimation with mediapipe
+enable_pose = true
+# minimum face detection confidence
+min_face_confidence = 0.5
+
+[Personas]
+# Persona labels used in state machine
+labels = Empathy,Defensive,Predatory,Boredom,Surprise,Jealous
+# Time (seconds) a flower must stay in Empathy to trigger jealousy in others
+jealousy_trigger_seconds = 5.0
+# EMA smoothing factor for motion output (1 = no smoothing, smaller = smoother/slower, 0 = frozen)
+ema_alpha = 0.3
+
+[ML]
+# Path to save/load trained model
+model_path = ml_model.pkl
+# Training data file
+data_path = training_data.json
+# Classifier type: random_forest or svm
+classifier = random_forest
+
+[Recording]
+# Directory to save recorded motion sequences
+sequence_dir = sequences
diff --git a/python_controller/control_panel.py b/python_controller/control_panel.py
new file mode 100644
index 0000000..8b63d4f
--- /dev/null
+++ b/python_controller/control_panel.py
@@ -0,0 +1,334 @@
+"""
+Digital Bloom Control Panel
+Tkinter-based GUI for the Digital Bloom installation.
+"""
+
+import tkinter as tk
+from tkinter import ttk, messagebox
+import threading
+import time
+import queue
+import logging
+import configparser
+import os
+import sys
+from PIL import Image, ImageTk
+import cv2
+
+logger = logging.getLogger(__name__)
+
+
+class ControlPanel:
+    """Main Tkinter control panel for Digital Bloom."""
+
+    PERSONAS = ['Empathy', 'Defensive', 'Predatory', 'Boredom', 'Surprise', 'Jealous']
+    EMOTION_KEYS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
+
+    def __init__(self, config: configparser.ConfigParser, vision_tracker, flower_network, persona_engine, ml_trainer):
+        self.config = config
+        self.vision = vision_tracker
+        self.network = flower_network
+        self.persona_engine = persona_engine
+        self.ml_trainer = ml_trainer
+
+        # Camera
+        cam_id = config.getint('Vision', 'camera_id', fallback=0)
+        self.cap = cv2.VideoCapture(cam_id)
+        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, config.getint('Vision', 'frame_width', fallback=640))
+        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, config.getint('Vision', 'frame_height', fallback=480))
+
+        # State
+        self.auto_mode = False
+        self.running = True
+        self.frame_queue = queue.Queue(maxsize=2)
+        self._last_emotion_data = None
+        # The Tk root must exist before any tk.Variable is created, so it is
+        # built here rather than in _build_ui().
+        self.root = tk.Tk()
+        self._selected_device = tk.StringVar(master=self.root)
+        device_names = self.network.device_names() if self.network else []
+        if device_names:
+            self._selected_device.set(device_names[0])
+
+        self._build_ui()
+        self._start_vision_thread()
+
+    def _build_ui(self):
+        self.root.title("Digital Bloom Control Panel")
+        self.root.configure(bg='#1a1a2e')
+        self.root.protocol("WM_DELETE_WINDOW", self._on_close)
+
+        style = ttk.Style()
+        style.theme_use('clam')
+        style.configure('TFrame', background='#1a1a2e')
+        style.configure('TLabel', background='#1a1a2e', foreground='#e0e0e0', font=('Helvetica', 10))
+        style.configure('Title.TLabel', background='#1a1a2e', foreground='#00d4ff', font=('Helvetica', 14, 'bold'))
+        style.configure('Value.TLabel', background='#1a1a2e', foreground='#00ff88', font=('Helvetica', 10, 'bold'))
+        style.configure('TButton', background='#16213e', foreground='#e0e0e0', font=('Helvetica', 9))
+        style.configure('TCombobox', background='#16213e', foreground='#e0e0e0')
+        style.configure('TScale', background='#1a1a2e', troughcolor='#16213e')
+        style.configure('Section.TLabelframe', background='#16213e', 
foreground='#00d4ff', font=('Helvetica', 10, 'bold')) + style.configure('Section.TLabelframe.Label', background='#16213e', foreground='#00d4ff') + + # ── Title bar ── + title_frame = ttk.Frame(self.root) + title_frame.pack(fill='x', padx=8, pady=(8, 0)) + ttk.Label(title_frame, text="🌸 Digital Bloom Control Panel", style='Title.TLabel').pack(side='left') + self._conn_label = ttk.Label(title_frame, text="● Disconnected", foreground='#ff4444', background='#1a1a2e', font=('Helvetica', 10)) + self._conn_label.pack(side='right', padx=8) + + # ── Top row: preview + perception ── + top_frame = ttk.Frame(self.root) + top_frame.pack(fill='x', padx=8, pady=4) + + # Webcam preview + preview_frame = ttk.LabelFrame(top_frame, text="📹 Live Preview", style='Section.TLabelframe') + preview_frame.pack(side='left', padx=(0, 4)) + self._preview_label = ttk.Label(preview_frame) + self._preview_label.pack(padx=4, pady=4) + + # Perception data + perc_frame = ttk.LabelFrame(top_frame, text="📊 Perception Data", style='Section.TLabelframe') + perc_frame.pack(side='left', fill='both', expand=True, padx=(4, 0)) + + self._emotion_labels = {} + for i, emo in enumerate(self.EMOTION_KEYS): + row = ttk.Frame(perc_frame) + row.pack(fill='x', padx=6, pady=1) + ttk.Label(row, text=f"{emo.capitalize()}:", width=10).pack(side='left') + val_label = ttk.Label(row, text="0.00", style='Value.TLabel', width=6) + val_label.pack(side='left') + bar = ttk.Progressbar(row, length=120, maximum=1.0, mode='determinate') + bar.pack(side='left', padx=4) + self._emotion_labels[emo] = (val_label, bar) + + info_frame = ttk.Frame(perc_frame) + info_frame.pack(fill='x', padx=6, pady=4) + self._info_labels = {} + info_fields = [('dominant', 'Emotion'), ('age', 'Age'), ('gender', 'Gender'), + ('distance', 'Distance'), ('persons', 'Persons'), ('pose', 'Pose'),('color', 'Color')] + for key, label_text in info_fields: + row = ttk.Frame(info_frame) + row.pack(fill='x', pady=1) + ttk.Label(row, text=f"{label_text}:", width=10).pack(side='left') + lbl = ttk.Label(row, text="—", style='Value.TLabel') + lbl.pack(side='left') + self._info_labels[key] = lbl + + # ── Middle: manual controls ── + ctrl_frame = ttk.LabelFrame(self.root, text="🎚️ Manual Control", style='Section.TLabelframe') + ctrl_frame.pack(fill='x', padx=8, pady=4) + + ctrl_inner = ttk.Frame(ctrl_frame) + ctrl_inner.pack(fill='x', padx=6, pady=4) + + # Device selector + dev_row = ttk.Frame(ctrl_inner) + dev_row.pack(fill='x', pady=2) + ttk.Label(dev_row, text="Device:").pack(side='left') + device_names = self.network.device_names() if self.network else [] + dev_combo = ttk.Combobox(dev_row, textvariable=self._selected_device, values=device_names, width=16, state='readonly') + dev_combo.pack(side='left', padx=6) + + # Sliders + self._sliders = {} + slider_defs = [ + ('motor1', 'Motor 1 (DC)', -1.0, 1.0, 0.0), + ('motor2', 'Motor 2 (DC)', -1.0, 1.0, 0.0), + ('led_hue', 'LED Hue °', 0, 360, 120), + ('led_sat', 'LED Saturation', 0.0, 1.0, 0.8), + ('led_bri', 'LED Brightness', 0.0, 1.0, 0.7), + ] + slider_grid = ttk.Frame(ctrl_inner) + slider_grid.pack(fill='x') + for col_idx, (key, label, vmin, vmax, default) in enumerate(slider_defs): + col_frame = ttk.Frame(slider_grid) + col_frame.grid(row=0, column=col_idx, padx=6, pady=2, sticky='n') + ttk.Label(col_frame, text=label, font=('Helvetica', 9)).pack() + var = tk.DoubleVar(value=default) + slider = ttk.Scale(col_frame, from_=vmin, to=vmax, orient='vertical', + variable=var, length=80, + command=lambda v, k=key: self._on_slider_change(k)) + 
slider.pack() + val_lbl = ttk.Label(col_frame, text=f"{default:.2f}", style='Value.TLabel', font=('Helvetica', 8)) + val_lbl.pack() + self._sliders[key] = (var, val_lbl) + + send_btn = ttk.Button(ctrl_inner, text="📤 Send Manual", command=self._send_manual) + send_btn.pack(pady=4) + + # ── Bottom row: Recording + ML ── + bot_frame = ttk.Frame(self.root) + bot_frame.pack(fill='x', padx=8, pady=4) + + # Recording + rec_frame = ttk.LabelFrame(bot_frame, text="💾 Record Mapping", style='Section.TLabelframe') + rec_frame.pack(side='left', fill='both', expand=True, padx=(0, 4)) + + rec_inner = ttk.Frame(rec_frame) + rec_inner.pack(padx=6, pady=4) + ttk.Label(rec_inner, text="Persona Label:").pack(side='left') + self._record_persona = tk.StringVar(value='Empathy') + persona_combo = ttk.Combobox(rec_inner, textvariable=self._record_persona, + values=self.PERSONAS, width=12, state='readonly') + persona_combo.pack(side='left', padx=4) + ttk.Button(rec_inner, text="⏺ Record Sample", command=self._record_sample).pack(side='left', padx=4) + self._sample_count_label = ttk.Label(rec_inner, text=f"Samples: {self.ml_trainer.sample_count}") + self._sample_count_label.pack(side='left', padx=4) + + # ML + ml_frame = ttk.LabelFrame(bot_frame, text="🤖 Machine Learning", style='Section.TLabelframe') + ml_frame.pack(side='left', fill='both', expand=True, padx=(4, 0)) + + ml_inner = ttk.Frame(ml_frame) + ml_inner.pack(padx=6, pady=4) + ttk.Button(ml_inner, text="🏋 Train Model", command=self._train_model).pack(side='left', padx=4) + ttk.Button(ml_inner, text="📂 Load Model", command=self._load_model).pack(side='left', padx=4) + self._auto_btn = ttk.Button(ml_inner, text="▶ Auto Mode: OFF", command=self._toggle_auto) + self._auto_btn.pack(side='left', padx=4) + self._ml_status = ttk.Label(ml_frame, text="No model loaded", foreground='#888') + self._ml_status.pack(padx=6, pady=2) + + def _on_slider_change(self, key): + var, lbl = self._sliders[key] + lbl.config(text=f"{var.get():.2f}") + + def _send_manual(self): + device_name = self._selected_device.get() + device = self.network.get(device_name) if self.network else None + if device is None: + messagebox.showwarning("No Device", f"Device '{device_name}' not found.") + return + m1 = self._sliders['motor1'][0].get() + m2 = self._sliders['motor2'][0].get() + hue = self._sliders['led_hue'][0].get() + sat = self._sliders['led_sat'][0].get() + bri = self._sliders['led_bri'][0].get() + + device.set_motor(1, int(round(m1))) + device.set_motor(2, int(round(m2))) + device.set_led_hsv(1, hue, sat, bri) + device.set_led_hsv(2, (hue + 30) % 360, sat * 0.8, bri * 0.7) + + def _record_sample(self): + if self._last_emotion_data is None: + messagebox.showwarning("No Data", "No perception data available yet.") + return + label = self._record_persona.get() + from persona_engine import PersonaEngine + features = PersonaEngine._extract_features(self._last_emotion_data) + self.ml_trainer.record_sample(features, label) + count = self.ml_trainer.sample_count + self._sample_count_label.config(text=f"Samples: {count}") + self._ml_status.config(text=f"Recorded '{label}' ({count} total)") + + def _train_model(self): + self._ml_status.config(text="Training...") + self.root.update() + metrics = self.ml_trainer.train() + if metrics: + acc = metrics.get('accuracy', 0) + n = metrics.get('n_samples', 0) + self._ml_status.config(text=f"Trained: acc={acc:.0%}, n={n}") + # Push model to persona engine + self.persona_engine.set_ml_model(self.ml_trainer.model, self.ml_trainer.label_encoder) + else: + 
self._ml_status.config(text="Training failed (need ≥10 samples)") + + def _load_model(self): + ok = self.ml_trainer.load_model() + if ok: + self.persona_engine.set_ml_model(self.ml_trainer.model, self.ml_trainer.label_encoder) + self._ml_status.config(text="Model loaded ✓") + else: + self._ml_status.config(text="No model file found") + + def _toggle_auto(self): + self.auto_mode = not self.auto_mode + if self.auto_mode: + self._auto_btn.config(text="⏹ Auto Mode: ON") + else: + self._auto_btn.config(text="▶ Auto Mode: OFF") + # Stop all motors + if self.network: + self.network.broadcast_stop() + + def _start_vision_thread(self): + self._vision_thread = threading.Thread(target=self._vision_loop, daemon=True) + self._vision_thread.start() + self._update_ui() + + def _vision_loop(self): + while self.running: + ret, frame = self.cap.read() + if not ret: + time.sleep(0.05) + continue + frame = cv2.flip(frame, 1) + try: + annotated, emotion_data = self.vision.process_frame(frame) + except Exception as e: + logger.debug(f"Vision error: {e}") + annotated = frame + from vision_tracker import EmotionData + emotion_data = EmotionData() + + self._last_emotion_data = emotion_data + + # Auto mode: run persona engine + if self.auto_mode and self.network: + primary = self._selected_device.get() + states = self.persona_engine.update(emotion_data, primary_device=primary) + self.persona_engine.apply_to_network(self.network) + + # Put frame in queue (non-blocking) + try: + self.frame_queue.put_nowait((annotated, emotion_data)) + except queue.Full: + pass + + def _update_ui(self): + if not self.running: + return + try: + annotated, emotion_data = self.frame_queue.get_nowait() + # Update preview + rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB) + img = Image.fromarray(rgb).resize((320, 240)) + imgtk = ImageTk.PhotoImage(image=img) + self._preview_label.imgtk = imgtk + self._preview_label.config(image=imgtk) + # Update emotion bars + for emo_key, (val_lbl, bar) in self._emotion_labels.items(): + val = emotion_data.emotions.get(emo_key, 0.0) + val_lbl.config(text=f"{val:.2f}") + bar['value'] = val + # Update info labels + self._info_labels['dominant'].config(text=emotion_data.dominant_emotion) + self._info_labels['age'].config(text=str(emotion_data.age) if emotion_data.age else '—') + self._info_labels['gender'].config(text=emotion_data.gender) + self._info_labels['distance'].config(text=f"{emotion_data.distance_estimate:.1f}m") + self._info_labels['persons'].config(text=str(emotion_data.person_count)) + self._info_labels['pose'].config(text=f"{emotion_data.pose_openness:.2f}") + color_hex = emotion_data.dominant_color + self._info_labels['color'].config(text=color_hex, foreground=color_hex if color_hex != '#808080' else '#808080') + # Connection status + dev_count = len(self.network.devices) if self.network else 0 + if dev_count > 0: + self._conn_label.config(text=f"● {dev_count} device(s)", foreground='#00ff88') + else: + self._conn_label.config(text="● No devices", foreground='#ffaa00') + except queue.Empty: + pass + except Exception as e: + logger.debug(f"UI update error: {e}") + + self.root.after(33, self._update_ui) # ~30 fps + + def run(self): + self.root.mainloop() + + def _on_close(self): + self.running = False + if self.network: + self.network.broadcast_stop() + self.cap.release() + self.root.destroy() diff --git a/python_controller/main.py b/python_controller/main.py index fe54845..6f00c45 100644 --- a/python_controller/main.py +++ b/python_controller/main.py @@ -1,264 +1,72 @@ #!/usr/bin/env python3 
""" -Vision PID Control System - Main Application -Tracks face or colored object and controls ESP32-driven flower. +Digital Bloom — Main Application Entry Point """ -import cv2 -import argparse +import configparser +import logging +import os import sys -import time -from pid_controller import PIDController -from vision_tracker import FaceTracker, ColorTracker -from osc_client import FlowerOSCClient +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s [%(name)s] %(levelname)s: %(message)s' +) +logger = logging.getLogger('DigitalBloom') -class FlowerControlSystem: - """Main control system for the flower tracker.""" - - def __init__(self, tracker_type='face', esp32_ip='192.168.1.100', - esp32_port=8000, camera_id=0): - """ - Initialize the control system. - - Args: - tracker_type: Type of tracker ('face' or 'color') - esp32_ip: IP address of ESP32 - esp32_port: UDP port for OSC - camera_id: Camera device ID - """ - print(f"Initializing Flower Control System...") - - # Initialize camera - self.cap = cv2.VideoCapture(camera_id) - if not self.cap.isOpened(): - raise RuntimeError("Failed to open camera") - - # Set camera resolution - self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640) - self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480) - - frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - - print(f"Camera initialized: {frame_width}x{frame_height}") - - # Initialize tracker - if tracker_type == 'face': - self.tracker = FaceTracker(frame_width, frame_height) - print("Using Face Tracker") - elif tracker_type == 'color': - self.tracker = ColorTracker(frame_width, frame_height) - print("Using Color Tracker (tracking red by default)") - else: - raise ValueError(f"Unknown tracker type: {tracker_type}") - - # Initialize PID controllers for pan and tilt - # PID tuning: kp=proportional, ki=integral, kd=derivative - self.pid_pan = PIDController( - kp=0.15, ki=0.01, kd=0.05, - setpoint=0.0, # Error should be 0 (centered) - output_limits=(-30, 30), # Max angle change per update - sample_time=0.03 - ) - - self.pid_tilt = PIDController( - kp=0.15, ki=0.01, kd=0.05, - setpoint=0.0, - output_limits=(-30, 30), - sample_time=0.03 - ) - - # Initialize OSC client - self.osc_client = FlowerOSCClient(esp32_ip, esp32_port) - print(f"OSC client initialized: {esp32_ip}:{esp32_port}") - - # Servo positions (center position) - self.pan_angle = 90 - self.tilt_angle = 90 - - # Tracking state - self.tracking_active = False - self.flower_openness = 0.0 - self.lost_target_time = None - - def run(self): - """Main control loop.""" - print("\nStarting control loop...") - print("Controls:") - print(" SPACE - Toggle tracking on/off") - print(" 'o' - Open flower") - print(" 'c' - Close flower") - print(" 'r' - Reset servo positions") - print(" 'q' - Quit") - print("-" * 50) - + +def main(): + config = configparser.ConfigParser() + config_path = os.path.join(os.path.dirname(__file__), 'config.ini') + config.read(config_path) + + logger.info("=== Digital Bloom Control System ===") + + # Import modules + from vision_tracker import VisionTracker + from osc_client import FlowerNetwork + from persona_engine import PersonaEngine + from ml_trainer import MLTrainer + + # Initialise components + vision = VisionTracker(config) + network = FlowerNetwork(config) + persona_engine = PersonaEngine(config, network.device_names()) + ml_trainer = MLTrainer(config) + + # Try to load existing ML model + if ml_trainer.load_model(): + persona_engine.set_ml_model(ml_trainer.model, 
ml_trainer.label_encoder) + logger.info("ML model loaded") + else: + logger.info("No ML model found, using heuristic persona mapping") + + # Launch control panel + try: + from control_panel import ControlPanel + panel = ControlPanel(config, vision, network, persona_engine, ml_trainer) + panel.run() + except ImportError as e: + logger.error(f"Could not launch control panel: {e}") + logger.info("Running headless vision loop (Ctrl+C to stop)") + import cv2, time + cap = cv2.VideoCapture(config.getint('Vision', 'camera_id', fallback=0)) try: while True: - # Read frame from camera - ret, frame = self.cap.read() + ret, frame = cap.read() if not ret: - print("Failed to read frame from camera") - break - - # Flip frame horizontally for mirror effect - frame = cv2.flip(frame, 1) - - # Draw center crosshair - cv2.line(frame, (self.tracker.center_x - 20, self.tracker.center_y), - (self.tracker.center_x + 20, self.tracker.center_y), - (255, 255, 255), 1) - cv2.line(frame, (self.tracker.center_x, self.tracker.center_y - 20), - (self.tracker.center_x, self.tracker.center_y + 20), - (255, 255, 255), 1) - - # Get tracking error - x_error, y_error, detected = self.tracker.get_tracking_error(frame) - - if self.tracking_active: - if detected: - # Calculate PID output - pan_correction = self.pid_pan.update(x_error) - tilt_correction = self.pid_tilt.update(y_error) - - # Update servo positions - self.pan_angle -= pan_correction # Negative to follow target - self.tilt_angle += tilt_correction - - # Clamp to valid servo range - self.pan_angle = max(0, min(180, self.pan_angle)) - self.tilt_angle = max(0, min(180, self.tilt_angle)) - - # Send servo commands - self.osc_client.send_servo_command( - int(self.pan_angle), - int(self.tilt_angle) - ) - - # Open flower when target is detected - if self.flower_openness < 1.0: - self.flower_openness = min(1.0, self.flower_openness + 0.05) - self.osc_client.send_flower_state(self.flower_openness) - - # Reset lost target timer - self.lost_target_time = None - - else: - # Target lost - if self.lost_target_time is None: - self.lost_target_time = time.time() - - # Close flower after 2 seconds of no detection - if time.time() - self.lost_target_time > 2.0: - if self.flower_openness > 0.0: - self.flower_openness = max(0.0, self.flower_openness - 0.05) - self.osc_client.send_flower_state(self.flower_openness) - - # Display status - status_color = (0, 255, 0) if detected else (0, 0, 255) - status_text = "TRACKING" if self.tracking_active else "IDLE" - cv2.putText(frame, f"Mode: {status_text}", (10, 30), - cv2.FONT_HERSHEY_SIMPLEX, 0.7, status_color, 2) - cv2.putText(frame, f"Target: {'DETECTED' if detected else 'LOST'}", - (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, status_color, 2) - cv2.putText(frame, f"Pan: {int(self.pan_angle)}° Tilt: {int(self.tilt_angle)}°", - (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2) - cv2.putText(frame, f"Flower: {int(self.flower_openness * 100)}%", - (10, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2) - - if detected: - cv2.putText(frame, f"Error: X={x_error:+.0f} Y={y_error:+.0f}", - (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2) - - # Show frame - cv2.imshow('Flower Vision Control', frame) - - # Handle keyboard input - key = cv2.waitKey(1) & 0xFF - if key == ord('q'): - break - elif key == ord(' '): - self.tracking_active = not self.tracking_active - if self.tracking_active: - self.osc_client.send_tracking_mode(1) - self.pid_pan.reset() - self.pid_tilt.reset() - print("Tracking ENABLED") - else: - 
self.osc_client.send_tracking_mode(0) - print("Tracking DISABLED") - elif key == ord('o'): - self.flower_openness = 1.0 - self.osc_client.send_flower_state(self.flower_openness) - print("Flower OPEN") - elif key == ord('c'): - self.flower_openness = 0.0 - self.osc_client.send_flower_state(self.flower_openness) - print("Flower CLOSED") - elif key == ord('r'): - self.pan_angle = 90 - self.tilt_angle = 90 - self.osc_client.send_servo_command(90, 90) - print("Servos RESET to center") - + time.sleep(0.1) + continue + _, emotion_data = vision.process_frame(frame) + persona_engine.update(emotion_data) + persona_engine.apply_to_network(network) + time.sleep(0.05) except KeyboardInterrupt: - print("\nInterrupted by user") + logger.info("Stopped") finally: - self.cleanup() - - def cleanup(self): - """Clean up resources.""" - print("\nCleaning up...") - - # Return servos to center - self.osc_client.send_servo_command(90, 90) - - # Close flower - self.osc_client.send_flower_state(0.0) - - # Stop tracking - self.osc_client.send_tracking_mode(0) - - # Release camera - self.cap.release() - - # Close windows - cv2.destroyAllWindows() - - print("Cleanup complete") - - -def main(): - """Entry point for the application.""" - parser = argparse.ArgumentParser( - description='Vision PID Control System for ESP32 Flower' - ) - parser.add_argument('--tracker', type=str, default='face', - choices=['face', 'color'], - help='Type of tracker to use (default: face)') - parser.add_argument('--ip', type=str, default='192.168.1.100', - help='ESP32 IP address (default: 192.168.1.100)') - parser.add_argument('--port', type=int, default=8000, - help='ESP32 OSC port (default: 8000)') - parser.add_argument('--camera', type=int, default=0, - help='Camera device ID (default: 0)') - - args = parser.parse_args() - - try: - system = FlowerControlSystem( - tracker_type=args.tracker, - esp32_ip=args.ip, - esp32_port=args.port, - camera_id=args.camera - ) - system.run() - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - return 1 - - return 0 + cap.release() + network.broadcast_stop() if __name__ == '__main__': - sys.exit(main()) + sys.exit(main() or 0) diff --git a/python_controller/ml_trainer.py b/python_controller/ml_trainer.py new file mode 100644 index 0000000..af84b08 --- /dev/null +++ b/python_controller/ml_trainer.py @@ -0,0 +1,131 @@ +""" +ML Trainer — lightweight emotion-to-persona classifier. +Uses scikit-learn RandomForest or SVM. +Saves/loads model to/from file. 
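+The 10-element feature vector must stay in sync with PersonaEngine._extract_features().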
+""" + +import json +import os +import logging +import configparser +from typing import List, Tuple, Optional + +logger = logging.getLogger(__name__) + +try: + import numpy as np + from sklearn.ensemble import RandomForestClassifier + from sklearn.svm import SVC + from sklearn.preprocessing import LabelEncoder + from sklearn.model_selection import cross_val_score + import joblib + SKLEARN_AVAILABLE = True +except ImportError: + SKLEARN_AVAILABLE = False + print("[ML] scikit-learn not available, ML training disabled") + + +class MLTrainer: + """Trains and persists an emotion → persona classifier.""" + + FEATURE_NAMES = [ + 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral', + 'distance', 'face_area', 'pose_openness' + ] + + def __init__(self, config: configparser.ConfigParser): + self.config = config + self.model_path = config.get('ML', 'model_path', fallback='ml_model.pkl') + self.data_path = config.get('ML', 'data_path', fallback='training_data.json') + self.classifier_type = config.get('ML', 'classifier', fallback='random_forest') + + self.model = None + self.label_encoder = LabelEncoder() if SKLEARN_AVAILABLE else None + self.training_data: List[dict] = [] # list of {features: [...], label: str} + + # Load existing data + self._load_data() + + def record_sample(self, features: list, persona_label: str): + """Add one training sample.""" + self.training_data.append({'features': features, 'label': persona_label}) + self._save_data() + logger.info(f"[ML] Recorded sample: label={persona_label}, total={len(self.training_data)}") + + def train(self) -> Optional[dict]: + """Train classifier on all recorded data. Returns metrics dict or None.""" + if not SKLEARN_AVAILABLE: + logger.error("[ML] scikit-learn not available") + return None + + if len(self.training_data) < 10: + logger.warning(f"[ML] Not enough samples ({len(self.training_data)}), need at least 10") + return None + + X = np.array([d['features'] for d in self.training_data]) + y_raw = [d['label'] for d in self.training_data] + y = self.label_encoder.fit_transform(y_raw) + + if self.classifier_type == 'svm': + clf = SVC(kernel='rbf', probability=True, C=10) + else: + clf = RandomForestClassifier(n_estimators=100, random_state=42) + + # Cross-validation + if len(X) >= 20: + scores = cross_val_score(clf, X, y, cv=min(5, len(X) // 4)) + accuracy = float(scores.mean()) + else: + accuracy = 0.0 + + clf.fit(X, y) + self.model = clf + + # Save model + if SKLEARN_AVAILABLE: + joblib.dump({'model': clf, 'label_encoder': self.label_encoder}, self.model_path) + logger.info(f"[ML] Model saved to {self.model_path}, accuracy={accuracy:.2f}") + + return {'accuracy': accuracy, 'n_samples': len(self.training_data), + 'labels': list(self.label_encoder.classes_)} + + def load_model(self) -> bool: + """Load previously trained model.""" + if not SKLEARN_AVAILABLE or not os.path.exists(self.model_path): + return False + try: + data = joblib.load(self.model_path) + self.model = data['model'] + self.label_encoder = data['label_encoder'] + logger.info(f"[ML] Model loaded from {self.model_path}") + return True + except Exception as e: + logger.warning(f"[ML] Failed to load model: {e}") + return False + + def _save_data(self): + try: + with open(self.data_path, 'w') as f: + json.dump(self.training_data, f, indent=2) + except Exception as e: + logger.warning(f"[ML] Failed to save data: {e}") + + def _load_data(self): + if os.path.exists(self.data_path): + try: + with open(self.data_path, 'r') as f: + self.training_data = json.load(f) + 
logger.info(f"[ML] Loaded {len(self.training_data)} training samples") + except Exception as e: + logger.warning(f"[ML] Failed to load data: {e}") + self.training_data = [] + + def clear_data(self): + self.training_data = [] + if os.path.exists(self.data_path): + os.remove(self.data_path) + logger.info("[ML] Training data cleared") + + @property + def sample_count(self) -> int: + return len(self.training_data) diff --git a/python_controller/osc_client.py b/python_controller/osc_client.py index 4d99793..dab2989 100644 --- a/python_controller/osc_client.py +++ b/python_controller/osc_client.py @@ -1,73 +1,97 @@ """ -OSC Client for ESP32 Communication -Sends motor and servo commands via OSC over UDP. +OSC Client — Multi-device flower control. +Matches the actual esp32_sylvie.ino OSC protocol. """ from pythonosc import udp_client -from pythonosc.osc_message_builder import OscMessageBuilder - - -class FlowerOSCClient: - """OSC client for sending commands to ESP32.""" - - def __init__(self, ip="192.168.1.100", port=8000): - """ - Initialize OSC client. - - Args: - ip: IP address of ESP32 - port: UDP port for OSC communication - """ +import configparser +import logging + +logger = logging.getLogger(__name__) + + +class FlowerDevice: + """Represents a single ESP32 flower device.""" + + def __init__(self, name: str, ip: str, port: int, device_type: str = 'dc_motor'): + self.name = name self.ip = ip self.port = port - self.client = udp_client.SimpleUDPClient(ip, port) - - def send_servo_command(self, pan_angle, tilt_angle): - """ - Send servo angles to ESP32. - - Args: - pan_angle: Pan servo angle (0-180 degrees) - tilt_angle: Tilt servo angle (0-180 degrees) - """ - # Clamp angles to valid range - pan_angle = max(0, min(180, pan_angle)) - tilt_angle = max(0, min(180, tilt_angle)) - - # Send OSC message - self.client.send_message("/flower/servo", [pan_angle, tilt_angle]) - - def send_flower_state(self, openness): - """ - Send flower open/close state to ESP32. - - Args: - openness: Flower openness (0.0 = closed, 1.0 = fully open) - """ - # Clamp to valid range - openness = max(0.0, min(1.0, openness)) - - # Send OSC message - self.client.send_message("/flower/state", openness) - - def send_motor_speed(self, speed): - """ - Send motor speed command to ESP32. - - Args: - speed: Motor speed (-100 to 100, negative = reverse) - """ - # Clamp to valid range - speed = max(-100, min(100, speed)) - - # Send OSC message - self.client.send_message("/flower/motor", speed) - - def send_tracking_mode(self, mode): - """ - Send tracking mode to ESP32. 
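+        # device_type is informational for now; every high-level helper below speaks the dc_motor (esp32_sylvie) protocol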
- - Args: - mode: Tracking mode (0 = idle, 1 = tracking) - """ - self.client.send_message("/flower/mode", mode) + self.device_type = device_type # 'dc_motor' or 'servo' + try: + self.client = udp_client.SimpleUDPClient(ip, port) + self.connected = True + except Exception as e: + logger.warning(f"[OSC] Could not create client for {name} ({ip}:{port}): {e}") + self.client = None + self.connected = False + + def send(self, address: str, value): + if self.client is None: + return + try: + self.client.send_message(address, value) + except Exception as e: + logger.debug(f"[OSC] Send error to {self.name}: {e}") + + # --- High-level helpers for dc_motor type (esp32_sylvie protocol) --- + def set_auto(self, enabled: bool): + self.send('/auto', 1 if enabled else 0) + + def set_motor(self, motor_num: int, direction: int): + """direction: 1=forward, -1=reverse, 0=stop""" + self.send(f'/motor{motor_num}', direction) + + def set_led(self, led_num: int, r: int, g: int, b: int): + self.send(f'/led{led_num}', [r, g, b]) + + def set_preset(self, preset: int): + self.send('/preset', preset) + + def stop_all(self): + self.set_auto(False) + self.set_preset(3) # preset 3 = stop all in esp32_sylvie + + # --- Convenience: set flower openness (0.0-1.0) mapped to motor direction --- + def set_openness(self, openness: float): + """openness 0.0=closed, 1.0=fully open. Drives motor1 to open/close.""" + direction = 1 if openness > 0.5 else (-1 if openness < 0.3 else 0) + self.set_motor(1, direction) + + # --- Convenience: set LED color from HSV-like params --- + def set_led_hsv(self, led_num: int, hue: float, saturation: float, brightness: float): + """hue 0-360, saturation 0-1, brightness 0-1""" + import colorsys + r, g, b = colorsys.hsv_to_rgb(hue / 360.0, saturation, brightness) + self.set_led(led_num, int(r * 255), int(g * 255), int(b * 255)) + + +class FlowerNetwork: + """Manages all flower devices loaded from config.""" + + def __init__(self, config: configparser.ConfigParser): + self.devices: dict[str, FlowerDevice] = {} + device_list = [d.strip() for d in config.get('Devices', 'device_list', fallback='').split(',') if d.strip()] + for name in device_list: + section = f'Device_{name}' + if config.has_section(section): + ip = config.get(section, 'ip') + port = config.getint(section, 'port') + dev_type = config.get(section, 'type', fallback='dc_motor') + self.devices[name] = FlowerDevice(name, ip, port, dev_type) + logger.info(f"[Network] Registered device '{name}' at {ip}:{port} (type={dev_type})") + else: + logger.warning(f"[Network] No config section for device '{name}'") + + def get(self, name: str) -> FlowerDevice: + return self.devices.get(name) + + def all_devices(self) -> list: + return list(self.devices.values()) + + def broadcast_stop(self): + for dev in self.devices.values(): + dev.stop_all() + + def device_names(self) -> list: + return list(self.devices.keys()) diff --git a/python_controller/persona_engine.py b/python_controller/persona_engine.py new file mode 100644 index 0000000..8a721aa --- /dev/null +++ b/python_controller/persona_engine.py @@ -0,0 +1,201 @@ +""" +Persona Engine — 3-layer biomorphic system. 
+ +Layer 1: ML Brain — maps messy sensor data → discrete persona label +Layer 2: State Machine — maps persona label → precise motor/LED params +Layer 3: Motion Render — applies EMA smoothing and physical dynamics +""" + +import time +import math +import random +import configparser +import logging +from dataclasses import dataclass, field +from typing import Dict, Optional +from vision_tracker import EmotionData + +logger = logging.getLogger(__name__) + + +# ─── Layer 2: Persona → Hardware params ──────────────────────────────────────── + +PERSONA_PARAMS: Dict[str, dict] = { + 'Empathy': {'openness': 1.0, 'jitter': 0.0, 'speed': 0.4, 'led_hue': 120, 'led_sat': 0.8, 'led_bri': 0.8}, + 'Defensive': {'openness': 0.1, 'jitter': 0.2, 'speed': 0.2, 'led_hue': 240, 'led_sat': 0.9, 'led_bri': 0.5}, + 'Predatory': {'openness': 0.7, 'jitter': 0.1, 'speed': 0.8, 'led_hue': 0, 'led_sat': 1.0, 'led_bri': 0.9}, + 'Boredom': {'openness': 0.3, 'jitter': 0.0, 'speed': 0.1, 'led_hue': 200, 'led_sat': 0.3, 'led_bri': 0.3}, + 'Surprise': {'openness': 1.0, 'jitter': 1.0, 'speed': 1.0, 'led_hue': 60, 'led_sat': 1.0, 'led_bri': 1.0}, + 'Jealous': {'openness': 0.6, 'jitter': 0.5, 'speed': 0.6, 'led_hue': 0, 'led_sat': 1.0, 'led_bri': 0.7}, +} + + +@dataclass +class DeviceState: + """Rendered state for one physical flower device.""" + name: str + persona: str = 'Boredom' + openness: float = 0.3 # 0-1 + led_hue: float = 200.0 # 0-360 + led_sat: float = 0.3 + led_bri: float = 0.3 + jitter_offset: float = 0.0 + override_until: float = 0.0 # epoch time; if > now, state is locked + + def is_overridden(self) -> bool: + return time.time() < self.override_until + + +class PersonaEngine: + """ + Drives all flower devices based on sensor data. + """ + + def __init__(self, config: configparser.ConfigParser, device_names: list): + self.config = config + self.ema_alpha = config.getfloat('Personas', 'ema_alpha', fallback=0.3) + self.jealousy_trigger = config.getfloat('Personas', 'jealousy_trigger_seconds', fallback=5.0) + + self.states: Dict[str, DeviceState] = { + name: DeviceState(name=name) for name in device_names + } + + # Track how long first device has been Empathy + self._empathy_start: Dict[str, Optional[float]] = {n: None for n in device_names} + + # ML model (set by MLTrainer) + self.ml_model = None + self.label_encoder = None + + def set_ml_model(self, model, label_encoder): + self.ml_model = model + self.label_encoder = label_encoder + + def predict_persona(self, emotion_data: EmotionData) -> str: + """Layer 1: ML Brain — predict persona from sensor data.""" + if self.ml_model is None: + # Heuristic fallback + return self._heuristic_persona(emotion_data) + + try: + features = self._extract_features(emotion_data) + import numpy as np + pred = self.ml_model.predict([features])[0] + if self.label_encoder: + return self.label_encoder.inverse_transform([pred])[0] + return str(pred) + except Exception as e: + logger.debug(f"ML predict failed: {e}") + return self._heuristic_persona(emotion_data) + + def _heuristic_persona(self, emotion_data: EmotionData) -> str: + """Simple rule-based fallback when no ML model is trained.""" + e = emotion_data.emotions + if e.get('happy', 0) > 0.4: + return 'Empathy' + if e.get('surprise', 0) > 0.4: + return 'Surprise' + if e.get('angry', 0) > 0.3 or e.get('fear', 0) > 0.3: + return 'Defensive' + if emotion_data.distance_estimate < 0.8: + return 'Predatory' + if emotion_data.person_count == 0: + return 'Boredom' + return 'Empathy' + + @staticmethod + def _extract_features(emotion_data: 
EmotionData) -> list: + e = emotion_data.emotions + return [ + e.get('angry', 0), e.get('disgust', 0), e.get('fear', 0), + e.get('happy', 0), e.get('sad', 0), e.get('surprise', 0), + e.get('neutral', 0), + emotion_data.distance_estimate, + emotion_data.face_area, + emotion_data.pose_openness, + ] + + def update(self, emotion_data: EmotionData, primary_device: str = None): + """ + Layer 2: State machine update. + - Predicts persona for primary device + - Applies jealousy network to other devices + - Returns dict of DeviceState + """ + if not self.states: + return {} + + device_names = list(self.states.keys()) + if primary_device is None: + primary_device = device_names[0] + + # Predict primary persona + primary_persona = self.predict_persona(emotion_data) + + for name, state in self.states.items(): + if state.is_overridden(): + continue # Locked by override (e.g. Jealous) + + if name == primary_device: + target_persona = primary_persona + else: + # Jealousy network: if another device has been Empathy for too long + target_persona = 'Boredom' + + self._apply_persona(state, target_persona) + + # Jealousy network + if primary_persona == 'Empathy': + if self._empathy_start[primary_device] is None: + self._empathy_start[primary_device] = time.time() + elapsed = time.time() - self._empathy_start[primary_device] + if elapsed >= self.jealousy_trigger: + for name in device_names: + if name != primary_device and not self.states[name].is_overridden(): + logger.info(f"[Jealousy] Device '{name}' becomes Jealous!") + self._apply_persona(self.states[name], 'Jealous') + self.states[name].override_until = time.time() + 8.0 # 8s jealous burst + else: + self._empathy_start[primary_device] = None + + return self.states + + def _apply_persona(self, state: DeviceState, persona: str): + """Layer 2 → Layer 3: Apply persona params with EMA smoothing.""" + params = PERSONA_PARAMS.get(persona, PERSONA_PARAMS['Boredom']) + state.persona = persona + alpha = self.ema_alpha + + # Add jitter (Layer 3 physical render) + jitter_amount = params['jitter'] + jitter = (random.random() - 0.5) * 2.0 * jitter_amount * 0.2 + + target_openness = params['openness'] + jitter + target_openness = max(0.0, min(1.0, target_openness)) + + # EMA smoothing + state.openness = state.openness * (1 - alpha) + target_openness * alpha + state.led_hue = state.led_hue * (1 - alpha) + params['led_hue'] * alpha + state.led_sat = state.led_sat * (1 - alpha) + params['led_sat'] * alpha + state.led_bri = state.led_bri * (1 - alpha) + params['led_bri'] * alpha + state.jitter_offset = jitter + + def manual_override(self, device_name: str, persona: str, duration: float = 0): + """Manually set a device's persona (from GUI).""" + if device_name in self.states: + self._apply_persona(self.states[device_name], persona) + if duration > 0: + self.states[device_name].override_until = time.time() + duration + + def apply_to_network(self, flower_network): + """Send all device states to the FlowerNetwork via OSC.""" + for name, state in self.states.items(): + device = flower_network.get(name) + if device is None: + continue + # Set motor direction from openness + direction = 1 if state.openness > 0.6 else (-1 if state.openness < 0.3 else 0) + device.set_motor(1, direction) + # Set LEDs + device.set_led_hsv(1, state.led_hue, state.led_sat, state.led_bri) + device.set_led_hsv(2, (state.led_hue + 30) % 360, state.led_sat * 0.8, state.led_bri * 0.7) diff --git a/python_controller/requirements.txt b/python_controller/requirements.txt index 60d743c..09310ea 100644 --- 
a/python_controller/requirements.txt +++ b/python_controller/requirements.txt @@ -1,3 +1,15 @@ +# Core opencv-python>=4.8.1.78 numpy>=1.24.0 python-osc>=1.8.0 +Pillow>=10.0.0 + +# Emotion recognition (optional but recommended) +deepface>=0.0.79 + +# Pose estimation (optional but recommended) +mediapipe>=0.10.0 + +# Machine learning +scikit-learn>=1.3.0 +joblib>=1.3.0 diff --git a/python_controller/vision_tracker.py b/python_controller/vision_tracker.py index 2533bde..001e943 100644 --- a/python_controller/vision_tracker.py +++ b/python_controller/vision_tracker.py @@ -1,173 +1,204 @@ """ Vision Tracking Module -Implements face and color tracking using OpenCV. +Emotion recognition (DeepFace) + Pose estimation (MediaPipe). +Falls back to Haar cascade if DeepFace/MediaPipe not available. """ import cv2 import numpy as np +import math +import configparser +from dataclasses import dataclass, field +from typing import Dict, Optional, Tuple +try: + from deepface import DeepFace + DEEPFACE_AVAILABLE = True +except ImportError: + DEEPFACE_AVAILABLE = False + print("[Vision] DeepFace not available, using Haar cascade fallback") -class VisionTracker: - """Base class for vision tracking.""" - - def __init__(self, frame_width=640, frame_height=480): - """ - Initialize vision tracker. - - Args: - frame_width: Width of camera frame - frame_height: Height of camera frame - """ - self.frame_width = frame_width - self.frame_height = frame_height - self.center_x = frame_width // 2 - self.center_y = frame_height // 2 - - def get_tracking_error(self, frame): - """ - Get tracking error from frame. - - Args: - frame: Input image frame - - Returns: - tuple: (x_error, y_error, detected) where errors are in pixels - from center, and detected is a boolean - """ - raise NotImplementedError("Subclass must implement get_tracking_error") +try: + import mediapipe as mp + MEDIAPIPE_AVAILABLE = True +except ImportError: + MEDIAPIPE_AVAILABLE = False + print("[Vision] MediaPipe not available, pose estimation disabled") -class FaceTracker(VisionTracker): - """Face tracking using Haar Cascade.""" - - def __init__(self, frame_width=640, frame_height=480): - """Initialize face tracker with Haar Cascade.""" - super().__init__(frame_width, frame_height) - - # Load pre-trained face detector - cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml' - self.face_cascade = cv2.CascadeClassifier(cascade_path) - - if self.face_cascade.empty(): - raise RuntimeError("Failed to load face cascade classifier") - - def get_tracking_error(self, frame): - """ - Detect face and calculate tracking error. 
- - Args: - frame: Input BGR image - - Returns: - tuple: (x_error, y_error, detected) - """ - # Convert to grayscale for face detection - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - - # Detect faces - faces = self.face_cascade.detectMultiScale( - gray, - scaleFactor=1.1, - minNeighbors=5, - minSize=(30, 30) - ) - - if len(faces) > 0: - # Use the largest face - largest_face = max(faces, key=lambda f: f[2] * f[3]) - x, y, w, h = largest_face - - # Calculate center of face - face_center_x = x + w // 2 - face_center_y = y + h // 2 - - # Calculate error from frame center - x_error = face_center_x - self.center_x - y_error = face_center_y - self.center_y - - # Draw rectangle around face - cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) - cv2.circle(frame, (face_center_x, face_center_y), 5, (0, 255, 0), -1) - - return (x_error, y_error, True) - - return (0, 0, False) +@dataclass +class EmotionData: + emotions: Dict[str, float] = field(default_factory=lambda: { + 'angry': 0.0, 'disgust': 0.0, 'fear': 0.0, 'happy': 0.0, + 'sad': 0.0, 'surprise': 0.0, 'neutral': 1.0 + }) + dominant_emotion: str = 'neutral' + confidence: float = 0.0 + age: int = 0 + gender: str = 'unknown' + face_area: float = 0.0 # normalized 0-1 relative to frame + distance_estimate: float = 3.0 # meters + person_count: int = 0 + pose_openness: float = 0.0 # 0=closed/defensive, 1=open/welcoming + dominant_color: str = '#808080' -class ColorTracker(VisionTracker): - """Color tracking using HSV color space.""" - - def __init__(self, frame_width=640, frame_height=480, - lower_hsv=(0, 120, 70), upper_hsv=(10, 255, 255)): - """ - Initialize color tracker. - - Args: - frame_width: Width of camera frame - frame_height: Height of camera frame - lower_hsv: Lower bound for HSV color range (default: red) - upper_hsv: Upper bound for HSV color range (default: red) - """ - super().__init__(frame_width, frame_height) - self.lower_hsv = np.array(lower_hsv) - self.upper_hsv = np.array(upper_hsv) - - def set_color_range(self, lower_hsv, upper_hsv): - """ - Update color tracking range. 
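+# VisionTracker replaces the old FaceTracker/ColorTracker pair: instead of a raw
+# (x_error, y_error) pixel offset it returns a full EmotionData snapshot per frame.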
+class VisionTracker: + def __init__(self, config: configparser.ConfigParser): + vis = config['Vision'] + self.camera_id = config.getint('Vision', 'camera_id', fallback=0) + self.frame_width = config.getint('Vision', 'frame_width', fallback=640) + self.frame_height = config.getint('Vision', 'frame_height', fallback=480) + self.emotion_backend = vis.get('emotion_backend', 'deepface') + self.deepface_model = vis.get('deepface_model', 'VGG-Face') + self.enable_pose = config.getboolean('Vision', 'enable_pose', fallback=True) + self.min_confidence = config.getfloat('Vision', 'min_face_confidence', fallback=0.5) + + self._frame_area = self.frame_width * self.frame_height + + # Haar cascade fallback + cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml' + self.face_cascade = cv2.CascadeClassifier(cascade_path) - Args: - lower_hsv: Lower bound for HSV color range - upper_hsv: Upper bound for HSV color range - """ - self.lower_hsv = np.array(lower_hsv) - self.upper_hsv = np.array(upper_hsv) + # MediaPipe pose + self.pose = None + if MEDIAPIPE_AVAILABLE and self.enable_pose: + mp_pose = mp.solutions.pose + self.pose = mp_pose.Pose( + static_image_mode=False, + min_detection_confidence=0.5, + min_tracking_confidence=0.5 + ) + self.mp_draw = mp.solutions.drawing_utils + + # Frame counter to throttle DeepFace (expensive) + self._frame_count = 0 + self._deepface_interval = 5 # run DeepFace every N frames + self._last_emotion_data = EmotionData() - def get_tracking_error(self, frame): - """ - Detect colored object and calculate tracking error. - - Args: - frame: Input BGR image - - Returns: - tuple: (x_error, y_error, detected) - """ - # Convert to HSV color space - hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) - - # Create mask for color - mask = cv2.inRange(hsv, self.lower_hsv, self.upper_hsv) - - # Apply morphological operations to reduce noise - kernel = np.ones((5, 5), np.uint8) - mask = cv2.erode(mask, kernel, iterations=1) - mask = cv2.dilate(mask, kernel, iterations=2) - - # Find contours - contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, - cv2.CHAIN_APPROX_SIMPLE) - - if contours: - # Find the largest contour - largest_contour = max(contours, key=cv2.contourArea) - - # Get minimum area that's significant - if cv2.contourArea(largest_contour) > 500: - # Calculate moments to find center - M = cv2.moments(largest_contour) - if M["m00"] > 0: - cx = int(M["m10"] / M["m00"]) - cy = int(M["m01"] / M["m00"]) + def process_frame(self, frame: np.ndarray) -> Tuple[np.ndarray, EmotionData]: + """Process a frame and return annotated frame + EmotionData.""" + annotated = frame.copy() + self._frame_count += 1 + + data = EmotionData() + + # --- Pose estimation (every frame, lightweight) --- + if self.pose is not None: + rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + results = self.pose.process(rgb) + if results.pose_landmarks: + self.mp_draw.draw_landmarks( + annotated, results.pose_landmarks, + mp.solutions.pose.POSE_CONNECTIONS, + landmark_drawing_spec=self.mp_draw.DrawingSpec(color=(0, 255, 128), thickness=1, circle_radius=2) + ) + data.pose_openness = self.calculate_pose_openness(results.pose_landmarks) + + # --- Emotion / face detection --- + if DEEPFACE_AVAILABLE and self.emotion_backend == 'deepface': + if self._frame_count % self._deepface_interval == 0: + try: + result_list = DeepFace.analyze( + img_path=frame, + actions=['emotion', 'age', 'gender'], + enforce_detection=False, + silent=True + ) + if isinstance(result_list, dict): + result_list = [result_list] - # Calculate 
error from frame center - x_error = cx - self.center_x - y_error = cy - self.center_y + data.person_count = len(result_list) + if result_list: + r = result_list[0] + raw_emotions = r.get('emotion', {}) + total = sum(raw_emotions.values()) or 1.0 + data.emotions = {k: v / total for k, v in raw_emotions.items()} + data.dominant_emotion = r.get('dominant_emotion', 'neutral') + data.confidence = data.emotions.get(data.dominant_emotion, 0.0) + data.age = int(r.get('age', 0)) + gender_val = r.get('dominant_gender', r.get('gender', 'unknown')) + if isinstance(gender_val, dict): + data.gender = max(gender_val, key=gender_val.get) + else: + data.gender = str(gender_val) + + # Face region + region = r.get('region', {}) + if region: + rx, ry, rw, rh = region.get('x',0), region.get('y',0), region.get('w',0), region.get('h',0) + face_rect = (rx, ry, rw, rh) + area_ratio = (rw * rh) / self._frame_area + data.face_area = min(1.0, area_ratio) + data.distance_estimate = self.estimate_distance(area_ratio) + data.dominant_color = self.get_dominant_color(frame, face_rect) + cv2.rectangle(annotated, (rx, ry), (rx+rw, ry+rh), (0, 255, 0), 2) + cv2.putText(annotated, f"{data.dominant_emotion} {data.confidence:.2f}", + (rx, ry - 8), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 1) - # Draw contour and center - cv2.drawContours(frame, [largest_contour], -1, (0, 255, 0), 2) - cv2.circle(frame, (cx, cy), 5, (0, 255, 0), -1) - - return (x_error, y_error, True) - - return (0, 0, False) + self._last_emotion_data = data + except Exception as e: + data = self._last_emotion_data # reuse last good result + else: + data = self._last_emotion_data + else: + # Haar cascade fallback + gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + faces = self.face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(30, 30)) + data.person_count = len(faces) + if len(faces) > 0: + largest = max(faces, key=lambda f: f[2] * f[3]) + x, y, w, h = largest + area_ratio = (w * h) / self._frame_area + data.face_area = min(1.0, area_ratio) + data.distance_estimate = self.estimate_distance(area_ratio) + data.dominant_color = self.get_dominant_color(frame, (x, y, w, h)) + data.dominant_emotion = 'neutral' + data.confidence = 0.5 + cv2.rectangle(annotated, (x, y), (x+w, y+h), (200, 200, 0), 2) + + return annotated, data + + @staticmethod + def estimate_distance(face_area_ratio: float) -> float: + if face_area_ratio <= 0: + return 3.0 + return max(0.3, min(5.0, 0.15 / math.sqrt(face_area_ratio))) + + @staticmethod + def calculate_pose_openness(pose_landmarks) -> float: + try: + lm = pose_landmarks.landmark + # Use shoulder-wrist spread as openness + ls = lm[11] # left shoulder + rs = lm[12] # right shoulder + lw = lm[15] # left wrist + rw = lm[16] # right wrist + shoulder_width = abs(rs.x - ls.x) + if shoulder_width < 0.01: + return 0.5 + wrist_spread = abs(rw.x - lw.x) + openness = min(1.0, wrist_spread / (shoulder_width * 3.0)) + return openness + except Exception: + return 0.5 + + @staticmethod + def get_dominant_color(frame: np.ndarray, rect=None) -> str: + try: + if rect is not None: + x, y, w, h = rect + roi = frame[max(0,y):y+h, max(0,x):x+w] + else: + roi = frame + if roi.size == 0: + return '#808080' + # Downsample for speed + small = cv2.resize(roi, (20, 20)) + pixels = small.reshape(-1, 3).astype(np.float32) + criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0) + _, _, centers = cv2.kmeans(pixels, 1, None, criteria, 3, cv2.KMEANS_RANDOM_CENTERS) + b, g, r = [int(c) for c in centers[0]] + return f'#{r:02X}{g:02X}{b:02X}' + except 
Exception:
+            return '#808080'

From 0a12b68507b5725db30bfb7d8404b1c2737a3901 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 2 Mar 2026 18:27:26 +0000
Subject: [PATCH 3/4] Make jealousy burst duration configurable via config.ini

Add jealousy_burst_seconds to [Personas] config section (default 8.0).
PersonaEngine reads this value instead of hardcoding 8.0 seconds.
Updated TUNING.md to document the new config key.

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 docs/TUNING.md                      | 2 +-
 python_controller/config.ini        | 2 ++
 python_controller/persona_engine.py | 3 ++-
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/docs/TUNING.md b/docs/TUNING.md
index 324a7d8..ae15d93 100644
--- a/docs/TUNING.md
+++ b/docs/TUNING.md
@@ -63,7 +63,7 @@ Default: `0.3`
 `jealousy_trigger_seconds` (default 5.0): seconds of continuous `Empathy` on
 the primary flower before siblings become `Jealous`.
-Override duration is hardcoded at 8 seconds (`persona_engine.py` line `override_until = time.time() + 8.0`).
+`jealousy_burst_seconds` (default 8.0): how long the `Jealous` override lasts. Both are configurable in `config.ini` under `[Personas]`.

---

diff --git a/python_controller/config.ini b/python_controller/config.ini
index 4e25625..c32c0d1 100644
--- a/python_controller/config.ini
+++ b/python_controller/config.ini
@@ -45,6 +45,8 @@ min_face_confidence = 0.5
 labels = Empathy,Defensive,Predatory,Boredom,Surprise,Jealous
 # Time (seconds) a flower must stay in Empathy to trigger jealousy in others
 jealousy_trigger_seconds = 5.0
+# Duration (seconds) a flower stays in Jealous after trigger
+jealousy_burst_seconds = 8.0
 # EMA smoothing factor for motion output (1=instant response, 0=frozen)
 ema_alpha = 0.3

diff --git a/python_controller/persona_engine.py b/python_controller/persona_engine.py
index 8a721aa..443affe 100644
--- a/python_controller/persona_engine.py
+++ b/python_controller/persona_engine.py
@@ -55,6 +55,7 @@ def __init__(self, config: configparser.ConfigParser, device_names: list):
         self.config = config
         self.ema_alpha = config.getfloat('Personas', 'ema_alpha', fallback=0.3)
         self.jealousy_trigger = config.getfloat('Personas', 'jealousy_trigger_seconds', fallback=5.0)
+        self.jealousy_burst = config.getfloat('Personas', 'jealousy_burst_seconds', fallback=8.0)

         self.states: Dict[str, DeviceState] = {
             name: DeviceState(name=name) for name in device_names
@@ -154,7 +155,7 @@ def update(self, emotion_data: EmotionData, primary_device: str = None):
                     if name != primary_device and not self.states[name].is_overridden():
                         logger.info(f"[Jealousy] Device '{name}' becomes Jealous!")
                         self._apply_persona(self.states[name], 'Jealous')
-                        self.states[name].override_until = time.time() + 8.0  # 8s jealous burst
+                        self.states[name].override_until = time.time() + self.jealousy_burst
                 else:
                     self._empathy_start[primary_device] = None

From 9f4f299b9729f18f1b63d7b9763817ceab09254d Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Mon, 2 Mar 2026 18:30:57 +0000
Subject: [PATCH 4/4] Polish pass: wiring resistor guidance, config password
 warning, named constants, and pose-failure diagnostics

Co-authored-by: Sa1koro <13943286+Sa1koro@users.noreply.github.com>
---
 docs/WIRING.md                      | 12 ++++++------
 python_controller/config.ini        |  1 +
 python_controller/control_panel.py  |  2 +-
 python_controller/persona_engine.py |  9 +++++++--
 python_controller/vision_tracker.py | 16 +++++++++++++---
 5 files changed, 
28 insertions(+), 12 deletions(-) diff --git a/docs/WIRING.md b/docs/WIRING.md index 11c03a0..982f9d1 100644 --- a/docs/WIRING.md +++ b/docs/WIRING.md @@ -16,12 +16,12 @@ | Motor A (−) | 26 | M1_B | | Motor B (+) | 18 | M2_A | | Motor B (−) | 19 | M2_B | -| LED1 Red | 2 | 220Ω resistor in series | -| LED1 Green | 4 | 220Ω resistor in series | -| LED1 Blue | 5 | 220Ω resistor in series | -| LED2 Red | 12 | 220Ω resistor in series | -| LED2 Green | 13 | 220Ω resistor in series | -| LED2 Blue | 14 | 220Ω resistor in series | +| LED1 Red | 2 | 220Ω–330Ω resistor in series (adjust for LED Vf) | +| LED1 Green | 4 | 220Ω–330Ω resistor in series | +| LED1 Blue | 5 | 220Ω–330Ω resistor in series | +| LED2 Red | 12 | 220Ω–330Ω resistor in series | +| LED2 Green | 13 | 220Ω–330Ω resistor in series | +| LED2 Blue | 14 | 220Ω–330Ω resistor in series | ### WiFi / OSC - **Mode**: Access Point (AP) diff --git a/python_controller/config.ini b/python_controller/config.ini index c32c0d1..e70bd10 100644 --- a/python_controller/config.ini +++ b/python_controller/config.ini @@ -5,6 +5,7 @@ mode = ap # When mode=ap: credentials of the ESP32 hotspot to connect to ap_ssid = ESP32_Sylvie +# IMPORTANT: Change this password to something stronger in production! ap_password = 12345678 # When mode=sta: credentials for existing WiFi sta_ssid = YOUR_WIFI_SSID diff --git a/python_controller/control_panel.py b/python_controller/control_panel.py index 8b63d4f..2c8d683 100644 --- a/python_controller/control_panel.py +++ b/python_controller/control_panel.py @@ -104,7 +104,7 @@ def _build_ui(self): info_frame.pack(fill='x', padx=6, pady=4) self._info_labels = {} info_fields = [('dominant', 'Emotion'), ('age', 'Age'), ('gender', 'Gender'), - ('distance', 'Distance'), ('persons', 'Persons'), ('pose', 'Pose'),('color', 'Color')] + ('distance', 'Distance'), ('persons', 'Persons'), ('pose', 'Pose'), ('color', 'Color')] for key, label_text in info_fields: row = ttk.Frame(info_frame) row.pack(fill='x', pady=1) diff --git a/python_controller/persona_engine.py b/python_controller/persona_engine.py index 443affe..36b460b 100644 --- a/python_controller/persona_engine.py +++ b/python_controller/persona_engine.py @@ -168,8 +168,10 @@ def _apply_persona(self, state: DeviceState, persona: str): alpha = self.ema_alpha # Add jitter (Layer 3 physical render) + # JITTER_SCALE: maps 0-1 jitter param to ±0.2 openness variation + JITTER_SCALE = 0.2 jitter_amount = params['jitter'] - jitter = (random.random() - 0.5) * 2.0 * jitter_amount * 0.2 + jitter = (random.random() - 0.5) * 2.0 * jitter_amount * JITTER_SCALE target_openness = params['openness'] + jitter target_openness = max(0.0, min(1.0, target_openness)) @@ -195,7 +197,10 @@ def apply_to_network(self, flower_network): if device is None: continue # Set motor direction from openness - direction = 1 if state.openness > 0.6 else (-1 if state.openness < 0.3 else 0) + # Hysteresis thresholds: open>0.6, close<0.3, hold otherwise + OPEN_THRESHOLD = 0.6 + CLOSE_THRESHOLD = 0.3 + direction = 1 if state.openness > OPEN_THRESHOLD else (-1 if state.openness < CLOSE_THRESHOLD else 0) device.set_motor(1, direction) # Set LEDs device.set_led_hsv(1, state.led_hue, state.led_sat, state.led_bri) diff --git a/python_controller/vision_tracker.py b/python_controller/vision_tracker.py index 001e943..21056e1 100644 --- a/python_controller/vision_tracker.py +++ b/python_controller/vision_tracker.py @@ -159,11 +159,20 @@ def process_frame(self, frame: np.ndarray) -> Tuple[np.ndarray, EmotionData]: return annotated, 
data
+    # Distance estimation calibration constants
+    DISTANCE_CALIBRATION_FACTOR = 0.15  # empirical: ~15 cm reference face width
+    DISTANCE_MIN_METERS = 0.3
+    DISTANCE_MAX_METERS = 5.0
+
     @staticmethod
     def estimate_distance(face_area_ratio: float) -> float:
         if face_area_ratio <= 0:
-            return 3.0
-        return max(0.3, min(5.0, 0.15 / math.sqrt(face_area_ratio)))
+            return VisionTracker.DISTANCE_MAX_METERS
+        return max(
+            VisionTracker.DISTANCE_MIN_METERS,
+            min(VisionTracker.DISTANCE_MAX_METERS,
+                VisionTracker.DISTANCE_CALIBRATION_FACTOR / math.sqrt(face_area_ratio))
+        )
 
     @staticmethod
     def calculate_pose_openness(pose_landmarks) -> float:
@@ -180,7 +189,8 @@ def calculate_pose_openness(pose_landmarks) -> float:
         wrist_spread = abs(rw.x - lw.x)
         openness = min(1.0, wrist_spread / (shoulder_width * 3.0))
         return openness
-    except Exception:
+    except Exception as e:
+        print(f"[Vision] Pose openness calculation failed: {e}")
         return 0.5
 
     @staticmethod
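
The patch series leaves the OSC layer usable without the GUI. The sketch below is illustrative only — it is not part of the patches, and the filename `smoke_test.py` is hypothetical — assuming a `config.ini` whose `[Devices]` section lists at least one device with a matching `Device_<name>` section, as introduced in patch 2:

```
# smoke_test.py — hypothetical manual check of the OSC layer (not in the patches)
import time
import configparser

from osc_client import FlowerNetwork

config = configparser.ConfigParser()
config.read('config.ini')

network = FlowerNetwork(config)              # registers every Device_<name> section
for device in network.all_devices():
    device.set_auto(False)                   # /auto 0 — take manual control
    device.set_motor(1, 1)                   # /motor1 1 — drive motor 1 forward
    device.set_led_hsv(1, 120, 0.8, 0.8)     # /led1 [r, g, b] — green-ish LED

time.sleep(2.0)                              # let the petals move briefly
network.broadcast_stop()                     # /auto 0 + /preset 3 on every device
```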