diff --git a/software/dashboard/app.py b/software/dashboard/app.py
index 0d10940..6466d28 100644
--- a/software/dashboard/app.py
+++ b/software/dashboard/app.py
@@ -39,20 +39,45 @@ def zmq_worker(ip='127.0.0.1', port=5556):
def generate_frames(camera_name):
while True:
- frame_data = None
+ frame_bytes = None
+ detections = []
+
with lock:
if camera_name in latest_observation:
b64_str = latest_observation[camera_name]
if b64_str:
try:
- # It is already a base64 encoded JPG from the host
- frame_data = base64.b64decode(b64_str)
+ frame_bytes = base64.b64decode(b64_str)
except Exception:
pass
+
+ # Get detections
+ raw_dets = latest_observation.get("detections", {})
+ if isinstance(raw_dets, dict):
+ detections = raw_dets.get(camera_name, [])
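+ # Each detection is expected to be a dict like
+ # {"label": str, "confidence": float, "box": [x1, y1, x2, y2]} in pixel
+ # coordinates, matching what the simulator publishes.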
- if frame_data:
+ if frame_bytes:
+ # Decode to image to draw on it
+ nparr = np.frombuffer(frame_bytes, np.uint8)
+ img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+
+ if img is not None:
+ # Draw detections
+ for det in detections:
+ box = det.get("box", [])
+ label = det.get("label", "obj")
+ if len(box) == 4:
+ x1, y1, x2, y2 = map(int, box)
+ cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
+ cv2.putText(img, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+ # Re-encode
+ ret, buffer = cv2.imencode('.jpg', img)
+ if ret:
+ frame_bytes = buffer.tobytes()
+
yield (b'--frame\r\n'
- b'Content-Type: image/jpeg\r\n\r\n' + frame_data + b'\r\n')
+ b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
else:
# Return a blank or placeholder image if no data
pass
@@ -71,8 +96,13 @@ def video_feed(camera_name):
@app.route('/api/status')
def get_status():
with lock:
- # Filter out large image data for status endpoint
- status = {k: v for k, v in latest_observation.items() if not (isinstance(v, str) and len(v) > 1000)}
+ # Filter out large image data for status endpoint, but keep the key
+ status = {}
+ for k, v in latest_observation.items():
+ if isinstance(v, str) and len(v) > 1000:
+ status[k] = "__IMAGE_DATA__"
+ else:
+ status[k] = v
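+ # The result looks something like {"x_pos": 0.12, "front_cam": "__IMAGE_DATA__", ...}
+ # (camera key names here are illustrative and depend on the host config).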
status['connected'] = connected
return jsonify(status)
diff --git a/software/dashboard/templates/index.html b/software/dashboard/templates/index.html
index ce3baa1..28130c8 100644
--- a/software/dashboard/templates/index.html
+++ b/software/dashboard/templates/index.html
@@ -46,23 +46,32 @@ Robot State
// Update Status Table
for (const [key, value] of Object.entries(data)) {
- // Skip camera data (if any leaks into status) and long strings
- if (typeof value === 'string' && value.length > 100) continue;
- // Check if this key might be a camera
- // If it's a camera key and not in activeCameras, add it
- if (knownCameras.includes(key) || key.includes('cam') || key.includes('wrist')) {
+ // Check if this key is a camera placeholder
+ if (value === "__IMAGE_DATA__") {
if (!activeCameras.has(key) && !document.getElementById('cam-' + key)) {
addCamera(key);
activeCameras.add(key);
}
continue; // Don't show camera keys in text table
}
+
+ // Matching on the __IMAGE_DATA__ marker (rather than key-name heuristics)
+ // avoids treating keys like wrist_roll.pos as cameras. Known camera keys
+ // whose value is not the marker (e.g. an empty frame) simply fall through
+ // and are shown as text below.
let displayValue = value;
- if (typeof value === 'number') displayValue = value.toFixed(2);
+ if (typeof value === 'number') {
+ displayValue = value.toFixed(2);
+ } else if (typeof value === 'object') {
+ displayValue = JSON.stringify(value, null, 2);
+ }
- rows += `<tr><td>${key}</td><td>${displayValue}</td></tr>`;
+ rows += `<tr><td>${key}</td><td>${displayValue}</td></tr>`;
}
table.innerHTML = rows;
})
diff --git a/software/examples/alohamini/chore_sequencer.py b/software/examples/alohamini/chore_sequencer.py
new file mode 100644
index 0000000..892e712
--- /dev/null
+++ b/software/examples/alohamini/chore_sequencer.py
@@ -0,0 +1,115 @@
+import time
+import threading
+from nav_service import NavigationService
+
+class ChoreExecutor:
+ def __init__(self):
+ self.nav = NavigationService()
+ self.nav.start()
+
+ self.running = False
+ self.current_chore = None
+ self.chore_thread = None
+
+ # Perception: NavigationService already subscribes to the observation stream
+ # and keeps the latest detections in self.nav.detections, so we read them
+ # from there rather than opening a second subscription here.
+
+ def start_chore(self, chore_name):
+ if self.running:
+ print("[CHORE] Already running a chore.")
+ return
+
+ self.running = True
+ self.current_chore = chore_name
+ self.chore_thread = threading.Thread(target=self._run_chore, args=(chore_name,))
+ self.chore_thread.start()
+
+ def stop_chore(self):
+ self.running = False
+ if self.chore_thread:
+ self.chore_thread.join()
+ self.nav.stop()
+
+ def _run_chore(self, chore_name):
+ print(f"[CHORE] Starting: {chore_name}")
+
+ if chore_name == "clean_up":
+ self._do_clean_up()
+ else:
+ print(f"[CHORE] Unknown chore: {chore_name}")
+
+ self.running = False
+ print(f"[CHORE] Finished: {chore_name}")
+
+ def _do_clean_up(self):
+ # 1. Go to Kitchen
+ print("[CHORE] Step 1: Go to Kitchen")
+ self.nav.go_to_room("Kitchen")
+ self._wait_until_idle()
+
+ # 2. Look for Trash
+ print("[CHORE] Step 2: Scan for Trash")
+ trash = self._wait_for_detection("trash")
+
+ if trash:
+ print(f"[CHORE] Found trash at {trash.get('box')}!")
+ # 3. Pick it up (Mock action)
+ print("[CHORE] Step 3: Picking up trash...")
+ time.sleep(2)
+
+ # 4. Go to Bin (Let's say Bin is in Hallway)
+ print("[CHORE] Step 4: Go to Hallway (Trash Bin)")
+ self.nav.go_to_room("Hallway")
+ self._wait_until_idle()
+
+ # 5. Drop it
+ print("[CHORE] Step 5: Dropping trash")
+ time.sleep(1)
+ else:
+ print("[CHORE] No trash found.")
+
+ def _wait_for_detection(self, label, timeout=5.0):
+ # Poll self.nav.detections
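+ # nav.detections is keyed by camera name -> list of
+ # {"label", "confidence", "box"} dicts, as published by the simulator.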
+ start = time.time()
+ while time.time() - start < timeout:
+ dets = self.nav.detections
+ for cam, items in dets.items():
+ for item in items:
+ if item.get("label") == label:
+ return item
+ time.sleep(0.1)
+ return None
+
+ def _wait_until_idle(self):
+ # Give the nav service a moment to switch to "moving" after a new target
+ # is set, then poll until it reports idle again.
+ time.sleep(0.5)
+ while not self.nav.is_idle():
+ time.sleep(0.1)
+
+if __name__ == "__main__":
+ executor = ChoreExecutor()
+
+ # Wait for connection
+ time.sleep(2)
+
+ print("Available Rooms:", executor.nav.rooms.keys())
+
+ executor.start_chore("clean_up")
+
+ # Keep main thread alive
+ while executor.running:
+ time.sleep(1)
+
+ executor.stop_chore()
diff --git a/software/examples/alohamini/nav_service.py b/software/examples/alohamini/nav_service.py
new file mode 100644
index 0000000..199cb8b
--- /dev/null
+++ b/software/examples/alohamini/nav_service.py
@@ -0,0 +1,106 @@
+import time
+import zmq
+import json
+import threading
+from navigation import NavigationController
+
+class NavigationService:
+ def __init__(self, zmq_obs_ip="127.0.0.1", zmq_obs_port=5556, zmq_cmd_port=5555):
+ self.controller = NavigationController()
+
+ self.context = zmq.Context()
+ self.sub_socket = self.context.socket(zmq.SUB)
+ # CONFLATE keeps only the latest observation; set it before connect so it
+ # takes effect for this connection.
+ self.sub_socket.setsockopt(zmq.CONFLATE, 1)
+ self.sub_socket.setsockopt(zmq.SUBSCRIBE, b"")
+ self.sub_socket.connect(f"tcp://{zmq_obs_ip}:{zmq_obs_port}")
+
+ self.pub_socket = self.context.socket(zmq.PUSH)
+ self.pub_socket.connect(f"tcp://{zmq_obs_ip}:{zmq_cmd_port}")
+
+ self.running = False
+ self.current_pose = None
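+ # Semantic map of room centers, populated from the observation stream,
+ # e.g. {"Kitchen": {"x": 2.0, "y": 2.0}, ...}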
+ self.rooms = {}
+ self.detections = {}
+ self.nav_status = "idle" # idle, moving
+
+ def is_idle(self):
+ return self.nav_status == "idle"
+
+ def start(self):
+ self.running = True
+ self.thread = threading.Thread(target=self._loop, daemon=True)
+ self.thread.start()
+ print("NavigationService started.")
+
+ def stop(self):
+ self.running = False
+ if hasattr(self, 'thread'):
+ self.thread.join()
+
+ def go_to_room(self, room_name):
+ if room_name not in self.rooms:
+ print(f"[NAV] Unknown room: {room_name}")
+ return False
+
+ target = self.rooms[room_name]
+ self.controller.set_target(target["x"], target["y"])
+ self.nav_status = "moving"
+ return True
+
+ def _loop(self):
+ while self.running:
+ try:
+ # 1. Get Observation
+ msg = self.sub_socket.recv_string()
+ obs = json.loads(msg)
+
+ # Update World Knowledge
+ if "rooms" in obs:
+ self.rooms = obs["rooms"]
+
+ if "detections" in obs:
+ self.detections = obs["detections"]
+
+ self.current_pose = {
+ "x": obs.get("x_pos", 0.0),
+ "y": obs.get("y_pos", 0.0),
+ "theta": obs.get("theta_pos", 0.0)
+ }
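+ # theta_pos is assumed to be in degrees, matching the simulator's convention
+ # and NavigationController's degree-based headings.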
+
+ # 2. Compute Control
+ action = self.controller.get_action(self.current_pose)
+
+ if action:
+ # Check if action is zero (reached)
+ if action["x.vel"] == 0 and action["theta.vel"] == 0:
+ self.nav_status = "idle"
+ else:
+ self.nav_status = "moving"
+
+ self.pub_socket.send_string(json.dumps(action))
+ else:
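+ # An empty action means the controller has no active target (it clears
+ # the target on arrival), so stay idle and send nothing.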
+ self.nav_status = "idle"
+
+
+ except Exception as e:
+ print(f"[NAV] Error: {e}")
+ time.sleep(0.1)
+
+if __name__ == "__main__":
+ # Test Script
+ nav = NavigationService()
+ nav.start()
+
+ print("Waiting for connection...")
+ time.sleep(2)
+
+ rooms = ["Kitchen", "Bedroom", "Living Room"]
+
+ for room in rooms:
+ print(f"Going to {room}...")
+ nav.go_to_room(room)
+
+ # Wait for arrival (mock)
+ time.sleep(5)
+
+ nav.stop()
diff --git a/software/examples/alohamini/navigation.py b/software/examples/alohamini/navigation.py
new file mode 100644
index 0000000..bb1b861
--- /dev/null
+++ b/software/examples/alohamini/navigation.py
@@ -0,0 +1,129 @@
+import math
+import time
+import numpy as np
+
+class PIDController:
+ def __init__(self, kp, ki, kd, output_limits=(None, None)):
+ self.kp = kp
+ self.ki = ki
+ self.kd = kd
+ self.min_out, self.max_out = output_limits
+
+ self.prev_error = 0.0
+ self.integral = 0.0
+ self.last_time = None
+
+ def update(self, error, current_time=None):
+ if current_time is None:
+ current_time = time.perf_counter()
+
+ if self.last_time is None:
+ self.last_time = current_time
+ dt = 0.0
+ else:
+ dt = current_time - self.last_time
+ self.last_time = current_time
+
+ # P term
+ p_term = self.kp * error
+
+ # I term
+ if dt > 0:
+ self.integral += error * dt
+ # Note: no anti-windup clamping is applied here; the final output is clamped below.
+ i_term = self.ki * self.integral
+
+ # D term
+ d_term = 0.0
+ if dt > 0:
+ d_term = self.kd * (error - self.prev_error) / dt
+
+ self.prev_error = error
+
+ output = p_term + i_term + d_term
+
+ if self.min_out is not None:
+ output = max(self.min_out, output)
+ if self.max_out is not None:
+ output = min(self.max_out, output)
+
+ return output
+
+ def reset(self):
+ self.prev_error = 0.0
+ self.integral = 0.0
+ self.last_time = None
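+
+ # Illustrative use (mirrors NavigationController below): a PID clamped to
+ # +/-0.5 m/s driving a distance error toward zero:
+ # pid = PIDController(kp=1.0, ki=0.0, kd=0.1, output_limits=(-0.5, 0.5))
+ # lin_vel = pid.update(dist)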
+
+class NavigationController:
+ def __init__(self):
+ # PID for Linear Velocity (Drive to distance)
+ # Target distance is 0.
+ self.linear_pid = PIDController(kp=1.0, ki=0.0, kd=0.1, output_limits=(-0.5, 0.5))
+
+ # PID for Angular Velocity (Turn to heading)
+ # Target heading difference is 0.
+ self.angular_pid = PIDController(kp=2.0, ki=0.0, kd=0.1, output_limits=(-90.0, 90.0))
+
+ self.target_pose = None # {x, y}
+ self.tolerance_dist = 0.1 # meters
+ self.tolerance_angle = 5.0 # degrees
+
+ def set_target(self, x, y):
+ self.target_pose = {"x": x, "y": y}
+ self.linear_pid.reset()
+ self.angular_pid.reset()
+ print(f"[NAV] Target set: ({x}, {y})")
+
+ def get_action(self, current_pose):
+ """
+ current_pose: {x, y, theta (degrees)}
+ Returns: {x.vel, y.vel, theta.vel} (Action dict)
+ """
+ if self.target_pose is None:
+ return {}
+
+ tx, ty = self.target_pose["x"], self.target_pose["y"]
+ cx, cy, cth = current_pose["x"], current_pose["y"], current_pose["theta"]
+
+ # Calculate distance to target
+ dx = tx - cx
+ dy = ty - cy
+ dist = math.sqrt(dx*dx + dy*dy)
+
+ if dist < self.tolerance_dist:
+ print("[NAV] Target reached.")
+ self.target_pose = None
+ return {"x.vel": 0.0, "y.vel": 0.0, "theta.vel": 0.0}
+
+ # Calculate heading to target
+ target_heading = math.degrees(math.atan2(dy, dx))
+
+ # Calculate heading error (shortest path)
+ heading_error = target_heading - cth
+ # Normalize to [-180, 180]
+ while heading_error > 180: heading_error -= 360
+ while heading_error < -180: heading_error += 360
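+ # e.g. target 170 deg with current heading -170 deg gives a raw error of 340,
+ # which normalizes to -20 (the shorter, clockwise turn).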
+
+ # Control Logic:
+ # 1. Turn to face target
+ # 2. Drive forward
+
+ # Use PID
+ ang_vel = self.angular_pid.update(heading_error)
+
+ # If facing roughly target, drive
+ lin_vel = 0.0
+ if abs(heading_error) < 45: # Drive only if roughly aligned with the target
+ # The linear PID drives the remaining distance toward zero (error = dist).
+ lin_vel = self.linear_pid.update(dist)
+
+ # Alternative: constant speed scaled by alignment, e.g.
+ # lin_vel = 0.2 * (1.0 - abs(heading_error)/45.0)
+
+ return {
+ "x.vel": lin_vel,
+ "y.vel": 0.0, # Non-holonomic drive (like a tank/car), no strafing for now
+ "theta.vel": ang_vel
+ }
diff --git a/software/examples/alohamini/standalone_sim.py b/software/examples/alohamini/standalone_sim.py
index 194f97f..bb22459 100644
--- a/software/examples/alohamini/standalone_sim.py
+++ b/software/examples/alohamini/standalone_sim.py
@@ -5,6 +5,7 @@
import numpy as np
import zmq
import cv2
+import math
from dataclasses import dataclass, field
# --- Mocks for LeRobot dependencies ---
@@ -42,6 +43,18 @@ class LiftAxisConfig:
class DeviceNotConnectedError(Exception):
pass
+# --- Simulation Classes ---
+
+@dataclass
+class SimObject:
+ label: str
+ x: float # meters
+ y: float # meters
+ width: float = 0.2 # meters
+ height: float = 0.2 # meters
+ color: tuple = (0, 0, 255) # BGR
+ shape: str = "rectangle" # or circle
+
# --- LeKiwiSim Implementation (Adapted) ---
class LeKiwiSim:
@@ -53,9 +66,25 @@ def __init__(self, config: LeKiwiClientConfig):
self.state = {
"x": 0.0,
"y": 0.0,
- "theta": 0.0,
+ "theta": 0.0, # degrees
}
+ # World Objects
+ self.objects = [
+ SimObject("soda_can", 1.0, 0.5, 0.1, 0.1, (255, 0, 0), "circle"), # Blue Soda
+ SimObject("trash", 1.5, -0.5, 0.2, 0.2, (0, 0, 255), "rectangle"), # Red Trash
+ SimObject("charger", 2.0, 0.0, 0.3, 0.3, (0, 255, 0), "rectangle"), # Green Charger
+ SimObject("table", 0.0, 1.5, 0.8, 0.4, (42, 42, 165), "rectangle"), # Brownish Table
+ ]
+
+ # Semantic Map (Room coordinates)
+ self.rooms = {
+ "Living Room": {"x": 0.0, "y": 0.0},
+ "Kitchen": {"x": 2.0, "y": 2.0},
+ "Bedroom": {"x": -2.0, "y": 1.0},
+ "Hallway": {"x": 0.0, "y": 3.0}
+ }
+
# Joint positions
self.joints = {}
joint_names = [
@@ -91,6 +120,139 @@ def disconnect(self):
print("LeKiwiSim disconnected.")
self._is_connected = False
+ def _world_to_robot(self, wx, wy):
+ """Convert world coordinates to robot-centric coordinates (robot is at 0,0 facing +X)"""
+ dx = wx - self.state["x"]
+ dy = wy - self.state["y"]
+
+ # Rotate by -theta
+ rad = np.radians(self.state["theta"])
+ c, s = np.cos(rad), np.sin(rad)
+
+ # Standard rotation matrix for rotating points CCW is:
+ # x' = x cos - y sin
+ # y' = x sin + y cos
+ # Here we rotate the *points* by -theta (or coordinate system by +theta)
+ # So we use -theta.
+ # x' = dx * cos(-t) - dy * sin(-t) = dx * c + dy * s
+ # y' = dx * sin(-t) + dy * cos(-t) = -dx * s + dy * c
+
+ rx = dx * c + dy * s
+ ry = -dx * s + dy * c
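+ # Sanity check: with theta = 90 (robot facing "north"), an object one meter
+ # north of the robot gives (rx, ry) = (1, 0), i.e. one meter straight ahead.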
+
+ return rx, ry
+
+ def render_camera(self, cam_name, width, height):
+ # Create a background (Gray floor)
+ img = np.full((height, width, 3), 200, dtype=np.uint8)
+
+ # Grid lines (every 1 meter).
+ # World coordinates map to pixels with the robot at the image center and a
+ # scale of 100 pixels per meter.
+ scale = 100.0
+ cx = width // 2
+ cy = height // 2
+
+ # Draw grid
+ # We iterate a range around the robot's world position
+ rx, ry = self.state["x"], self.state["y"]
+ min_gx = int(rx - width/scale/2) - 1
+ max_gx = int(rx + width/scale/2) + 1
+ min_gy = int(ry - height/scale/2) - 1
+ max_gy = int(ry + height/scale/2) + 1
+
+ for gx in range(min_gx, max_gx + 1):
+ # Transform world grid line x=gx to robot space, then to pixels
+ p1 = self._world_to_robot(gx, ry - 10) # Line stretches far
+ p2 = self._world_to_robot(gx, ry + 10)
+
+ # Projection: world +X is East and theta is measured CCW from East, so in the
+ # robot frame rx is "forward" (along the heading) and ry is "left".
+ # Screen up is robot-forward and screen right is robot-right (-ry), hence:
+ # screen_x = cx - ry * scale
+ # screen_y = cy - rx * scale (image y grows downward)
+
+ pt1_screen = (int(cx - p1[1] * scale), int(cy - p1[0] * scale))
+ pt2_screen = (int(cx - p2[1] * scale), int(cy - p2[0] * scale))
+ cv2.line(img, pt1_screen, pt2_screen, (180, 180, 180), 1)
+
+ for gy in range(min_gy, max_gy + 1):
+ p1 = self._world_to_robot(rx - 10, gy)
+ p2 = self._world_to_robot(rx + 10, gy)
+ pt1_screen = (int(cx - p1[1] * scale), int(cy - p1[0] * scale))
+ pt2_screen = (int(cx - p2[1] * scale), int(cy - p2[0] * scale))
+ cv2.line(img, pt1_screen, pt2_screen, (180, 180, 180), 1)
+
+ # Draw Robot (Triangle)
+ # Robot is at 0,0 in robot frame.
+ robot_pts = np.array([
+ # Screen up is robot-forward, so draw a triangle pointing up.
+ [cx, cy - 20], # Tip (forward)
+ [cx - 15, cy + 15], # Back left
+ [cx + 15, cy + 15] # Back right
+ ], np.int32)
+ cv2.fillPoly(img, [robot_pts], (50, 50, 50))
+
+ detections = []
+
+ # Draw Objects
+ for obj in self.objects:
+ # Transform to robot frame
+ obj_rx, obj_ry = self._world_to_robot(obj.x, obj.y)
+
+ # Project to screen
+ sx = int(cx - obj_ry * scale)
+ sy = int(cy - obj_rx * scale)
+
+ # Check if roughly in view
+ if 0 <= sx < width and 0 <= sy < height:
+ # Size in pixels
+ w_pix = int(obj.width * scale)
+ h_pix = int(obj.height * scale)
+
+ # Bounding box for object
+ top_left = (sx - w_pix // 2, sy - h_pix // 2)
+ bottom_right = (sx + w_pix // 2, sy + h_pix // 2)
+
+ # Draw
+ if obj.shape == "circle":
+ cv2.circle(img, (sx, sy), w_pix // 2, obj.color, -1)
+ else:
+ cv2.rectangle(img, top_left, bottom_right, obj.color, -1)
+
+ # Add label
+ cv2.putText(img, obj.label, (sx - w_pix//2, sy - h_pix//2 - 5),
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1)
+
+ # Create Detection (Perfect Detector)
+ # Bounding box in pixel coordinates [x, y, w, h] or [x1, y1, x2, y2]
+ # Let's use [x1, y1, x2, y2]
+ # Clip to screen
+ x1 = max(0, top_left[0])
+ y1 = max(0, top_left[1])
+ x2 = min(width, bottom_right[0])
+ y2 = min(height, bottom_right[1])
+
+ if x2 > x1 and y2 > y1:
+ detections.append({
+ "label": obj.label,
+ "confidence": 1.0,
+ "box": [x1, y1, x2, y2] # Pixel coords
+ })
+
+ return img, detections
+
def get_observation(self) -> dict:
if not self.is_connected:
raise DeviceNotConnectedError("Not connected")
@@ -112,14 +274,14 @@ def get_observation(self) -> dict:
obs["lift_axis.height_mm"] = self.joints["lift_axis"]
- # Simulated Cameras (Noise or patterns)
+ # Simulated Perception
+ obs["detections"] = {}
+ obs["rooms"] = self.rooms # Broadcast room locations too
+
for cam, cfg in self.config.cameras.items():
- h, w = cfg.height, cfg.width
- # Create a noisy image
- img = np.random.randint(0, 255, (h, w, 3), dtype=np.uint8)
- # Draw some text
- cv2.putText(img, f"{cam}", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
+ img, dets = self.render_camera(cam, cfg.width, cfg.height)
obs[cam] = img
+ obs["detections"][cam] = dets
return obs
@@ -205,6 +367,13 @@ def main():
# 3. Encode Images
encoded_obs = obs.copy()
+
+ # Camera frames are JPEG/base64-encoded below; detections and rooms are
+ # already JSON-serializable and need no special handling.
+
for cam in config.cameras:
if cam in obs:
ret, buffer = cv2.imencode(".jpg", obs[cam], [int(cv2.IMWRITE_JPEG_QUALITY), 90])
@@ -228,6 +397,6 @@ def main():
socket_pub.close()
socket_sub.close()
context.term()
-
+
if __name__ == "__main__":
main()