diff --git a/adapters.py b/adapters.py index e13dd86f..d1221674 100644 --- a/adapters.py +++ b/adapters.py @@ -12,9 +12,14 @@ import serial import pyvesc import re -#import RPi.GPIO as GPIO from serial import SerialException +import numpy as np +from abc import ABC, abstractmethod +import os +import gi +gi.require_version('Aravis', '0.8') +from gi.repository import Aravis class SmoothieAdapter: RESPONSE_OK = "ok\r\n" @@ -907,90 +912,66 @@ def __calibrate_axis(self, self.__smc.write("G92 {0}{1}".format(axis_label, sm_val)) return self.__smc.read_some() +class CameraAdapterInterface(ABC): + """ + Abstract camera adapter interface. + Defines the minimal API every camera backend must implement. + """ -class PiCameraAdapter: - - def __init__(self): - from picamera.array import PiRGBArray - from picamera import PiCamera - self._camera = PiCamera() - self._camera.resolution = (config.CAMERA_W, config.CAMERA_H) - self._camera.framerate = config.CAMERA_FRAMERATE - self._raw_capture = PiRGBArray(self._camera, size=(config.CAMERA_W, config.CAMERA_H)) - self._gen = self._camera.capture_continuous(self._raw_capture, format="rgb") - time.sleep(2) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.release() - - def release(self): - self._camera.close() + @abstractmethod + def __init__(self, + crop_w_from, + crop_w_to, + crop_h_from, + crop_h_to, + cv_rotate_code, + ispdigitalgainrange_from, + ispdigitalgainrange_to, + gainrange_from, + gainrange_to, + exposuretimerange_from, + exposuretimerange_to, + aelock, + capture_width, + capture_height, + display_width, + display_height, + framerate, + nvidia_flip_method): + """Initialize the camera with configuration parameters.""" + pass + @abstractmethod def get_image(self): - image = cv.cvtColor(next(self._gen).array, cv.COLOR_RGB2BGR) - self._raw_capture.truncate(0) - return image - - -''' -# test -class CameraAdapterIMX219_170_BS1: - """Buffer size is set to 1 frame, getting 2 frames per 
call, return last""" + """Return the latest frame (as np.ndarray in BGR).""" + pass - def __init__(self, - capture_width=config.CAMERA_W, - capture_height=config.CAMERA_H, - display_width=config.CAMERA_W, - display_height=config.CAMERA_H, - framerate=config.CAMERA_FRAMERATE, - flip_method=config.CAMERA_FLIP_METHOD): - - gst_config = ( - "nvarguscamerasrc ! " - "video/x-raw(memory:NVMM), " - "width=(int)%d, height=(int)%d, " - "format=(string)NV12, framerate=(fraction)%d/1 ! " - "nvvidconv flip-method=%d ! " - "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! " - "videoconvert ! " - "video/x-raw, format=(string)BGR ! appsink" - % ( - capture_width, - capture_height, - framerate, - flip_method, - display_width, - display_height - ) - ) - self._cap = cv.VideoCapture(gst_config, cv.CAP_GSTREAMER) - self._cap = cv.VideoCapture(cv.CAP_PROP_BUFFERSIZE, 1) + @abstractmethod + def release(self): + """Stop acquisition and release all resources.""" + pass def __enter__(self): + """Context manager entry point.""" return self def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager exit point.""" self.release() - def release(self): - self._cap.release() - - def get_image(self): - if self._cap.isOpened(): - for i in range(self._cap.get(cv.CAP_PROP_BUFFERSIZE) + 1): - ret, image = self._cap.read() - # rotate for 90 degrees and crop black zones - return cv.rotate(image, 2)[config.CROP_H_FROM:config.CROP_H_TO, config.CROP_W_FROM:config.CROP_W_TO] - else: - raise RuntimeError("Unable to open camera") -''' - + # ─────────── Common utility ─────────── + def whoami(self): + """ + Return the readable name of the active adapter class. + Example: "CameraAdapterAravis_ZeroLatency" + """ + return self.__class__.__name__ -# old with no shutter, gain and rest camera control -class CameraAdapterIMX219_170_Auto: +class CameraAdapterManager(CameraAdapterInterface): + """ + Universal camera adapter manager. 
+ Automatically selects and delegates to the appropriate backend. + """ def __init__(self, crop_w_from, @@ -1011,69 +992,98 @@ def __init__(self, display_height, framerate, nvidia_flip_method): + """ + backend: "auto" | "aravis" | "imx219" + - "auto": try Aravis first, fallback to IMX219 if no camera found + - "aravis": force Aravis backend + - "imx219": force Jetson IMX219 backend + """ + self._adapter = None + self._backend_name = None + backend = config.CAMERA_BACKEND.lower() + self._backend_mode = backend - self._crop_w_from = crop_w_from - self._crop_w_to = crop_w_to - self._crop_h_from = crop_h_from - self._crop_h_to = crop_h_to - self._cv_rotate_code = cv_rotate_code - aelock = "aelock=true " if aelock else "" - - gst_config = ( - "nvarguscamerasrc ! " - "video/x-raw(memory:NVMM), " - "width=(int)%d, height=(int)%d, " - "format=(string)NV12, framerate=(fraction)%d/1 ! " - "nvvidconv flip-method=%d ! " - "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! " - "videoconvert ! " - "video/x-raw, format=(string)BGR ! 
appsink" - % ( - capture_width, - capture_height, - framerate, - nvidia_flip_method, - display_width, - display_height + # ─────────── Backend selection ─────────── + if self._backend_mode == "auto": + try: + print(f"[{self.__class__.__name__}] Trying Aravis backend...") + self._adapter = CameraAdapterAravis( + crop_w_from, crop_w_to, crop_h_from, crop_h_to, cv_rotate_code, + ispdigitalgainrange_from, ispdigitalgainrange_to, + gainrange_from, gainrange_to, + exposuretimerange_from, exposuretimerange_to, + aelock, capture_width, capture_height, + display_width, display_height, framerate, nvidia_flip_method + ) + print(f"[{self.__class__.__name__}] ✅ Aravis backend selected: {self._adapter.whoami()}") + except Exception as e: + print(f"[{self.__class__.__name__}] ⚠️ Aravis unavailable: {e}") + print(f"[{self.__class__.__name__}] → Falling back to IMX219 backend.") + print(f"[{self.__class__.__name__}] 🔄 Restarting nvargus-daemon (if available)...") + os.system("sudo systemctl restart nvargus-daemon") + self._adapter = CameraAdapterIMX219_170( + crop_w_from, crop_w_to, crop_h_from, crop_h_to, cv_rotate_code, + ispdigitalgainrange_from, ispdigitalgainrange_to, + gainrange_from, gainrange_to, + exposuretimerange_from, exposuretimerange_to, + aelock, capture_width, capture_height, + display_width, display_height, framerate, nvidia_flip_method ) - ) - if config.APPLY_THREAD_BUFF_CLEANING: - self._cap = VideoCaptureNoBuffer(gst_config, cv.CAP_GSTREAMER) + elif self._backend_mode == "aravis": + self._adapter = CameraAdapterAravis( + crop_w_from, crop_w_to, crop_h_from, crop_h_to, cv_rotate_code, + ispdigitalgainrange_from, ispdigitalgainrange_to, + gainrange_from, gainrange_to, + exposuretimerange_from, exposuretimerange_to, + aelock, capture_width, capture_height, + display_width, display_height, framerate, nvidia_flip_method + ) + + elif self._backend_mode == "imx219": + print(f"[{self.__class__.__name__}] 🔄 Restarting nvargus-daemon (if available)...") + os.system("sudo 
systemctl restart nvargus-daemon") + self._adapter = CameraAdapterIMX219_170( + crop_w_from, crop_w_to, crop_h_from, crop_h_to, cv_rotate_code, + ispdigitalgainrange_from, ispdigitalgainrange_to, + gainrange_from, gainrange_to, + exposuretimerange_from, exposuretimerange_to, + aelock, capture_width, capture_height, + display_width, display_height, framerate, nvidia_flip_method + ) + else: - self._cap = cv.VideoCapture(gst_config, cv.CAP_GSTREAMER) + raise ValueError(f"[{self.__class__.__name__}] Unknown backend '{backend}', expected 'auto', 'aravis' or 'imx219'.") - def __enter__(self): - return self + # ─────────── Automatic backend name ─────────── + raw_name = self._adapter.whoami() + self._backend_name = raw_name.replace("CameraAdapter", "").split("_")[0].lower() + print(f"[{self.__class__.__name__}] Active backend: {self._backend_name} ({raw_name})") - def __exit__(self, exc_type, exc_val, exc_tb): - self.release() + # ─────────── Interface delegation ─────────── + def get_image(self): + return self._adapter.get_image() def release(self): - self._cap.release() + return self._adapter.release() - def get_image(self): - if self._cap.isOpened(): - if config.BUFF_CLEANING_DELAY > 0: - time.sleep(config.BUFF_CLEANING_DELAY) + def __enter__(self): + self._adapter.__enter__() + return self - if config.APPLY_THREAD_BUFF_CLEANING: - image = self._cap.read() - else: - ret, image = self._cap.read() + def __exit__(self, exc_type, exc_val, exc_tb): + self._adapter.__exit__(exc_type, exc_val, exc_tb) - if config.CV_APPLY_ROTATION: - image = cv.rotate(image, self._cv_rotate_code) + def whoami(self): + """Return the manager and its active backend class name.""" + return f"[{self.__class__.__name__}] → {self._adapter.whoami()} ({self._backend_name})" - # crop black zones - #if config.APPLY_IMAGE_CROPPING: - # image = image[self._crop_h_from:self._crop_h_to, self._crop_w_from:self._crop_w_to] - return image - else: - raise RuntimeError("Unable to open camera") + def 
backend(self): + """Return short backend identifier, e.g., 'aravis' or 'imx219'.""" + return self._backend_name -class CameraAdapterIMX219_170: +class CameraAdapterIMX219_170(CameraAdapterInterface): def __init__(self, crop_w_from, @@ -1205,6 +1215,278 @@ def get_image(self): raise RuntimeError(f"[{self.__class__.__name__}] -> Unable to open camera") +class CameraAdapterAravis(CameraAdapterInterface): + """ + Aravis-based version compatible with the DR-U3-50Y2C-C3-S camera. + Uses a double-buffer (ping-pong) numpy architecture for zero-latency capture. + """ + + def __init__(self, + crop_w_from, + crop_w_to, + crop_h_from, + crop_h_to, + cv_rotate_code, + ispdigitalgainrange_from, + ispdigitalgainrange_to, + gainrange_from, + gainrange_to, + exposuretimerange_from, + exposuretimerange_to, + aelock, + capture_width, + capture_height, + display_width, + display_height, + framerate, + nvidia_flip_method): + + # Convert exposure time from nvarguscamerasrc units (nanoseconds) to Aravis units (µs). 
+ # Therefore: 1 µs = 1000 ns → divide by 1000 + exposuretimerange_from = exposuretimerange_from / 1000 + exposuretimerange_to = exposuretimerange_to / 1000.0 + + self._running = False + self._ready_index = 0 + self._lock = threading.Lock() + self._framerate = framerate + + self._cv_codes = { + "BayerBG8": cv.COLOR_BAYER_RG2BGR, + "BayerGB8": cv.COLOR_BAYER_GR2BGR, + "BayerRG8": cv.COLOR_BAYER_BG2BGR, + "BayerGR8": cv.COLOR_BAYER_GB2BGR, + } + + # ─────────── Camera initialization ─────────── + Aravis.update_device_list() + if Aravis.get_n_devices() == 0: + raise RuntimeError(f"[{self.__class__.__name__}] No Aravis camera detected.") + self.cam = Aravis.Camera.new(None) + print(f"[{self.__class__.__name__}] Aravis camera detected : {self.cam.get_model_name()}") + + # ─────────── Binning configuration ─────────── + try: + self.cam.set_integer("BinningHorizontal", config.CAMERA_BINNING_H) + self.cam.set_integer("BinningVertical", config.CAMERA_BINNING_V) + except Exception: + pass + + sensor_w, sensor_h = self.cam.get_sensor_size() + bx = self.cam.get_integer("BinningHorizontal") if self.cam.is_feature_available("BinningHorizontal") else 1 + by = self.cam.get_integer("BinningVertical") if self.cam.is_feature_available("BinningVertical") else 1 + eff_w, eff_h = sensor_w // bx, sensor_h // by + + # ─────────── Configure ROI ─────────── + if config.APPLY_IMAGE_CROPPING: + # ─────────── Centered ROI ─────────── + target_w, target_h = min(capture_width, eff_w), min(capture_height, eff_h) + ox = ((eff_w - target_w) // 2) + oy = (eff_h - target_h) + self.cam.set_region(ox, oy, target_w, target_h) + self._width, self._height = target_w, target_h + else: + # ─────────── Full sensor ROI ─────────── + self.cam.set_region(0, 0, eff_w, eff_h) + self._width, self._height = eff_w, eff_h + + # ─────────── ExposureTime, gain and framerate ─────────── + + if gainrange_from != gainrange_to: + raise ValueError(f"[{self.__class__.__name__}] gainrange_from and gainrange_to must be 
equal.") + if exposuretimerange_from != exposuretimerange_to: + raise ValueError(f"[{self.__class__.__name__}] exposuretimerange_from and exposuretimerange_to must be equal.") + + for (k, v) in [("ExposureAuto", "Off"), ("GainAuto", "Off")]: + try: self.cam.set_string(k, v) + except Exception: pass + for (k, v) in [("ExposureTime", float(exposuretimerange_from)), ("Gain", float(gainrange_from))]: + try: self.cam.set_float(k, v) + except Exception: pass + + try: + self.cam.set_boolean("AcquisitionFrameRateEnable", True) + self.cam.set_float("AcquisitionFrameRate", self._framerate) + except Exception: + pass + + # ─────────── Flip / rotation mapping (hardware + software hybrid) ─────────── + # nvarguscamerasrc flip-method values: + # 0: none → ReverseX=0, ReverseY=0 + # 1: rotate 90° CCW → emulate in software + # 2: rotate 180° → ReverseX=1, ReverseY=1 + # 3: rotate 90° CW → emulate in software + # 4: horizontal flip → ReverseX=1, ReverseY=0 + # 5: vertical flip + rotate 180° (upside down) → emulate in software + # 6: vertical flip → ReverseX=0, ReverseY=1 + # 7: transverse (transpose + 180°) → emulate in software + + flip_mapping = {0: (False, False), 2: (True, True), 4: (True, False), 6: (False, True)} + + self._software_rotate_code = None + + if nvidia_flip_method in flip_mapping: + reverse_x, reverse_y = flip_mapping[nvidia_flip_method] + try: + if self.cam.is_feature_available("ReverseX"): + self.cam.set_boolean("ReverseX", reverse_x) + if self.cam.is_feature_available("ReverseY"): + self.cam.set_boolean("ReverseY", reverse_y) + print(f"[{self.__class__.__name__}] → Applied ReverseX={reverse_x}, ReverseY={reverse_y} (flip-method={nvidia_flip_method})") + except Exception as e: + print(f"[{self.__class__.__name__}] ⚠️ Unable to set ReverseX/ReverseY: {e}") + else: + self._software_rotate_code = nvidia_flip_method + print(f"[{self.__class__.__name__}] ⚠️ flip-method={nvidia_flip_method} not supported by Aravis (only 0,2,4,6).") + + # ─────────── Stream & Buffers 
─────────── + self.stream = self.cam.create_stream(None, None) + self.stream.set_emit_signals(False) + payload = self.cam.get_payload() + for _ in range(2): + self.stream.push_buffer(Aravis.Buffer.new_allocate(payload)) + + # ─────────── Double buffer numpy ─────────── + self._buffers = [ + np.empty((self._height, self._width), dtype=np.uint8), + np.empty((self._height, self._width), dtype=np.uint8) + ] + + self.cam.start_acquisition() + + # ─────────── Timestamp offset calibration ─────────── + if config.CAMERA_PRINT_LATENCY: + print(f"[{self.__class__.__name__}] Waiting for first buffer to calibrate timestamp offset...") + buf = None + while buf is None: + buf = self.stream.try_pop_buffer() + + if buf.get_status() == Aravis.BufferStatus.SUCCESS: + # Get the camera’s hardware timestamp (in ns) and convert to seconds + cam_ts = buf.get_timestamp() / 1e9 + # Get the current system time in seconds + sys_ts = time.time() + # Compute the offset so that future timestamps are expressed in system time + self._timestamp_offset = sys_ts - cam_ts + print(f"[{self.__class__.__name__}] Timestamp offset calibrated: {self._timestamp_offset:.6f}s") + + # Push buffer back to the stream for reuse + self.stream.push_buffer(buf) + + self._running = True + self._thread = threading.Thread(target=self._capture_loop, daemon=True) + self._thread.start() + print(f"[{self.__class__.__name__}] Aravis acquisition launched in {self._framerate} FPS ({self._width}x{self._height})") + + self._pixel_format = self.cam.get_pixel_format_as_string() + + + # ─────────── Capture thread ─────────── + def _capture_loop(self): + idx = 0 + while self._running: + buf = self.stream.try_pop_buffer() + if not buf: + continue + if buf.get_status() == Aravis.BufferStatus.SUCCESS: + data = buf.get_data() + + if config.CAMERA_PRINT_LATENCY: + # Hardware timestamp in seconds + ts_ns = buf.get_timestamp() # timestamp in nanoseconds + capture_time = ts_ns / 1e9 # conversion in seconds + + np.copyto(self._buffers[idx], 
np.frombuffer(data, dtype=np.uint8).reshape(self._buffers[idx].shape)) + + with self._lock: + self._ready_index = idx + if config.CAMERA_PRINT_LATENCY: + self._last_hw_timestamp = capture_time + + idx = 1 - idx + + self.stream.push_buffer(buf) + + # ─────────── Public interface ─────────── + def get_image(self): + """Return the latest RGB frame and log latency statistics every 10 seconds.""" + + # Get the current frame safely + with self._lock: + frame = self._buffers[self._ready_index] + hw_ts = getattr(self, "_last_hw_timestamp", None) + + # Convert Bayer → RGB + frame_rgb = cv.cvtColor(frame, self._cv_codes.get(self._pixel_format, cv.COLOR_BAYER_GB2BGR)) + + # Apply rotation if needed + if self._software_rotate_code is not None: + code = self._software_rotate_code + if code == 1: + frame_rgb = cv.transpose(frame_rgb) + frame_rgb = cv.flip(frame_rgb, 0) + elif code == 3: + frame_rgb = cv.transpose(frame_rgb) + frame_rgb = cv.flip(frame_rgb, 1) + elif code == 5: + frame_rgb = cv.flip(frame_rgb, -1) + elif code == 7: + frame_rgb = cv.transpose(frame_rgb) + frame_rgb = cv.flip(frame_rgb, -1) + + # ─────────── Latency & FPS stats ─────────── + if config.CAMERA_PRINT_LATENCY: + hw_ts += getattr(self, "_timestamp_offset", 0.0) + now = time.time() + if hw_ts is not None: + latency_ms = (now - hw_ts) * 1000.0 + + # Initialize stats on first frame + if not hasattr(self, "_stats_start_time"): + self._stats_start_time = now + self._latencies = [] + self._frames = 0 + + self._latencies.append(latency_ms) + self._frames += 1 + + # Every 10 seconds → compute and print statistics + if (now - self._stats_start_time) >= 10.0: + avg_latency = sum(self._latencies) / len(self._latencies) + min_latency = min(self._latencies) + max_latency = max(self._latencies) + + print( + f"[{self.__class__.__name__}] " + f"Latency avg: {avg_latency:.2f} ms " + f"(min: {min_latency:.2f}, max: {max_latency:.2f})" + ) + + # Reset stats window + self._stats_start_time = now + self._latencies.clear() + 
self._frames = 0 + + return frame_rgb + + + def release(self): + """Stop the capture process cleanly.""" + self._running = False + time.sleep(0.1) + try: + self.cam.stop_acquisition() + except Exception: + pass + print(f"[{self.__class__.__name__}] 🛑 Aravis capture stopped.") + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.release() + + class VideoCaptureNoBuffer: """Minimalistic layer for cv2's VideoCapture with buffer cleaning thread ()""" @@ -1238,75 +1520,6 @@ def read(self): return self._queue.get() -class CompassOldAdapter: - """Provides to the robot's on-board compass (some old card, the first one, not sure about model)""" - - def __init__(self): - import smbus - self._register_a = 0 # Address of Configuration register A - self._register_b = 0x01 # Address of configuration register B - self._register_mode = 0x02 # Address of mode register - self._x_axis_h = 0x03 # Address of X-axis MSB data register - self._z_axis_h = 0x05 # Address of Z-axis MSB data register - self._y_axis_h = 0x07 # Address of Y-axis MSB data register - self._bus = smbus.SMBus(1) # or bus = smbus.SMBus(0) for older version boards - - # write to Configuration Register A - self._bus.write_byte_data(config.COMPASS_DEVICE_ADDRESS, self._register_a, 0x70) - # Write to Configuration Register B for gain - self._bus.write_byte_data(config.COMPASS_DEVICE_ADDRESS, self._register_b, 0xa0) - # Write to mode Register for selecting mode - self._bus.write_byte_data(config.COMPASS_DEVICE_ADDRESS, self._register_mode, 0) - - def _read_raw_data(self, address): - """Reads raw data from compass""" - - # Read raw 16-bit value - high = self._bus.read_byte_data(config.COMPASS_DEVICE_ADDRESS, address) - low = self._bus.read_byte_data(config.COMPASS_DEVICE_ADDRESS, address + 1) - - # concatenate higher and lower value - value = ((high << 8) | low) - - # get signed value from module - return value - 65536 if value > 32768 else value - - def get_heading_angle(self): 
- """Returns current heading angle in degrees""" - - x = self._read_raw_data(self._x_axis_h) - y = self._read_raw_data(self._y_axis_h) - heading = math.atan2(y, x) + config.COMPASS_DECLINATION - - # Due to declination check for > 360 degree - if heading > 2 * math.pi: - heading -= 2 * math.pi - # check for sign - if heading < 0: - heading += 2 * math.pi - - # convert into angle - return int(heading * 180 / math.pi) - - -class CompassBNO055Adapter: - """Provides access to the robot's on-board compass""" - - def __init__(self): - import adafruit_bno055 - from busio import I2C - import board - - self._i2c = I2C(board.SCL, board.SDA) - self._sensor = adafruit_bno055.BNO055(self._i2c) - # turn on "compass mode" - self._sensor.mode = adafruit_bno055.COMPASS_MODE - # sensor.mode = adafruit_bno055.M4G_MODE - - def get_euler_angle(self): - return self._sensor.euler - - class VescAdapter: """Provides navigation engines (forward/backward) control using vesc""" diff --git a/config/config_v20_defaults.py b/config/config_v20_defaults.py index 9f16a357..8ed9dbec 100644 --- a/config/config_v20_defaults.py +++ b/config/config_v20_defaults.py @@ -1,7 +1,7 @@ """Configuration file.""" -CONFIG_VERSION = "2.2.1" +CONFIG_VERSION = "2.2.2" # ====================================================================================================================== @@ -91,6 +91,7 @@ CONTINUE_PREVIOUS_PATH = False PREVIOUS_PATH_POINTS_FILE = "path_points.dat" PREVIOUS_PATH_INDEX_FILE = "path_index.txt" +PREVIOUS_GNSS_INDEX_FILE = "path_gnss_index.txt" #Cyril covid ORIGIN_AVERAGE_SAMPLES = 8 @@ -623,22 +624,28 @@ # ====================================================================================================================== # CAMERA SETTINGS # ====================================================================================================================== -CAMERA_W = 1920 -CAMERA_H = 1080 -APPLY_IMAGE_CROPPING = False +CAMERA_BACKEND = "auto" # "auto" | "aravis" | "imx219" +#- 
"auto": try Aravis first, fallback to IMX219 if no camera found +#- "aravis": force Aravis backend +#- "imx219": force Jetson IMX219 backend "gstreamer", "v4l2", "auto" +CAMERA_BINNING_H = 2 +CAMERA_BINNING_V = 2 +CAMERA_W = 2160//CAMERA_BINNING_H#2592//CAMERA_BINNING_H +CAMERA_H = 1216//CAMERA_BINNING_V#1944//CAMERA_BINNING_V +APPLY_IMAGE_CROPPING = True CROP_W_FROM = 0 CROP_W_TO = 1920 CROP_H_FROM = 0 CROP_H_TO = 1080 -CAMERA_FRAMERATE = 16 -CAMERA_FLIP_METHOD = 0 -SCENE_CENTER_X = 1000 -SCENE_CENTER_Y = 980 -ONE_MM_IN_PX = 3.2 +CAMERA_FRAMERATE = 30 +CAMERA_FLIP_METHOD = 2 # 0=none, 1=counterclockwise, 2=rotate 180, 3=clockwise, 4=horizontal flip, 5=upside down flip, 6=transpose, 7=transverse +SCENE_CENTER_X = CAMERA_W//2 +SCENE_CENTER_Y = 1944//2//2 +ONE_MM_IN_PX = 2.9 ISP_DIGITAL_GAIN_RANGE_FROM = 4 ISP_DIGITAL_GAIN_RANGE_TO = 4 -GAIN_RANGE_FROM = 4 -GAIN_RANGE_TO = 4 +GAIN_RANGE_FROM = 12 +GAIN_RANGE_TO = 12 EXPOSURE_TIME_RANGE_FROM = 660000 EXPOSURE_TIME_RANGE_TO = 660000 AE_LOCK = True diff --git a/detection.py b/detection.py index ce18dfb2..dc2cd26b 100644 --- a/detection.py +++ b/detection.py @@ -164,7 +164,7 @@ def detect(self, image, disable_frame_show=False): if config.FRAME_SHOW and not disable_frame_show: - t1 = time.time() + #t1 = time.time() #img = draw_boxes(image, plant_boxes) img = image @@ -184,19 +184,19 @@ def detect(self, image, disable_frame_show=False): #print(time.time() - t1) - if not YoloDarknetDetector.WEBSTREAM: - template_dir = os.path.abspath('./liveMain') - app = Flask("webstreaming", template_folder=template_dir) - CORS(app) - app.add_url_rule('/', view_func=webstreaming.index) - app.add_url_rule('/video_feed', view_func=webstreaming.video_feed) + # if not YoloDarknetDetector.WEBSTREAM: + # template_dir = os.path.abspath('./liveMain') + # app = Flask("webstreaming", template_folder=template_dir) + # CORS(app) + # app.add_url_rule('/', view_func=webstreaming.index) + # app.add_url_rule('/video_feed', 
view_func=webstreaming.video_feed) - logging.getLogger('werkzeug').disabled = True - os.environ['WERKZEUG_RUN_MAIN'] = 'true' + # logging.getLogger('werkzeug').disabled = True + # os.environ['WERKZEUG_RUN_MAIN'] = 'true' - YoloDarknetDetector.webStream = Process(target=app.run, args=("0.0.0.0",8888,False)) - YoloDarknetDetector.webStream.start() - YoloDarknetDetector.WEBSTREAM = True + # YoloDarknetDetector.webStream = Process(target=app.run, args=("0.0.0.0",8888,False)) + # YoloDarknetDetector.webStream.start() + # YoloDarknetDetector.WEBSTREAM = True return plant_boxes @@ -286,7 +286,7 @@ def detect(self, image, disable_frame_show=False): if config.FRAME_SHOW and not disable_frame_show: - t1 = time.time() + #t1 = time.time() #img = draw_boxes(image, plant_boxes) img = image @@ -306,19 +306,19 @@ def detect(self, image, disable_frame_show=False): #print(time.time() - t1) - if not YoloTRTDetector.WEBSTREAM: - template_dir = os.path.abspath('./liveMain') - app = Flask("webstreaming", template_folder=template_dir) - CORS(app) - app.add_url_rule('/', view_func=webstreaming.index) - app.add_url_rule('/video_feed', view_func=webstreaming.video_feed) + # if not YoloTRTDetector.WEBSTREAM: + # template_dir = os.path.abspath('./liveMain') + # app = Flask("webstreaming", template_folder=template_dir) + # CORS(app) + # app.add_url_rule('/', view_func=webstreaming.index) + # app.add_url_rule('/video_feed', view_func=webstreaming.video_feed) - logging.getLogger('werkzeug').disabled = True - os.environ['WERKZEUG_RUN_MAIN'] = 'true' + # logging.getLogger('werkzeug').disabled = True + # os.environ['WERKZEUG_RUN_MAIN'] = 'true' - YoloTRTDetector.webStream = Process(target=app.run, args=("0.0.0.0",8888,False)) - YoloTRTDetector.webStream.start() - YoloTRTDetector.WEBSTREAM = True + # YoloTRTDetector.webStream = Process(target=app.run, args=("0.0.0.0",8888,False)) + # YoloTRTDetector.webStream.start() + # YoloTRTDetector.WEBSTREAM = True return plant_boxes @@ -353,8 +353,34 @@ def 
_allocate_buffers(self, engine): outputs.append(HostDeviceMem(host_mem, device_mem)) return inputs, outputs, bindings, stream + + def letterbox_fast(self, img, target_size=(416, 416)): + ih, iw = img.shape[:2] + h, w = target_size + + scale = min(w / iw, h / ih) + nw, nh = int(iw * scale), int(ih * scale) + + # Resize + img_resized = cv.resize(img, (nw, nh), interpolation=cv.INTER_LINEAR) + + # Padding avec copyMakeBorder (plus rapide) + top = (h - nh) // 2 + bottom = h - nh - top + left = (w - nw) // 2 + right = w - nw - left + + img_padded = cv.copyMakeBorder( + img_resized, + top, bottom, left, right, + cv.BORDER_CONSTANT, + value=[114, 114, 114] + ) + + return img_padded, scale, (left, top) def _preprocess(self, image): + #processed_image = self.letterbox_fast(image, (self.__width, self.__height))[0] processed_image = cv.resize(image, (self.__width, self.__height)) processed_image = cv.cvtColor(processed_image, cv.COLOR_BGR2RGB) processed_image = processed_image.transpose((2, 0, 1)).astype(np.float32) diff --git a/extraction.py b/extraction.py index bf9d65f9..4cb66df4 100644 --- a/extraction.py +++ b/extraction.py @@ -16,7 +16,7 @@ class ExtractionManagerV3: def __init__(self, smoothie: adapters.SmoothieAdapter, - camera: adapters.CameraAdapterIMX219_170, + camera: adapters.CameraAdapterInterface, logger_full: utility.Logger, data_collector: datacollection.DataCollector, image_saver: utility.ImageSaver, diff --git a/index_webrtc.html b/index_webrtc.html new file mode 100644 index 00000000..bb348f61 --- /dev/null +++ b/index_webrtc.html @@ -0,0 +1,67 @@ + + + +
+ +Connecting...
+ + + + + \ No newline at end of file diff --git a/liveMain/webstreaming.py b/liveMain/webstreaming.py index 6770da8a..b2af775d 100644 --- a/liveMain/webstreaming.py +++ b/liveMain/webstreaming.py @@ -21,15 +21,17 @@ def rescale_frame(frame, percent=75): def generate(): sharedMemory = posix_ipc.SharedMemory(config.SHARED_MEMORY_NAME_DETECTED_FRAME) + sharedMem = mmap(fileno=sharedMemory.fd, length=0) sharedMemory.close_fd() - if config.APPLY_IMAGE_CROPPING: - img_w = config.CROP_W_TO - config.CROP_W_FROM - img_h = config.CROP_H_TO - config.CROP_H_FROM - else: - img_w = config.CAMERA_W - img_h = config.CAMERA_H + # if config.APPLY_IMAGE_CROPPING: + # img_w = config.CROP_W_TO - config.CROP_W_FROM + # img_h = config.CROP_H_TO - config.CROP_H_FROM + # else: + + img_w = config.CAMERA_W + img_h = config.CAMERA_H sharedArray = np.ndarray((img_h, img_w, 3), dtype=np.uint8, buffer=sharedMem) diff --git a/main.py b/main.py index 1615987e..3f60df5a 100644 --- a/main.py +++ b/main.py @@ -144,7 +144,7 @@ def move_to_point_and_extract(coords_from_to: list, gps: adapters.GPSUbloxAdapter, vesc_engine: adapters.VescAdapterV4, smoothie: adapters.SmoothieAdapter, - camera: adapters.CameraAdapterIMX219_170, + camera: adapters.CameraAdapterInterface, periphery_det: detection.YoloOpenCVDetection, precise_det: detection.YoloOpenCVDetection, logger_full: utility.Logger, @@ -2162,7 +2162,7 @@ def main(): config.VESC_STOPPER_CHECK_FREQ, logger_full) as vesc_engine, \ adapters.SmoothieAdapter(smoothie_address) as smoothie, \ adapters.GPSUbloxAdapter(config.GPS_PORT, config.GPS_BAUDRATE, config.GPS_POSITIONS_TO_KEEP) as gps, \ - adapters.CameraAdapterIMX219_170(config.CROP_W_FROM, config.CROP_W_TO, config.CROP_H_FROM, + adapters.CameraAdapterManager(config.CROP_W_FROM, config.CROP_W_TO, config.CROP_H_FROM, config.CROP_H_TO, config.CV_ROTATE_CODE, config.ISP_DIGITAL_GAIN_RANGE_FROM, config.ISP_DIGITAL_GAIN_RANGE_TO, diff --git a/serverCamLive.py b/serverCamLive.py new file mode 100644 index 
#!/usr/bin/env python3
"""MJPEG camera streaming server built on Flask + CameraAdapterManager."""
import cv2
import threading
import signal
import sys
import time
import numpy as np
from flask import Flask, Response, jsonify, render_template, request
from flask_cors import CORS
from config import config
import detection
from adapters import CameraAdapterManager


class ServerCamLive:
    """
    Camera streaming server based on Flask + CameraAdapterManager.
    - If `app` is provided: attaches /video, /info, /focus and
      /update_rectangles routes to it.
    - If `app` is None: creates its own Flask app and runs it.
    """

    def __init__(self, app=None, port=8080, use_detector=True, display_zones=False):
        """
        :param app: Optional Flask app instance.
        :param port: Port used if running standalone.
        :param use_detector: Enable YOLO TRT detection on frames.
        :param display_zones: Draw working-zone overlays on streamed frames.
        """
        self.external_app = app is not None
        self.app = app or Flask(__name__, template_folder="./serverCamLiveTemplate")
        CORS(self.app)

        self.port = port
        self.use_detector = use_detector
        self.display_zones = display_zones

        self.cam = None
        self.detector = None
        self.thread_alive = False
        self.video_frame = None  # latest captured frame (BGR np.ndarray), guarded by thread_lock
        self.thread_lock = threading.Lock()
        self.capture_thread = None
        self._running = True
        # Filled by _register_routes(); consumed by _unregister_routes().
        # (Was missing entirely, causing AttributeError on stop().)
        self._registered_routes = []

        # Register routes (always added)
        self._register_routes()

    # ─────────── DRAWING ZONES ───────────
    def draw_zone_circle(self, image, cx, cy, radius):
        """Draws a circle showing the undistorted zone."""
        return cv2.circle(image, (cx, cy), radius, (0, 0, 255), thickness=3)

    def draw_zone_poly(self, image, np_poly_points):
        """Draws polygon of the working zone."""
        return cv2.polylines(image, [np_poly_points], isClosed=True, color=(0, 255, 255), thickness=5)

    # ─────────── ROUTES ───────────
    def _register_routes(self):
        """Attach /video, /info, /focus and /update_rectangles routes to the Flask app."""
        self.app.add_url_rule("/video", view_func=self.stream_frames)
        self.app.add_url_rule("/info", view_func=self.camera_info)
        self.app.add_url_rule("/focus", view_func=self.liveFocusPage)
        self.app.add_url_rule("/update_rectangles", view_func=self.update_rectangles, methods=["POST"])
        # Remember exactly what we added so _unregister_routes() removes only these.
        self._registered_routes = ["/video", "/info", "/focus", "/update_rectangles"]

    def _unregister_routes(self):
        """Remove previously added routes from the Flask app."""
        removed = []
        for rule in list(self.app.url_map.iter_rules()):
            if rule.rule in self._registered_routes:
                removed.append(rule.rule)
                # NOTE(review): touches Flask's private url_map._rules list —
                # Flask has no public route-removal API; verify on Flask upgrades.
                self.app.url_map._rules.remove(rule)
                self.app.view_functions.pop(rule.endpoint, None)
        if removed:
            print(f"[{self.__class__.__name__}] 🧹 Removed routes: {', '.join(removed)}")

    def camera_info(self):
        """Return camera backend and adapter info as JSON."""
        if self.cam:
            return jsonify({
                "backend": self.cam.backend(),
                "class": self.cam.whoami()
            })
        return jsonify({"status": "camera not initialized"})

    def liveFocusPage(self):
        """Serve the index.html page for focus"""
        return render_template("index.html")

    def update_rectangles(self):
        """Compute a Sobel-based focus score for each client-selected rectangle.

        Expects JSON: {"rectangles": [{x, y, w, h, color}, ...],
                       "image_size": {"width": ..., "height": ...}}.
        Returns {"status": "ok", "scores": {color: "<score>"}} on success,
        or ({"status": "error"}, 400) on missing data / no frame yet.
        """
        data = request.get_json()
        rectangles = data.get("rectangles") if data else None
        img_size = data.get("image_size") if data else None

        # Snapshot the latest frame under the lock so the capture thread
        # cannot swap it out from under us.
        with self.thread_lock:
            frame = self.video_frame.copy() if self.video_frame is not None else None

        if (frame is not None and rectangles and img_size
                and img_size.get("width") and img_size.get("height")):
            # Map browser-space rectangles back to full-resolution frame space.
            scale_x = frame.shape[1] / img_size["width"]
            scale_y = frame.shape[0] / img_size["height"]
            scores = dict()
            for rect in rectangles:
                # Clamp the rectangle to the frame bounds.
                x_real = max(0, int(rect["x"] * scale_x))
                y_real = max(0, int(rect["y"] * scale_y))
                w_real = min(int(rect["w"] * scale_x), frame.shape[1] - x_real)
                h_real = min(int(rect["h"] * scale_y), frame.shape[0] - y_real)

                crop = frame[y_real:y_real + h_real, x_real:x_real + w_real]
                if crop.size == 0:
                    # Degenerate rectangle — avoid taking the mean of nothing.
                    scores[rect["color"]] = "0"
                    continue

                # Convert to grayscale for the gradient computation.
                if len(crop.shape) == 3:
                    crop_gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
                else:
                    crop_gray = crop

                # Mean magnitude of horizontal+vertical Sobel gradients is a
                # simple sharpness (focus) metric: higher = better focus.
                sobelx = cv2.Sobel(crop_gray, cv2.CV_64F, 1, 0, ksize=3)
                sobely = cv2.Sobel(crop_gray, cv2.CV_64F, 0, 1, ksize=3)
                gradient_magnitude = np.sqrt(sobelx ** 2 + sobely ** 2)
                score = np.mean(gradient_magnitude)
                scores[rect["color"]] = f"{score:.0f}"

            return jsonify({"status": "ok", "scores": scores})

        return jsonify({"status": "error"}), 400

    # ─────────── CAMERA ───────────
    def init_camera(self):
        """Initialize camera with CameraAdapterManager; exit the process on failure."""
        try:
            self.cam = CameraAdapterManager(
                crop_w_from=config.CROP_W_FROM,
                crop_w_to=config.CROP_W_TO,
                crop_h_from=config.CROP_H_FROM,
                crop_h_to=config.CROP_H_TO,
                cv_rotate_code=None,
                ispdigitalgainrange_from=config.ISP_DIGITAL_GAIN_RANGE_FROM,
                ispdigitalgainrange_to=config.ISP_DIGITAL_GAIN_RANGE_TO,
                gainrange_from=config.GAIN_RANGE_FROM,
                gainrange_to=config.GAIN_RANGE_TO,
                exposuretimerange_from=config.EXPOSURE_TIME_RANGE_FROM,
                exposuretimerange_to=config.EXPOSURE_TIME_RANGE_TO,
                aelock=config.AE_LOCK,
                capture_width=config.CAMERA_W,
                capture_height=config.CAMERA_H,
                display_width=config.CAMERA_W,
                display_height=config.CAMERA_H,
                framerate=config.CAMERA_FRAMERATE,
                nvidia_flip_method=config.CAMERA_FLIP_METHOD
            )
            print(f"[{self.__class__.__name__}] ✅ Camera initialized using backend: {self.cam.backend()} ({self.cam.whoami()})")
        except Exception as e:
            print(f"[{self.__class__.__name__}] ❌ Failed to initialize camera: {e}")
            sys.exit(1)

    def capture_loop(self):
        """Continuously capture frames; releases the camera on exit (its `finally`)."""
        try:
            while self.thread_alive:
                frame = self.cam.get_image()
                with self.thread_lock:
                    self.video_frame = frame
        except Exception as e:
            print(f"[{self.__class__.__name__}] ⚠️ Capture error: {e}")
        finally:
            if self.cam:
                self.cam.release()
                print(f"[{self.__class__.__name__}] 🛑 Camera released.")

    # ─────────── STREAMING ───────────
    def rescale_frame(self, frame, percent=50):
        """Resize frame to reduce stream bandwidth."""
        width = int(frame.shape[1] * percent / 100)
        height = int(frame.shape[0] * percent / 100)
        return cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)

    def encode_frame(self):
        """Generate an MJPEG byte stream from the latest captured frame."""
        while self._running:
            with self.thread_lock:
                # Copy so overlay drawing never mutates the shared capture buffer
                # that capture_loop/update_rectangles also use.
                frame = None if self.video_frame is None else self.video_frame.copy()

            if frame is None:
                # No frame captured yet — yield the CPU instead of busy-spinning.
                time.sleep(0.01)
                continue

            # Optional detector (best effort: stream the raw frame on failure)
            if self.detector:
                try:
                    boxes = self.detector.detect(frame, True)
                    frame = detection.draw_boxes(frame, boxes)
                except Exception as e:
                    print(f"[{self.__class__.__name__}] ⚠️ Detection failed: {e}")

            # Optional zone display
            if self.display_zones:
                try:
                    poly_zone_points_cv = np.array(config.WORKING_ZONE_POLY_POINTS, np.int32).reshape((-1, 1, 2))
                    frame = self.draw_zone_circle(frame, config.SCENE_CENTER_X, config.SCENE_CENTER_Y, config.UNDISTORTED_ZONE_RADIUS)
                    frame = self.draw_zone_circle(frame, config.SCENE_CENTER_X, config.SCENE_CENTER_Y, 2)
                    frame = self.draw_zone_poly(frame, poly_zone_points_cv)
                except Exception as e:
                    print(f"[{self.__class__.__name__}] ⚠️ Zone drawing failed: {e}")

            ok, encoded = cv2.imencode(".jpg", frame)
            if not ok:
                continue

            yield (
                b"--frame\r\n"
                b"Content-Type: image/jpeg\r\n\r\n"
                + bytearray(encoded)
                + b"\r\n"
            )

    def stream_frames(self):
        """Flask endpoint returning the MJPEG stream."""
        return Response(self.encode_frame(), mimetype="multipart/x-mixed-replace; boundary=frame")

    # ─────────── CONTROL ───────────
    def start(self):
        """Initialize camera, detector, and capture thread.
        If no external app, also runs Flask server."""

        self.init_camera()

        if self.use_detector:
            print(f"[{self.__class__.__name__}] 🧠 Initializing detector...")
            self.detector = detection.YoloTRTDetector(
                config.PERIPHERY_MODEL_PATH,
                config.PERIPHERY_CLASSES_FILE,
                config.PERIPHERY_CONFIDENCE_THRESHOLD,
                config.PERIPHERY_NMS_THRESHOLD,
                config.PERIPHERY_INPUT_SIZE,
            )

        self.thread_alive = True
        self.capture_thread = threading.Thread(target=self.capture_loop, daemon=True)
        self.capture_thread.start()

        if not self.external_app:
            print(f"[{self.__class__.__name__}] 🌐 Starting internal Flask server on http://0.0.0.0:{self.port}/video")
            self.app.run("0.0.0.0", self.port, debug=False, use_reloader=False)
        else:
            print(f"[{self.__class__.__name__}] 🔗 ServerCamLive attached to external Flask app.")

    def stop(self):
        """Stop the capture thread, release the camera once, and remove routes."""
        print(f"[{self.__class__.__name__}] 🛑 Stopping camera stream...")

        self._running = False
        self.thread_alive = False

        # Let the capture thread exit cleanly; its `finally` releases the camera.
        # Joining avoids releasing it here while get_image() is still running
        # (and avoids a double release).
        if self.capture_thread is not None and self.capture_thread.is_alive():
            self.capture_thread.join(timeout=2.0)
        elif self.cam:
            # Capture thread never ran — release the camera ourselves.
            self.cam.release()

        # Remove routes if attached to an external app
        if self.app and self.external_app:
            self._unregister_routes()

        print(f"[{self.__class__.__name__}] ✅ Streaming stopped.")


# ─────────── Main Entrypoint ───────────
if __name__ == "__main__":
    display_zones = "--zones" in sys.argv
    use_detector = "--no-detector" not in sys.argv

    server = ServerCamLive(port=8080, use_detector=use_detector, display_zones=display_zones)

    def handle_exit(sig, frame):
        """Signal handler: stop the server and exit cleanly."""
        print("\n[ServerCamLive] 🛑 Ctrl+C detected — stopping server and releasing camera...")
        server.stop()
        sys.exit(0)

    # Capture Ctrl+C (SIGINT) and kill (SIGTERM)
    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)

    server.start()
b/serverCamLiveTemplate/index.html @@ -0,0 +1,330 @@ + + + + + +