From 32d42ed6ce94d6b223de056ff2ddf3fbe9c444c4 Mon Sep 17 00:00:00 2001 From: Henry-YiW Date: Fri, 16 May 2025 04:30:33 -0500 Subject: [PATCH 1/6] lane detection code using CLRerNet model. Used in my effort to test the lane detection --- .../onboard/perception/CLRerNet/README.MD | 18 + GEMstack/onboard/perception/lane_detection.py | 329 ++++++++++++++++++ GEMstack/state/roadgraph.py | 39 +++ 3 files changed, 386 insertions(+) create mode 100644 GEMstack/onboard/perception/CLRerNet/README.MD create mode 100644 GEMstack/onboard/perception/lane_detection.py diff --git a/GEMstack/onboard/perception/CLRerNet/README.MD b/GEMstack/onboard/perception/CLRerNet/README.MD new file mode 100644 index 00000000..202d0b6d --- /dev/null +++ b/GEMstack/onboard/perception/CLRerNet/README.MD @@ -0,0 +1,18 @@ +### To Use Lane Detection, You Need to first install the CLRerNet here +- First, clone the repository at +- Second, install the virtual env using these commands + ```python + conda create -n mmlab python=3.10 -y + conda activate mmlab + pip install torch==2.0.1+cu117 torchvision==0.15.2+cu117 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu117 + pip install -U openmim + mim install "mmcv>=2.0.0rc4,<2.2.0" + pip install mmdet + pip install -r requirements.txt + pip install "numpy<2" + conda install -c conda-forge cudatoolkit-dev + cd libs/models/layers/nms + pip install -e . + ``` +- Third, download the checkpoints and put them under a folder called 'checkpoints' + diff --git a/GEMstack/onboard/perception/lane_detection.py b/GEMstack/onboard/perception/lane_detection.py new file mode 100644 index 00000000..d382d26a --- /dev/null +++ b/GEMstack/onboard/perception/lane_detection.py @@ -0,0 +1,329 @@ + +from CLRerNet.libs.datasets.pipelines import Compose +from CLRerNet.libs.api.inference import get_prediction +from CS588AD.GEMstack.GEMstack.state.roadgraph import RoadgraphSimpleLane +import cv2 +import torch + +from mmdet.apis import init_detector + +from ...utils import settings +from ...state import AllState, VehicleState, ObjectPose, ObjectFrameEnum, Obstacle, ObstacleMaterialEnum, \ + ObstacleStateEnum +from ..interface.gem import GEMInterface +from ..component import Component +from .perception_utils import * +from ultralytics import YOLO +import cv2 +from typing import Dict +import open3d as o3d +import numpy as np +from scipy.spatial.transform import Rotation as R +import rospy +from sensor_msgs.msg import PointCloud2, Image +from message_filters import Subscriber, ApproximateTimeSynchronizer +from cv_bridge import CvBridge +import time +import os +import yaml + + +class LaneDetector3D(Component): + """ + Detects lanes by using CLRerNet and do ICM. + + Tracking is optional: set `enable_tracking=False` to disable persistent tracking + and return only detections from the current frame. + + Supports multiple cameras; each camera’s intrinsics and extrinsics are + loaded from a single YAML calibration file via plain PyYAML. 
+ """ + + def __init__( + self, + vehicle_interface: GEMInterface, + camera_name: str, + camera_calib_file: str, + enable_tracking: bool = True, + visualize_2d: bool = False, + use_cyl_roi: bool = False, + save_data: bool = True, + orientation: bool = True, + use_start_frame: bool = True, + **kwargs + ): + # Core interfaces and state + self.vehicle_interface = vehicle_interface + self.current_lanes = {} + # self.tracked_lanes = {} + self.lane_counter = 0 + self.latest_image = None + self.bridge = CvBridge() + self.start_pose_abs = None + self.start_time = None + + # Config flags + self.camera_name = camera_name + self.enable_tracking = enable_tracking + self.visualize_2d = visualize_2d + self.use_cyl_roi = use_cyl_roi + self.save_data = save_data + self.orientation = orientation + self.use_start_frame = use_start_frame + + # 2) Load camera intrinsics/extrinsics from the supplied YAML + with open(camera_calib_file, 'r') as f: + calib = yaml.safe_load(f) + + # Expect structure: + # cameras: + # front: + # K: [[...], [...], [...]] + # D: [...] + # T_l2c: [[...], ..., [...]] + cam_cfg = calib['cameras'][camera_name] + self.K = np.array(cam_cfg['K']) + self.D = np.array(cam_cfg['D']) + self.T_l2c = np.array(cam_cfg['T_l2c']) + self.T_l2v = np.array(cam_cfg['T_l2v']) + + + self.R = np.array(settings.get(f'calibration.{camera_name}_camera.extrinsics.rotation')) + self.T = np.array(settings.get(f'calibration.{camera_name}_camera.extrinsics.position')) + + # Derived transforms + + self.undistort_map1 = None + self.undistort_map2 = None + self.camera_front = (camera_name == 'front') + + def rate(self) -> float: + return 8 + + def state_inputs(self) -> list: + return ['vehicle'] + + def state_outputs(self) -> list: + return ['lanes'] + + def initialize(self): + # --- Determine the correct RGB topic for this camera --- + rgb_topic_map = { + 'front': '/oak/rgb/image_raw', + 'front_right': '/camera_fr/arena_camera_node/image_raw', + # add additional camera mappings here if needed + } + rgb_topic = rgb_topic_map.get( + self.camera_name, + f'/{self.camera_name}/rgb/image_raw' + ) + + # Subscribe to the RGB and LiDAR streams + self.rgb_sub = Subscriber(rgb_topic, Image) + self.sync = ApproximateTimeSynchronizer([ + self.rgb_sub, self.lidar_sub + ], queue_size=500, slop=0.03) + self.sync.registerCallback(self.synchronized_callback) + + # Initialize the YOLO detector + + self.detector = init_detector('CLRerNet/configs/lane_detection.py', 'CLRerNet/checkpoints/lane_detection.pth', device='cuda') + self.detector.to('cuda') + self.T_c2l = np.linalg.inv(self.T_l2c) + self.R_c2l = self.T_c2l[:3, :3] + + def synchronized_callback(self, image_msg, lidar_msg): + step1 = time.time() + try: + self.latest_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8") + except Exception as e: + rospy.logerr("Failed to convert image: {}".format(e)) + self.latest_image = None + step2 = time.time() + # print('image callback: ', step2 - step1, 'lidar callback ', step3 - step2) + + def undistort_image(self, image, K, D): + h, w = image.shape[:2] + newK, _ = cv2.getOptimalNewCameraMatrix(K, D, (w, h), 1, (w, h)) + if self.undistort_map1 is None or self.undistort_map2 is None: + self.undistort_map1, self.undistort_map2 = cv2.initUndistortRectifyMap(K, D, R=None, + newCameraMatrix=newK, size=(w, h), + m1type=cv2.CV_32FC1) + + start = time.time() + undistorted = cv2.remap(image, self.undistort_map1, self.undistort_map2, interpolation=cv2.INTER_NEAREST) + end = time.time() + # print('--------undistort', end-start) + return undistorted, 
newK + + def inference_one_imave(self, model, img): + """Inference on an image with the detector. + Args: + model (nn.Module): The loaded detector. + img_path (str): Image path. + Returns: + img (np.ndarray): Image data with shape (width, height, channel). + preds (List[np.ndarray]): Detected lanes. + """ + ori_shape = img.shape + data = dict( + sub_img_name=None, + img=img, + gt_points=[], + id_classes=[], + id_instances=[], + img_shape=ori_shape, + ori_shape=ori_shape, + ) + + cfg = model.cfg + model.bbox_head.test_cfg.as_lanes = False + + test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline) + + data = test_pipeline(data) + data_ = dict( + inputs=[data["inputs"]], + data_samples=[data["data_samples"]], + ) + + # forward the model + with torch.no_grad(): + results = model.test_step(data_) + + lanes = results[0]['lanes'] + preds = get_prediction(lanes, ori_shape[0], ori_shape[1]) + + return img, preds + + + def ipm_2d_to_3d(self, points_2d, K, R, T): + """ + Projects 2D image lane points to 3D ground-plane coordinates using IPM. + + Args: + points_2d: (N, 2) array of 2D points in image space (u, v) + K: (3, 3) camera intrinsic matrix + R: (3, 3) camera rotation matrix (world to camera) + T: (3, 1) camera translation vector (world to camera) + + Returns: + points_3d: (N, 3) array of 3D ground-plane points in world coordinates + """ + # Inverse camera extrinsics: camera-to-world transformation + Rt = np.hstack((R, T)) # (3,4) + cam_to_world = np.linalg.inv(np.vstack((Rt, [0, 0, 0, 1]))) # (4,4) + + # Inverse of camera intrinsics + K_inv = np.linalg.inv(K) + + points_3d = [] + for (u, v) in points_2d: + uv1 = np.array([u, v, 1.0]) + ray_cam = K_inv @ uv1 # normalized camera coords + ray_cam = ray_cam / np.linalg.norm(ray_cam) + + # Extend to homogeneous 4D ray direction + ray_dir_homo = np.append(ray_cam, [0.0]) # direction (no translation) + cam_origin_homo = np.array([0.0, 0.0, 0.0, 1.0]) # camera center + + # Transform ray to world coordinates + ray_origin_world = cam_to_world @ cam_origin_homo + ray_dir_world = cam_to_world @ ray_dir_homo + ray_dir_world = ray_dir_world[:3] + ray_origin_world = ray_origin_world[:3] + + # Intersect with ground plane Z=0: solve for t in O + tD → Z=0 + t = -ray_origin_world[2] / ray_dir_world[2] + point_ground = ray_origin_world + t * ray_dir_world + points_3d.append(point_ground) + + return np.array(points_3d) + + + def update(self, vehicle: VehicleState) -> Dict[str, RoadgraphSimpleLane]: + downsample = False + # Gate guards against data not being present for both sensors: + if self.latest_image is None or self.latest_lidar is None: + return {} + latest_image = self.latest_image.copy() + + # Set up current time variables + start = time.time() + current_time = self.vehicle_interface.time() + + if downsample: + lidar_down = downsample_points(self.latest_lidar, voxel_size=0.1) + else: + lidar_down = self.latest_lidar.copy() + + if self.start_time is None: + self.start_time = current_time + time_elapsed = current_time - self.start_time + + # Ensure data/ exists and build timestamp + # if self.save_data: + # self.save_sensor_data(vehicle=vehicle, latest_image=latest_image) + + if self.camera_front == False: + start = time.time() + undistorted_img, current_K = self.undistort_image(latest_image, self.K, self.D) + end = time.time() + # print('-------processing time undistort_image---', end -start) + self.current_K = current_K + orig_H, orig_W = undistorted_img.shape[:2] + + # --- Begin modifications for three-angle detection --- + img_normal = 
undistorted_img + else: + img_normal = latest_image.copy() + undistorted_img = latest_image.copy() + orig_H, orig_W = latest_image.shape[:2] + self.current_K = self.K + img, results_normal = self.inference_one_imave(self.detector, img_normal) + combined_lanes = [] + combined_lanes_3d = [] + if not self.enable_tracking: + self.lane_counter = 0 + + for lane in results_normal: + combined_lanes.append(lane) + combined_lanes_3d.append(self.ipm_2d_to_3d(lane, self.current_K, self.R, self.T)) + + # Visualize the received images in 2D with their corresponding labels + # It draws rectangles and labels on the images: + if getattr(self, 'visualize_2d', False): + for (x, y) in combined_lanes: + color = (0, 255, 0) + + # Radius of circle + radius = 2 + + # Blue color in BGR + color = (255, 0, 0) + + # Line thickness of 2 px + thickness = 2 + + # Using cv2.circle() method + # Draw a circle with blue line borders of thickness of 2 px + cv2.circle(undistorted_img, (x, y), radius, color, thickness) + cv2.imshow("Detection - Lane 2D", undistorted_img) + + # print('-------processing time1---', end -start) + + lanes = {f"Lane{index}": RoadgraphSimpleLane(points=lane) for index, lane in enumerate(combined_lanes_3d)} + self.current_lanes = lanes + + # If tracking not enabled, return only current frame detections + if not self.enable_tracking: + for lane_id, lane in self.current_lanes.items(): + p = lane + rospy.loginfo( + f"Lane ID: {lane_id}\n" + f"Points: {p}\n" + ) + end = time.time() + # print('-------processing time', end -start) + + return self.current_lanes diff --git a/GEMstack/state/roadgraph.py b/GEMstack/state/roadgraph.py index 670307d5..1f7a6eb0 100644 --- a/GEMstack/state/roadgraph.py +++ b/GEMstack/state/roadgraph.py @@ -88,6 +88,45 @@ def polyline(self) -> List[Tuple[float,float,float]]: return sum(self.segments,[]) +@dataclass +@register +class RoadgraphSimpleLane: + """A lane in the roadgrap represented by points. + + By convention, the left and right boundaries are oriented in back to + forward order. The end boundary is from right to left, and the start + boundary is from left to right. + """ + type : RoadgraphLaneEnum = RoadgraphLaneEnum.LANE # type of lane + surface : RoadgraphSurfaceEnum = RoadgraphSurfaceEnum.PAVEMENT # surface of lane + route_name : str = '' # name of the route (e.g., street name) that this lane is on + points : List[Tuple[float,float,float]] = field(default_factory=list) # points of the lane + + def to_frame(self, orig_frame : ObjectFrameEnum, new_frame : ObjectFrameEnum, + current_origin = None, global_origin = None) -> RoadgraphSimpleLane: + return replace(self,points=[convert_point(p,orig_frame,new_frame,current_origin,global_origin) for p in self.points]) + + def outline(self) -> List[Tuple[float,float,float]]: + """Produces a 2D outline of the lane, including elevation. + + The first point is the beginning-right of the lane, and the points + proceed CCW around the lane. + + If the begin and end are None, then straight lines are used to connect + the left and right boundaries. Otherwise, the begin and end curves + are used to connect the boundaries. 
+ """ + if self.left is None or self.right is None: + raise RuntimeError("Cannot produce outline of lane with missing left or right boundary") + points = self.right.polyline() + if self.end is not None: + points += self.end.polyline() + points += self.left.polyline()[::-1] + if self.begin is not None: + points += self.begin.polyline()[::-1] + return [key for key, _group in itertools.groupby(points)] + + @dataclass @register class RoadgraphLane: From 1a831155a95ed4463fb56155d1fe9bf660cbb4a1 Mon Sep 17 00:00:00 2001 From: Henry-YiW Date: Fri, 16 May 2025 10:57:36 -0500 Subject: [PATCH 2/6] clean the code further --- GEMstack/onboard/perception/lane_detection.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/GEMstack/onboard/perception/lane_detection.py b/GEMstack/onboard/perception/lane_detection.py index d382d26a..65954e50 100644 --- a/GEMstack/onboard/perception/lane_detection.py +++ b/GEMstack/onboard/perception/lane_detection.py @@ -36,7 +36,7 @@ class LaneDetector3D(Component): and return only detections from the current frame. Supports multiple cameras; each camera’s intrinsics and extrinsics are - loaded from a single YAML calibration file via plain PyYAML. + loaded from a single YAML calibration file via plain PyYAML and global settings. """ def __init__( @@ -87,7 +87,11 @@ def __init__( self.T_l2c = np.array(cam_cfg['T_l2c']) self.T_l2v = np.array(cam_cfg['T_l2v']) - + # Expect structure: + #front_camera: + # extrinsics: + # rotation: [[...], [...], [...]] + # position: [[...], [...], [...]] self.R = np.array(settings.get(f'calibration.{camera_name}_camera.extrinsics.rotation')) self.T = np.array(settings.get(f'calibration.{camera_name}_camera.extrinsics.position')) @@ -133,6 +137,9 @@ def initialize(self): self.R_c2l = self.T_c2l[:3, :3] def synchronized_callback(self, image_msg, lidar_msg): + ''' + Get the latest image. + ''' step1 = time.time() try: self.latest_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8") @@ -143,6 +150,9 @@ def synchronized_callback(self, image_msg, lidar_msg): # print('image callback: ', step2 - step1, 'lidar callback ', step3 - step2) def undistort_image(self, image, K, D): + ''' + Undistort the image using the camera intrinsic matrix and distortion coefficients. 
+ ''' h, w = image.shape[:2] newK, _ = cv2.getOptimalNewCameraMatrix(K, D, (w, h), 1, (w, h)) if self.undistort_map1 is None or self.undistort_map2 is None: @@ -294,8 +304,6 @@ def update(self, vehicle: VehicleState) -> Dict[str, RoadgraphSimpleLane]: # It draws rectangles and labels on the images: if getattr(self, 'visualize_2d', False): for (x, y) in combined_lanes: - color = (0, 255, 0) - # Radius of circle radius = 2 From bcf18d2457c4433ed2b71b007c6b4a315619b847 Mon Sep 17 00:00:00 2001 From: Henry-YiW Date: Fri, 16 May 2025 15:32:03 -0500 Subject: [PATCH 3/6] use my offboard testing code instead since it is not justified to use my customized RoadgraphSimpleLane --- GEMstack/offboard/lane_detection/README.md | 111 ++++++ .../offboard/lane_detection/lane_detection.py | 298 ++++++++++++++++ .../onboard/perception/CLRerNet/README.MD | 18 - GEMstack/onboard/perception/lane_detection.py | 337 ------------------ 4 files changed, 409 insertions(+), 355 deletions(-) create mode 100644 GEMstack/offboard/lane_detection/README.md create mode 100644 GEMstack/offboard/lane_detection/lane_detection.py delete mode 100644 GEMstack/onboard/perception/CLRerNet/README.MD delete mode 100644 GEMstack/onboard/perception/lane_detection.py diff --git a/GEMstack/offboard/lane_detection/README.md b/GEMstack/offboard/lane_detection/README.md new file mode 100644 index 00000000..ce267749 --- /dev/null +++ b/GEMstack/offboard/lane_detection/README.md @@ -0,0 +1,111 @@ +# Lane Detection and 3D Reconstruction with CLRerNet + +This project detects 2D lane lines from camera images using [CLRerNet](https://github.com/naver/clrernet) and projects them to 3D ground coordinates using Inverse Perspective Mapping (IPM). The final 3D lane points are saved in `.ply` and `.npy` formats for visualization and downstream tasks. + +## Features + +- Lane detection using MMDetection-compatible CLRerNet +- Image undistortion using camera intrinsics +- 2D to 3D projection using inverse camera model +- Per-image 2D visualization with overlayed lane predictions +- Exports `.ply` point cloud and `.npy` array of all 3D lanes + +## Installation + +First, clone this repository and install dependencies: + +```bash +git clone https://github.com/naver/clrernet.git +cd clrernet +conda create -n mmlab python=3.10 -y +conda activate mmlab +pip install torch==2.0.1+cu117 torchvision==0.15.2+cu117 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu117 +pip install -U openmim +mim install "mmcv>=2.0.0rc4,<2.2.0" +pip install mmdet +pip install -r requirements.txt +pip install "numpy<2" +conda install -c conda-forge cudatoolkit-dev +cd libs/models/layers/nms +pip install -e . +``` + +Then, make sure your project directory has the following structure: +download the checkpoints and put them under a folder called 'checkpoints' +``` +your_project/ +├── run_lane_detection.py # The main lane detection script +├── clrernet/ # The cloned CLRerNet repo + └── checkpoints # The checkpoint folder +├── image_data/ +│ └── sample1.jpg # Example input images +└── output/ + └── ... # Outputs will be written here +``` + +## Configuration + +Ensure your `settings.py` file provides the following parameters for each camera type used: + +```python +settings = { + "calibration": { + "front_right_camera": { + "intrinsics": { + "focal": [fx, fy], + "center": [cx, cy], + "skew": 0.0, + "distort": [k1, k2, p1, p2, k3] + }, + "extrinsics": { + "rotation": [[...], [...], [...]], + "position": [x, y, z] + } + }, + ... 
+ } +} +``` + +## Usage + +Run the lane detection and projection pipeline: + +```bash +python run_lane_detection.py \ + --image_folder image_data \ + --output_folder output \ + --output_file_prefix sample \ + --camera_type front_right \ + --config CLRerNet/configs/lane_detection.py \ + --checkpoint CLRerNet/checkpoints/lane_detection.pth \ + --device cuda:0 +``` + +### Arguments + +| Argument | Description | +|----------|-------------| +| `--image_folder` | Folder with input images | +| `--output_folder` | Output directory for saving results | +| `--output_file_prefix` | Prefix for output filenames | +| `--camera_type` | Camera ID: `fr`, `fl`, `rr`, `rl` | +| `--config` | Path to the CLRerNet config file | +| `--checkpoint` | Path to model checkpoint | +| `--device` | PyTorch device (e.g., `cuda:0`, `cpu`) | + +## Output + +- `output/image_name_with_detected_lanes.png`: Visualizations with 2D lane overlays +- `output/sample_all_lanes_3d.ply`: 3D point cloud of projected lane points +- `output/sample_all_lanes_3d.npy`: Numpy array of all 3D lane points + +## Acknowledgments + +- [CLRerNet](https://github.com/naver/clrernet) +- [OpenCV](https://opencv.org/) +- [Open3D](http://www.open3d.org/) + + +**Author**: Henry Yi +**License**: MIT diff --git a/GEMstack/offboard/lane_detection/lane_detection.py b/GEMstack/offboard/lane_detection/lane_detection.py new file mode 100644 index 00000000..85205c6f --- /dev/null +++ b/GEMstack/offboard/lane_detection/lane_detection.py @@ -0,0 +1,298 @@ + +import argparse +from CLRerNet.libs.datasets.pipelines import Compose +from CLRerNet.libs.api.inference import get_prediction +from CLRerNet.libs.utils.visualizer import visualize_lanes +import cv2 +import open3d as o3d +import numpy as np +import os +import cv2 +import torch + +from mmdet.apis import init_detector + +from ...utils import settings + + +def undistort_image(image, K, D): + ''' + Undistort the image using the camera intrinsic matrix and distortion coefficients. + ''' + h, w = image.shape[:2] + newK, _ = cv2.getOptimalNewCameraMatrix(K, D, (w, h), 1, (w, h)) + undistort_map1, undistort_map2 = cv2.initUndistortRectifyMap(K, D, R=None, + newCameraMatrix=newK, size=(w, h), + m1type=cv2.CV_32FC1) + undistorted = cv2.remap(image, undistort_map1, undistort_map2, interpolation=cv2.INTER_NEAREST) + return undistorted, newK + +def inference_one_imave(model, img): + """Inference on an image with the detector. + Args: + model (nn.Module): The loaded detector. + img_path (str): Image path. + Returns: + img (np.ndarray): Image data with shape (width, height, channel). + preds (List[np.ndarray]): Detected lanes. + """ + ori_shape = img.shape + data = dict( + sub_img_name=None, + img=img, + gt_points=[], + id_classes=[], + id_instances=[], + img_shape=ori_shape, + ori_shape=ori_shape, + ) + + cfg = model.cfg + model.bbox_head.test_cfg.as_lanes = False + + test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline) + + data = test_pipeline(data) + data_ = dict( + inputs=[data["inputs"]], + data_samples=[data["data_samples"]], + ) + + # forward the model + with torch.no_grad(): + results = model.test_step(data_) + + lanes = results[0]['lanes'] + preds = get_prediction(lanes, ori_shape[0], ori_shape[1]) + + return img, preds + + +def ipm_2d_to_3d(points_2d, K, R, T): + """ + Projects 2D image lane points to 3D ground-plane coordinates using IPM. 
+ + Args: + points_2d: (N, 2) array of 2D points in image space (u, v) + K: (3, 3) camera intrinsic matrix + R: (3, 3) camera rotation matrix (world to camera) + T: (3, 1) camera translation vector (world to camera) + + Returns: + points_3d: (N, 3) array of 3D ground-plane points in world coordinates + """ + # Inverse camera extrinsics: camera-to-world transformation + Rt = np.hstack((R, T)) # (3,4) + cam_to_world = np.linalg.inv(np.vstack((Rt, [0, 0, 0, 1]))) # (4,4) + + # Inverse of camera intrinsics + K_inv = np.linalg.inv(K) + + points_3d = [] + for (u, v) in points_2d: + uv1 = np.array([u, v, 1.0]) + ray_cam = K_inv @ uv1 # normalized camera coords + ray_cam = ray_cam / np.linalg.norm(ray_cam) + + # Extend to homogeneous 4D ray direction + ray_dir_homo = np.append(ray_cam, [0.0]) # direction (no translation) + cam_origin_homo = np.array([0.0, 0.0, 0.0, 1.0]) # camera center + + # Transform ray to world coordinates + ray_origin_world = cam_to_world @ cam_origin_homo + ray_dir_world = cam_to_world @ ray_dir_homo + ray_dir_world = ray_dir_world[:3] + ray_origin_world = ray_origin_world[:3] + + # Intersect with ground plane Z=0: solve for t in O + tD → Z=0 + t = -ray_origin_world[2] / ray_dir_world[2] + point_ground = ray_origin_world + t * ray_dir_world + points_3d.append(point_ground) + + return np.array(points_3d) + +def detect_lanes(model, img, K, D, R, T, save_path): + undistorted_img, current_K = undistort_image(img, K, D) + resized_img = cv2.resize(undistorted_img, (1640, 590)) + img, results_normal = inference_one_imave(model, resized_img) + combined_lanes = [] + combined_lanes_3d = [] + for lane in results_normal: + combined_lanes.append(lane) + combined_lanes_3d.append(ipm_2d_to_3d(lane, current_K, R, T)) + + visualize_lanes(img, combined_lanes, save_path=save_path) + return combined_lanes_3d + +def get_camera_intrinsic_matrix(camera_type): + ''' + Get the camera intrinsic matrix for the given camera type + ''' + if camera_type == "front_right" or camera_type == "fr": + # Provided intrinsics + focal = settings.get("calibration.front_right_camera.intrinsics.focal") # fx, fy + center = settings.get("calibration.front_right_camera.intrinsics.center") # cx, cy + fr_cam_distort = settings.get("calibration.front_right_camera.intrinsics.distort") + skew = settings.get("calibration.front_right_camera.intrinsics.skew") + + fx, fy = focal + cx, cy = center + + # Build K matrix + fr_cam_K = np.array([ + [fx, skew, cx], + [0, fy, cy], + [0, 0, 1] + ]) + return fr_cam_K, np.array(fr_cam_distort) + elif camera_type == "rear_right" or camera_type == "rr": + # Provided intrinsics + focal = settings.get("calibration.rear_right_camera.intrinsics.focal") # fx, fy + center = settings.get("calibration.rear_right_camera.intrinsics.center") # cx, cy + rr_cam_distort = settings.get("calibration.rear_right_camera.intrinsics.distort") + skew = settings.get("calibration.rear_right_camera.intrinsics.skew") + + fx, fy = focal + cx, cy = center + + # Build K matrix + rr_cam_K = np.array([ + [fx, skew, cx], + [0, fy, cy], + [0, 0, 1] + ]) + return rr_cam_K, np.array(rr_cam_distort) + elif camera_type == "front_left" or camera_type == "fl": + # Provided intrinsics + focal = settings.get("calibration.front_left_camera.intrinsics.focal") # fx, fy + center = settings.get("calibration.front_left_camera.intrinsics.center") # cx, cy + fl_cam_distort = settings.get("calibration.front_left_camera.intrinsics.distort") + skew = settings.get("calibration.front_left_camera.intrinsics.skew") + + fx, fy = focal + cx, cy = 
center + + # Build K matrix + fl_cam_K = np.array([ + [fx, skew, cx], + [0, fy, cy], + [0, 0, 1] + ]) + return fl_cam_K, np.array(fl_cam_distort) + elif camera_type == "rear_left" or camera_type == "rl": + # Provided intrinsics + focal = settings.get("calibration.rear_left_camera.intrinsics.focal") # fx, fy + center = settings.get("calibration.rear_left_camera.intrinsics.center") # cx, cy + rl_cam_distort = settings.get("calibration.rear_left_camera.intrinsics.distort") + skew = settings.get("calibration.rear_left_camera.intrinsics.skew") + + fx, fy = focal + cx, cy = center + + # Build K matrix + rl_cam_K = np.array([ + [fx, skew, cx], + [0, fy, cy], + [0, 0, 1] + ]) + return rl_cam_K, np.array(rl_cam_distort) + else: + raise ValueError(f"Camera type {camera_type} not supported") + +def get_camera_extrinsic_matrix(camera_type): + ''' + Get the camera extrinsic matrix for the given camera type + ''' + if camera_type == "front_right" or camera_type == "fr": + # From the settings + rotation = np.array(settings.get("calibration.front_right_camera.extrinsics.rotation")) + + translation = np.array(settings.get("calibration.front_right_camera.extrinsics.position")) + + # Build 4x4 homogeneous transformation matrix + fr_to_vehicle = np.eye(4) + fr_to_vehicle[:3, :3] = rotation + fr_to_vehicle[:3, 3] = translation + return fr_to_vehicle + elif camera_type == "rear_right" or camera_type == "rr": + # From the settings + rotation = np.array(settings.get("calibration.rear_right_camera.extrinsics.rotation")) + + translation = np.array(settings.get("calibration.rear_right_camera.extrinsics.position")) + + # Build 4x4 homogeneous transformation matrix + rr_to_vehicle = np.eye(4) + rr_to_vehicle[:3, :3] = rotation + rr_to_vehicle[:3, 3] = translation + return rr_to_vehicle + elif camera_type == "front_left" or camera_type == "fl": + rotation = np.array(settings.get("calibration.front_left_camera.extrinsics.rotation")) + translation = np.array(settings.get("calibration.front_left_camera.extrinsics.position")) + + # Build 4x4 homogeneous transformation matrix + fl_to_vehicle = np.eye(4) + fl_to_vehicle[:3, :3] = rotation + fl_to_vehicle[:3, 3] = translation + return fl_to_vehicle + elif camera_type == "rear_left" or camera_type == "rl": + rotation = np.array(settings.get("calibration.rear_left_camera.extrinsics.rotation")) + translation = np.array(settings.get("calibration.rear_left_camera.extrinsics.position")) + + # Build 4x4 homogeneous transformation matrix + rl_to_vehicle = np.eye(4) + rl_to_vehicle[:3, :3] = rotation + rl_to_vehicle[:3, 3] = translation + return rl_to_vehicle + else: + raise ValueError(f"Camera type {camera_type} not supported") + +def add_args(parser): + parser.add_argument('--image_folder', type=str, help='Path to the folder containing the images') + parser.add_argument('--output_folder', type=str, help='Path to the folder to save the output') + parser.add_argument('--output_file_prefix', type=str, help='Prefix of the output file') + parser.add_argument('--camera_type', type=str, choices=['fr', 'fl', 'rr', 'rl', 'front_right', 'rear_right', 'front_left', 'rear_left'], default='front_right', help='Camera type to load the camera intrinsic and extrinsic matrix') + parser.add_argument('--config', type=str, default='clrernet/configs/lane_detection.py', help='Path to the config file') + parser.add_argument('--checkpoint', type=str, default='clrernet/checkpoints/lane_detection.pth', help='Path to the checkpoint file') + parser.add_argument('--device', type=str, default='cuda:0', 
help='Device to run the model on') + +def run_lane_detection(): + ''' + Run the lane detection pipeline + ''' + parser = argparse.ArgumentParser() + add_args(parser) + args = parser.parse_args() + + K, D = get_camera_intrinsic_matrix(args.camera_type) + extrinsic_matrix = get_camera_extrinsic_matrix(args.camera_type) + R = extrinsic_matrix[:3, :3] + T = extrinsic_matrix[:3, 3] + + model = init_detector(args.config, args.checkpoint, device=args.device) + image_folder = args.image_folder + output_folder = args.output_folder + inputfiles = [os.path.join(image_folder, f) for f in os.listdir(image_folder) if f.lower().endswith(('.jpg', '.jpeg', '.png'))] + + predicted_lanes = [] + for img_path in inputfiles: + img = cv2.imread(img_path) + img_name = os.path.basename(img_path) + save_path = os.path.join(output_folder, f'{img_name.split(".")[0]}_with_detected_lanes.png') + os.makedirs(os.path.dirname(save_path), exist_ok=True) + predicted_lanes.append(detect_lanes(model, img, K, D, R, T, save_path)) + + # Convert predicted_lanes to a single array of 3D points + all_lanes_3d = np.concatenate(predicted_lanes, axis=0) + + # Save all_lanes_3d to a .ply file + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(all_lanes_3d) + o3d.io.write_point_cloud(os.path.join(output_folder, f'{args.output_file_prefix}_all_lanes_3d.ply'), pcd) + + # save numpy array + np.save(os.path.join(output_folder, f'{args.output_file_prefix}_all_lanes_3d.npy'), all_lanes_3d) + + +if __name__ == '__main__': + run_lane_detection() + diff --git a/GEMstack/onboard/perception/CLRerNet/README.MD b/GEMstack/onboard/perception/CLRerNet/README.MD deleted file mode 100644 index 202d0b6d..00000000 --- a/GEMstack/onboard/perception/CLRerNet/README.MD +++ /dev/null @@ -1,18 +0,0 @@ -### To Use Lane Detection, You Need to first install the CLRerNet here -- First, clone the repository at -- Second, install the virtual env using these commands - ```python - conda create -n mmlab python=3.10 -y - conda activate mmlab - pip install torch==2.0.1+cu117 torchvision==0.15.2+cu117 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu117 - pip install -U openmim - mim install "mmcv>=2.0.0rc4,<2.2.0" - pip install mmdet - pip install -r requirements.txt - pip install "numpy<2" - conda install -c conda-forge cudatoolkit-dev - cd libs/models/layers/nms - pip install -e . 
- ``` -- Third, download the checkpoints and put them under a folder called 'checkpoints' - diff --git a/GEMstack/onboard/perception/lane_detection.py b/GEMstack/onboard/perception/lane_detection.py deleted file mode 100644 index 65954e50..00000000 --- a/GEMstack/onboard/perception/lane_detection.py +++ /dev/null @@ -1,337 +0,0 @@ - -from CLRerNet.libs.datasets.pipelines import Compose -from CLRerNet.libs.api.inference import get_prediction -from CS588AD.GEMstack.GEMstack.state.roadgraph import RoadgraphSimpleLane -import cv2 -import torch - -from mmdet.apis import init_detector - -from ...utils import settings -from ...state import AllState, VehicleState, ObjectPose, ObjectFrameEnum, Obstacle, ObstacleMaterialEnum, \ - ObstacleStateEnum -from ..interface.gem import GEMInterface -from ..component import Component -from .perception_utils import * -from ultralytics import YOLO -import cv2 -from typing import Dict -import open3d as o3d -import numpy as np -from scipy.spatial.transform import Rotation as R -import rospy -from sensor_msgs.msg import PointCloud2, Image -from message_filters import Subscriber, ApproximateTimeSynchronizer -from cv_bridge import CvBridge -import time -import os -import yaml - - -class LaneDetector3D(Component): - """ - Detects lanes by using CLRerNet and do ICM. - - Tracking is optional: set `enable_tracking=False` to disable persistent tracking - and return only detections from the current frame. - - Supports multiple cameras; each camera’s intrinsics and extrinsics are - loaded from a single YAML calibration file via plain PyYAML and global settings. - """ - - def __init__( - self, - vehicle_interface: GEMInterface, - camera_name: str, - camera_calib_file: str, - enable_tracking: bool = True, - visualize_2d: bool = False, - use_cyl_roi: bool = False, - save_data: bool = True, - orientation: bool = True, - use_start_frame: bool = True, - **kwargs - ): - # Core interfaces and state - self.vehicle_interface = vehicle_interface - self.current_lanes = {} - # self.tracked_lanes = {} - self.lane_counter = 0 - self.latest_image = None - self.bridge = CvBridge() - self.start_pose_abs = None - self.start_time = None - - # Config flags - self.camera_name = camera_name - self.enable_tracking = enable_tracking - self.visualize_2d = visualize_2d - self.use_cyl_roi = use_cyl_roi - self.save_data = save_data - self.orientation = orientation - self.use_start_frame = use_start_frame - - # 2) Load camera intrinsics/extrinsics from the supplied YAML - with open(camera_calib_file, 'r') as f: - calib = yaml.safe_load(f) - - # Expect structure: - # cameras: - # front: - # K: [[...], [...], [...]] - # D: [...] 
- # T_l2c: [[...], ..., [...]] - cam_cfg = calib['cameras'][camera_name] - self.K = np.array(cam_cfg['K']) - self.D = np.array(cam_cfg['D']) - self.T_l2c = np.array(cam_cfg['T_l2c']) - self.T_l2v = np.array(cam_cfg['T_l2v']) - - # Expect structure: - #front_camera: - # extrinsics: - # rotation: [[...], [...], [...]] - # position: [[...], [...], [...]] - self.R = np.array(settings.get(f'calibration.{camera_name}_camera.extrinsics.rotation')) - self.T = np.array(settings.get(f'calibration.{camera_name}_camera.extrinsics.position')) - - # Derived transforms - - self.undistort_map1 = None - self.undistort_map2 = None - self.camera_front = (camera_name == 'front') - - def rate(self) -> float: - return 8 - - def state_inputs(self) -> list: - return ['vehicle'] - - def state_outputs(self) -> list: - return ['lanes'] - - def initialize(self): - # --- Determine the correct RGB topic for this camera --- - rgb_topic_map = { - 'front': '/oak/rgb/image_raw', - 'front_right': '/camera_fr/arena_camera_node/image_raw', - # add additional camera mappings here if needed - } - rgb_topic = rgb_topic_map.get( - self.camera_name, - f'/{self.camera_name}/rgb/image_raw' - ) - - # Subscribe to the RGB and LiDAR streams - self.rgb_sub = Subscriber(rgb_topic, Image) - self.sync = ApproximateTimeSynchronizer([ - self.rgb_sub, self.lidar_sub - ], queue_size=500, slop=0.03) - self.sync.registerCallback(self.synchronized_callback) - - # Initialize the YOLO detector - - self.detector = init_detector('CLRerNet/configs/lane_detection.py', 'CLRerNet/checkpoints/lane_detection.pth', device='cuda') - self.detector.to('cuda') - self.T_c2l = np.linalg.inv(self.T_l2c) - self.R_c2l = self.T_c2l[:3, :3] - - def synchronized_callback(self, image_msg, lidar_msg): - ''' - Get the latest image. - ''' - step1 = time.time() - try: - self.latest_image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8") - except Exception as e: - rospy.logerr("Failed to convert image: {}".format(e)) - self.latest_image = None - step2 = time.time() - # print('image callback: ', step2 - step1, 'lidar callback ', step3 - step2) - - def undistort_image(self, image, K, D): - ''' - Undistort the image using the camera intrinsic matrix and distortion coefficients. - ''' - h, w = image.shape[:2] - newK, _ = cv2.getOptimalNewCameraMatrix(K, D, (w, h), 1, (w, h)) - if self.undistort_map1 is None or self.undistort_map2 is None: - self.undistort_map1, self.undistort_map2 = cv2.initUndistortRectifyMap(K, D, R=None, - newCameraMatrix=newK, size=(w, h), - m1type=cv2.CV_32FC1) - - start = time.time() - undistorted = cv2.remap(image, self.undistort_map1, self.undistort_map2, interpolation=cv2.INTER_NEAREST) - end = time.time() - # print('--------undistort', end-start) - return undistorted, newK - - def inference_one_imave(self, model, img): - """Inference on an image with the detector. - Args: - model (nn.Module): The loaded detector. - img_path (str): Image path. - Returns: - img (np.ndarray): Image data with shape (width, height, channel). - preds (List[np.ndarray]): Detected lanes. 
- """ - ori_shape = img.shape - data = dict( - sub_img_name=None, - img=img, - gt_points=[], - id_classes=[], - id_instances=[], - img_shape=ori_shape, - ori_shape=ori_shape, - ) - - cfg = model.cfg - model.bbox_head.test_cfg.as_lanes = False - - test_pipeline = Compose(cfg.test_dataloader.dataset.pipeline) - - data = test_pipeline(data) - data_ = dict( - inputs=[data["inputs"]], - data_samples=[data["data_samples"]], - ) - - # forward the model - with torch.no_grad(): - results = model.test_step(data_) - - lanes = results[0]['lanes'] - preds = get_prediction(lanes, ori_shape[0], ori_shape[1]) - - return img, preds - - - def ipm_2d_to_3d(self, points_2d, K, R, T): - """ - Projects 2D image lane points to 3D ground-plane coordinates using IPM. - - Args: - points_2d: (N, 2) array of 2D points in image space (u, v) - K: (3, 3) camera intrinsic matrix - R: (3, 3) camera rotation matrix (world to camera) - T: (3, 1) camera translation vector (world to camera) - - Returns: - points_3d: (N, 3) array of 3D ground-plane points in world coordinates - """ - # Inverse camera extrinsics: camera-to-world transformation - Rt = np.hstack((R, T)) # (3,4) - cam_to_world = np.linalg.inv(np.vstack((Rt, [0, 0, 0, 1]))) # (4,4) - - # Inverse of camera intrinsics - K_inv = np.linalg.inv(K) - - points_3d = [] - for (u, v) in points_2d: - uv1 = np.array([u, v, 1.0]) - ray_cam = K_inv @ uv1 # normalized camera coords - ray_cam = ray_cam / np.linalg.norm(ray_cam) - - # Extend to homogeneous 4D ray direction - ray_dir_homo = np.append(ray_cam, [0.0]) # direction (no translation) - cam_origin_homo = np.array([0.0, 0.0, 0.0, 1.0]) # camera center - - # Transform ray to world coordinates - ray_origin_world = cam_to_world @ cam_origin_homo - ray_dir_world = cam_to_world @ ray_dir_homo - ray_dir_world = ray_dir_world[:3] - ray_origin_world = ray_origin_world[:3] - - # Intersect with ground plane Z=0: solve for t in O + tD → Z=0 - t = -ray_origin_world[2] / ray_dir_world[2] - point_ground = ray_origin_world + t * ray_dir_world - points_3d.append(point_ground) - - return np.array(points_3d) - - - def update(self, vehicle: VehicleState) -> Dict[str, RoadgraphSimpleLane]: - downsample = False - # Gate guards against data not being present for both sensors: - if self.latest_image is None or self.latest_lidar is None: - return {} - latest_image = self.latest_image.copy() - - # Set up current time variables - start = time.time() - current_time = self.vehicle_interface.time() - - if downsample: - lidar_down = downsample_points(self.latest_lidar, voxel_size=0.1) - else: - lidar_down = self.latest_lidar.copy() - - if self.start_time is None: - self.start_time = current_time - time_elapsed = current_time - self.start_time - - # Ensure data/ exists and build timestamp - # if self.save_data: - # self.save_sensor_data(vehicle=vehicle, latest_image=latest_image) - - if self.camera_front == False: - start = time.time() - undistorted_img, current_K = self.undistort_image(latest_image, self.K, self.D) - end = time.time() - # print('-------processing time undistort_image---', end -start) - self.current_K = current_K - orig_H, orig_W = undistorted_img.shape[:2] - - # --- Begin modifications for three-angle detection --- - img_normal = undistorted_img - else: - img_normal = latest_image.copy() - undistorted_img = latest_image.copy() - orig_H, orig_W = latest_image.shape[:2] - self.current_K = self.K - img, results_normal = self.inference_one_imave(self.detector, img_normal) - combined_lanes = [] - combined_lanes_3d = [] - if not 
self.enable_tracking: - self.lane_counter = 0 - - for lane in results_normal: - combined_lanes.append(lane) - combined_lanes_3d.append(self.ipm_2d_to_3d(lane, self.current_K, self.R, self.T)) - - # Visualize the received images in 2D with their corresponding labels - # It draws rectangles and labels on the images: - if getattr(self, 'visualize_2d', False): - for (x, y) in combined_lanes: - # Radius of circle - radius = 2 - - # Blue color in BGR - color = (255, 0, 0) - - # Line thickness of 2 px - thickness = 2 - - # Using cv2.circle() method - # Draw a circle with blue line borders of thickness of 2 px - cv2.circle(undistorted_img, (x, y), radius, color, thickness) - cv2.imshow("Detection - Lane 2D", undistorted_img) - - # print('-------processing time1---', end -start) - - lanes = {f"Lane{index}": RoadgraphSimpleLane(points=lane) for index, lane in enumerate(combined_lanes_3d)} - self.current_lanes = lanes - - # If tracking not enabled, return only current frame detections - if not self.enable_tracking: - for lane_id, lane in self.current_lanes.items(): - p = lane - rospy.loginfo( - f"Lane ID: {lane_id}\n" - f"Points: {p}\n" - ) - end = time.time() - # print('-------processing time', end -start) - - return self.current_lanes From f9768a8dc926f7125d8d56a3ab8a2bb5b19ab839 Mon Sep 17 00:00:00 2001 From: Henry-YiW Date: Fri, 16 May 2025 15:37:51 -0500 Subject: [PATCH 4/6] use my offboard testing code instead since it is not justified to use my customized RoadgraphSimpleLane --- GEMstack/offboard/lane_detection/._README.md | Bin 0 -> 4096 bytes GEMstack/offboard/lane_detection/README.md | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 GEMstack/offboard/lane_detection/._README.md diff --git a/GEMstack/offboard/lane_detection/._README.md b/GEMstack/offboard/lane_detection/._README.md new file mode 100644 index 0000000000000000000000000000000000000000..bafc2652b8c23a5c115e554ef6b0dea11bf457c0 GIT binary patch literal 4096 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFz{^v(m+1nBL)UWIUt(=a103vf+zv$ zV3+~K+-O=D5#plB`MG+D1qC^&dId%KWvO|IdC92^j7$vbXEyyeU9;~xn$}V7Xb6mk zz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDU=IOMXA^|MKrSRBvsj@hwK%`DC^=Oj yEx#yRAv3QeHLoNyKQA#Sr&1v&HLXM;DJL;68`u|y>Kf7%s{i3$kztVg{~rJ_`6*=p literal 0 HcmV?d00001 diff --git a/GEMstack/offboard/lane_detection/README.md b/GEMstack/offboard/lane_detection/README.md index ce267749..68820578 100644 --- a/GEMstack/offboard/lane_detection/README.md +++ b/GEMstack/offboard/lane_detection/README.md @@ -34,7 +34,7 @@ Then, make sure your project directory has the following structure: download the checkpoints and put them under a folder called 'checkpoints' ``` your_project/ -├── run_lane_detection.py # The main lane detection script +├── lane_detection.py # The main lane detection script ├── clrernet/ # The cloned CLRerNet repo └── checkpoints # The checkpoint folder ├── image_data/ From 81fda6b2eed7a7c3eb5b9430b6c31fad77ca0224 Mon Sep 17 00:00:00 2001 From: Henry-YiW Date: Fri, 16 May 2025 15:39:22 -0500 Subject: [PATCH 5/6] use my offboard testing code instead since it is not justified to use my customized RoadgraphSimpleLane --- GEMstack/offboard/lane_detection/._README.md | Bin 4096 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 GEMstack/offboard/lane_detection/._README.md diff --git a/GEMstack/offboard/lane_detection/._README.md b/GEMstack/offboard/lane_detection/._README.md deleted file mode 100644 index bafc2652b8c23a5c115e554ef6b0dea11bf457c0..0000000000000000000000000000000000000000 GIT binary patch 
literal 0 HcmV?d00001 literal 4096 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFz{^v(m+1nBL)UWIUt(=a103vf+zv$ zV3+~K+-O=D5#plB`MG+D1qC^&dId%KWvO|IdC92^j7$vbXEyyeU9;~xn$}V7Xb6mk zz-S1JhQMeDjE2By2#kinXb6mkz-S1JhQMeDU=IOMXA^|MKrSRBvsj@hwK%`DC^=Oj yEx#yRAv3QeHLoNyKQA#Sr&1v&HLXM;DJL;68`u|y>Kf7%s{i3$kztVg{~rJ_`6*=p From 23cd57ea53dbce6dbcf01e3c7ffd44905d525930 Mon Sep 17 00:00:00 2001 From: Henry-YiW Date: Fri, 16 May 2025 15:42:28 -0500 Subject: [PATCH 6/6] delete my RoadgraphSimpleLane in state --- GEMstack/state/roadgraph.py | 39 ------------------------------------- 1 file changed, 39 deletions(-) diff --git a/GEMstack/state/roadgraph.py b/GEMstack/state/roadgraph.py index 1f7a6eb0..670307d5 100644 --- a/GEMstack/state/roadgraph.py +++ b/GEMstack/state/roadgraph.py @@ -88,45 +88,6 @@ def polyline(self) -> List[Tuple[float,float,float]]: return sum(self.segments,[]) -@dataclass -@register -class RoadgraphSimpleLane: - """A lane in the roadgrap represented by points. - - By convention, the left and right boundaries are oriented in back to - forward order. The end boundary is from right to left, and the start - boundary is from left to right. - """ - type : RoadgraphLaneEnum = RoadgraphLaneEnum.LANE # type of lane - surface : RoadgraphSurfaceEnum = RoadgraphSurfaceEnum.PAVEMENT # surface of lane - route_name : str = '' # name of the route (e.g., street name) that this lane is on - points : List[Tuple[float,float,float]] = field(default_factory=list) # points of the lane - - def to_frame(self, orig_frame : ObjectFrameEnum, new_frame : ObjectFrameEnum, - current_origin = None, global_origin = None) -> RoadgraphSimpleLane: - return replace(self,points=[convert_point(p,orig_frame,new_frame,current_origin,global_origin) for p in self.points]) - - def outline(self) -> List[Tuple[float,float,float]]: - """Produces a 2D outline of the lane, including elevation. - - The first point is the beginning-right of the lane, and the points - proceed CCW around the lane. - - If the begin and end are None, then straight lines are used to connect - the left and right boundaries. Otherwise, the begin and end curves - are used to connect the boundaries. - """ - if self.left is None or self.right is None: - raise RuntimeError("Cannot produce outline of lane with missing left or right boundary") - points = self.right.polyline() - if self.end is not None: - points += self.end.polyline() - points += self.left.polyline()[::-1] - if self.begin is not None: - points += self.begin.polyline()[::-1] - return [key for key, _group in itertools.groupby(points)] - - @dataclass @register class RoadgraphLane:
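As a quick offline sanity check of the IPM step introduced in `lane_detection.py`, the sketch below mirrors the ray/ground-plane intersection from `ipm_2d_to_3d` and round-trips a known ground point through a toy downward-looking camera. The intrinsics, camera pose, and helper name are illustrative assumptions, not part of the patch; `T` is reshaped to a column vector to match the `(3, 1)` shape the function's docstring expects, even when a flat `(3,)` vector is supplied.

```python
import numpy as np

def ipm_ground_point(uv, K, R, T):
    """Mirror of the intersection in ipm_2d_to_3d; R, T map world -> camera."""
    Rt = np.hstack((R, T.reshape(3, 1)))                        # (3, 4) extrinsics
    cam_to_world = np.linalg.inv(np.vstack((Rt, [0, 0, 0, 1])))
    ray_cam = np.linalg.inv(K) @ np.array([uv[0], uv[1], 1.0])  # pixel -> camera ray
    ray_world = (cam_to_world @ np.append(ray_cam, 0.0))[:3]    # direction only
    origin_world = (cam_to_world @ np.array([0.0, 0.0, 0.0, 1.0]))[:3]
    t = -origin_world[2] / ray_world[2]                         # hit the Z = 0 plane
    return origin_world + t * ray_world

# Toy setup (illustrative values): camera 2 m above the ground, looking straight down.
K = np.array([[800.0, 0.0, 640.0],
              [0.0, 800.0, 360.0],
              [0.0, 0.0, 1.0]])
R = np.diag([1.0, -1.0, -1.0])           # world -> camera rotation
T = np.array([0.0, 0.0, 2.0])            # world -> camera translation

ground_pt = np.array([3.0, -1.5, 0.0])   # known point on the ground plane
p_cam = R @ ground_pt + T                # forward-project with the same camera model
uv_h = K @ p_cam
uv = uv_h[:2] / uv_h[2]
recovered = ipm_ground_point(uv, K, R, T)
print(uv, recovered)                     # recovered should match ground_pt
```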