diff --git a/lib/src/isolates/child.dart b/lib/src/isolates/child.dart index e08a358..7309279 100644 --- a/lib/src/isolates/child.dart +++ b/lib/src/isolates/child.dart @@ -5,7 +5,6 @@ import "dart:io"; import "package:burt_network/burt_network.dart"; import "package:dartcv4/dartcv.dart"; import "package:typed_isolate/typed_isolate.dart"; -import "package:video/src/targeting/frame_properties.dart"; import "package:video/video.dart"; /// The maximum size of a UDP packet, in bytes (minus a few to be safe). @@ -40,6 +39,12 @@ abstract class CameraIsolate extends IsolateChild /// Holds the current details of the camera. final CameraDetails details; + + /// The Aruco detector for detecting markers in an RGB video image + late final RoverArucoDetector arucoDetector = RoverArucoDetector( + config: defaultArucoConfig, + ); + /// Frame properties used for target tracking calculations FrameProperties? frameProperties; diff --git a/lib/src/isolates/opencv.dart b/lib/src/isolates/opencv.dart index 6b71cf8..a40e46b 100644 --- a/lib/src/isolates/opencv.dart +++ b/lib/src/isolates/opencv.dart @@ -1,6 +1,5 @@ import "package:dartcv4/dartcv.dart"; import "package:burt_network/burt_network.dart"; -import "package:video/src/targeting/frame_properties.dart"; import "package:video/utils.dart"; import "package:video/video.dart"; @@ -82,8 +81,14 @@ class OpenCVCameraIsolate extends CameraIsolate { final (success, matrix) = camera!.read(); if (!success || matrix.width <= 0 || matrix.height <= 0) return; - final detectedMarkers = await detectAndProcessMarkers(matrix, frameProperties!); - sendToParent(ObjectDetectionPayload(details: details, tags: detectedMarkers)); + final detectedMarkers = await arucoDetector.process( + matrix, + frameProperties!, + ); + + sendToParent( + ObjectDetectionPayload(details: details, tags: detectedMarkers), + ); // await matrix.drawCrosshair(center: frameProperties!.center); diff --git a/lib/src/isolates/realsense.dart b/lib/src/isolates/realsense.dart index 
82b3e96..424ad4e 100644 --- a/lib/src/isolates/realsense.dart +++ b/lib/src/isolates/realsense.dart @@ -3,7 +3,6 @@ import "dart:ffi"; import "package:burt_network/burt_network.dart"; import "package:dartcv4/dartcv.dart"; import "package:protobuf/protobuf.dart"; -import "package:video/src/targeting/frame_properties.dart"; import "package:video/utils.dart"; import "package:video/video.dart"; @@ -132,8 +131,14 @@ class RealSenseIsolate extends CameraIsolate { Future sendRgbFrame(Pointer rawFrames) async { final rawRGB = rawFrames.ref.rgb_data; if (rawRGB == nullptr) return; - final rgbMatrix = rawRGB.toOpenCVMat(camera.rgbResolution, length: rawFrames.ref.rgb_length); - final detectedMarkers = await detectAndProcessMarkers(rgbMatrix, frameProperties!); + final rgbMatrix = rawRGB.toOpenCVMat( + camera.rgbResolution, + length: rawFrames.ref.rgb_length, + ); + final detectedMarkers = await arucoDetector.process( + rgbMatrix, + frameProperties!, + ); sendToParent( ObjectDetectionPayload( details: details.deepCopy()..name = CameraName.ROVER_FRONT, diff --git a/lib/src/targeting/aruco_detector.dart b/lib/src/targeting/aruco_detector.dart new file mode 100644 index 0000000..add8c4e --- /dev/null +++ b/lib/src/targeting/aruco_detector.dart @@ -0,0 +1,236 @@ +import "dart:math"; + +import "package:burt_network/protobuf.dart"; +import "package:dartcv4/dartcv.dart"; +import "package:video/video.dart"; + +/// The raw 3d output from opencv solvepnp +typedef ArucoRaw3DResult = ({ + int rval, + VecMat rvecs, + VecMat tvecs, + Mat reprojectionError, +}); + +/// Utility methods for raw 3d solvepnp outputs +extension ArucoRaw3DUtil on ArucoRaw3DResult { + /// Disposes all native resources used by the result objects + void dispose() { + rvecs.dispose(); + tvecs.dispose(); + reprojectionError.dispose(); + } +} + +/// Class to store the configuration +class RoverArucoConfig { + /// The size of the aruco marker in meters + final double markerSize; + + /// Whether or not to draw detected 
aruco markers + final bool draw; + + /// The native Mat of the object's corners in a 3D coordinate space + late final Mat _objectPoints = Mat.fromVec( + VecPoint3f.fromList([ + Point3f(-markerSize / 2, markerSize / 2, 0), + Point3f(markerSize / 2, markerSize / 2, 0), + Point3f(markerSize / 2, -markerSize / 2, 0), + Point3f(-markerSize / 2, -markerSize / 2, 0), + ]), + rows: 4, + cols: 1, + type: MatType.CV_32FC3, + ); + + /// The Aruco dictionary to use for marker detection + final ArucoDictionary dictionary; + + /// The Aruco detection parameters + final ArucoDetectorParameters detectorParams; + + /// The color to use to draw the frames of the markers + final Scalar arucoColor; + + /// Constructor for rover aruco config + RoverArucoConfig({ + required this.markerSize, + required this.dictionary, + required this.detectorParams, + required this.arucoColor, + this.draw = true, + }); +} + +/// Class to handle Aruco marker detection and processing +class RoverArucoDetector { + /// The configuration for the aruco detection + final RoverArucoConfig config; + + /// The native OpenCV aruco detector object + late final ArucoDetector _arucoDetector = ArucoDetector.create( + config.dictionary, + config.detectorParams, + ); + + /// Constructor for Rover Aruco Detector + RoverArucoDetector({required this.config}); + + /// Processes an incoming [Mat] and returns any detected Aruco tags in view + /// + /// Calculations for the tag's 2d (or 3d) position will be made using the camera + /// intrinsics from the provided [frameProperties] + /// + /// 3D calculations are made using the SolvePnP Algorithm, + /// documentation of which can be found here: https://docs.opencv.org/4.x/d5/d1f/calib3d_solvePnP.html + /// + /// If the [config] specifies to draw, an indicator will be drawn around the detected markers + Future<List<DetectedObject>> process( + Mat image, + FrameProperties frameProperties, + ) async { + // Get the raw aruco detections + final (corners, ids, rejected) = await 
_arucoDetector.detectMarkersAsync( + image, + ); + + // If there's no markers, save time and return nothing + if (ids.isEmpty) { + return []; + } + + if (config.draw) { + await arucoDrawDetectedMarkersAsync( + image, + corners, + ids, + config.arucoColor, + ); + } + + final detectedMarkers = []; + + // Iterate through each of the detections and perform the + // target calculations on each of them + for (int i = 0; i < ids.length; i++) { + var centerX = 0.0; + var centerY = 0.0; + for (final corner in corners[i]) { + centerX += corner.x; + centerY += corner.y; + } + centerX /= 4; + centerY /= 4; + + final (:yaw, :pitch) = calculateYawPitch( + frameProperties, + centerX, + centerY, + ); + + final pnpResult = _calculateRaw3DPose(frameProperties, corners[i]); + Pose3d? bestCameraToTarget; + double? bestReprojectionError; + + if (pnpResult != null) { + // (0, 0) of each respective matrix is the best pnp result + // (1, 0) of each respective matrix is the alternate PnP result + + // Translation vector is [x, y, z], where +x is to the right, + // +y is down, and +z is away from the camera + final bestTranslation = pnpResult.tvecs[0].at(0, 0); + // Rotation vector is [x, y, z], for yaw, pitch, and roll respectively + final bestRotation = pnpResult.rvecs[0].at(0, 0); + + bestCameraToTarget = Pose3d( + translation: Coordinates( + x: bestTranslation.val1, + y: -bestTranslation.val2, + z: bestTranslation.val3, + ), + rotation: Orientation( + x: bestRotation.val1 * (180 / pi), + y: bestRotation.val2 * (180 / pi), + z: bestRotation.val3 * (180 / pi), + ), + ); + bestReprojectionError = pnpResult.reprojectionError.at(0, 0); + + if (config.draw) { + drawFrameAxes( + image, + frameProperties.calibrationData!.intrinsics!, + frameProperties.calibrationData!.distCoefficients!, + pnpResult.rvecs[0], + pnpResult.tvecs[0], + config.markerSize, + ); + } + } + + // Create a vector of the corners of the aruco marker to determine the total area + final cornerVec = VecPoint.generate( + 
corners[i].length, + (idx) => Point(corners[i][idx].x.toInt(), corners[i][idx].y.toInt()), + ); + final area = contourArea(cornerVec); + + cornerVec.dispose(); + + // Create the proto message from the target calculations + PnpResult? pnpResultProto; + if (bestCameraToTarget != null) { + pnpResultProto = PnpResult( + cameraToTarget: bestCameraToTarget, + reprojectionError: bestReprojectionError, + ); + } + + detectedMarkers.add( + DetectedObject( + objectType: DetectedObjectType.ARUCO, + arucoTagId: ids[i], + yaw: yaw, + pitch: pitch, + centerX: centerX.toInt(), + centerY: centerY.toInt(), + relativeSize: area / (image.width * image.height), + bestPnpResult: pnpResultProto, + ), + ); + + pnpResult?.dispose(); + } + corners.dispose(); + ids.dispose(); + rejected.dispose(); + return detectedMarkers; + } + + /// Calculates the 3D pose from the frame properties and target corners + /// + /// If the frame properties does not contain a calibration, this will return null + ArucoRaw3DResult? _calculateRaw3DPose( + FrameProperties properties, + VecPoint2f corners, + ) { + if (properties.calibrationData == null) { + return null; + } + + final (rval, rvecs, tvecs, reprojectionError) = solvePnPGeneric( + config._objectPoints, + Mat.fromVec(corners), + properties.calibrationData!.intrinsics!, + properties.calibrationData!.distCoefficients!, + flags: SOLVEPNP_IPPE_SQUARE, + ); + + return ( + rval: rval, + rvecs: rvecs, + tvecs: tvecs, + reprojectionError: reprojectionError, + ); + } +} diff --git a/lib/src/targeting/calibration_coefficients.dart b/lib/src/targeting/calibration_coefficients.dart index ae8a9dd..269bbb7 100644 --- a/lib/src/targeting/calibration_coefficients.dart +++ b/lib/src/targeting/calibration_coefficients.dart @@ -2,25 +2,42 @@ import "package:burt_network/burt_network.dart"; import "package:dartcv4/dartcv.dart"; /// A class representing the intrinsics and distortion coefficients of a camera -/// -/// These camera coefficients are loaded from a json file in 
the opencv calibration -/// format +/// +/// These camera coefficients are loaded from a json file in 2 different OpenCV camera +/// calibration formats: +/// +/// For calibrations from CalibDb, it will be in the format: +/// ```json +/// { +/// "camera_matrix": { +/// "data": [ +/// ... +/// ] +/// } +/// } +/// ``` +/// +/// For calibrations from WPICal, it will be in the format: +/// ```json +/// { +/// "camera_matrix": [ +/// ... +/// ] +/// } +/// ``` class CalibrationCoefficients { /// The camera intrinsics matrix, this will always be a 3x3 double matrix Mat? intrinsics; + /// The distortion coefficients matrix, this matrix will always have one row Mat? distCoefficients; - /// Creates the camera calibration coefficients from + /// Creates the camera calibration coefficients from a Json object CalibrationCoefficients.fromJson({required Json json}) { if (json["camera_matrix"] is Map) { - _initIntrinsics( - (json["camera_matrix"]["data"] as List).cast(), - ); + _initIntrinsics((json["camera_matrix"]["data"] as List).cast()); } else if (json["camera_matrix"] is List) { - _initIntrinsics( - (json["camera_matrix"] as List).cast(), - ); + _initIntrinsics((json["camera_matrix"] as List).cast()); } if (json["distortion_coefficients"] is Map) { @@ -35,10 +52,20 @@ class CalibrationCoefficients { } void _initIntrinsics(List intrinsicsMatrix) { - intrinsics = Mat.fromList(3, 3, MatType.CV_64FC1, intrinsicsMatrix.map((e) => e.toDouble()).toList()); + intrinsics = Mat.fromList( + 3, + 3, + MatType.CV_64FC1, + intrinsicsMatrix.map((e) => e.toDouble()).toList(), + ); } void _initDistCoefficients(List distMatrix) { - distCoefficients = Mat.fromList(1, distMatrix.length, MatType.CV_64FC1, distMatrix.map((e) => e.toDouble()).toList()); + distCoefficients = Mat.fromList( + 1, + distMatrix.length, + MatType.CV_64FC1, + distMatrix.map((e) => e.toDouble()).toList(), + ); } } diff --git a/lib/src/targeting/frame_properties.dart b/lib/src/targeting/frame_properties.dart index 
2ec8e50..3c6d2dd 100644 --- a/lib/src/targeting/frame_properties.dart +++ b/lib/src/targeting/frame_properties.dart @@ -4,11 +4,14 @@ import "dart:math" hide Point; import "package:burt_network/protobuf.dart"; import "package:dartcv4/dartcv.dart" show Point; -import "package:video/src/targeting/calibration_coefficients.dart"; import "package:video/video.dart"; -/// A class to store and calculate the capture intrinsics of a camera -/// Used mainly in target calculations for converting pixels to degrees +/// A class to store the intrinsics of a camera, such as the FoV, focal length, and calibration data. +/// +/// Each camera has unique properties in how objects appear as pixels. For most cameras, +/// these will differ due to tiny manufacturing inconsistencies in the lenses. For simple +/// 2D calculations, this isn't bad, but for 3D calculations where more info is needed to +/// detect, a calibration is necessary. class FrameProperties { /// The width of the image being taken by the camera final int captureWidth; @@ -40,9 +43,10 @@ class FrameProperties { /// The calibration data of the camera, null if there is no calibration for the specified resolution late final CalibrationCoefficients? 
calibrationData; - /// Constructor for frame properties where the horizontal and focal FoV are known + /// Constructor for frame properties where the horizontal FoV and focal length are known /// - /// Initializes capture resolutions, fov, and calculates the horizontal and vertical focal lengths + /// Initializes capture resolutions, fov, and calculates the horizontal + /// and vertical focal lengths FrameProperties({ required this.captureWidth, required this.captureHeight, @@ -62,9 +66,9 @@ class FrameProperties { focalLength = focal; } - /// Constructor for frame properties from calibration data - /// - /// Calculates the fov and focal length from the camera matrix + /// Constructor for frame properties from raw calibration data + /// + /// Calculates the fov and focal length from the calibration data FrameProperties.fromCalibrationData({ required this.captureWidth, required this.captureHeight, @@ -72,8 +76,14 @@ class FrameProperties { }) { centerX = calibrationData!.intrinsics!.at(0, 2); centerY = calibrationData!.intrinsics!.at(1, 2); - horizontalFoV = 2 * atan(captureWidth / (2 * calibrationData!.intrinsics!.at(0, 0))) * (180 / pi); - verticalFoV = 2 * atan(captureHeight / (2 * calibrationData!.intrinsics!.at(1, 1))) * (180 / pi); + horizontalFoV = + 2 * + atan(captureWidth / (2 * calibrationData!.intrinsics!.at(0, 0))) * + (180 / pi); + verticalFoV = + 2 * + atan(captureHeight / (2 * calibrationData!.intrinsics!.at(1, 1))) * + (180 / pi); final (:diagonal, :focal) = calculateDiagonalFoV( horizontalFoV: horizontalFoV, @@ -85,7 +95,8 @@ class FrameProperties { } /// Constructor for frame properties, initializes capture resolutions, FOV, - /// and determines the horizontal and vertical focal lengths + /// and determines the horizontal and vertical focal lengths from the provided + /// [diagonalFoV] and [captureWidth] + [captureHeight] FrameProperties.fromDiagonalFoV({ required this.captureWidth, required this.captureHeight, @@ -93,7 +104,11 @@ class 
FrameProperties { }) { centerX = (captureWidth / 2.0) - 0.5; centerY = (captureHeight / 2.0) - 0.5; - final (:horizontal, :vertical, :focal) = calculateHorizontalVerticalFoV(diagonalFoV, captureWidth, captureHeight); + final (:horizontal, :vertical, :focal) = calculateHorizontalVerticalFoV( + diagonalFoV, + captureWidth, + captureHeight, + ); horizontalFoV = horizontal; verticalFoV = vertical; focalLength = focal; @@ -103,7 +118,7 @@ class FrameProperties { /// /// This will search for a calibration file under the calibrations directory for the /// specified resolution. If a json file is found, it will attempt to load the calibration data. - /// + /// /// If the details has a horizontal and vertical fov, then it will initialize /// based on the horizontal and vertical fov, otherwise, it will assume the /// details have a diagonal fov @@ -112,11 +127,15 @@ class FrameProperties { required int captureHeight, required CameraDetails details, }) { - final calibrationFile = File("${CameraIsolate.baseDirectory}/calibrations/${details.name}/${captureWidth}x$captureHeight.json"); + final calibrationFile = File( + "${CameraIsolate.baseDirectory}/calibrations/${details.name}/${captureWidth}x$captureHeight.json", + ); if (calibrationFile.existsSync()) { try { final calibrationJson = jsonDecode(calibrationFile.readAsStringSync()); - final calibrationCoefficients = CalibrationCoefficients.fromJson(json: calibrationJson); + final calibrationCoefficients = CalibrationCoefficients.fromJson( + json: calibrationJson, + ); if (calibrationCoefficients.intrinsics != null && calibrationCoefficients.distCoefficients != null) { @@ -151,7 +170,8 @@ class FrameProperties { } /// Determines the horizontal and vertical focal lengths of the camera based on the diagonal FOV and capture resolution - ({double horizontal, double vertical, double focal}) calculateHorizontalVerticalFoV( + ({double horizontal, double vertical, double focal}) + calculateHorizontalVerticalFoV( double diagonalFoV, int 
captureWidth, int captureHeight, @@ -159,10 +179,14 @@ class FrameProperties { // Math taken from PhotonVision: // https://github.com/PhotonVision/photonvision/blob/5df189d306be89a80b14e6bee9e9df2c31f7f589/photon-core/src/main/java/org/photonvision/vision/frame/FrameStaticProperties.java#L127 final diagonalRadians = diagonalFoV * (pi / 180); - final diagonalAspect = sqrt(captureWidth * captureWidth + captureHeight * captureHeight); + final diagonalAspect = sqrt( + captureWidth * captureWidth + captureHeight * captureHeight, + ); - final horizontalView = atan(tan(diagonalRadians / 2) * (captureWidth / diagonalAspect)) * 2; - final verticalView = atan(tan(diagonalRadians / 2) * (captureHeight / diagonalAspect)) * 2; + final horizontalView = + atan(tan(diagonalRadians / 2) * (captureWidth / diagonalAspect)) * 2; + final verticalView = + atan(tan(diagonalRadians / 2) * (captureHeight / diagonalAspect)) * 2; final focalLength = captureWidth / (2 * tan(horizontalView / 2)); return ( @@ -173,7 +197,9 @@ class FrameProperties { } /// Determines the diagonal FoV and focal length from the capture resolution and horizontal FoV - /// + /// + /// [horizontalFoV] should be in degrees + /// /// Math derived from https://www.litchiutilities.com/docs/fov.php ({double diagonal, double focal}) calculateDiagonalFoV({ required double horizontalFoV, @@ -182,9 +208,14 @@ class FrameProperties { }) { final horizontalFoVRad = horizontalFoV * (pi / 180); - final aspectDiag = sqrt(captureWidth * captureWidth + captureHeight * captureHeight); - final diagonalFoV = atan(tan(horizontalFoVRad / 2) * (aspectDiag / captureWidth)) * 2 * (180 / pi); - final focalLength = captureWidth / (2 * tan(horizontalFoVRad/ 2)); + final aspectDiag = sqrt( + captureWidth * captureWidth + captureHeight * captureHeight, + ); + final diagonalFoV = + atan(tan(horizontalFoVRad / 2) * (aspectDiag / captureWidth)) * + 2 * + (180 / pi); + final focalLength = captureWidth / (2 * tan(horizontalFoVRad / 2)); return 
(diagonal: diagonalFoV, focal: focalLength); } diff --git a/lib/src/targeting/targeting_calculations.dart b/lib/src/targeting/targeting_calculations.dart new file mode 100644 index 0000000..a4de1c0 --- /dev/null +++ b/lib/src/targeting/targeting_calculations.dart @@ -0,0 +1,50 @@ +import "dart:math"; + +import "package:dartcv4/dartcv.dart"; +import "package:video/video.dart"; + +/// Calculates the yaw and pitch of a coordinate in pixels using the undistorted center +/// and camera focal length +/// +/// Math taken from FRC Team 254: https://www.team254.com/documents/vision-control/ +({double yaw, double pitch}) calculateYawPitch( + FrameProperties properties, + double tagCenterX, + double tagCenterY, +) { + double centerX = tagCenterX; + double centerY = tagCenterY; + + /// If there's calibration data, undistort the tag's center + if (properties.calibrationData != null) { + final centerVec = VecPoint2f.fromList([Point2f(tagCenterX, tagCenterY)]); + final temp = Mat.fromVec(centerVec); + final result = undistortPoints( + temp, + properties.calibrationData!.intrinsics!, + properties.calibrationData!.distCoefficients!, + criteria: (TERM_COUNT + TERM_EPS, 30, 1e-6), + ); + final undistortedVec = result.at(0, 0); + + // The output coordinates are normalized, see: https://stackoverflow.com/a/65861232 + if (undistortedVec.val1.isFinite) { + centerX = + undistortedVec.val1 * properties.focalLength + properties.centerX; + } + if (undistortedVec.val2.isFinite) { + centerY = + undistortedVec.val2 * properties.focalLength + properties.centerY; + } + + centerVec.dispose(); + temp.dispose(); + result.dispose(); + } + + // Source: https://www.team254.com/documents/vision-control/ + final yaw = atan((centerX - properties.centerX) / properties.focalLength); + final pitch = atan((properties.centerY - centerY) / properties.focalLength); + + return (yaw: yaw * (180 / pi), pitch: pitch * (180 / pi)); +} diff --git a/lib/src/utils/aruco.dart b/lib/src/utils/aruco.dart deleted file mode 
100644 index 9f98de7..0000000 --- a/lib/src/utils/aruco.dart +++ /dev/null @@ -1,204 +0,0 @@ -import "dart:math"; - -import "package:burt_network/protobuf.dart"; -import "package:dartcv4/dartcv.dart"; -import "package:video/src/targeting/frame_properties.dart"; - -final _arucoDictionary = ArucoDictionary.predefined(PredefinedDictionaryType.DICT_4X4_50); -final _arucoParams = ArucoDetectorParameters.empty(); -final _arucoDetector = ArucoDetector.create(_arucoDictionary, _arucoParams); -final _arucoColor = Scalar.fromRgb(0, 255, 0); - -/// The raw 3d output from opencv solvepnp -typedef ArucoRaw3DResult = ({int rval, VecMat rvecs, VecMat tvecs, Mat reprojectionError}); - -/// Utility methods for raw 3d solvepnp outputs -extension ArucoRaw3DUtil on ArucoRaw3DResult { - /// Disposes all native resources used by the result objects - void dispose() { - rvecs.dispose(); - tvecs.dispose(); - reprojectionError.dispose(); - } -} - -/// The size of the aruco marker in meters -const double markerSize = 6.0 / 39.37; //20.0 / 100; - -final _objectPoints = Mat.fromVec( - VecPoint3f.fromList([ - Point3f(-markerSize / 2, markerSize / 2, 0), - Point3f(markerSize / 2, markerSize / 2, 0), - Point3f(markerSize / 2, -markerSize / 2, 0), - Point3f(-markerSize / 2, -markerSize / 2, 0), - ]), - rows: 4, - cols: 1, - type: MatType.CV_32FC3, - ); - -/// Detects and processes Aruco markers as a target message list, optionally draws them to the image -Future> detectAndProcessMarkers( - Mat image, - FrameProperties frameProperties, { - bool draw = true, -}) async { - final (corners, ids, rejected) = await _arucoDetector.detectMarkersAsync(image); - - if (draw) { - await arucoDrawDetectedMarkersAsync(image, corners, ids, _arucoColor); - } - - final detectedMarkers = []; - for (int i = 0; i < ids.length; i++) { - var centerX = 0.0; - var centerY = 0.0; - for (final corner in corners[i]) { - centerX += corner.x; - centerY += corner.y; - } - centerX /= 4; - centerY /= 4; - - final (:yaw, :pitch) = 
calculateYawPitch(frameProperties, centerX, centerY); - - final pnpResult = calculate3DPose(frameProperties, corners[i]); - Pose3d? bestCameraToTarget; - double? bestReprojectionError; - - if (pnpResult != null) { - final bestTranslation = pnpResult.tvecs[0].at(0, 0); - final bestRotation = pnpResult.rvecs[0].at(0, 0); - - bestCameraToTarget = Pose3d( - translation: Coordinates( - x: bestTranslation.val1, - y: -bestTranslation.val2, - z: bestTranslation.val3, - ), - rotation: Orientation( - x: bestRotation.val1 * (180 / pi), - y: bestRotation.val2 * (180 / pi), - z: bestRotation.val3 * (180 / pi), - ), - ); - bestReprojectionError = pnpResult.reprojectionError.at(0, 0); - - drawFrameAxes( - image, - frameProperties.calibrationData!.intrinsics!, - frameProperties.calibrationData!.distCoefficients!, - pnpResult.rvecs[0], - pnpResult.tvecs[0], - markerSize, - ); - } - - final cornerVec = VecPoint.generate( - corners[i].length, - (idx) => Point(corners[i][idx].x.toInt(), corners[i][idx].y.toInt()), - ); - final area = contourArea(cornerVec); - - cornerVec.dispose(); - - PnpResult? 
pnpResultProto; - if (bestCameraToTarget != null) { - pnpResultProto = PnpResult( - cameraToTarget: bestCameraToTarget, - reprojectionError: bestReprojectionError, - ); - } - - detectedMarkers.add( - DetectedObject( - objectType: DetectedObjectType.ARUCO, - arucoTagId: ids[i], - yaw: yaw, - pitch: pitch, - centerX: centerX.toInt(), - centerY: centerY.toInt(), - relativeSize: area / (image.width * image.height), - bestPnpResult: pnpResultProto, - ), - ); - - pnpResult?.dispose(); - } - corners.dispose(); - ids.dispose(); - rejected.dispose(); - return detectedMarkers; -} - -/// Detect ArUco tags in the cv::aruco::DICT_4X4_50 dictionary and annotate them -Future detectAndAnnotateFrames(Mat image) async { - final (corners, ids, rejected) = await _arucoDetector.detectMarkersAsync(image); - await arucoDrawDetectedMarkersAsync(image, corners, ids, _arucoColor); - corners.dispose(); - ids.dispose(); - rejected.dispose(); -} - -/// Calculates the yaw and pitch of a coordinate in pixels using the undistorted center -/// and camera focal length -/// -/// Math taken from FRC Team 254: https://www.team254.com/documents/vision-control/ -({double yaw, double pitch}) calculateYawPitch( - FrameProperties properties, - double tagCenterX, - double tagCenterY, -) { - double centerX = tagCenterX; - double centerY = tagCenterY; - - if (properties.calibrationData != null) { - final centerVec = VecPoint2f.fromList([Point2f(tagCenterX, tagCenterY)]); - final temp = Mat.fromVec(centerVec); - final result = undistortPoints( - temp, - properties.calibrationData!.intrinsics!, - properties.calibrationData!.distCoefficients!, - criteria: (TERM_COUNT + TERM_EPS, 30, 1e-6), - ); - final undistortedVec = result.at(0, 0); - - // The output coordinates are normalized, see: https://stackoverflow.com/a/65861232 - if (undistortedVec.val1.isFinite) { - centerX = undistortedVec.val1 * properties.focalLength + properties.centerX; - } - if (undistortedVec.val2.isFinite) { - centerY = undistortedVec.val2 * 
properties.focalLength + properties.centerY; - } - - centerVec.dispose(); - temp.dispose(); - result.dispose(); - } - - final yaw = atan((centerX - properties.centerX) / properties.focalLength); - final pitch = atan((properties.centerY - centerY) / properties.focalLength); - // final yaw = atan((tagCenterX - properties.centerX) / properties.horizontalFoV); - // final pitch = atan((properties.centerY - tagCenterY) / (properties.horizontalFoV / cos(yaw))); - - return (yaw: yaw * (180 / pi), pitch: pitch * (180 / pi)); -} - -/// Calculates the 3D pose from the frame properties and target corners -/// -/// If the frame properties does not contain a calibration, this will return null -ArucoRaw3DResult? calculate3DPose(FrameProperties properties, VecPoint2f corners) { - if (properties.calibrationData == null) { - return null; - } - - final (rval, rvecs, tvecs, reprojectionError) = solvePnPGeneric( - _objectPoints, - Mat.fromVec(corners), - properties.calibrationData!.intrinsics!, - properties.calibrationData!.distCoefficients!, - flags: SOLVEPNP_IPPE_SQUARE, - ); - - return (rval: rval, rvecs: rvecs, tvecs: tvecs, reprojectionError: reprojectionError); -} diff --git a/lib/src/utils/constants.dart b/lib/src/utils/constants.dart index 3e63d75..fe04eb2 100644 --- a/lib/src/utils/constants.dart +++ b/lib/src/utils/constants.dart @@ -40,23 +40,30 @@ const findObjectsInCameraFeed = CameraName.CAMERA_NAME_UNDEFINED; /// /// Uses [cameraNames] or [cameraIndexes] VideoCapture getCamera(CameraName name) => Platform.isWindows - ? VideoCapture.fromDevice(cameraIndexes[name]!) - : VideoCapture.fromFile(cameraNames[name]!); + ? VideoCapture.fromDevice(cameraIndexes[name]!) 
+ : VideoCapture.fromFile(cameraNames[name]!); /// Loads camera details for a specific camera -/// +/// /// If there is no camera config file found or there were /// missing fields in the config json, it will return the value of [baseDetails] CameraDetails loadCameraDetails(CameraDetails baseDetails, CameraName name) { final cameraDetails = baseDetails; - final configFile = File("${CameraIsolate.baseDirectory}/camera_details/${name.name}.json"); + final configFile = File( + "${CameraIsolate.baseDirectory}/camera_details/${name.name}.json", + ); if (!configFile.existsSync()) { return cameraDetails; } try { - cameraDetails.mergeFromProto3Json(jsonDecode(configFile.readAsStringSync())); + cameraDetails.mergeFromProto3Json( + jsonDecode(configFile.readAsStringSync()), + ); } catch (e) { - collection.videoServer.logger.error("Error while loading config for camera $name", body: e.toString()); + collection.videoServer.logger.error( + "Error while loading config for camera $name", + body: e.toString(), + ); } // Ignore the status specified in the json @@ -93,3 +100,11 @@ CameraDetails getRealsenseDetails(CameraName name) => CameraDetails( verticalFov: 42, status: CameraStatus.CAMERA_ENABLED, ); + +/// The default [RoverArucoConfig] for detecting Aruco markers +final RoverArucoConfig defaultArucoConfig = RoverArucoConfig( + markerSize: 6.00 / 39.37, + dictionary: ArucoDictionary.predefined(PredefinedDictionaryType.DICT_4X4_50), + detectorParams: ArucoDetectorParameters.empty(), + arucoColor: Scalar.fromRgb(0, 255, 0), +); diff --git a/lib/utils.dart b/lib/utils.dart index 4b46708..078d934 100644 --- a/lib/utils.dart +++ b/lib/utils.dart @@ -1,3 +1,3 @@ -export "src/utils/aruco.dart"; +export "src/targeting/aruco_detector.dart"; export "src/utils/opencv.dart"; export "src/utils/constants.dart"; diff --git a/lib/video.dart b/lib/video.dart index 79e6027..c76646b 100644 --- a/lib/video.dart +++ b/lib/video.dart @@ -7,7 +7,11 @@ export "src/isolates/realsense.dart"; export 
"src/realsense/ffi.dart"; export "src/realsense/interface.dart"; -export "src/utils/aruco.dart"; +export "src/targeting/aruco_detector.dart"; +export "src/targeting/calibration_coefficients.dart"; +export "src/targeting/frame_properties.dart"; +export "src/targeting/targeting_calculations.dart"; + export "src/utils/constants.dart"; export "src/utils/periodic_timer.dart";