diff --git a/CHEATSHEET.md b/CHEATSHEET.md new file mode 100644 index 0000000..c44f1ee --- /dev/null +++ b/CHEATSHEET.md @@ -0,0 +1,19 @@ +# Cheatsheet + +## Homography Transformation +### Setup +- Connect to the racecar's docker image (`ssh racecar` -> `connect` -> `cd racecar_ws`) +- Build (`colcon build && source install/setup.bash`) +- Launch the camera (`ros2 launch zed_wrapper zed_camera.launch.py camera_model:=zed`) + - If it errors with something about a display, run `unset DISPLAY` before this command + +### Visualizing Stuff +- Launching noVNC: + - Locally, *not* on the racecar: `ssh -L 6081:localhost:6081 racecar@192.168.1.74` + - Open [noVNC](http://localhost:6081/vnc.html?resize=remote) in your web browser +- Open a terminal, enter `rqt` and go to `Plugins > Visualization > Image View` +- Change the subscription topic (left-most dropdown) to `/zed/zed_node/left/image_rect_color` + - You may need to hit "refresh topics" (button to the right of the dropdown) if it's not working +- Change the publishing topic (text box below the dropdown) to `/zed/rgb/image_rect_color_mouse_left` and click the checkmark to its left + - This enables publishing left mouse clicks' positions to that topic +- Open a terminal, enter `rviz2` \ No newline at end of file diff --git a/pics_vids/90 deg turn steering 2 speed 1.png b/pics_vids/90 deg turn steering 2 speed 1.png new file mode 100644 index 0000000..86cb6d6 Binary files /dev/null and b/pics_vids/90 deg turn steering 2 speed 1.png differ diff --git a/pics_vids/Deadzone_right.gif b/pics_vids/Deadzone_right.gif new file mode 100755 index 0000000..f305146 Binary files /dev/null and b/pics_vids/Deadzone_right.gif differ diff --git a/pics_vids/Too_far_left.gif b/pics_vids/Too_far_left.gif new file mode 100755 index 0000000..fa67bf3 Binary files /dev/null and b/pics_vids/Too_far_left.gif differ diff --git a/pics_vids/deadzone_left.gif b/pics_vids/deadzone_left.gif new file mode 100755 index 0000000..c3e93c0 Binary files /dev/null and b/pics_vids/deadzone_left.gif differ diff --git a/pics_vids/test.png b/pics_vids/test.png new file mode 100644 index 0000000..ad46aeb Binary files /dev/null and b/pics_vids/test.png differ diff --git a/pics_vids/too far right.gif b/pics_vids/too far right.gif new file mode 100755 index 0000000..fe67b0f Binary files /dev/null and b/pics_vids/too far right.gif differ diff --git a/visual_servoing/visual_servoing/PID.py b/visual_servoing/visual_servoing/PID.py new file mode 100755 index 0000000..9c3802f --- /dev/null +++ b/visual_servoing/visual_servoing/PID.py @@ -0,0 +1,43 @@ +class PIDController: + def __init__(self, kp, ki, kd, setpoint): + self.kp = kp + self.ki = ki + self.kd = kd + self.setpoint = setpoint + self.prev_error = 0 + self.integral = 0 + + def set_setpoint(self, setpoint): + self.setpoint = setpoint + + def set_gains(self, kp, ki, kd): + self.kp = kp + self.ki = ki + self.kd = kd + + def compute_control_signal(self, current_value, dt): + error = self.setpoint - current_value + self.integral += error * dt + derivative = (error - self.prev_error) / dt + + kp_file_path = 'kp.txt' + with open(kp_file_path, 'a') as file: + file.write(f'{self.kp*error} ') + + ki_file_path = 'ki.txt' + with open(ki_file_path, 'a') as file: + file.write(f'{self.ki*error} ') + + kd_file_path = 'kd.txt' + with open(kd_file_path, 'a') as file: + file.write(f'{self.kd*error} ') + + error_file_path = 'error.txt' + with open(error_file_path, 'a') as file: + file.write(f'{error} ') + + control_signal = self.kp * error + self.ki * 
self.integral + self.kd * derivative + + self.prev_error = error + + return control_signal diff --git a/visual_servoing/visual_servoing/computer_vision/.DS_Store b/visual_servoing/visual_servoing/computer_vision/.DS_Store new file mode 100644 index 0000000..1bdf111 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/.DS_Store differ diff --git a/visual_servoing/visual_servoing/computer_vision/__pycache__/color_segmentation.cpython-38.pyc b/visual_servoing/visual_servoing/computer_vision/__pycache__/color_segmentation.cpython-38.pyc index 3fdb461..d9b04a7 100644 Binary files a/visual_servoing/visual_servoing/computer_vision/__pycache__/color_segmentation.cpython-38.pyc and b/visual_servoing/visual_servoing/computer_vision/__pycache__/color_segmentation.cpython-38.pyc differ diff --git a/visual_servoing/visual_servoing/computer_vision/__pycache__/sift_template.cpython-38.pyc b/visual_servoing/visual_servoing/computer_vision/__pycache__/sift_template.cpython-38.pyc index 63384c2..a27f1c8 100644 Binary files a/visual_servoing/visual_servoing/computer_vision/__pycache__/sift_template.cpython-38.pyc and b/visual_servoing/visual_servoing/computer_vision/__pycache__/sift_template.cpython-38.pyc differ diff --git a/visual_servoing/visual_servoing/computer_vision/color_segmentation.py b/visual_servoing/visual_servoing/computer_vision/color_segmentation.py index 6ba1c99..c2a8b34 100644 --- a/visual_servoing/visual_servoing/computer_vision/color_segmentation.py +++ b/visual_servoing/visual_servoing/computer_vision/color_segmentation.py @@ -35,6 +35,37 @@ def cd_color_segmentation(img, template): ########## YOUR CODE STARTS HERE ########## bounding_box = ((0,0),(0,0)) + hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + + # orange color bounds in HSV + # Hue has values from 0 to 180, Saturation and Value from 0 to 255 + #orange_low = (0, 100, 100) + #orange_high = (18, 255, 255) + + orange_low = (0, 120, 150) + orange_high = (20, 255, 255) + + mask = cv2.inRange(hsv_img, orange_low, orange_high) + + # erode and dilate to get rid of noise + # the structuring element is the neighborhood erosion checks - a pixel is kept only if everything inside it is orange + kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)) + mask = cv2.erode(mask, kernel, iterations=1) + mask = cv2.dilate(mask, kernel, iterations=1) + + # findContours returns the contours and their hierarchy; RETR_EXTERNAL ignores nested contours, CHAIN_APPROX_SIMPLE compresses each contour's points + contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + # there should only be one contour if it's just the cone + if len(contours) != 0: + biggest_contour = max(contours, key = cv2.contourArea) + x, y, w, h = cv2.boundingRect(biggest_contour) + bounding_box = ((x, y), (x + w, y + h)) + else: + print("Cone not found.") + + cv2.rectangle(img, bounding_box[0], bounding_box[1], (0, 255, 0), 2) + #cv2.imshow('Contours', img) + cv2.imwrite("debug.jpg", img) ########### YOUR CODE ENDS HERE ########### diff --git a/visual_servoing/visual_servoing/computer_vision/cv_test.py b/visual_servoing/visual_servoing/computer_vision/cv_test.py index a41673e..b2bbbf8 100644 --- a/visual_servoing/visual_servoing/computer_vision/cv_test.py +++ b/visual_servoing/visual_servoing/computer_vision/cv_test.py @@ -63,7 +63,7 @@ def iou_score(bbox1, bbox2): return score -def test_algorithm(detection_func, csv_file_path, template_file_path, swap=False): +def test_algorithm(detection_func, csv_file_path, template_file_path, swap=False, visual=False): """ Test a cone detection function and return the average score based on all the test images Input: @@ -95,7 +95,17 @@ # Detection bbox bbox_est = detection_func(img, template) score = iou_score(bbox_est, bbox_true) - + + if visual: + # Draw ground truth bbox in green + cv2.rectangle(img, (bbox_true[0][0], bbox_true[0][1]), (bbox_true[1][0], bbox_true[1][1]), (0, 255, 0), 2) + # Draw estimated bbox in red + cv2.rectangle(img, (bbox_est[0][0], bbox_est[0][1]), (bbox_est[1][0], bbox_est[1][1]), (0, 0, 255), 2) + # Save the annotated image + cv2.imwrite(f'{img_path.split("/")[-1].split(".")[0]}_result.jpg', img) + + + # Add score to dict scores[img_path] = score @@ -169,5 +179,29 @@ def test_all_algorithms(csv_file_path, template_file_path, output_file_path, swa if scores: for (img, val) in scores.items(): print((img, val)) + + elif len(sys.argv) == 4: + scores = None + algo_dict = dict({"color":cd_color_segmentation, + "sift":cd_sift_ransac, + "template":cd_template_matching}) + data_dict = dict({"cone":(cone_csv_path, cone_template_path), + "map":(localization_csv_path, localization_template_path), + "citgo":(citgo_csv_path, citgo_template_path)}) + swap = False + args = sys.argv[1:3] + if args[0] in {"cone", "map", "citgo"} and args[1] in {"color", "template", "sift"}: + if args[0] == "map": + swap = True + scores = test_algorithm(algo_dict[args[1]], data_dict[args[0]][0], data_dict[args[0]][1], swap=swap, visual=sys.argv[3]) + else: + print("Argument(s) not recognized") + + if scores: + for (img, val) in scores.items(): + print((img, val)) + + + else: print("too many arguments") diff --git a/visual_servoing/visual_servoing/computer_vision/debug.jpg b/visual_servoing/visual_servoing/computer_vision/debug.jpg new file mode 100644 index 0000000..4bc863b Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/debug.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/sift_template.py b/visual_servoing/visual_servoing/computer_vision/sift_template.py index ed7a2d6..a8bba1e 100644 --- a/visual_servoing/visual_servoing/computer_vision/sift_template.py +++ b/visual_servoing/visual_servoing/computer_vision/sift_template.py @@ -65,10 +65,27 @@ def cd_sift_ransac(img, template): h, w = template.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) + # print("pts",pts) + # print("pts.shape",pts.shape) ########## YOUR CODE STARTS HERE ########## - x_min = y_min = x_max = y_max = 0 + x_min = y_min = x_max = y_max = -1 + + for pt in pts: + # print("point",pt) + point = pt[0] + x = point[0] + y = point[1] + + if x < x_min or x_min == -1: + x_min = x + if x > x_max or x_max == -1: + x_max = x + if y < y_min or y_min == -1: + y_min = y + if y > y_max or y_max == -1: + y_max = y ########### YOUR CODE ENDS HERE ########### diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test10_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test10_result.jpg new file mode 100644 index 0000000..859fad0 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test10_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test11_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test11_result.jpg new file mode 100644 index 0000000..e2ee101 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test11_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test12_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test12_result.jpg new
file mode 100644 index 0000000..4e4d207 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test12_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test13_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test13_result.jpg new file mode 100644 index 0000000..1e45c4b Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test13_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test14_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test14_result.jpg new file mode 100644 index 0000000..402765e Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test14_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test15_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test15_result.jpg new file mode 100644 index 0000000..1111467 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test15_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test16_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test16_result.jpg new file mode 100644 index 0000000..d1f3561 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test16_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test17_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test17_result.jpg new file mode 100644 index 0000000..855e62a Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test17_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test18_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test18_result.jpg new file mode 100644 index 0000000..efc98e3 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test18_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test19_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test19_result.jpg new file mode 100644 index 0000000..c6a8c99 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test19_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test1_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test1_result.jpg new file mode 100644 index 0000000..ca4fb0b Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test1_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test20_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test20_result.jpg new file mode 100644 index 0000000..fd79d88 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test20_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test2_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test2_result.jpg new file mode 100644 index 0000000..4fe49b6 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test2_result.jpg differ diff --git 
a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test3_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test3_result.jpg new file mode 100644 index 0000000..f08c5f7 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test3_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test4_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test4_result.jpg new file mode 100644 index 0000000..d02c1ad Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test4_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test5_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test5_result.jpg new file mode 100644 index 0000000..d3a1f97 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test5_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test6_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test6_result.jpg new file mode 100644 index 0000000..ef6834b Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test6_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test7_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test7_result.jpg new file mode 100644 index 0000000..557e9ea Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test7_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test8_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test8_result.jpg new file mode 100644 index 0000000..e182f05 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test8_result.jpg differ diff --git a/visual_servoing/visual_servoing/computer_vision/test_colorseg/test9_result.jpg b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test9_result.jpg new file mode 100644 index 0000000..c120591 Binary files /dev/null and b/visual_servoing/visual_servoing/computer_vision/test_colorseg/test9_result.jpg differ diff --git a/visual_servoing/visual_servoing/cone_detector.py b/visual_servoing/visual_servoing/cone_detector.py index 8fc0679..c4330e0 100644 --- a/visual_servoing/visual_servoing/cone_detector.py +++ b/visual_servoing/visual_servoing/cone_detector.py @@ -50,6 +50,19 @@ def image_callback(self, image_msg): image = self.bridge.imgmsg_to_cv2(image_msg, "bgr8") + bounding_box = cd_color_segmentation(image, None) # the template argument is unused by the color segmentation routine + + # If a valid bounding box was found, publish it; an invalid bounding box is ((0,0),(0,0)) + if (bounding_box[0][0] + bounding_box[0][1] + bounding_box[1][0] + bounding_box[1][1]) != 0: + bot_y = bounding_box[1][1] + center_bot_x = int((bounding_box[0][0]+bounding_box[1][0])/2) + + pixel_msg = ConeLocationPixel() + pixel_msg.u = float(center_bot_x) # u and v are floating-point message fields + pixel_msg.v = float(bot_y) + + self.cone_pub.publish(pixel_msg) + debug_msg = self.bridge.cv2_to_imgmsg(image, "bgr8") self.debug_pub.publish(debug_msg) diff --git a/visual_servoing/visual_servoing/homography_transformer.py b/visual_servoing/visual_servoing/homography_transformer.py index 21ff020..9539f89 100644 --- a/visual_servoing/visual_servoing/homography_transformer.py +++ b/visual_servoing/visual_servoing/homography_transformer.py @@ -12,6 +12,7 @@ from ackermann_msgs.msg import
AckermannDriveStamped from visualization_msgs.msg import Marker from vs_msgs.msg import ConeLocation, ConeLocationPixel +from geometry_msgs.msg import Point #The following collection of pixel locations and corresponding relative #ground plane locations are used to compute our homography matrix @@ -21,10 +22,10 @@ ###################################################### ## DUMMY POINTS -- ENTER YOUR MEASUREMENTS HERE -PTS_IMAGE_PLANE = [[-1, -1], - [-1, -1], - [-1, -1], - [-1, -1]] # dummy points +PTS_IMAGE_PLANE = [[354, 212], + [242, 217], + [354, 277], + [565, 224]] # dummy points ###################################################### # PTS_GROUND_PLANE units are in inches @@ -32,10 +33,10 @@ ###################################################### ## DUMMY POINTS -- ENTER YOUR MEASUREMENTS HERE -PTS_GROUND_PLANE = [[-1, -1], - [-1, -1], - [-1, -1], - [-1, -1]] # dummy points +PTS_GROUND_PLANE = [[36, 0], + [37, 18], + [15, 2], + [30, -18]] # dummy points ###################################################### METERS_PER_INCH = 0.0254 @@ -48,6 +49,7 @@ def __init__(self): self.cone_pub = self.create_publisher(ConeLocation, "/relative_cone", 10) self.marker_pub = self.create_publisher(Marker, "/cone_marker", 1) self.cone_px_sub = self.create_subscription(ConeLocationPixel, "/relative_cone_px", self.cone_detection_callback, 1) + self.mouse_px_sub = self.create_subscription(Point, "/zed/rgb/image_rect_color_mouse_left", self.mouse_click_callback, 1) if not len(PTS_GROUND_PLANE) == len(PTS_IMAGE_PLANE): rclpy.logerr("ERROR: PTS_GROUND_PLANE and PTS_IMAGE_PLANE should be of same length") @@ -68,8 +70,7 @@ def __init__(self): def cone_detection_callback(self, msg): #Extract information from message - u = msg.u - v = msg.v + u, v = msg.u, msg.v #Call to main function x, y = self.transformUvToXy(u, v) @@ -81,6 +82,15 @@ def cone_detection_callback(self, msg): self.cone_pub.publish(relative_xy_msg) + + def mouse_click_callback(self, msg: Point): + u, v = msg.x, msg.y + x, y = self.transformUvToXy(u, v) + + self.get_logger().info(f"({x}m, {y}m)") + + self.draw_marker(x, y, "map") + def transformUvToXy(self, u, v): """ diff --git a/visual_servoing/visual_servoing/parking_controller.py b/visual_servoing/visual_servoing/parking_controller.py index 481aff6..ce7fce3 100644 --- a/visual_servoing/visual_servoing/parking_controller.py +++ b/visual_servoing/visual_servoing/parking_controller.py @@ -5,7 +5,9 @@ import numpy as np from vs_msgs.msg import ConeLocation, ParkingError +from visual_servoing.PID import * from ackermann_msgs.msg import AckermannDriveStamped +import time class ParkingController(Node): """ @@ -15,7 +17,7 @@ class ParkingController(Node): """ def __init__(self): super().__init__("parking_controller") - + self.start_time = time.time() self.declare_parameter("drive_topic") DRIVE_TOPIC = self.get_parameter("drive_topic").value # set in launch file; different for simulator vs racecar @@ -25,10 +27,14 @@ def __init__(self): self.create_subscription(ConeLocation, "/relative_cone", self.relative_cone_callback, 1) - self.parking_distance = .75 # meters; try playing with this number! + self.parking_distance = 1.0 # meters; try playing with this number! 
self.relative_x = 0 self.relative_y = 0 + self.L = 0.3 # meters, wheelbase of the car + self.v = 0.0 # velocity of the car + self.delta = 0.0 # steering angle + self.get_logger().info("Parking Controller Initialized") def relative_cone_callback(self, msg): @@ -40,11 +46,63 @@ def relative_cone_callback(self, msg): # YOUR CODE HERE # Use relative position and your control law to set drive_cmd + x = self.relative_x + y = self.relative_y + + theta = np.arctan2(y, x) # bearing of the cone in the car frame + d = (x**2+y**2)**.5 + + min_turning_radius = self.L/np.tan(0.34) + min_turning_radius = min_turning_radius*1 + + critical_radius = (min_turning_radius**2+self.parking_distance**2)**0.5 + + if ((x**2 + (y-min_turning_radius)**2) < critical_radius**2) or ((x**2 + (y+min_turning_radius)**2) < critical_radius**2): # handles the case where the cone is in the dead area inside either minimum-turning circle + u_speed = -0.5 + + thetacontroller = PIDController(2,0,0,0) + end_time = time.time() + dt = end_time - self.start_time + u_steering = thetacontroller.compute_control_signal(theta,dt) + # while backing out of the dead zone, flip the steering sign relative to the side the cone is on + if y > 0: + u_steering = -abs(u_steering) + elif y < 0: + u_steering = abs(u_steering) + + else: + thetacontroller = PIDController(-2,0,0,0) + end_time = time.time() + dt = end_time - self.start_time + u_steering = thetacontroller.compute_control_signal(theta,dt) + + # make sure the steering sign matches the side the cone is on + if y > 0: + u_steering = abs(u_steering) + elif y < 0: + u_steering = -abs(u_steering) + + + + speedcontroller = PIDController(-1,0,0,self.parking_distance) + u_speed = speedcontroller.compute_control_signal(d,dt) + + # publish drive command + u_speed = np.clip(u_speed,-1.,1.) # limit the speed setting + drive_cmd.drive.speed = u_speed + self.v = u_speed + + u_steering = np.clip(u_steering,-0.34,0.34) # limit the steering angle + drive_cmd.drive.steering_angle = u_steering + self.delta = u_steering + + ################################# self.drive_pub.publish(drive_cmd) self.error_publisher() + self.start_time = time.time() def error_publisher(self): """ @@ -57,6 +115,18 @@ def error_publisher(self): # YOUR CODE HERE # Populate error_msg with relative_x, relative_y, sqrt(x^2+y^2) + x = self.relative_x + y = self.relative_y + + d = (x**2+y**2)**.5 + + d_error = d - self.parking_distance + x_error = x - self.parking_distance + y_error = y + + error_msg.x_error = x_error + error_msg.y_error = y_error + error_msg.distance_error = d_error #################################
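For reference, the homography transformer above only records the calibration pairs (PTS_IMAGE_PLANE in pixels, PTS_GROUND_PLANE in inches); the body of transformUvToXy is not part of this diff. Below is a minimal standalone sketch of the underlying math using those same measurements, assuming the usual cv2.findHomography pipeline from the lab skeleton; uv_to_xy is a hypothetical helper written for illustration, not code taken from the repo.

import cv2
import numpy as np

METERS_PER_INCH = 0.0254

# Calibration pairs copied from homography_transformer.py above.
PTS_IMAGE_PLANE = [[354, 212], [242, 217], [354, 277], [565, 224]]   # pixels (u, v)
PTS_GROUND_PLANE = [[36, 0], [37, 18], [15, 2], [30, -18]]           # inches (x, y)

# Homography mapping image pixels to metric ground-plane coordinates.
src = np.array(PTS_IMAGE_PLANE, dtype=np.float32)
dst = np.array(PTS_GROUND_PLANE, dtype=np.float32) * METERS_PER_INCH
H, _ = cv2.findHomography(src, dst)

def uv_to_xy(u, v):
    """Apply the homography to a single pixel and dehomogenize the result."""
    x, y, w = H @ np.array([u, v, 1.0])
    return x / w, y / w

if __name__ == "__main__":
    # A pixel at a calibration point should map back to its measured ground location,
    # e.g. (354, 212) -> roughly (0.91 m, 0.00 m), i.e. 36 inches straight ahead.
    print(uv_to_xy(354, 212))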
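The dead-zone test in relative_cone_callback asks whether the cone sits inside either minimum-turning circle, inflated by the parking distance, in which case the controller backs up instead of driving forward. Here is a short standalone sketch of just that geometry, assuming the same constants used above (0.3 m wheelbase, 0.34 rad steering limit, 1.0 m parking distance); cone_in_dead_zone is an illustrative helper, not a function from the repo.

import numpy as np

WHEELBASE = 0.3          # meters, self.L in the controller
MAX_STEER = 0.34         # radians, the clip limit on the steering angle
PARKING_DISTANCE = 1.0   # meters, self.parking_distance

def cone_in_dead_zone(x, y):
    """Return True if the cone at (x, y) in the car frame lies inside either
    minimum-turning circle grown to the controller's critical_radius."""
    r_min = WHEELBASE / np.tan(MAX_STEER)             # minimum turning radius (~0.85 m)
    r_crit = np.hypot(r_min, PARKING_DISTANCE)        # critical_radius in the controller
    left_circle = x**2 + (y - r_min)**2 < r_crit**2   # circle centered at (0, +r_min)
    right_circle = x**2 + (y + r_min)**2 < r_crit**2  # circle centered at (0, -r_min)
    return left_circle or right_circle

if __name__ == "__main__":
    print(cone_in_dead_zone(0.3, 0.3))  # True: close and off to the side, so back up
    print(cone_in_dead_zone(2.0, 0.0))  # False: far ahead, drive forward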