From bfa4b7fb5a5729292d0b1e5b04bf8ae3423977d1 Mon Sep 17 00:00:00 2001 From: omeym Date: Tue, 5 Nov 2024 14:12:22 -0800 Subject: [PATCH 01/24] Added Kuka env to start testing SERL --- .gitmodules | 9 + examples/async_sac_state_sim/tmux_launch.sh | 2 +- fri | 1 + kuka_server/kuka_server/__init__.py | 0 kuka_server/kuka_server/robot_interface.py | 501 ++++++++++++++++++ kuka_server/kuka_server/utils.py | 51 ++ kuka_server/kuka_server/wait_for_message.py | 71 +++ kuka_server/package.xml | 21 + kuka_server/resource/kuka_server | 0 kuka_server/setup.cfg | 4 + kuka_server/setup.py | 25 + kuka_server/test/test_copyright.py | 25 + kuka_server/test/test_flake8.py | 25 + kuka_server/test/test_pep257.py | 23 + lbr_fri_idl | 1 + lbr_fri_ros2_stack | 1 + serl_robot_infra/kuka_env/envs/kuka_env.py | 433 +++++++++++++++ .../kuka_env/envs/peg_env/__init__.py | 1 + .../kuka_env/envs/peg_env/config.py | 89 ++++ .../kuka_env/envs/peg_env/kuka_peg_insert.py | 33 ++ 20 files changed, 1315 insertions(+), 1 deletion(-) create mode 100644 .gitmodules create mode 160000 fri create mode 100755 kuka_server/kuka_server/__init__.py create mode 100644 kuka_server/kuka_server/robot_interface.py create mode 100644 kuka_server/kuka_server/utils.py create mode 100644 kuka_server/kuka_server/wait_for_message.py create mode 100755 kuka_server/package.xml create mode 100755 kuka_server/resource/kuka_server create mode 100755 kuka_server/setup.cfg create mode 100755 kuka_server/setup.py create mode 100755 kuka_server/test/test_copyright.py create mode 100755 kuka_server/test/test_flake8.py create mode 100755 kuka_server/test/test_pep257.py create mode 160000 lbr_fri_idl create mode 160000 lbr_fri_ros2_stack create mode 100644 serl_robot_infra/kuka_env/envs/kuka_env.py create mode 100644 serl_robot_infra/kuka_env/envs/peg_env/__init__.py create mode 100644 serl_robot_infra/kuka_env/envs/peg_env/config.py create mode 100644 serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py diff --git 
a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..2cbdf96b --- /dev/null +++ b/.gitmodules @@ -0,0 +1,9 @@ +[submodule "fri"] + path = fri + url = git@github.com:lbr-stack/fri.git +[submodule "lbr_fri_idl"] + path = lbr_fri_idl + url = https://github.com/lbr-stack/lbr_fri_idl.git +[submodule "lbr_fri_ros2_stack"] + path = lbr_fri_ros2_stack + url = https://github.com/lbr-stack/lbr_fri_ros2_stack.git diff --git a/examples/async_sac_state_sim/tmux_launch.sh b/examples/async_sac_state_sim/tmux_launch.sh index 78ff94a8..b5fca4f4 100644 --- a/examples/async_sac_state_sim/tmux_launch.sh +++ b/examples/async_sac_state_sim/tmux_launch.sh @@ -1,7 +1,7 @@ #!/bin/bash EXAMPLE_DIR=${EXAMPLE_DIR:-"examples/async_sac_state_sim"} -CONDA_ENV=${CONDA_ENV:-"serl"} +CONDA_ENV=${CONDA_ENV:-"serl-docker"} cd $EXAMPLE_DIR echo "Running from $(pwd)" diff --git a/fri b/fri new file mode 160000 index 00000000..58119424 --- /dev/null +++ b/fri @@ -0,0 +1 @@ +Subproject commit 581194240a7e05bb2bc4d613ab59e450c3a2291b diff --git a/kuka_server/kuka_server/__init__.py b/kuka_server/kuka_server/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/kuka_server/kuka_server/robot_interface.py b/kuka_server/kuka_server/robot_interface.py new file mode 100644 index 00000000..4b227571 --- /dev/null +++ b/kuka_server/kuka_server/robot_interface.py @@ -0,0 +1,501 @@ +import time + +import rclpy +import rclpy.duration +from rclpy.node import Node +from rclpy.action import ActionClient +from rclpy.callback_groups import ReentrantCallbackGroup, MutuallyExclusiveCallbackGroup +from rclpy.executors import MultiThreadedExecutor + +import numpy as np + +from geometry_msgs.msg import Pose, Point, Quaternion +import rclpy.task +from sensor_msgs.msg import JointState +from geometry_msgs.msg import WrenchStamped +from control_msgs.action import FollowJointTrajectory +import os +import sys +from moveit_msgs.msg import ( + RobotState, + RobotTrajectory, + MoveItErrorCodes, + 
Constraints, + JointConstraint, +) +from moveit_msgs.srv import GetPositionIK, GetMotionPlan, GetPositionFK +from moveit_msgs.action import ExecuteTrajectory +from kuka_server.wait_for_message import wait_for_message + +from kuka_server.utils import quat_to_euler, convert_wrench_to_numpy, euler_to_quat +from lbr_fri_idl.msg import LBRState +import copy + +class RobotInterfaceNode(Node): + timeout_sec_ = 5.0 + + move_group_name_ = "arm" + namespace_ = "lbr" + + joint_state_topic_ = "joint_states" + lbr_state_topic_ = "state" + plan_srv_name_ = "plan_kinematic_path" + ik_srv_name_ = "compute_ik" + fk_srv_name_ = "compute_fk" + execute_action_name_ = "execute_trajectory" + fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" + WRENCH_TOPIC = "/lbr/force_torque_broadcaster/wrench" + + base_ = "lbr_link_0" + end_effector_ = "lbr_link_ee" + + def __init__(self) -> None: + super().__init__("robot_interface_node", namespace=self.namespace_) + + self.fk_client_callback = ReentrantCallbackGroup() + self.ik_client_callback = MutuallyExclusiveCallbackGroup() + self.plan_client_callback = MutuallyExclusiveCallbackGroup() + self.execute_client_callback = MutuallyExclusiveCallbackGroup() + + self.robot_state = {} + + self.wrench_sub = self.create_subscription( + WrenchStamped, + self.WRENCH_TOPIC, + self.wrench_callback, + 10 + ) + + self.ik_client_ = self.create_client(GetPositionIK, self.ik_srv_name_, callback_group=self.ik_client_callback) + if not self.ik_client_.wait_for_service(timeout_sec=self.timeout_sec_): + self.get_logger().error("IK service not available.") + exit(1) + + self.fk_client_ = self.create_client(GetPositionFK, self.fk_srv_name_, callback_group=self.fk_client_callback) + if not self.fk_client_.wait_for_service(timeout_sec=self.timeout_sec_): + self.get_logger().error("FK service not available.") + exit(1) + + self.plan_client_ = self.create_client(GetMotionPlan, self.plan_srv_name_, callback_group=self.plan_client_callback) + if 
not self.plan_client_.wait_for_service(timeout_sec=self.timeout_sec_): + self.get_logger().error("Plan service not available.") + exit(1) + + self.execute_client_ = ActionClient( + self, ExecuteTrajectory, self.execute_action_name_, callback_group=self.execute_client_callback + ) + if not self.execute_client_.wait_for_server(timeout_sec=self.timeout_sec_): + self.get_logger().error("Execute action not available.") + exit(1) + + self.fri_execute_client_ = ActionClient( + self, + FollowJointTrajectory, + self.fri_execute_action_name_, + callback_group=MutuallyExclusiveCallbackGroup(), + ) + if not self.fri_execute_client_.wait_for_server(timeout_sec=self.timeout_sec_): + self.get_logger().error("FRI Execute action not available.") + exit(1) + + def get_ik(self, target_pose: Pose) -> JointState | None: + request = GetPositionIK.Request() + + request.ik_request.group_name = self.move_group_name_ + tf_prefix = self.get_namespace()[1:] + request.ik_request.pose_stamped.header.frame_id = f"{tf_prefix}/{self.base_}" + request.ik_request.pose_stamped.header.stamp = self.get_clock().now().to_msg() + request.ik_request.pose_stamped.pose = target_pose + request.ik_request.avoid_collisions = True + + future = self.ik_client_.call_async(request) + + rclpy.spin_until_future_complete(self, future) + if future.result() is None: + self.get_logger().error("Failed to get IK solution") + return None + + response = future.result() + if response.error_code.val != MoveItErrorCodes.SUCCESS: + return None + + return response.solution.joint_state + + def wrench_callback(self, msg): + self.latest_wrench = convert_wrench_to_numpy(msg) + # self.get_logger().info(f"Received wrench data: {self.latest_wrench}") + return + + + def get_fk(self) -> Pose | None: + current_joint_state = self.get_joint_state() + if current_joint_state is None: + self.get_logger().error("Failed to get joint state") + return None + + current_robot_state = RobotState() + current_robot_state.joint_state = 
current_joint_state + + request = GetPositionFK.Request() + + request.header.frame_id = f"{self.namespace_}/{self.base_}" + request.header.stamp = self.get_clock().now().to_msg() + + request.fk_link_names.append(self.end_effector_) + request.robot_state = current_robot_state + + future = self.fk_client_.call_async(request) + + rclpy.spin_until_future_complete(self, future) + if future.result() is None: + self.get_logger().error("Failed to get FK solution") + return None + + response = future.result() + if response.error_code.val != MoveItErrorCodes.SUCCESS: + self.get_logger().error( + f"Failed to get FK solution: {response.error_code.val}" + ) + return None + + return response.pose_stamped[0].pose, current_joint_state + + def get_fk_lbr(self, commanded:bool = False) -> Pose | None: + lbr_state_set, lbr_state = wait_for_message(LBRState, self, self.lbr_state_topic_) + joint_position = lbr_state.measured_joint_position.tolist() + if commanded: + joint_position = lbr_state.commanded_joint_position.tolist() + print(joint_position) + joint_position[2], joint_position[3] = joint_position[3], joint_position[2] + + current_robot_state = RobotState() + current_robot_state.joint_state = self.get_joint_state() + current_robot_state.joint_state.position = joint_position + + request = GetPositionFK.Request() + + request.header.frame_id = f"{self.namespace_}/{self.base_}" + request.header.stamp = self.get_clock().now().to_msg() + + request.fk_link_names.append(self.end_effector_) + request.robot_state = current_robot_state + + future = self.fk_client_.call_async(request) + + rclpy.spin_until_future_complete(self, future) + if future.result() is None: + self.get_logger().error("Failed to get FK solution") + return None + + response = future.result() + if response.error_code.val != MoveItErrorCodes.SUCCESS: + self.get_logger().error( + f"Failed to get FK solution: {response.error_code.val}" + ) + return None + + return response.pose_stamped[0].pose + + def sum_of_square_diff( + 
self, joint_state_1: JointState, joint_state_2: JointState + ) -> float: + return np.sum( + np.square(np.subtract(joint_state_1.position, joint_state_2.position)) + ) + + def get_current_state(self): + ##Return a dictionary with all the corresponding state variables + #Pose, Velocity, Force, Torque, Jacobian, Joint Angles, Joint Velocity, Gripper Pose + self.robot_state["pose"] = np.zeros((6,)) + self.robot_state["vel"] = np.zeros((6,)) + self.robot_state["force"] = np.zeros((3,)) + self.robot_state["torque"] = np.zeros((3,)) + self.robot_state["q"] = np.zeros((7,)) + self.robot_state["dq"] = np.zeros((7,)) + + ##TCP pose + current_ee_geom_pose, current_joint_state = self.get_fk() + self.robot_state["pose"][:3] = np.array(current_ee_geom_pose.position.x,current_ee_geom_pose.position.y,current_ee_geom_pose.position.z) + euler_angles = quat_to_euler( np.array(current_ee_geom_pose.orientation.x, + current_ee_geom_pose.orientation.y, + current_ee_geom_pose.orientation.z, + current_ee_geom_pose.orientation.w)) + self.robot_state["pose"][3:] = euler_angles + + ##Getting Joint Angles + joint_angles = current_joint_state.position + self.robot_state["q"] = joint_angles + joint_velocity = current_joint_state.velocity + self.robot_state["dq"] = joint_velocity + + ##Getting Force and Wrench + current_wrench = copy.deepcopy(self.latest_wrench) + self.robot_state["force"] = current_wrench[:3] + self.robot_state["torque"] = current_wrench[3:] + + + + + return + + def move_to_pose(self, pose:np.ndarray): + + target_pose = Pose() + + target_pose.position.x = pose[0] + target_pose.position.y = pose[1] + target_pose.position.z = pose[2] + + target_pose_quaternion = euler_to_quat(pose[3:]) + target_pose.orientation.x = target_pose_quaternion[0] + target_pose.orientation.y = target_pose_quaternion[1] + target_pose.orientation.z = target_pose_quaternion[2] + target_pose.orientation.w = target_pose_quaternion[3] + + + traj = self.get_motion_plan(target_pose, True) + if traj: + client = 
self.get_motion_execute_client() + goal = ExecuteTrajectory.Goal() + goal.trajectory = traj + + future = client.send_goal_async(goal) + rclpy.spin_until_future_complete(self, future) + + goal_handle = future.result() + if not goal_handle.accepted: + self.get_logger().error("Failed to execute trajectory") + else: + self.get_logger().info("Trajectory accepted") + + + result_future = goal_handle.get_result_async() + + expect_duration = traj.joint_trajectory.points[-1].time_from_start + expect_time = time.time() + 2 * expect_duration.sec + while not result_future.done() and time.time() < expect_time: + time.sleep(0.01) + + self.get_logger().info("Trajectory executed") + + self.get_logger().info("Current pose: " + str(self.get_fk()[0]) ) + + + return + + + + def get_best_ik(self, target_pose: Pose, attempts: int = 100) -> JointState | None: + current_joint_state = self.get_joint_state() + if current_joint_state is None: + self.get_logger().error("Failed to get joint state") + return None + + best_cost = np.inf + best_joint_state = None + + for _ in range(attempts): + joint_state = self.get_ik(target_pose) + if joint_state is None: + continue + + cost = self.sum_of_square_diff(current_joint_state, joint_state) + if cost < best_cost: + best_cost = cost + best_joint_state = joint_state + + if not best_joint_state: + self.get_logger().error("Failed to get IK solution") + + return best_joint_state + + def get_joint_state(self) -> JointState: + current_joint_state_set, current_joint_state = wait_for_message( + JointState, self, self.joint_state_topic_ + ) + if not current_joint_state_set: + self.get_logger().error("Failed to get current joint state") + return None + + return current_joint_state + + def get_motion_plan( + self, target_pose: Pose, linear: bool = False, scaling_factor: float = .1, attempts: int = 10 + ) -> RobotTrajectory | None: + current_pose = self.get_fk()[0] + if current_pose is None: + self.get_logger().error("Failed to get current pose") + + # if 
check_same_pose(current_pose, target_pose): + # self.get_logger().info("Detected same pose as current pose, neglected") + # return RobotTrajectory() + + current_joint_state = self.get_joint_state() + if current_joint_state is None: + self.get_logger().error("Failed to get joint state") + return None + + current_robot_state = RobotState() + current_robot_state.joint_state.position = current_joint_state.position + + target_joint_state = self.get_best_ik(target_pose) + if target_joint_state is None: + self.get_logger().error("Failed to get target joint state") + return None + + target_constraint = Constraints() + for i in range(len(target_joint_state.position)): + joint_constraint = JointConstraint() + joint_constraint.joint_name = target_joint_state.name[i] + joint_constraint.position = target_joint_state.position[i] + joint_constraint.tolerance_above = 0.001 + joint_constraint.tolerance_below = 0.001 + joint_constraint.weight = 1.0 + target_constraint.joint_constraints.append(joint_constraint) + + request = GetMotionPlan.Request() + request.motion_plan_request.group_name = self.move_group_name_ + request.motion_plan_request.start_state = current_robot_state + request.motion_plan_request.goal_constraints.append(target_constraint) + request.motion_plan_request.num_planning_attempts = 10 + request.motion_plan_request.allowed_planning_time = 5.0 + request.motion_plan_request.max_velocity_scaling_factor = scaling_factor + request.motion_plan_request.max_acceleration_scaling_factor = scaling_factor + + if linear: + request.motion_plan_request.pipeline_id = "pilz_industrial_motion_planner" + request.motion_plan_request.planner_id = "LIN" + else: + request.motion_plan_request.pipeline_id = "ompl" + request.motion_plan_request.planner_id = "APSConfigDefault" + + for _ in range(attempts): + plan_future = self.plan_client_.call_async(request) + rclpy.spin_until_future_complete(self, plan_future) + + if plan_future.result() is None: + self.get_logger().error("Failed to get 
motion plan") + + response = plan_future.result() + if response.motion_plan_response.error_code.val != MoveItErrorCodes.SUCCESS: + self.get_logger().error( + f"Failed to get motion plan: {response.motion_plan_response.error_code.val}" + ) + else: + return response.motion_plan_response.trajectory + + return None + + def get_motion_execute_client(self) -> ActionClient: + return self.execute_client_ + + def get_fri_motion_execute(self, traj: RobotTrajectory, short_wait=False) -> bool: + joint_trajectory_goal = FollowJointTrajectory.Goal() + joint_trajectory_goal.trajectory = traj.joint_trajectory + + goal_future = self.fri_execute_client_.send_goal_async(joint_trajectory_goal) + rclpy.spin_until_future_complete(self, goal_future) + goal_handle = goal_future.result() + if not goal_handle.accepted: + self.get_logger().error("Trajectory goal rejected") + return False + self.get_logger().info("Trajectory goal accepted") + + result_future = goal_handle.get_result_async() + rclpy.spin_until_future_complete(self, result_future) + + execution_finished = False + expected_timout = time.time() + 30 + if short_wait: + expected_timout = time.time() + 2.0 + while not execution_finished and time.time() < expected_timout: + _, lbr_state = wait_for_message(LBRState, self, self.lbr_state_topic_) + if np.max(np.abs(np.subtract(lbr_state.measured_joint_position, traj.joint_trajectory.points[-1].positions))) < 0.0002: + execution_finished = True + + time.sleep(0.01) + + self.get_logger().info("Trajectory executed") + return True + + +def main(args=None): + rclpy.init(args=args) + + robot_interface_node = RobotInterfaceNode() + executor = MultiThreadedExecutor() + executor.add_node(robot_interface_node) + robot_interface_node.get_logger().info("Robot interface node started.") + + target_poses = [] + for i in range(3): + target_poses.append( + Pose( + position=Point(x=0.6, y=-0.1 + 0.1 * i, z=0.6), + orientation=Quaternion(x=0.0, y=-1.0, z=0.0, w=0.0), + ) + ) + + # traj = 
robot_interface_node.get_motion_plan(target_poses[1]) + # if traj: + # client = robot_interface_node.get_motion_execute_client() + # goal = ExecuteTrajectory.Goal() + # goal.trajectory = traj + + # future = client.send_goal_async(goal) + # rclpy.spin_until_future_complete(robot_interface_node, future) + + # goal_handle = future.result() + # if not goal_handle.accepted: + # robot_interface_node.get_logger().error("Failed to execute trajectory") + # else: + # robot_interface_node.get_logger().info("Trajectory accepted") + + # result_future = goal_handle.get_result_async() + + # expect_duration = traj.joint_trajectory.points[-1].time_from_start + # expect_time = time.time() + 2 * expect_duration.sec + # while not result_future.done() and time.time() < expect_time: + # time.sleep(0.01) + + # robot_interface_node.get_logger().info("Trajectory executed") + + # robot_interface_node.get_logger().info("Current pose: " + str(robot_interface_node.get_fk()[0])) + + for target_pose in target_poses: + traj = robot_interface_node.get_motion_plan(target_pose, True) + if traj: + client = robot_interface_node.get_motion_execute_client() + goal = ExecuteTrajectory.Goal() + goal.trajectory = traj + + future = client.send_goal_async(goal) + rclpy.spin_until_future_complete(robot_interface_node, future) + + goal_handle = future.result() + if not goal_handle.accepted: + robot_interface_node.get_logger().error("Failed to execute trajectory") + else: + robot_interface_node.get_logger().info("Trajectory accepted") + + + result_future = goal_handle.get_result_async() + + expect_duration = traj.joint_trajectory.points[-1].time_from_start + expect_time = time.time() + 2 * expect_duration.sec + while not result_future.done() and time.time() < expect_time: + time.sleep(0.01) + + robot_interface_node.get_logger().info("Trajectory executed") + + robot_interface_node.get_logger().info("Current pose: " + str(robot_interface_node.get_fk()[0]) ) + + # rclpy.spin(robot_interface_node) + + 
executor.spin() + + rclpy.shutdown() + + +if __name__ == "__main__": + main() diff --git a/kuka_server/kuka_server/utils.py b/kuka_server/kuka_server/utils.py new file mode 100644 index 00000000..0ddfb955 --- /dev/null +++ b/kuka_server/kuka_server/utils.py @@ -0,0 +1,51 @@ +from scipy.spatial.transform import Rotation as R +import numpy as np + +def euler_to_quat(euler_angles_abc, degrees = True): + q = R.from_euler('xyz', [euler_angles_abc[2], euler_angles_abc[1], euler_angles_abc[0]], degrees=degrees).as_quat() + + return q + + +def euler_to_rotmat(euler_angles_abc, degrees = True): + rot_mat = R.from_euler('xyz', [euler_angles_abc[2], euler_angles_abc[1], euler_angles_abc[0]], degrees=degrees).as_matrix() + + return rot_mat + + +def rotmat_to_quat(rotmat): + + quat = R.from_matrix(rotmat).as_quat() + + return quat + + +def quat_to_rotmat(quat): + + rotmat = R.from_quat(quat).as_matrix() + + return rotmat + +def quat_to_euler(quat): + + euler = R.from_quat(quat).as_euler("xyz") + return euler + + +def convert_wrench_to_numpy(msg): + force = msg.wrench.force + torque = msg.wrench.torque + return np.array([force.x, force.y, force.z, torque.x, torque.y, torque.z]) + +def xyzabc_to_se3(xyzabc, degrees = True): + + tranformation_mat = np.eye(4) + tranformation_mat[0,3] = xyzabc[0] + tranformation_mat[1,3] = xyzabc[1] + if(len(xyzabc) == 6): + tranformation_mat[2,3] = xyzabc[2] + tranformation_mat[:3,:3] = euler_to_rotmat(xyzabc[3:], degrees) + else: + tranformation_mat[:3,:3] = euler_to_rotmat(xyzabc[2:], degrees) + + return tranformation_mat \ No newline at end of file diff --git a/kuka_server/kuka_server/wait_for_message.py b/kuka_server/kuka_server/wait_for_message.py new file mode 100644 index 00000000..226c5d86 --- /dev/null +++ b/kuka_server/kuka_server/wait_for_message.py @@ -0,0 +1,71 @@ +# Copyright 2022 Sony Group Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Union + +from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy +from rclpy.node import Node +from rclpy.qos import QoSProfile +from rclpy.signals import SignalHandlerGuardCondition +from rclpy.utilities import timeout_sec_to_nsec + + +def wait_for_message( + msg_type, + node: 'Node', + topic: str, + *, + qos_profile: Union[QoSProfile, int] = 1, + time_to_wait=-1 +): + """ + Wait for the next incoming message. + + :param msg_type: message type + :param node: node to initialize the subscription on + :param topic: topic name to wait for message + :param qos_profile: QoS profile to use for the subscription + :param time_to_wait: seconds to wait before returning + :returns: (True, msg) if a message was successfully received, (False, None) if message + could not be obtained or shutdown was triggered asynchronously on the context. 
+ """ + context = node.context + wait_set = _rclpy.WaitSet(1, 1, 0, 0, 0, 0, context.handle) + wait_set.clear_entities() + + sub = node.create_subscription(msg_type, topic, lambda _: None, qos_profile=qos_profile) + try: + wait_set.add_subscription(sub.handle) + sigint_gc = SignalHandlerGuardCondition(context=context) + wait_set.add_guard_condition(sigint_gc.handle) + + timeout_nsec = timeout_sec_to_nsec(time_to_wait) + wait_set.wait(timeout_nsec) + + subs_ready = wait_set.get_ready_entities('subscription') + guards_ready = wait_set.get_ready_entities('guard_condition') + + if guards_ready: + if sigint_gc.handle.pointer in guards_ready: + return False, None + + if subs_ready: + if sub.handle.pointer in subs_ready: + msg_info = sub.handle.take_message(sub.msg_type, sub.raw) + if msg_info is not None: + return True, msg_info[0] + finally: + node.destroy_subscription(sub) + + return False, None \ No newline at end of file diff --git a/kuka_server/package.xml b/kuka_server/package.xml new file mode 100755 index 00000000..28ebaa99 --- /dev/null +++ b/kuka_server/package.xml @@ -0,0 +1,21 @@ + + + + kuka_server + 0.0.0 + TODO: Package description + root + TODO: License declaration + + serl_robot_infra + moveit + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + + ament_python + + diff --git a/kuka_server/resource/kuka_server b/kuka_server/resource/kuka_server new file mode 100755 index 00000000..e69de29b diff --git a/kuka_server/setup.cfg b/kuka_server/setup.cfg new file mode 100755 index 00000000..a253a0f2 --- /dev/null +++ b/kuka_server/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/kuka_server +[install] +install_scripts=$base/lib/kuka_server diff --git a/kuka_server/setup.py b/kuka_server/setup.py new file mode 100755 index 00000000..3aa55290 --- /dev/null +++ b/kuka_server/setup.py @@ -0,0 +1,25 @@ +from setuptools import find_packages, setup + +package_name = 'kuka_server' + +setup( + name=package_name, + version='0.0.0', + 
packages=find_packages(exclude=['test']), + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='root', + maintainer_email='root@todo.todo', + description='TODO: Package description', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': ['robot_server = kuka_server.robot_interface:main' + ], + }, +) diff --git a/kuka_server/test/test_copyright.py b/kuka_server/test/test_copyright.py new file mode 100755 index 00000000..97a39196 --- /dev/null +++ b/kuka_server/test/test_copyright.py @@ -0,0 +1,25 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ament_copyright.main import main +import pytest + + +# Remove the `skip` decorator once the source file(s) have a copyright header +@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.') +@pytest.mark.copyright +@pytest.mark.linter +def test_copyright(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found errors' diff --git a/kuka_server/test/test_flake8.py b/kuka_server/test/test_flake8.py new file mode 100755 index 00000000..27ee1078 --- /dev/null +++ b/kuka_server/test/test_flake8.py @@ -0,0 +1,25 @@ +# Copyright 2017 Open Source Robotics Foundation, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ament_flake8.main import main_with_errors +import pytest + + +@pytest.mark.flake8 +@pytest.mark.linter +def test_flake8(): + rc, errors = main_with_errors(argv=[]) + assert rc == 0, \ + 'Found %d code style errors / warnings:\n' % len(errors) + \ + '\n'.join(errors) diff --git a/kuka_server/test/test_pep257.py b/kuka_server/test/test_pep257.py new file mode 100755 index 00000000..b234a384 --- /dev/null +++ b/kuka_server/test/test_pep257.py @@ -0,0 +1,23 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ament_pep257.main import main +import pytest + + +@pytest.mark.linter +@pytest.mark.pep257 +def test_pep257(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found code style errors / warnings' diff --git a/lbr_fri_idl b/lbr_fri_idl new file mode 160000 index 00000000..e422bff2 --- /dev/null +++ b/lbr_fri_idl @@ -0,0 +1 @@ +Subproject commit e422bff2b3d70ec47853145b35645980df623a60 diff --git a/lbr_fri_ros2_stack b/lbr_fri_ros2_stack new file mode 160000 index 00000000..218d723f --- /dev/null +++ b/lbr_fri_ros2_stack @@ -0,0 +1 @@ +Subproject commit 218d723f817c61314988833611effddc5333ca6c diff --git a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py new file mode 100644 index 00000000..d8005325 --- /dev/null +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -0,0 +1,433 @@ +"""Gym Interface for Franka""" +import numpy as np +import gym +import cv2 +import copy +from scipy.spatial.transform import Rotation +import time +import requests +import queue +import threading +from datetime import datetime +from collections import OrderedDict +from typing import Dict + +from franka_env.camera.video_capture import VideoCapture +from franka_env.camera.rs_capture import RSCapture +from franka_env.utils.rotations import euler_2_quat, quat_2_euler + +# from kuka_server.robot_interface import RobotInterfaceNode +from kuka_server.kuka_server.robot_interface import RobotInterfaceNode + + +class ImageDisplayer(threading.Thread): + def __init__(self, queue): + threading.Thread.__init__(self) + self.queue = queue + self.daemon = True # make this a daemon thread + + def run(self): + while True: + img_array = self.queue.get() # retrieve an image from the queue + if img_array is None: # None is our signal to exit + break + + frame = np.concatenate( + [v for k, v in img_array.items() if "full" not in k], axis=0 + ) + + cv2.imshow("RealSense Cameras", frame) + cv2.waitKey(1) + + 
+############################################################################## + + +class DefaultEnvConfig: + """Default configuration for KukaEnv. Fill in the values below.""" + + ROBOT_IP: str = "192.168.10.122" + REALSENSE_CAMERAS: Dict = { + "wrist_1": "130322274175", + "wrist_2": "127122270572", + } + TARGET_POSE: np.ndarray = np.zeros((6,)) + REWARD_THRESHOLD: np.ndarray = np.zeros((6,)) + ACTION_SCALE = np.zeros((3,)) + RESET_POSE = np.zeros((6,)) + RANDOM_RESET = (False,) + RANDOM_XY_RANGE = (0.0,) + RANDOM_RZ_RANGE = (0.0,) + ABS_POSE_LIMIT_HIGH = np.zeros((6,)) + ABS_POSE_LIMIT_LOW = np.zeros((6,)) + COMPLIANCE_PARAM: Dict[str, float] = {} + PRECISION_PARAM: Dict[str, float] = {} + BINARY_GRIPPER_THREASHOLD: float = 0.5 + APPLY_GRIPPER_PENALTY: bool = True + GRIPPER_PENALTY: float = 0.1 + USE_GRIPPER: bool = False + + +############################################################################## + + +class KukaEnv(gym.Env): + def __init__( + self, + hz=10, + fake_env=False, + save_video=False, + config: DefaultEnvConfig = None, + max_episode_length=100, + ): + + self.robot_interface_node = RobotInterfaceNode() + self.action_scale = config.ACTION_SCALE + self._TARGET_POSE = config.TARGET_POSE + self._REWARD_THRESHOLD = config.REWARD_THRESHOLD + self.url = config.ROBOT_IP + self.config = config + self.max_episode_length = max_episode_length + + # convert last 3 elements from euler to quat, from size (6,) to (7,) + self.resetpos = np.concatenate( + [config.RESET_POSE[:3], euler_2_quat(config.RESET_POSE[3:])] + ) + + self.currpos = self.resetpos.copy() + self.currvel = np.zeros((6,)) + self.q = np.zeros((7,)) + self.dq = np.zeros((7,)) + self.currforce = np.zeros((3,)) + self.currtorque = np.zeros((3,)) + self.currjacobian = np.zeros((6, 7)) + + self.curr_gripper_pos = 0 + self.gripper_binary_state = 0 # 0 for open, 1 for closed + self.lastsent = time.time() + self.randomreset = config.RANDOM_RESET + self.random_xy_range = config.RANDOM_XY_RANGE + 
self.random_rz_range = config.RANDOM_RZ_RANGE + self.hz = hz + self.joint_reset_cycle = 200 # reset the robot joint every 200 cycles + + if save_video: + print("Saving videos!") + self.save_video = save_video + self.recording_frames = [] + + # boundary box + self.xyz_bounding_box = gym.spaces.Box( + config.ABS_POSE_LIMIT_LOW[:3], + config.ABS_POSE_LIMIT_HIGH[:3], + dtype=np.float64, + ) + self.rpy_bounding_box = gym.spaces.Box( + config.ABS_POSE_LIMIT_LOW[3:], + config.ABS_POSE_LIMIT_HIGH[3:], + dtype=np.float64, + ) + # Action/Observation Space + self.action_space = gym.spaces.Box( + np.ones((7,), dtype=np.float32) * -1, + np.ones((7,), dtype=np.float32), + ) + + self.observation_space = gym.spaces.Dict( + { + "state": gym.spaces.Dict( + { + "tcp_pose": gym.spaces.Box( + -np.inf, np.inf, shape=(7,) + ), # xyz + quat + "tcp_vel": gym.spaces.Box(-np.inf, np.inf, shape=(6,)), + "tcp_force": gym.spaces.Box(-np.inf, np.inf, shape=(3,)), + "tcp_torque": gym.spaces.Box(-np.inf, np.inf, shape=(3,)), + } + ), + "images": gym.spaces.Dict( + { + "wrist_1": gym.spaces.Box( + 0, 255, shape=(128, 128, 3), dtype=np.uint8 + ), + "wrist_2": gym.spaces.Box( + 0, 255, shape=(128, 128, 3), dtype=np.uint8 + ), + } + ), + } + ) + self.cycle_count = 0 + + if fake_env: + return + + self.cap = None + self.init_cameras(config.REALSENSE_CAMERAS) + self.img_queue = queue.Queue() + self.displayer = ImageDisplayer(self.img_queue) + self.displayer.start() + self.use_gripper = config.USE_GRIPPER + print("Initialized Kuka") + + def clip_safety_box(self, pose: np.ndarray) -> np.ndarray: + """Clip the pose to be within the safety box.""" + pose[:3] = np.clip( + pose[:3], self.xyz_bounding_box.low, self.xyz_bounding_box.high + ) + euler = Rotation.from_quat(pose[3:]).as_euler("xyz") + + # Clip first euler angle separately due to discontinuity from pi to -pi + sign = np.sign(euler[0]) + euler[0] = sign * ( + np.clip( + np.abs(euler[0]), + self.rpy_bounding_box.low[0], + self.rpy_bounding_box.high[0], 
+ ) + ) + + euler[1:] = np.clip( + euler[1:], self.rpy_bounding_box.low[1:], self.rpy_bounding_box.high[1:] + ) + pose[3:] = Rotation.from_euler("xyz", euler).as_quat() + + return pose + + def step(self, action: np.ndarray) -> tuple: + """standard gym step function.""" + start_time = time.time() + action = np.clip(action, self.action_space.low, self.action_space.high) + xyz_delta = action[:3] + + self.nextpos = self.currpos.copy() + self.nextpos[:3] = self.nextpos[:3] + xyz_delta * self.action_scale[0] + + # GET ORIENTATION FROM ACTION + self.nextpos[3:] = ( + Rotation.from_euler("xyz", action[3:6] * self.action_scale[1]) + * Rotation.from_quat(self.currpos[3:]) + ).as_quat() + + ##Remove gripper action NOTE: Omey + if(self.use_gripper): + gripper_action = action[6] * self.action_scale[2] + gripper_action_effective = self._send_gripper_command(gripper_action) + + self._send_pos_command(self.clip_safety_box(self.nextpos)) + + self.curr_path_length += 1 + dt = time.time() - start_time + time.sleep(max(0, (1.0 / self.hz) - dt)) + + self._update_currpos() + ob = self._get_obs() + if(self.use_gripper): + reward = self.compute_reward(ob, gripper_action_effective) + else: + reward = self.compute_reward(ob) + + done = self.curr_path_length >= self.max_episode_length or reward == 1 + return ob, reward, done, False, {} + + def compute_reward(self, obs, gripper_action_effective = None) -> bool: + """We are using a sparse reward function.""" + current_pose = obs["state"]["tcp_pose"] + # convert from quat to euler first + euler_angles = quat_2_euler(current_pose[3:]) + euler_angles = np.abs(euler_angles) + current_pose = np.hstack([current_pose[:3], euler_angles]) + delta = np.abs(current_pose - self._TARGET_POSE) + if np.all(delta < self._REWARD_THRESHOLD): + reward = 1 + else: + # print(f'Goal not reached, the difference is {delta}, the desired threshold is {_REWARD_THRESHOLD}') + reward = 0 + + if self.config.APPLY_GRIPPER_PENALTY and gripper_action_effective: + reward -= 
self.config.GRIPPER_PENALTY + + return reward + + def crop_image(self, name, image) -> np.ndarray: + """Crop realsense images to be a square.""" + if name == "wrist_1": + return image[:, 80:560, :] + elif name == "wrist_2": + return image[:, 80:560, :] + else: + return ValueError(f"Camera {name} not recognized in cropping") + + def get_im(self) -> Dict[str, np.ndarray]: + """Get images from the realsense cameras.""" + images = {} + display_images = {} + for key, cap in self.cap.items(): + try: + rgb = cap.read() + cropped_rgb = self.crop_image(key, rgb) + resized = cv2.resize( + cropped_rgb, self.observation_space["images"][key].shape[:2][::-1] + ) + images[key] = resized[..., ::-1] + display_images[key] = resized + display_images[key + "_full"] = cropped_rgb + except queue.Empty: + input( + f"{key} camera frozen. Check connect, then press enter to relaunch..." + ) + cap.close() + self.init_cameras(self.config.REALSENSE_CAMERAS) + return self.get_im() + + self.recording_frames.append( + np.concatenate([display_images[f"{k}_full"] for k in self.cap], axis=0) + ) + self.img_queue.put(display_images) + return images + + def interpolate_move(self, goal: np.ndarray, timeout: float): + """Move the robot to the goal position with linear interpolation.""" + steps = int(timeout * self.hz) + self._update_currpos() + path = np.linspace(self.currpos, goal, steps) + for p in path: + self._send_pos_command(p) + time.sleep(1 / self.hz) + self._update_currpos() + + def go_to_rest(self, joint_reset=False): + """ + The concrete steps to perform reset should be + implemented each subclass for the specific task. + Should override this method if custom reset procedure is needed. 
+ """ + # Change to precision mode for reset + # requests.post(self.url + "update_param", json=self.config.PRECISION_PARAM) + time.sleep(0.5) + + reset_pose = self.resetpos.copy() + self.interpolate_move(reset_pose, timeout=1.5) + + # Change to compliance mode + # requests.post(self.url + "update_param", json=self.config.COMPLIANCE_PARAM) + + def reset(self, joint_reset=False, **kwargs): + # requests.post(self.url + "update_param", json=self.config.COMPLIANCE_PARAM) + + if self.save_video: + self.save_video_recording() + + self.cycle_count += 1 + if self.cycle_count % self.joint_reset_cycle == 0: + self.cycle_count = 0 + joint_reset = True + + self.go_to_rest(joint_reset=joint_reset) + self._recover() + self.curr_path_length = 0 + + self._update_currpos() + obs = self._get_obs() + + return obs, {} + + def save_video_recording(self): + try: + if len(self.recording_frames): + video_writer = cv2.VideoWriter( + f'./videos/{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}.mp4', + cv2.VideoWriter_fourcc(*"mp4v"), + 10, + self.recording_frames[0].shape[:2][::-1], + ) + for frame in self.recording_frames: + video_writer.write(frame) + video_writer.release() + self.recording_frames.clear() + except Exception as e: + print(f"Failed to save video: {e}") + + def init_cameras(self, name_serial_dict=None): + """Init both wrist cameras.""" + if self.cap is not None: # close cameras if they are already open + self.close_cameras() + + self.cap = OrderedDict() + for cam_name, cam_serial in name_serial_dict.items(): + cap = VideoCapture( + RSCapture(name=cam_name, serial_number=cam_serial, depth=False) + ) + self.cap[cam_name] = cap + + def close_cameras(self): + """Close both wrist cameras.""" + try: + for cap in self.cap.values(): + cap.close() + except Exception as e: + print(f"Failed to close cameras: {e}") + + def _recover(self): + """Internal function to recover the robot from error state.""" + print("Implement a function to recover from error state") + return + + def 
_send_pos_command(self, pos: np.ndarray): + """Internal function to send position command to the robot.""" + self._recover() + arr = np.array(pos).astype(np.float32) + self.robot_interface_node.move_to_pose(arr) + print("Done moving the robot") + + def _send_gripper_command(self, pos: float, mode="binary"): + """Internal function to send gripper command to the robot.""" + if mode == "binary": + if ( + pos <= -self.config.BINARY_GRIPPER_THREASHOLD + and self.gripper_binary_state == 0 + ): # close gripper + requests.post(self.url + "close_gripper") + time.sleep(0.6) + self.gripper_binary_state = 1 + return True + elif ( + pos >= self.config.BINARY_GRIPPER_THREASHOLD + and self.gripper_binary_state == 1 + ): # open gripper + requests.post(self.url + "open_gripper") + time.sleep(0.6) + self.gripper_binary_state = 0 + return True + else: # do nothing to the gripper + return False + elif mode == "continuous": + raise NotImplementedError("Continuous gripper control is optional") + + def _update_currpos(self): + """ + Internal function to get the latest state of the robot and its gripper. 
+ """ + ps = self.robot_interface_node.get_current_state() + self.currpos[:] = np.array(ps["pose"]) + self.currvel[:] = np.array(ps["vel"]) + + self.currforce[:] = np.array(ps["force"]) + self.currtorque[:] = np.array(ps["torque"]) + self.currjacobian[:] = np.reshape(np.array(ps["jacobian"]), (6, 7)) + + self.q[:] = np.array(ps["q"]) + self.dq[:] = np.array(ps["dq"]) + + if(self.use_gripper): + self.curr_gripper_pos = np.array(ps["gripper_pos"]) + + def _get_obs(self) -> dict: + images = self.get_im() + state_observation = { + "tcp_pose": self.currpos, + "tcp_vel": self.currvel, + "tcp_force": self.currforce, + "tcp_torque": self.currtorque, + } + return copy.deepcopy(dict(images=images, state=state_observation)) diff --git a/serl_robot_infra/kuka_env/envs/peg_env/__init__.py b/serl_robot_infra/kuka_env/envs/peg_env/__init__.py new file mode 100644 index 00000000..fdb1bb44 --- /dev/null +++ b/serl_robot_infra/kuka_env/envs/peg_env/__init__.py @@ -0,0 +1 @@ +from kuka_env.envs.peg_env.kuka_peg_insert import KukaPegInsert diff --git a/serl_robot_infra/kuka_env/envs/peg_env/config.py b/serl_robot_infra/kuka_env/envs/peg_env/config.py new file mode 100644 index 00000000..1226e6a0 --- /dev/null +++ b/serl_robot_infra/kuka_env/envs/peg_env/config.py @@ -0,0 +1,89 @@ +import numpy as np +from franka_env.envs.franka_env import DefaultEnvConfig + + +class PegEnvConfig(DefaultEnvConfig): + """Set the configuration for FrankaEnv.""" + + ROBOT_IP: str = "192.168.10.122" + REALSENSE_CAMERAS = { + "wrist_1": "130322274175", + "wrist_2": "127122270572", + } + TARGET_POSE = np.array( + [ + 0.5906439143742067, + 0.07771711953459341, + 0.0937835826958042, + 3.1099675, + 0.0146619, + -0.0078615, + ] + ) + RESET_POSE = TARGET_POSE + np.array([0.0, 0.0, 0.1, 0.0, 0.0, 0.0]) + REWARD_THRESHOLD: np.ndarray = np.array([0.01, 0.01, 0.01, 0.2, 0.2, 0.2]) + APPLY_GRIPPER_PENALTY = False + ACTION_SCALE = np.array([0.02, 0.1, 1]) + RANDOM_RESET = True + RANDOM_XY_RANGE = 0.05 + RANDOM_RZ_RANGE 
= np.pi / 6 + ABS_POSE_LIMIT_LOW = np.array( + [ + TARGET_POSE[0] - RANDOM_XY_RANGE, + TARGET_POSE[1] - RANDOM_XY_RANGE, + TARGET_POSE[2], + TARGET_POSE[3] - 0.01, + TARGET_POSE[4] - 0.01, + TARGET_POSE[5] - RANDOM_RZ_RANGE, + ] + ) + ABS_POSE_LIMIT_HIGH = np.array( + [ + TARGET_POSE[0] + RANDOM_XY_RANGE, + TARGET_POSE[1] + RANDOM_XY_RANGE, + TARGET_POSE[2] + 0.1, + TARGET_POSE[3] + 0.01, + TARGET_POSE[4] + 0.01, + TARGET_POSE[5] + RANDOM_RZ_RANGE, + ] + ) + COMPLIANCE_PARAM = { + "translational_stiffness": 2000, + "translational_damping": 0.89, + "rotational_stiffness": 150, + "rotational_damping": 0.7, + "translational_Ki": 0, + "translational_clip_x": 0.003, + "translational_clip_y": 0.003, + "translational_clip_z": 0.01, + "translational_clip_neg_x": 0.003, + "translational_clip_neg_y": 0.003, + "translational_clip_neg_z": 0.01, + "rotational_clip_x": 0.02, + "rotational_clip_y": 0.02, + "rotational_clip_z": 0.02, + "rotational_clip_neg_x": 0.02, + "rotational_clip_neg_y": 0.02, + "rotational_clip_neg_z": 0.02, + "rotational_Ki": 0, + } + PRECISION_PARAM = { + "translational_stiffness": 3000, + "translational_damping": 0.89, + "rotational_stiffness": 300, + "rotational_damping": 0.9, + "translational_Ki": 0.1, + "translational_clip_x": 0.01, + "translational_clip_y": 0.01, + "translational_clip_z": 0.01, + "translational_clip_neg_x": 0.01, + "translational_clip_neg_y": 0.01, + "translational_clip_neg_z": 0.01, + "rotational_clip_x": 0.05, + "rotational_clip_y": 0.05, + "rotational_clip_z": 0.05, + "rotational_clip_neg_x": 0.05, + "rotational_clip_neg_y": 0.05, + "rotational_clip_neg_z": 0.05, + "rotational_Ki": 0.1, + } diff --git a/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py b/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py new file mode 100644 index 00000000..8eb983b4 --- /dev/null +++ b/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py @@ -0,0 +1,33 @@ +import numpy as np +import gym +import time +import requests +import copy 
+ +from kuka_env.envs.kuka_env import KukaEnv +from franka_env.utils.rotations import euler_2_quat +from franka_env.envs.peg_env.config import PegEnvConfig + + +class KukaPegInsert(KukaEnv): + def __init__(self, **kwargs): + super().__init__(**kwargs, config=PegEnvConfig) + + def go_to_rest(self, joint_reset=False): + """ + Move to the rest position defined in base class. + Add a small z offset before going to rest to avoid collision with object. + """ + # self._send_gripper_command(-1) + self._update_currpos() + self._send_pos_command(self.currpos) + time.sleep(0.5) + + # Move up to clear the slot + self._update_currpos() + reset_pose = copy.deepcopy(self.currpos) + reset_pose[2] += 0.10 + self.interpolate_move(reset_pose, timeout=1) + + # execute the go_to_rest method from the parent class + super().go_to_rest(joint_reset) From 1a8ef43c8317a57d8f448c3142693219ec6dcd46 Mon Sep 17 00:00:00 2001 From: omeym Date: Wed, 6 Nov 2024 11:53:01 -0800 Subject: [PATCH 02/24] Made changes in SERL for kuka execution --- .../async_drq_randomized.py | 67 ++++++----- examples/async_peg_insert_drq/run_learner.sh | 4 +- kuka_server/kuka_server/robot_interface.py | 71 ++++++------ lbr_fri_ros2_stack | 2 +- serl_robot_infra/kuka_env/__init__.py | 14 +++ serl_robot_infra/kuka_env/envs/kuka_env.py | 22 +++- .../kuka_env/envs/peg_env/config.py | 2 +- .../kuka_env/envs/peg_env/kuka_peg_insert.py | 2 +- .../kuka_env/envs/relative_env.py | 109 ++++++++++++++++++ 9 files changed, 215 insertions(+), 78 deletions(-) create mode 100644 serl_robot_infra/kuka_env/__init__.py create mode 100644 serl_robot_infra/kuka_env/envs/relative_env.py diff --git a/examples/async_peg_insert_drq/async_drq_randomized.py b/examples/async_peg_insert_drq/async_drq_randomized.py index 4fd76f08..66b4f1d2 100644 --- a/examples/async_peg_insert_drq/async_drq_randomized.py +++ b/examples/async_peg_insert_drq/async_drq_randomized.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 - +import sys 
+sys.path.append("/home/aero/anaconda3/envs/serl/lib/python3.10/site-packages") import time from functools import partial import jax @@ -28,14 +29,13 @@ ) from serl_launcher.data.data_store import MemoryEfficientReplayBufferDataStore from serl_launcher.wrappers.serl_obs_wrappers import SERLObsWrapper -from franka_env.envs.relative_env import RelativeFrame +from kuka_env.envs.relative_env import RelativeFrame from franka_env.envs.wrappers import ( - GripperCloseEnv, - SpacemouseIntervention, Quat2EulerWrapper, ) import franka_env +import kuka_env FLAGS = flags.FLAGS @@ -210,7 +210,7 @@ def update_params(params): ############################################################################## -def learner(rng, agent: DrQAgent, replay_buffer, demo_buffer): +def learner(rng, agent: DrQAgent, replay_buffer): """ The learner loop, which runs when "--learner" is set to True. """ @@ -262,13 +262,13 @@ def stats_callback(type: str, payload: dict) -> dict: }, device=sharding.replicate(), ) - demo_iterator = demo_buffer.get_iterator( - sample_args={ - "batch_size": FLAGS.batch_size // 2, - "pack_obs_and_next_obs": True, - }, - device=sharding.replicate(), - ) + # demo_iterator = demo_buffer.get_iterator( + # sample_args={ + # "batch_size": FLAGS.batch_size // 2, + # "pack_obs_and_next_obs": True, + # }, + # device=sharding.replicate(), + # ) # wait till the replay buffer is filled with enough data timer = Timer() @@ -278,8 +278,8 @@ def stats_callback(type: str, payload: dict) -> dict: for critic_step in range(FLAGS.critic_actor_ratio - 1): with timer.context("sample_replay_buffer"): batch = next(replay_iterator) - demo_batch = next(demo_iterator) - batch = concat_batches(batch, demo_batch, axis=0) + # demo_batch = next(demo_iterator) + # batch = concat_batches(batch, demo_batch, axis=0) with timer.context("train_critics"): agent, critics_info = agent.update_critics( @@ -288,8 +288,8 @@ def stats_callback(type: str, payload: dict) -> dict: with timer.context("train"): batch = 
next(replay_iterator) - demo_batch = next(demo_iterator) - batch = concat_batches(batch, demo_batch, axis=0) + # demo_batch = next(demo_iterator) + # batch = concat_batches(batch, demo_batch, axis=0) agent, update_info = agent.update_high_utd(batch, utd_ratio=1) # publish the updated network @@ -324,9 +324,9 @@ def main(_): fake_env=FLAGS.learner, save_video=FLAGS.eval_checkpoint_step, ) - env = GripperCloseEnv(env) - if FLAGS.actor: - env = SpacemouseIntervention(env) + # env = GripperCloseEnv(env) + # if FLAGS.actor: + # env = SpacemouseIntervention(env) env = RelativeFrame(env) env = Quat2EulerWrapper(env) env = SERLObsWrapper(env) @@ -358,27 +358,26 @@ def main(_): capacity=FLAGS.replay_buffer_capacity, image_keys=image_keys, ) - demo_buffer = MemoryEfficientReplayBufferDataStore( - env.observation_space, - env.action_space, - capacity=10000, - image_keys=image_keys, - ) - import pickle as pkl - - with open(FLAGS.demo_path, "rb") as f: - trajs = pkl.load(f) - for traj in trajs: - demo_buffer.insert(traj) - print(f"demo buffer size: {len(demo_buffer)}") + # demo_buffer = MemoryEfficientReplayBufferDataStore( + # env.observation_space, + # env.action_space, + # capacity=10000, + # image_keys=image_keys, + # ) + # import pickle as pkl + + # with open(FLAGS.demo_path, "rb") as f: + # trajs = pkl.load(f) + # for traj in trajs: + # demo_buffer.insert(traj) + # print(f"demo buffer size: {len(demo_buffer)}") # learner loop print_green("starting learner loop") learner( sampling_rng, agent, - replay_buffer, - demo_buffer=demo_buffer, + replay_buffer ) elif FLAGS.actor: diff --git a/examples/async_peg_insert_drq/run_learner.sh b/examples/async_peg_insert_drq/run_learner.sh index c2823a19..a1516d13 100644 --- a/examples/async_peg_insert_drq/run_learner.sh +++ b/examples/async_peg_insert_drq/run_learner.sh @@ -1,8 +1,8 @@ export XLA_PYTHON_CLIENT_PREALLOCATE=false && \ export XLA_PYTHON_CLIENT_MEM_FRACTION=.2 && \ -python async_drq_randomized.py "$@" \ +python3 
async_drq_randomized.py "$@" \ --learner \ - --env FrankaPegInsert-Vision-v0 \ + --env KukaPegInsert-Vision-v0 \ --exp_name=serl_dev_drq_rlpd10demos_peg_insert_random_resnet_097 \ --seed 0 \ --random_steps 1000 \ diff --git a/kuka_server/kuka_server/robot_interface.py b/kuka_server/kuka_server/robot_interface.py index 4b227571..147837fb 100644 --- a/kuka_server/kuka_server/robot_interface.py +++ b/kuka_server/kuka_server/robot_interface.py @@ -27,6 +27,7 @@ from moveit_msgs.action import ExecuteTrajectory from kuka_server.wait_for_message import wait_for_message + from kuka_server.utils import quat_to_euler, convert_wrench_to_numpy, euler_to_quat from lbr_fri_idl.msg import LBRState import copy @@ -46,8 +47,8 @@ class RobotInterfaceNode(Node): fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" WRENCH_TOPIC = "/lbr/force_torque_broadcaster/wrench" - base_ = "lbr_link_0" - end_effector_ = "lbr_link_ee" + base_ = "link_0" ###Changed for compatibility with latest lbr stack + end_effector_ = "link_ee" def __init__(self) -> None: super().__init__("robot_interface_node", namespace=self.namespace_) @@ -137,7 +138,7 @@ def get_fk(self) -> Pose | None: current_robot_state.joint_state = current_joint_state request = GetPositionFK.Request() - + self.get_logger().info(f"{self.namespace_}/{self.base_}") request.header.frame_id = f"{self.namespace_}/{self.base_}" request.header.stamp = self.get_clock().now().to_msg() @@ -215,11 +216,11 @@ def get_current_state(self): ##TCP pose current_ee_geom_pose, current_joint_state = self.get_fk() - self.robot_state["pose"][:3] = np.array(current_ee_geom_pose.position.x,current_ee_geom_pose.position.y,current_ee_geom_pose.position.z) - euler_angles = quat_to_euler( np.array(current_ee_geom_pose.orientation.x, + self.robot_state["pose"][:3] = np.array([current_ee_geom_pose.position.x,current_ee_geom_pose.position.y,current_ee_geom_pose.position.z]) + euler_angles = quat_to_euler( 
np.array([current_ee_geom_pose.orientation.x, current_ee_geom_pose.orientation.y, current_ee_geom_pose.orientation.z, - current_ee_geom_pose.orientation.w)) + current_ee_geom_pose.orientation.w])) self.robot_state["pose"][3:] = euler_angles ##Getting Joint Angles @@ -427,14 +428,14 @@ def main(args=None): executor.add_node(robot_interface_node) robot_interface_node.get_logger().info("Robot interface node started.") - target_poses = [] - for i in range(3): - target_poses.append( - Pose( - position=Point(x=0.6, y=-0.1 + 0.1 * i, z=0.6), - orientation=Quaternion(x=0.0, y=-1.0, z=0.0, w=0.0), - ) - ) + # target_poses = [] + # for i in range(3): + # target_poses.append( + # Pose( + # position=Point(x=0.5, y=-0.1 + 0.1 * i, z=0.6), + # orientation=Quaternion(x=0.0, y=-1.0, z=0.0, w=0.0), + # ) + # ) # traj = robot_interface_node.get_motion_plan(target_poses[1]) # if traj: @@ -462,36 +463,38 @@ def main(args=None): # robot_interface_node.get_logger().info("Current pose: " + str(robot_interface_node.get_fk()[0])) - for target_pose in target_poses: - traj = robot_interface_node.get_motion_plan(target_pose, True) - if traj: - client = robot_interface_node.get_motion_execute_client() - goal = ExecuteTrajectory.Goal() - goal.trajectory = traj + # for target_pose in target_poses: + # traj = robot_interface_node.get_motion_plan(target_pose, True) + # if traj: + # client = robot_interface_node.get_motion_execute_client() + # goal = ExecuteTrajectory.Goal() + # goal.trajectory = traj - future = client.send_goal_async(goal) - rclpy.spin_until_future_complete(robot_interface_node, future) + # future = client.send_goal_async(goal) + # rclpy.spin_until_future_complete(robot_interface_node, future) - goal_handle = future.result() - if not goal_handle.accepted: - robot_interface_node.get_logger().error("Failed to execute trajectory") - else: - robot_interface_node.get_logger().info("Trajectory accepted") + # goal_handle = future.result() + # if not goal_handle.accepted: + # 
robot_interface_node.get_logger().error("Failed to execute trajectory") + # else: + # robot_interface_node.get_logger().info("Trajectory accepted") - result_future = goal_handle.get_result_async() + # result_future = goal_handle.get_result_async() - expect_duration = traj.joint_trajectory.points[-1].time_from_start - expect_time = time.time() + 2 * expect_duration.sec - while not result_future.done() and time.time() < expect_time: - time.sleep(0.01) + # expect_duration = traj.joint_trajectory.points[-1].time_from_start + # expect_time = time.time() + 2 * expect_duration.sec + # while not result_future.done() and time.time() < expect_time: + # time.sleep(0.01) - robot_interface_node.get_logger().info("Trajectory executed") + # robot_interface_node.get_logger().info("Trajectory executed") - robot_interface_node.get_logger().info("Current pose: " + str(robot_interface_node.get_fk()[0]) ) + # robot_interface_node.get_logger().info("Current pose: " + str(robot_interface_node.get_fk()[0]) ) # rclpy.spin(robot_interface_node) + robot_interface_node.get_current_state() + print("Current State: ", robot_interface_node.robot_state) executor.spin() rclpy.shutdown() diff --git a/lbr_fri_ros2_stack b/lbr_fri_ros2_stack index 218d723f..cca24ae8 160000 --- a/lbr_fri_ros2_stack +++ b/lbr_fri_ros2_stack @@ -1 +1 @@ -Subproject commit 218d723f817c61314988833611effddc5333ca6c +Subproject commit cca24ae8a17d5644a414380d7ac420d37aec6686 diff --git a/serl_robot_infra/kuka_env/__init__.py b/serl_robot_infra/kuka_env/__init__.py new file mode 100644 index 00000000..886837a3 --- /dev/null +++ b/serl_robot_infra/kuka_env/__init__.py @@ -0,0 +1,14 @@ +from gym.envs.registration import register +import numpy as np + +register( + id="KukaEnv-Vision-v0", + entry_point="kuka_env.envs:FrankaEnv", + max_episode_steps=100, +) + +register( + id="KukaPegInsert-Vision-v0", + entry_point="kuka_env.envs.peg_env:KukaPegInsert", + max_episode_steps=100, +) diff --git 
a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py index d8005325..c4810a49 100644 --- a/serl_robot_infra/kuka_env/envs/kuka_env.py +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -1,4 +1,7 @@ """Gym Interface for Franka""" +import sys +sys.path.append("/home/aero/anaconda3/envs/serl/lib/python3.10/site-packages") + import numpy as np import gym import cv2 @@ -11,13 +14,14 @@ from datetime import datetime from collections import OrderedDict from typing import Dict +sys.path.append("/home/aero/omey_ws/serl-rros/src/") +from serl_robot_infra.franka_env.camera.video_capture import VideoCapture +from serl_robot_infra.franka_env.camera.rs_capture import RSCapture +from serl_robot_infra.franka_env.utils.rotations import euler_2_quat, quat_2_euler -from franka_env.camera.video_capture import VideoCapture -from franka_env.camera.rs_capture import RSCapture -from franka_env.utils.rotations import euler_2_quat, quat_2_euler -# from kuka_server.robot_interface import RobotInterfaceNode -from kuka_server.kuka_server.robot_interface import RobotInterfaceNode +# from kuka_server.kuka_server.robot_interface import RobotInterfaceNode +from kuka_server.robot_interface import RobotInterfaceNode class ImageDisplayer(threading.Thread): @@ -431,3 +435,11 @@ def _get_obs(self) -> dict: "tcp_torque": self.currtorque, } return copy.deepcopy(dict(images=images, state=state_observation)) + + +if __name__ == '__main__': + + env = gym.make("KukaEnv") + + + \ No newline at end of file diff --git a/serl_robot_infra/kuka_env/envs/peg_env/config.py b/serl_robot_infra/kuka_env/envs/peg_env/config.py index 1226e6a0..ce52b798 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/config.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/config.py @@ -20,7 +20,7 @@ class PegEnvConfig(DefaultEnvConfig): -0.0078615, ] ) - RESET_POSE = TARGET_POSE + np.array([0.0, 0.0, 0.1, 0.0, 0.0, 0.0]) + RESET_POSE = TARGET_POSE + np.array([0.5, 0.1, 0.3, 0.0, 0.0, 0.0]) 
REWARD_THRESHOLD: np.ndarray = np.array([0.01, 0.01, 0.01, 0.2, 0.2, 0.2]) APPLY_GRIPPER_PENALTY = False ACTION_SCALE = np.array([0.02, 0.1, 1]) diff --git a/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py b/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py index 8eb983b4..d4a3212a 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py @@ -6,7 +6,7 @@ from kuka_env.envs.kuka_env import KukaEnv from franka_env.utils.rotations import euler_2_quat -from franka_env.envs.peg_env.config import PegEnvConfig +from kuka_env.envs.peg_env.config import PegEnvConfig class KukaPegInsert(KukaEnv): diff --git a/serl_robot_infra/kuka_env/envs/relative_env.py b/serl_robot_infra/kuka_env/envs/relative_env.py new file mode 100644 index 00000000..7d48f526 --- /dev/null +++ b/serl_robot_infra/kuka_env/envs/relative_env.py @@ -0,0 +1,109 @@ +from scipy.spatial.transform import Rotation as R +import gym +import numpy as np +from gym import Env +from franka_env.utils.transformations import ( + construct_adjoint_matrix, + construct_homogeneous_matrix, +) + + +class RelativeFrame(gym.Wrapper): + """ + This wrapper transforms the observation and action to be expressed in the end-effector frame. + Optionally, it can transform the tcp_pose into a relative frame defined as the reset pose. + + This wrapper is expected to be used on top of the base Franka environment, which has the following + observation space: + { + "state": spaces.Dict( + { + "tcp_pose": spaces.Box(-np.inf, np.inf, shape=(7,)), # xyz + quat + ...... + } + ), + ...... + }, and at least 6 DoF action space with (x, y, z, rx, ry, rz, ...). + By convention, the 7th dimension of the action space is used for the gripper. 
+ + """ + + def __init__(self, env: Env, include_relative_pose=True): + super().__init__(env) + self.adjoint_matrix = np.zeros((6, 6)) + + self.include_relative_pose = include_relative_pose + if self.include_relative_pose: + # Homogeneous transformation matrix from reset pose's relative frame to base frame + self.T_r_o_inv = np.zeros((4, 4)) + + def step(self, action: np.ndarray): + # action is assumed to be (x, y, z, rx, ry, rz, gripper) + # Transform action from end-effector frame to base frame + transformed_action = self.transform_action(action) + + obs, reward, done, truncated, info = self.env.step(transformed_action) + + # this is to convert the spacemouse intervention action + if "intervene_action" in info: + info["intervene_action"] = self.transform_action_inv( + info["intervene_action"] + ) + + # Update adjoint matrix + self.adjoint_matrix = construct_adjoint_matrix(obs["state"]["tcp_pose"]) + + # Transform observation to spatial frame + transformed_obs = self.transform_observation(obs) + return transformed_obs, reward, done, truncated, info + + def reset(self, **kwargs): + obs, info = self.env.reset(**kwargs) + + # Update adjoint matrix + self.adjoint_matrix = construct_adjoint_matrix(obs["state"]["tcp_pose"]) + if self.include_relative_pose: + # Update transformation matrix from the reset pose's relative frame to base frame + self.T_r_o_inv = np.linalg.inv( + construct_homogeneous_matrix(obs["state"]["tcp_pose"]) + ) + + # Transform observation to spatial frame + return self.transform_observation(obs), info + + def transform_observation(self, obs): + """ + Transform observations from spatial(base) frame into body(end-effector) frame + using the adjoint matrix + """ + adjoint_inv = np.linalg.inv(self.adjoint_matrix) + obs["state"]["tcp_vel"] = adjoint_inv @ obs["state"]["tcp_vel"] + + if self.include_relative_pose: + T_b_o = construct_homogeneous_matrix(obs["state"]["tcp_pose"]) + T_b_r = self.T_r_o_inv @ T_b_o + + # Reconstruct transformed tcp_pose vector 
+ p_b_r = T_b_r[:3, 3] + theta_b_r = R.from_matrix(T_b_r[:3, :3]).as_quat() + obs["state"]["tcp_pose"] = np.concatenate((p_b_r, theta_b_r)) + + return obs + + def transform_action(self, action: np.ndarray): + """ + Transform action from body(end-effector) frame into into spatial(base) frame + using the adjoint matrix + """ + action = np.array(action) # in case action is a jax read-only array + action[:6] = self.adjoint_matrix @ action[:6] + return action + + def transform_action_inv(self, action: np.ndarray): + """ + Transform action from spatial(base) frame into body(end-effector) frame + using the adjoint matrix. + """ + action = np.array(action) + action[:6] = np.linalg.inv(self.adjoint_matrix) @ action[:6] + return action From 7aed98d0d815b5390c1036d2db213b8b4c972a2e Mon Sep 17 00:00:00 2001 From: rishabh shukla Date: Mon, 11 Nov 2024 18:10:33 -0800 Subject: [PATCH 03/24] Completed Testing and running the training loop --- .gitignore | 12 +++ .../async_drq_randomized.py | 63 ++++++++++++---- examples/async_peg_insert_drq/run_actor.sh | 4 +- kuka_server/kuka_server/robot_interface.py | 74 ++++++++++++++----- serl_robot_infra/kuka_env/envs/kuka_env.py | 27 ++++--- .../kuka_env/envs/peg_env/config.py | 9 ++- .../kuka_env/envs/relative_env.py | 2 +- 7 files changed, 140 insertions(+), 51 deletions(-) diff --git a/.gitignore b/.gitignore index 9d6232dd..59b5787e 100644 --- a/.gitignore +++ b/.gitignore @@ -172,3 +172,15 @@ MUJOCO_LOG.TXT _METADATA checkpoint wandb/ +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +*.code-workspace + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix diff --git a/examples/async_peg_insert_drq/async_drq_randomized.py b/examples/async_peg_insert_drq/async_drq_randomized.py index 66b4f1d2..65408615 100644 --- a/examples/async_peg_insert_drq/async_drq_randomized.py +++ b/examples/async_peg_insert_drq/async_drq_randomized.py @@ -1,6 
+1,9 @@ #!/usr/bin/env python3 import sys -sys.path.append("/home/aero/anaconda3/envs/serl/lib/python3.10/site-packages") +sys.path.append("/home/cam/miniconda3/envs/serl-rros/lib/python3.10/site-packages") +import rclpy +import rclpy.duration +from rclpy.executors import MultiThreadedExecutor import time from functools import partial import jax @@ -27,18 +30,26 @@ make_trainer_config, make_wandb_logger, ) +from kuka_server.robot_interface import RobotInterfaceNode from serl_launcher.data.data_store import MemoryEfficientReplayBufferDataStore from serl_launcher.wrappers.serl_obs_wrappers import SERLObsWrapper -from kuka_env.envs.relative_env import RelativeFrame -from franka_env.envs.wrappers import ( +sys.path.append("/home/cam/omey_ws/serl-rros/src/") +from serl_robot_infra.kuka_env.envs.relative_env import RelativeFrame +from serl_robot_infra.franka_env.envs.wrappers import ( Quat2EulerWrapper, ) -import franka_env -import kuka_env +import serl_robot_infra.franka_env +import serl_robot_infra.kuka_env +# sys.argv = sys.argv[:1] -FLAGS = flags.FLAGS +# # `app.run` calls `sys.exit` +# try: +# app.run(lambda argv: None) +# except: +# pass +FLAGS = flags.FLAGS flags.DEFINE_string("env", "FrankaEnv-Vision-v0", "Name of environment.") flags.DEFINE_string("agent", "drq", "Name of agent.") flags.DEFINE_string("exp_name", None, "Name of the experiment for wandb logging.") @@ -47,6 +58,7 @@ flags.DEFINE_bool("save_model", False, "Whether to save model.") flags.DEFINE_integer("critic_actor_ratio", 4, "critic to actor update ratio.") +flags.DEFINE_integer("batch_size", 256, "Batch size for training the policy.") flags.DEFINE_integer("max_steps", 1000000, "Maximum number of training steps.") flags.DEFINE_integer("replay_buffer_capacity", 200000, "Replay buffer capacity.") @@ -58,7 +70,7 @@ flags.DEFINE_integer("eval_period", 2000, "Evaluation period.") # flag to indicate if this is a leaner or a actor -flags.DEFINE_boolean("learner", False, "Is this a learner or a trainer.") 
+flags.DEFINE_boolean("learner", True, "Is this a learner or a trainer.") flags.DEFINE_boolean("actor", False, "Is this a learner or a trainer.") flags.DEFINE_boolean("render", False, "Render the environment.") flags.DEFINE_string("ip", "localhost", "IP address of the learner.") @@ -215,11 +227,12 @@ def learner(rng, agent: DrQAgent, replay_buffer): The learner loop, which runs when "--learner" is set to True. """ # set up wandb and logging - wandb_logger = make_wandb_logger( - project="serl_dev", - description=FLAGS.exp_name or FLAGS.env, - debug=FLAGS.debug, - ) + wandb_logger = None + # wandb_logger = make_wandb_logger( + # project="serl_dev", + # description=FLAGS.exp_name or FLAGS.env, + # debug=FLAGS.debug, + # ) # To track the step in the training loop update_steps = 0 @@ -234,7 +247,7 @@ def stats_callback(type: str, payload: dict) -> dict: # Create server server = TrainerServer(make_trainer_config(), request_callback=stats_callback) server.register_data_store("actor_env", replay_buffer) - server.start(threaded=True) + server.start(threaded=False) # Loop to wait until replay_buffer is filled pbar = tqdm.tqdm( @@ -314,16 +327,26 @@ def stats_callback(type: str, payload: dict) -> dict: def main(_): + rclpy.init(args=_) + print("ROS2 initialized.") + robot_interface_node = RobotInterfaceNode() + executor = MultiThreadedExecutor() + executor.add_node(robot_interface_node) + robot_interface_node.get_logger().info("Robot interface node started.") + # executor.spin_once() assert FLAGS.batch_size % num_devices == 0 # seed rng = jax.random.PRNGKey(FLAGS.seed) - + print("Initializing environment") # create env and load dataset + print("Value of learner: ", FLAGS.learner) env = gym.make( FLAGS.env, fake_env=FLAGS.learner, save_video=FLAGS.eval_checkpoint_step, + robot_interface_node=robot_interface_node ) + print("Environment initialized") # env = GripperCloseEnv(env) # if FLAGS.actor: # env = SpacemouseIntervention(env) @@ -332,10 +355,12 @@ def main(_): env = 
SERLObsWrapper(env) env = ChunkingWrapper(env, obs_horizon=1, act_exec_horizon=None) env = RecordEpisodeStatistics(env) + print("Environment wrapped") image_keys = [key for key in env.observation_space.keys() if key != "state"] rng, sampling_rng = jax.random.split(rng) + print("Creating agent") agent: DrQAgent = make_drq_agent( seed=FLAGS.seed, sample_obs=env.observation_space.sample(), @@ -344,13 +369,16 @@ def main(_): encoder_type=FLAGS.encoder_type, ) + print("Agent created") # replicate agent across devices # need the jnp.array to avoid a bug where device_put doesn't recognize primitives + print("Replicating agent") agent: DrQAgent = jax.device_put( jax.tree_map(jnp.array, agent), sharding.replicate() ) if FLAGS.learner: + print("Learner code executed") sampling_rng = jax.device_put(sampling_rng, device=sharding.replicate()) replay_buffer = MemoryEfficientReplayBufferDataStore( env.observation_space, @@ -358,6 +386,7 @@ def main(_): capacity=FLAGS.replay_buffer_capacity, image_keys=image_keys, ) + print("Replay buffer initialized") # demo_buffer = MemoryEfficientReplayBufferDataStore( # env.observation_space, # env.action_space, @@ -381,6 +410,7 @@ def main(_): ) elif FLAGS.actor: + print("Initializing Actor Node") sampling_rng = jax.device_put(sampling_rng, sharding.replicate()) data_store = QueuedDataStore(2000) # the queue size on the actor @@ -390,6 +420,11 @@ def main(_): else: raise NotImplementedError("Must be either a learner or an actor") + + + + + rclpy.shutdown() if __name__ == "__main__": diff --git a/examples/async_peg_insert_drq/run_actor.sh b/examples/async_peg_insert_drq/run_actor.sh index a251e756..3850c8c0 100644 --- a/examples/async_peg_insert_drq/run_actor.sh +++ b/examples/async_peg_insert_drq/run_actor.sh @@ -1,9 +1,9 @@ export XLA_PYTHON_CLIENT_PREALLOCATE=false && \ export XLA_PYTHON_CLIENT_MEM_FRACTION=.1 && \ -python async_drq_randomized.py "$@" \ +python3 async_drq_randomized.py "$@" \ --actor \ --render \ - --env 
FrankaPegInsert-Vision-v0 \ + --env KukaPegInsert-Vision-v0 \ --exp_name=serl_dev_drq_rlpd10demos_peg_insert_random_resnet \ --seed 0 \ --random_steps 0 \ diff --git a/kuka_server/kuka_server/robot_interface.py b/kuka_server/kuka_server/robot_interface.py index 147837fb..b43171ed 100644 --- a/kuka_server/kuka_server/robot_interface.py +++ b/kuka_server/kuka_server/robot_interface.py @@ -6,10 +6,11 @@ from rclpy.action import ActionClient from rclpy.callback_groups import ReentrantCallbackGroup, MutuallyExclusiveCallbackGroup from rclpy.executors import MultiThreadedExecutor - +from rcl_interfaces.msg import ParameterValue +from rcl_interfaces.srv import GetParameters import numpy as np - from geometry_msgs.msg import Pose, Point, Quaternion +from std_msgs.msg import String import rclpy.task from sensor_msgs.msg import JointState from geometry_msgs.msg import WrenchStamped @@ -26,11 +27,12 @@ from moveit_msgs.srv import GetPositionIK, GetMotionPlan, GetPositionFK from moveit_msgs.action import ExecuteTrajectory from kuka_server.wait_for_message import wait_for_message - +from moveit_configs_utils.launches import generate_move_group_launch from kuka_server.utils import quat_to_euler, convert_wrench_to_numpy, euler_to_quat from lbr_fri_idl.msg import LBRState import copy +import optas class RobotInterfaceNode(Node): timeout_sec_ = 5.0 @@ -46,6 +48,7 @@ class RobotInterfaceNode(Node): execute_action_name_ = "execute_trajectory" fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" WRENCH_TOPIC = "/lbr/force_torque_broadcaster/wrench" + robot_desc_topic_ = "robot_description" base_ = "link_0" ###Changed for compatibility with latest lbr stack end_effector_ = "link_ee" @@ -67,6 +70,8 @@ def __init__(self) -> None: 10 ) + self._init_jacobian() + self.ik_client_ = self.create_client(GetPositionIK, self.ik_srv_name_, callback_group=self.ik_client_callback) if not self.ik_client_.wait_for_service(timeout_sec=self.timeout_sec_): 
self.get_logger().error("IK service not available.") @@ -126,8 +131,23 @@ def wrench_callback(self, msg): self.latest_wrench = convert_wrench_to_numpy(msg) # self.get_logger().info(f"Received wrench data: {self.latest_wrench}") return + + + def _init_jacobian(self): + print("Initializing Jacobian") + + self._robot_description = self._retrieve_parameter( + "robot_state_publisher/get_parameters", "robot_description" + ).string_value + self._robot = optas.RobotModel( + urdf_string=self._robot_description, time_derivs=[0, 1] + ) + self.jacobian_func = self._robot.get_link_geometric_jacobian_function( + link=self.end_effector_, base_link=self.base_, numpy_output=True + ) + return def get_fk(self) -> Pose | None: current_joint_state = self.get_joint_state() if current_joint_state is None: @@ -166,7 +186,7 @@ def get_fk_lbr(self, commanded:bool = False) -> Pose | None: joint_position = lbr_state.measured_joint_position.tolist() if commanded: joint_position = lbr_state.commanded_joint_position.tolist() - print(joint_position) + joint_position[2], joint_position[3] = joint_position[3], joint_position[2] current_robot_state = RobotState() @@ -203,11 +223,29 @@ def sum_of_square_diff( return np.sum( np.square(np.subtract(joint_state_1.position, joint_state_2.position)) ) + + def _retrieve_parameter(self, service: str, parameter_name: str) -> ParameterValue: + parameter_client = self.create_client(GetParameters, service) + while not parameter_client.wait_for_service(timeout_sec=1.0): + if not rclpy.ok(): + self.get_logger().error( + "Interrupted while waiting for the service. Exiting." 
+ ) + return None + self.get_logger().info(f"Waiting for '{service}' service...") + request = GetParameters.Request(names=[parameter_name]) + future = parameter_client.call_async(request=request) + rclpy.spin_until_future_complete(self, future) + if future.result() is None: + self.get_logger().error(f"Failed to retrieve '{parameter_name}'.") + return None + self.get_logger().info(f"Received '{parameter_name}' from '{service}'.") + return future.result().values[0] def get_current_state(self): ##Return a dictionary with all the corresponding state variables #Pose, Velocity, Force, Torque, Jacobian, Joint Angles, Joint Velocity, Gripper Pose - self.robot_state["pose"] = np.zeros((6,)) + self.robot_state["pose"] = np.zeros((7,)) self.robot_state["vel"] = np.zeros((6,)) self.robot_state["force"] = np.zeros((3,)) self.robot_state["torque"] = np.zeros((3,)) @@ -216,12 +254,8 @@ def get_current_state(self): ##TCP pose current_ee_geom_pose, current_joint_state = self.get_fk() - self.robot_state["pose"][:3] = np.array([current_ee_geom_pose.position.x,current_ee_geom_pose.position.y,current_ee_geom_pose.position.z]) - euler_angles = quat_to_euler( np.array([current_ee_geom_pose.orientation.x, - current_ee_geom_pose.orientation.y, - current_ee_geom_pose.orientation.z, - current_ee_geom_pose.orientation.w])) - self.robot_state["pose"][3:] = euler_angles + self.robot_state["pose"] = np.array([current_ee_geom_pose.position.x,current_ee_geom_pose.position.y,current_ee_geom_pose.position.z, + current_ee_geom_pose.orientation.x,current_ee_geom_pose.orientation.y,current_ee_geom_pose.orientation.z,current_ee_geom_pose.orientation.w]) ##Getting Joint Angles joint_angles = current_joint_state.position @@ -233,25 +267,27 @@ def get_current_state(self): current_wrench = copy.deepcopy(self.latest_wrench) self.robot_state["force"] = current_wrench[:3] self.robot_state["torque"] = current_wrench[3:] - + + self.robot_state["jacobian"] = self.jacobian_func(joint_angles) + 
self.robot_state["vel"] = np.dot(self.robot_state["jacobian"],joint_velocity) - return + return self.robot_state def move_to_pose(self, pose:np.ndarray): - + print("Moving to Pose: ", pose) target_pose = Pose() target_pose.position.x = pose[0] target_pose.position.y = pose[1] target_pose.position.z = pose[2] - target_pose_quaternion = euler_to_quat(pose[3:]) - target_pose.orientation.x = target_pose_quaternion[0] - target_pose.orientation.y = target_pose_quaternion[1] - target_pose.orientation.z = target_pose_quaternion[2] - target_pose.orientation.w = target_pose_quaternion[3] + # target_pose_quaternion = euler_to_quat(pose[3:]) + target_pose.orientation.x = pose[0] + target_pose.orientation.y = pose[1] + target_pose.orientation.z = pose[2] + target_pose.orientation.w = pose[3] traj = self.get_motion_plan(target_pose, True) diff --git a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py index c4810a49..230c0103 100644 --- a/serl_robot_infra/kuka_env/envs/kuka_env.py +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -1,6 +1,6 @@ """Gym Interface for Franka""" import sys -sys.path.append("/home/aero/anaconda3/envs/serl/lib/python3.10/site-packages") +sys.path.append("/home/cam/miniconda3/envs/serl-rros/lib/python3.10/site-packages") import numpy as np import gym @@ -14,14 +14,14 @@ from datetime import datetime from collections import OrderedDict from typing import Dict -sys.path.append("/home/aero/omey_ws/serl-rros/src/") +sys.path.append("/home/cam/omey_ws/serl-rros/src/") from serl_robot_infra.franka_env.camera.video_capture import VideoCapture from serl_robot_infra.franka_env.camera.rs_capture import RSCapture from serl_robot_infra.franka_env.utils.rotations import euler_2_quat, quat_2_euler # from kuka_server.kuka_server.robot_interface import RobotInterfaceNode -from kuka_server.robot_interface import RobotInterfaceNode + class ImageDisplayer(threading.Thread): @@ -52,8 +52,8 @@ class DefaultEnvConfig: ROBOT_IP: str = 
"192.168.10.122" REALSENSE_CAMERAS: Dict = { - "wrist_1": "130322274175", - "wrist_2": "127122270572", + "wrist_1": "128422270311", + "wrist_2": "840412060409", } TARGET_POSE: np.ndarray = np.zeros((6,)) REWARD_THRESHOLD: np.ndarray = np.zeros((6,)) @@ -83,9 +83,10 @@ def __init__( save_video=False, config: DefaultEnvConfig = None, max_episode_length=100, + robot_interface_node=None ): - - self.robot_interface_node = RobotInterfaceNode() + print("Initializing KukaEnv") + self.robot_interface_node = robot_interface_node self.action_scale = config.ACTION_SCALE self._TARGET_POSE = config.TARGET_POSE self._REWARD_THRESHOLD = config.REWARD_THRESHOLD @@ -136,7 +137,8 @@ def __init__( np.ones((7,), dtype=np.float32) * -1, np.ones((7,), dtype=np.float32), ) - + print("Initializing Observation Space") + time.sleep(2) self.observation_space = gym.spaces.Dict( { "state": gym.spaces.Dict( @@ -164,8 +166,9 @@ def __init__( self.cycle_count = 0 if fake_env: + print("returning from fake env") return - + print("Initializing Cameras") self.cap = None self.init_cameras(config.REALSENSE_CAMERAS) self.img_queue = queue.Queue() @@ -200,6 +203,7 @@ def clip_safety_box(self, pose: np.ndarray) -> np.ndarray: def step(self, action: np.ndarray) -> tuple: """standard gym step function.""" + print("In step function") start_time = time.time() action = np.clip(action, self.action_space.low, self.action_space.high) xyz_delta = action[:3] @@ -358,6 +362,7 @@ def init_cameras(self, name_serial_dict=None): self.close_cameras() self.cap = OrderedDict() + for cam_name, cam_serial in name_serial_dict.items(): cap = VideoCapture( RSCapture(name=cam_name, serial_number=cam_serial, depth=False) @@ -413,12 +418,13 @@ def _update_currpos(self): Internal function to get the latest state of the robot and its gripper. 
""" ps = self.robot_interface_node.get_current_state() + print("Ps: ", ps) self.currpos[:] = np.array(ps["pose"]) self.currvel[:] = np.array(ps["vel"]) self.currforce[:] = np.array(ps["force"]) self.currtorque[:] = np.array(ps["torque"]) - self.currjacobian[:] = np.reshape(np.array(ps["jacobian"]), (6, 7)) + # self.currjacobian[:] = np.reshape(np.array(ps["jacobian"]), (6, 7)) self.q[:] = np.array(ps["q"]) self.dq[:] = np.array(ps["dq"]) @@ -438,7 +444,6 @@ def _get_obs(self) -> dict: if __name__ == '__main__': - env = gym.make("KukaEnv") diff --git a/serl_robot_infra/kuka_env/envs/peg_env/config.py b/serl_robot_infra/kuka_env/envs/peg_env/config.py index ce52b798..2c741a14 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/config.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/config.py @@ -7,26 +7,27 @@ class PegEnvConfig(DefaultEnvConfig): ROBOT_IP: str = "192.168.10.122" REALSENSE_CAMERAS = { - "wrist_1": "130322274175", - "wrist_2": "127122270572", + "wrist_1": "840412060409", + "wrist_2": "128422270311", } TARGET_POSE = np.array( [ 0.5906439143742067, 0.07771711953459341, - 0.0937835826958042, + 0.337835826958042, 3.1099675, 0.0146619, -0.0078615, ] ) - RESET_POSE = TARGET_POSE + np.array([0.5, 0.1, 0.3, 0.0, 0.0, 0.0]) + RESET_POSE = TARGET_POSE + np.array([0.0, 0.0, 0.3, 0.0, 0.0, 0.0]) REWARD_THRESHOLD: np.ndarray = np.array([0.01, 0.01, 0.01, 0.2, 0.2, 0.2]) APPLY_GRIPPER_PENALTY = False ACTION_SCALE = np.array([0.02, 0.1, 1]) RANDOM_RESET = True RANDOM_XY_RANGE = 0.05 RANDOM_RZ_RANGE = np.pi / 6 + USE_GRIPPER: bool = False ABS_POSE_LIMIT_LOW = np.array( [ TARGET_POSE[0] - RANDOM_XY_RANGE, diff --git a/serl_robot_infra/kuka_env/envs/relative_env.py b/serl_robot_infra/kuka_env/envs/relative_env.py index 7d48f526..bec6b120 100644 --- a/serl_robot_infra/kuka_env/envs/relative_env.py +++ b/serl_robot_infra/kuka_env/envs/relative_env.py @@ -2,7 +2,7 @@ import gym import numpy as np from gym import Env -from franka_env.utils.transformations import ( +from 
serl_robot_infra.franka_env.utils.transformations import ( construct_adjoint_matrix, construct_homogeneous_matrix, ) From 4cd36b343705d9caa149080daa910c4b24151a5f Mon Sep 17 00:00:00 2001 From: rishabh shukla Date: Tue, 12 Nov 2024 16:37:00 -0800 Subject: [PATCH 04/24] Completed setting up the training pipeline --- .../async_drq_randomized.py | 13 ++- kuka_server/kuka_server/robot_interface.py | 81 ++----------------- serl_robot_infra/kuka_env/__init__.py | 2 +- serl_robot_infra/kuka_env/envs/kuka_env.py | 48 +++++++---- .../kuka_env/envs/peg_env/config.py | 8 +- .../kuka_env/envs/peg_env/kuka_peg_insert.py | 2 +- .../kuka_env/envs/relative_env.py | 3 + 7 files changed, 58 insertions(+), 99 deletions(-) diff --git a/examples/async_peg_insert_drq/async_drq_randomized.py b/examples/async_peg_insert_drq/async_drq_randomized.py index 65408615..746368ee 100644 --- a/examples/async_peg_insert_drq/async_drq_randomized.py +++ b/examples/async_peg_insert_drq/async_drq_randomized.py @@ -33,7 +33,10 @@ from kuka_server.robot_interface import RobotInterfaceNode from serl_launcher.data.data_store import MemoryEfficientReplayBufferDataStore from serl_launcher.wrappers.serl_obs_wrappers import SERLObsWrapper -sys.path.append("/home/cam/omey_ws/serl-rros/src/") + +import os +MAIN_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) +sys.path.append(MAIN_DIR) from serl_robot_infra.kuka_env.envs.relative_env import RelativeFrame from serl_robot_infra.franka_env.envs.wrappers import ( Quat2EulerWrapper, @@ -106,6 +109,7 @@ def actor(agent: DrQAgent, data_store, env, sampling_rng): This is the actor loop, which runs when "--actor" is set to True. 
""" if FLAGS.eval_checkpoint_step: + print("Evaluating the policy") success_counter = 0 time_list = [] @@ -163,10 +167,11 @@ def update_params(params): done = False # training loop + print("Entering Training Loop") timer = Timer() running_return = 0.0 - for step in tqdm.tqdm(range(FLAGS.max_steps), dynamic_ncols=True): + for step in tqdm.tqdm(range(FLAGS.max_steps), dynamic_ncols=True, desc="actor"): timer.tick("total") with timer.context("sample_actions"): @@ -247,7 +252,7 @@ def stats_callback(type: str, payload: dict) -> dict: # Create server server = TrainerServer(make_trainer_config(), request_callback=stats_callback) server.register_data_store("actor_env", replay_buffer) - server.start(threaded=False) + server.start(threaded=True) # Loop to wait until replay_buffer is filled pbar = tqdm.tqdm( @@ -270,7 +275,7 @@ def stats_callback(type: str, payload: dict) -> dict: # 50/50 sampling from RLPD, half from demo and half from online experience replay_iterator = replay_buffer.get_iterator( sample_args={ - "batch_size": FLAGS.batch_size // 2, + "batch_size": FLAGS.batch_size, "pack_obs_and_next_obs": True, }, device=sharding.replicate(), diff --git a/kuka_server/kuka_server/robot_interface.py b/kuka_server/kuka_server/robot_interface.py index b43171ed..4a119b2a 100644 --- a/kuka_server/kuka_server/robot_interface.py +++ b/kuka_server/kuka_server/robot_interface.py @@ -279,15 +279,15 @@ def move_to_pose(self, pose:np.ndarray): print("Moving to Pose: ", pose) target_pose = Pose() - target_pose.position.x = pose[0] - target_pose.position.y = pose[1] - target_pose.position.z = pose[2] + target_pose.position.x = float(pose[0]) + target_pose.position.y = float(pose[1]) + target_pose.position.z = float(pose[2]) # target_pose_quaternion = euler_to_quat(pose[3:]) - target_pose.orientation.x = pose[0] - target_pose.orientation.y = pose[1] - target_pose.orientation.z = pose[2] - target_pose.orientation.w = pose[3] + target_pose.orientation.x = float(pose[3]) + 
target_pose.orientation.y = float(pose[4]) + target_pose.orientation.z = float(pose[5]) + target_pose.orientation.w = float(pose[6]) traj = self.get_motion_plan(target_pose, True) @@ -464,73 +464,6 @@ def main(args=None): executor.add_node(robot_interface_node) robot_interface_node.get_logger().info("Robot interface node started.") - # target_poses = [] - # for i in range(3): - # target_poses.append( - # Pose( - # position=Point(x=0.5, y=-0.1 + 0.1 * i, z=0.6), - # orientation=Quaternion(x=0.0, y=-1.0, z=0.0, w=0.0), - # ) - # ) - - # traj = robot_interface_node.get_motion_plan(target_poses[1]) - # if traj: - # client = robot_interface_node.get_motion_execute_client() - # goal = ExecuteTrajectory.Goal() - # goal.trajectory = traj - - # future = client.send_goal_async(goal) - # rclpy.spin_until_future_complete(robot_interface_node, future) - - # goal_handle = future.result() - # if not goal_handle.accepted: - # robot_interface_node.get_logger().error("Failed to execute trajectory") - # else: - # robot_interface_node.get_logger().info("Trajectory accepted") - - # result_future = goal_handle.get_result_async() - - # expect_duration = traj.joint_trajectory.points[-1].time_from_start - # expect_time = time.time() + 2 * expect_duration.sec - # while not result_future.done() and time.time() < expect_time: - # time.sleep(0.01) - - # robot_interface_node.get_logger().info("Trajectory executed") - - # robot_interface_node.get_logger().info("Current pose: " + str(robot_interface_node.get_fk()[0])) - - # for target_pose in target_poses: - # traj = robot_interface_node.get_motion_plan(target_pose, True) - # if traj: - # client = robot_interface_node.get_motion_execute_client() - # goal = ExecuteTrajectory.Goal() - # goal.trajectory = traj - - # future = client.send_goal_async(goal) - # rclpy.spin_until_future_complete(robot_interface_node, future) - - # goal_handle = future.result() - # if not goal_handle.accepted: - # robot_interface_node.get_logger().error("Failed to execute 
trajectory") - # else: - # robot_interface_node.get_logger().info("Trajectory accepted") - - - # result_future = goal_handle.get_result_async() - - # expect_duration = traj.joint_trajectory.points[-1].time_from_start - # expect_time = time.time() + 2 * expect_duration.sec - # while not result_future.done() and time.time() < expect_time: - # time.sleep(0.01) - - # robot_interface_node.get_logger().info("Trajectory executed") - - # robot_interface_node.get_logger().info("Current pose: " + str(robot_interface_node.get_fk()[0]) ) - - # rclpy.spin(robot_interface_node) - robot_interface_node.get_current_state() - - print("Current State: ", robot_interface_node.robot_state) executor.spin() rclpy.shutdown() diff --git a/serl_robot_infra/kuka_env/__init__.py b/serl_robot_infra/kuka_env/__init__.py index 886837a3..e1e059e7 100644 --- a/serl_robot_infra/kuka_env/__init__.py +++ b/serl_robot_infra/kuka_env/__init__.py @@ -3,7 +3,7 @@ register( id="KukaEnv-Vision-v0", - entry_point="kuka_env.envs:FrankaEnv", + entry_point="kuka_env.envs:KukaEnv", max_episode_steps=100, ) diff --git a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py index 230c0103..dcfed4fa 100644 --- a/serl_robot_infra/kuka_env/envs/kuka_env.py +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -1,5 +1,8 @@ """Gym Interface for Franka""" import sys +import time +time.sleep(2) +print("MAKE SURE TO CHANGE THE PATHS") sys.path.append("/home/cam/miniconda3/envs/serl-rros/lib/python3.10/site-packages") import numpy as np @@ -7,7 +10,6 @@ import cv2 import copy from scipy.spatial.transform import Rotation -import time import requests import queue import threading @@ -52,8 +54,8 @@ class DefaultEnvConfig: ROBOT_IP: str = "192.168.10.122" REALSENSE_CAMERAS: Dict = { - "wrist_1": "128422270311", - "wrist_2": "840412060409", + "wrist_1": "840412060409", + "wrist_2": "932122060300", } TARGET_POSE: np.ndarray = np.zeros((6,)) REWARD_THRESHOLD: np.ndarray = np.zeros((6,)) @@ -137,8 
+139,7 @@ def __init__( np.ones((7,), dtype=np.float32) * -1, np.ones((7,), dtype=np.float32), ) - print("Initializing Observation Space") - time.sleep(2) + self.observation_space = gym.spaces.Dict( { "state": gym.spaces.Dict( @@ -204,6 +205,7 @@ def clip_safety_box(self, pose: np.ndarray) -> np.ndarray: def step(self, action: np.ndarray) -> tuple: """standard gym step function.""" print("In step function") + time.sleep(2) start_time = time.time() action = np.clip(action, self.action_space.low, self.action_space.high) xyz_delta = action[:3] @@ -241,6 +243,8 @@ def step(self, action: np.ndarray) -> tuple: def compute_reward(self, obs, gripper_action_effective = None) -> bool: """We are using a sparse reward function.""" current_pose = obs["state"]["tcp_pose"] + print("In Reward function") + time.sleep(2) # convert from quat to euler first euler_angles = quat_2_euler(current_pose[3:]) euler_angles = np.abs(euler_angles) @@ -312,10 +316,24 @@ def go_to_rest(self, joint_reset=False): """ # Change to precision mode for reset # requests.post(self.url + "update_param", json=self.config.PRECISION_PARAM) - time.sleep(0.5) + print("In Go to rest") + time.sleep(2) - reset_pose = self.resetpos.copy() - self.interpolate_move(reset_pose, timeout=1.5) + # Perform Carteasian reset + if self.randomreset: # randomize reset position in xy plane + reset_pose = self.resetpos.copy() + reset_pose[:2] += np.random.uniform( + -self.random_xy_range, self.random_xy_range, (2,) + ) + euler_random = self._TARGET_POSE[3:].copy() + euler_random[-1] += np.random.uniform( + -self.random_rz_range, self.random_rz_range + ) + reset_pose[3:] = euler_2_quat(euler_random) + self.interpolate_move(reset_pose, timeout=1.5) + else: + reset_pose = self.resetpos.copy() + self.interpolate_move(reset_pose, timeout=1.5) # Change to compliance mode # requests.post(self.url + "update_param", json=self.config.COMPLIANCE_PARAM) @@ -419,15 +437,15 @@ def _update_currpos(self): """ ps = 
self.robot_interface_node.get_current_state() print("Ps: ", ps) - self.currpos[:] = np.array(ps["pose"]) - self.currvel[:] = np.array(ps["vel"]) + self.currpos[:] = np.array(ps["pose"], dtype=np.float32) + self.currvel[:] = np.array(ps["vel"], dtype=np.float32) - self.currforce[:] = np.array(ps["force"]) - self.currtorque[:] = np.array(ps["torque"]) - # self.currjacobian[:] = np.reshape(np.array(ps["jacobian"]), (6, 7)) + self.currforce[:] = np.array(ps["force"], dtype=np.float32) + self.currtorque[:] = np.array(ps["torque"], dtype=np.float32) + self.currjacobian[:] = np.reshape(np.array(ps["jacobian"], dtype=np.float32), (6, 7)) - self.q[:] = np.array(ps["q"]) - self.dq[:] = np.array(ps["dq"]) + self.q[:] = np.array(ps["q"], dtype=np.float32) + self.dq[:] = np.array(ps["dq"], dtype=np.float32) if(self.use_gripper): self.curr_gripper_pos = np.array(ps["gripper_pos"]) diff --git a/serl_robot_infra/kuka_env/envs/peg_env/config.py b/serl_robot_infra/kuka_env/envs/peg_env/config.py index 2c741a14..196b9ef0 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/config.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/config.py @@ -8,7 +8,7 @@ class PegEnvConfig(DefaultEnvConfig): ROBOT_IP: str = "192.168.10.122" REALSENSE_CAMERAS = { "wrist_1": "840412060409", - "wrist_2": "128422270311", + "wrist_2": "932122060300", } TARGET_POSE = np.array( [ @@ -20,12 +20,12 @@ class PegEnvConfig(DefaultEnvConfig): -0.0078615, ] ) - RESET_POSE = TARGET_POSE + np.array([0.0, 0.0, 0.3, 0.0, 0.0, 0.0]) + RESET_POSE = TARGET_POSE + np.array([0.0, 0.0, 0.2, 0.0, 0.0, 0.0]) REWARD_THRESHOLD: np.ndarray = np.array([0.01, 0.01, 0.01, 0.2, 0.2, 0.2]) APPLY_GRIPPER_PENALTY = False ACTION_SCALE = np.array([0.02, 0.1, 1]) - RANDOM_RESET = True - RANDOM_XY_RANGE = 0.05 + RANDOM_RESET = False + RANDOM_XY_RANGE = 0.01 RANDOM_RZ_RANGE = np.pi / 6 USE_GRIPPER: bool = False ABS_POSE_LIMIT_LOW = np.array( diff --git a/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py 
b/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py index d4a3212a..3fa1ca91 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py @@ -27,7 +27,7 @@ def go_to_rest(self, joint_reset=False): self._update_currpos() reset_pose = copy.deepcopy(self.currpos) reset_pose[2] += 0.10 - self.interpolate_move(reset_pose, timeout=1) + self._send_pos_command(reset_pose) # execute the go_to_rest method from the parent class super().go_to_rest(joint_reset) diff --git a/serl_robot_infra/kuka_env/envs/relative_env.py b/serl_robot_infra/kuka_env/envs/relative_env.py index bec6b120..c8857e4d 100644 --- a/serl_robot_infra/kuka_env/envs/relative_env.py +++ b/serl_robot_infra/kuka_env/envs/relative_env.py @@ -40,6 +40,9 @@ def __init__(self, env: Env, include_relative_pose=True): def step(self, action: np.ndarray): # action is assumed to be (x, y, z, rx, ry, rz, gripper) # Transform action from end-effector frame to base frame + import time + print("sleeping for 2 seconds in step") + time.sleep(2) transformed_action = self.transform_action(action) obs, reward, done, truncated, info = self.env.step(transformed_action) From 4e74bee3c1d04508f910625c14486e9102a6d24e Mon Sep 17 00:00:00 2001 From: omeym Date: Thu, 30 Jan 2025 11:57:11 -0800 Subject: [PATCH 05/24] Pushing latest changes for benchmarking trials --- .gitignore | 5 + data_collector/LICENSE | 202 ++++++++++++++ data_collector/data_collector/__init__.py | 0 .../data_collector/arduino-python.py | 41 +++ data_collector/data_collector/config.json | 5 + data_collector/data_collector/create_pkl.py | 161 ++++++++++++ .../joint_cartesian_converter.py | 244 +++++++++++++++++ .../data_collector/kuka_data_recorder.py | 248 ++++++++++++++++++ .../data_collector/parse_robot_log.py | 128 +++++++++ data_collector/package.xml | 38 +++ data_collector/resource/data_collector | 0 data_collector/setup.cfg | 4 + data_collector/setup.py | 27 ++ 
.../async_drq_randomized.py | 67 +++-- examples/async_peg_insert_drq/run_actor.sh | 11 +- examples/async_peg_insert_drq/run_learner.sh | 11 +- kuka_server/kuka_server/robot_interface.py | 179 +++++++++---- serl_robot_infra/kuka_env/envs/kuka_env.py | 32 ++- .../kuka_env/envs/peg_env/config.py | 16 +- .../kuka_env/envs/relative_env.py | 3 - 20 files changed, 1313 insertions(+), 109 deletions(-) create mode 100644 data_collector/LICENSE create mode 100644 data_collector/data_collector/__init__.py create mode 100644 data_collector/data_collector/arduino-python.py create mode 100644 data_collector/data_collector/config.json create mode 100644 data_collector/data_collector/create_pkl.py create mode 100644 data_collector/data_collector/joint_cartesian_converter.py create mode 100644 data_collector/data_collector/kuka_data_recorder.py create mode 100644 data_collector/data_collector/parse_robot_log.py create mode 100644 data_collector/package.xml create mode 100644 data_collector/resource/data_collector create mode 100644 data_collector/setup.cfg create mode 100644 data_collector/setup.py diff --git a/.gitignore b/.gitignore index 59b5787e..06005b52 100644 --- a/.gitignore +++ b/.gitignore @@ -184,3 +184,8 @@ wandb/ # Built Visual Studio Code Extensions *.vsix + +realsense-ros/ +.vscode/ +data_collector/data_collector/processed_data/ +data_collector/data_collector/11_dec_logs/ \ No newline at end of file diff --git a/data_collector/LICENSE b/data_collector/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/data_collector/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/data_collector/data_collector/__init__.py b/data_collector/data_collector/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/data_collector/data_collector/arduino-python.py b/data_collector/data_collector/arduino-python.py new file mode 100644 index 00000000..9947c7db --- /dev/null +++ b/data_collector/data_collector/arduino-python.py @@ -0,0 +1,41 @@ +###################### +import serial +import time +import subprocess +import signal +import sys + +# Use the correct serial port based on what you found, either '/dev/ttyACM0' or '/dev/ttyUSB0' +arduino = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=.1) + +def send_command(command): + arduino.write(command.encode()) # Send the command to the Arduino + time.sleep(0.05) # Small delay to ensure the command is sent properly + +def start_two_cameras_script(): + # Launch the two_cameras.py script + subprocess.Popen(["python", "/home/lm-2023/jeon_team_ws/control_kuka/lbr_fri_ws-dev-socket/data_collection/data_collection/two_cameras_live.py"]) + +def signal_handler(sig, frame): + print("Signal received, sending 'X' to microcontroller...") + send_command('X') # Send the 'X' command when the process is terminated + sys.exit(0) # Exit the program + +# Register the signal handler for termination signals +signal.signal(signal.SIGINT, signal_handler) # Handles Ctrl+C +signal.signal(signal.SIGTERM, signal_handler) # Handles termination signal + +# Example usage +while True: + command = input("Enter command (X: Press Both Buttons, Y: Release Both Buttons and Start two_cameras.py): ").strip().upper() + + if command == 'X': + send_command('X') # Press both buttons + + elif command == 'Y': + send_command('Y') # Release both buttons + #start_two_cameras_script() # Start the two_cameras.py script + + else: + print("Invalid command. 
Please enter X or Y.") + diff --git a/data_collector/data_collector/config.json b/data_collector/data_collector/config.json new file mode 100644 index 00000000..6f95cbb0 --- /dev/null +++ b/data_collector/data_collector/config.json @@ -0,0 +1,5 @@ +{ + "log_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay", + "processed_csv_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay/processed", + "replay_csv_name": "2013-01-01_01-10-52.csv" +} \ No newline at end of file diff --git a/data_collector/data_collector/create_pkl.py b/data_collector/data_collector/create_pkl.py new file mode 100644 index 00000000..05b211c8 --- /dev/null +++ b/data_collector/data_collector/create_pkl.py @@ -0,0 +1,161 @@ +## Format of the pkl file +# The pkl file is a dictionary with the following keys: +# dict( +# observations=obs, +# actions=actions, +# next_observations=next_obs, +# rewards=rew, +# masks=1.0 - done, +# dones=done, +# ) +# where +# observations : { +# "state": gym.spaces.Box(-np.inf, np.inf, shape=(7 + 6 + 3 + 3,)), +# "wrist_1": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), +# "wrist_2": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), +# } +# actions = gym.spaces.Box( +# np.ones((7,), dtype=np.float32) * -1, +# np.ones((7,), dtype=np.float32), +# ) +# state : concatenation of tcp_poses (7), tcp_velocities (6), tcp_forces (3), tcp_torques (3) +# image names are in the format {timestamp}_A.png and {timestamp}_B.png + +import numpy as np +import pandas as pd +import pickle +import os +import json +from PIL import Image + +TARGET_POSE = np.array([ + 0.5906439143742067, + 0.07771711953459341, + 0.337835826958042, + 3.1099675, + 0.0146619, + -0.0078615, +]) + +REWARD_THRESHOLD = np.array([0.01, 0.01, 0.01, 0.2, 0.2, 0.2]) + +def convertToPickle(recorded_data_file_path, imageA_folder_path, imageB_folder_path): + + recorded_data = pd.read_csv(recorded_data_file_path) + + 
transitions = [] + + observations = [] + actions = [] + next_observations = [] + rewards = [] + masks = [] + dones = [] + + done = 0.0 + + for i in range(len(recorded_data) - 1): + + current_row = recorded_data.iloc[i] + next_row = recorded_data.iloc[i + 1] + + curr_time = current_row["timestamp"] + next_time = next_row["timestamp"] + + img_size = (128, 128) + + curr_wrist_1_image = Image.open(os.path.join(imageA_folder_path, f"{curr_time}_A.jpg")).resize(img_size) + curr_wrist_2_image = Image.open(os.path.join(imageB_folder_path, f"{curr_time}_B.jpg")).resize(img_size) + + next_wrist_1_image = Image.open(os.path.join(imageA_folder_path, f"{next_time}_A.jpg")).resize(img_size) + next_wrist_2_image = Image.open(os.path.join(imageB_folder_path, f"{next_time}_B.jpg")).resize(img_size) + + + # print(np.array(curr_wrist_1_image).shape) + observation = { + "state": np.concatenate([ + current_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values, + current_row[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, + current_row[['Fx', 'Fy', 'Fz']].values, + current_row[['Tx', 'Ty', 'Tz']].values + ]), + "wrist_1": np.array(curr_wrist_1_image), + "wrist_2": np.array(curr_wrist_2_image) + } + + action = next_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - current_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values + + next_observation = { + "state": np.concatenate([ + next_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values, + next_row[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, + next_row[['Fx', 'Fy', 'Fz']].values, + next_row[['Tx', 'Ty', 'Tz']].values + ]), + "wrist_1": np.array(next_wrist_1_image), + "wrist_2": np.array(next_wrist_2_image) + } + + if done != 1.0: + tcp_pose = current_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values + if np.all(np.abs(tcp_pose - TARGET_POSE) <= REWARD_THRESHOLD): + reward = 1.0 + done = 1.0 + else: + reward = 0.0 + else: + reward = 0.0 + + + data_dict = { + "observations": observation, + "actions": action, + "next_observations": 
next_observation, + "rewards": reward, + "masks": 1.0 - done, + "dones": done + } + + transitions.append(data_dict) + + # observations.append(observation) + # actions.append(action) + # next_observations.append(next_observation) + # rewards.append(reward) + # masks.append(1.0 - done) + # dones.append(done) + + curr_wrist_1_image.close() + curr_wrist_2_image.close() + next_wrist_1_image.close() + next_wrist_2_image.close() + + return transitions + +def main(): + + with open('config.json', 'r') as config_file: + config = json.load(config_file) + processed_folder_path = config['processed_csv_folder_path'] + output_pkl_file_path = os.path.join(processed_folder_path, 'recorded_data.pkl') + all_transitions = [] + + for folder_name in os.listdir(processed_folder_path): + folder_path = os.path.join(processed_folder_path, folder_name) + if os.path.isdir(folder_path): + recorded_data_file_path = os.path.join(folder_path, 'recorded_data.csv') + imageA_folder_path = os.path.join(folder_path, 'imageA') + imageB_folder_path = os.path.join(folder_path, 'imageB') + if os.path.exists(recorded_data_file_path) and os.path.exists(imageA_folder_path) and os.path.exists(imageB_folder_path): + transitions = convertToPickle(recorded_data_file_path, imageA_folder_path, imageB_folder_path) + all_transitions.extend(transitions) + + with open(output_pkl_file_path, 'wb') as f: + pickle.dump(all_transitions, f) + + print(f"Data saved to {output_pkl_file_path}") + + +if __name__ == "__main__": + main() + diff --git a/data_collector/data_collector/joint_cartesian_converter.py b/data_collector/data_collector/joint_cartesian_converter.py new file mode 100644 index 00000000..0c2bd7f9 --- /dev/null +++ b/data_collector/data_collector/joint_cartesian_converter.py @@ -0,0 +1,244 @@ +import time +import sys + +env = "serl" +print("Ensure to change anaconda3 or miniconda3 and change your environment name") +print("Conda Environment name is: ", env) +sys.path.append( + "/home/" + "omey" + 
"/anaconda3/envs/" + env + "/lib/python3.10/site-packages" +) +import rclpy +import rclpy.duration +from rclpy.node import Node +from rclpy.callback_groups import MutuallyExclusiveCallbackGroup +from rcl_interfaces.msg import ParameterValue +from rcl_interfaces.srv import GetParameters + +import numpy as np +import pandas as pd +import json +import os +from scipy.spatial.transform import Rotation as R + +from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped +import rclpy.task +from sensor_msgs.msg import JointState + +from moveit_msgs.msg import ( + RobotState, + MoveItErrorCodes, +) +from moveit_msgs.srv import GetPositionFK +import optas + +import os + +MAIN_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + + +class JointCartesianConverter(Node): + + move_group_name_ = "arm" + namespace_ = "lbr" + + joint_state_topic_ = "joint_states" + lbr_state_topic_ = "state" + pose_topic_ = "pose" + plan_srv_name_ = "plan_kinematic_path" + ik_srv_name_ = "compute_ik" + fk_srv_name_ = "compute_fk" + execute_action_name_ = "execute_trajectory" + fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" + + base_ = "link_0" + end_effector_ = "link_ee" + + def __init__(self, joint_data_file_path): + super().__init__("joint_cartesian_converter") + self._init_jacobian() + + self.joint_data_file_path = joint_data_file_path + self.fk_client_callback = MutuallyExclusiveCallbackGroup() + self.fk_client_ = self.create_client( + GetPositionFK, + f"{self.namespace_}/{self.fk_srv_name_}", + callback_group=self.fk_client_callback, + ) + if not self.fk_client_.wait_for_service(timeout_sec=5.0): + self.get_logger().error("FK service not available.") + exit(1) + + self.joint_data = pd.read_csv(self.joint_data_file_path) + + def _retrieve_parameter(self, service: str, parameter_name: str) -> ParameterValue: + parameter_client = self.create_client(GetParameters, service) + while not 
parameter_client.wait_for_service(timeout_sec=1.0): + if not rclpy.ok(): + self.get_logger().error( + "Interrupted while waiting for the service. Exiting." + ) + return None + self.get_logger().info(f"Waiting for '{service}' service...") + request = GetParameters.Request(names=[parameter_name]) + future = parameter_client.call_async(request=request) + rclpy.spin_until_future_complete(self, future) + if future.result() is None: + self.get_logger().error(f"Failed to retrieve '{parameter_name}'.") + return None + return future.result().values[0] + + def _init_jacobian(self): + print("Initializing Jacobian") + + self._robot_description = self._retrieve_parameter( + "/lbr/robot_state_publisher/get_parameters", "robot_description" + ).string_value + self._robot = optas.RobotModel( + urdf_string=self._robot_description, time_derivs=[0, 1] + ) + self.jacobian_func = self._robot.get_link_geometric_jacobian_function( + link=self.end_effector_, base_link=self.base_, numpy_output=True + ) + return + + def quat_2_euler(self, quat): + """calculates and returns: yaw, pitch, roll from given quaternion""" + return R.from_quat(quat).as_euler("xyz") + + def convert(self): + + # Convert each row in the dataframe to cartesian coordinates and save in the dataframe + for index, row in self.joint_data.iterrows(): + + joint_state = JointState() + joint_state.name = ["A1", "A2", "A3", "A4", "A5", "A6", "A7"] + joint_state.position = [ + row["P1"], + row["P2"], + row["P3"], + row["P4"], + row["P5"], + row["P6"], + row["P7"], + ] + joint_state.velocity = [ + row["V1"], + row["V2"], + row["V3"], + row["V4"], + row["V5"], + row["V6"], + row["V7"], + ] + joint_state.effort = [ + row["E1"], + row["E2"], + row["E3"], + row["E4"], + row["E5"], + row["E6"], + row["E7"], + ] + joint_state.header.stamp = self.get_clock().now().to_msg() + joint_state.header.frame_id = f"{self.namespace_}/{self.base_}" + current_robot_state = RobotState() + current_robot_state.joint_state = joint_state + + request = 
GetPositionFK.Request() + + request.header.frame_id = f"{self.namespace_}/{self.base_}" + request.header.stamp = self.get_clock().now().to_msg() + + request.fk_link_names.append(self.end_effector_) + request.robot_state = current_robot_state + + future = self.fk_client_.call_async(request) + + rclpy.spin_until_future_complete(self, future) + if future.result() is None: + self.get_logger().error("Failed to get FK solution") + return None + + response = future.result() + if response.error_code.val != MoveItErrorCodes.SUCCESS: + self.get_logger().error( + f"Failed to get FK solution: {response.error_code.val}" + ) + return None + + pose = response.pose_stamped[0].pose + euler_pose = self.quat_2_euler( + np.array( + [ + pose.orientation.x, + pose.orientation.y, + pose.orientation.z, + pose.orientation.w, + ] + ) + ) + self.joint_data.at[index, "X"] = pose.position.x + self.joint_data.at[index, "Y"] = pose.position.y + self.joint_data.at[index, "Z"] = pose.position.z + self.joint_data.at[index, "QX"] = pose.orientation.x + self.joint_data.at[index, "QY"] = pose.orientation.y + self.joint_data.at[index, "QZ"] = pose.orientation.z + self.joint_data.at[index, "QW"] = pose.orientation.w + self.joint_data.at[index, "Roll"] = euler_pose[0] + self.joint_data.at[index, "Pitch"] = euler_pose[1] + self.joint_data.at[index, "Yaw"] = euler_pose[2] + + # Compute the end_effector velocity + current_joint_angles = [ + joint_state.position[0], + joint_state.position[1], + joint_state.position[2], + joint_state.position[3], + joint_state.position[4], + joint_state.position[5], + joint_state.position[6], + ] + current_jacobian = self.jacobian_func(current_joint_angles) + + cart_vel = np.dot(current_jacobian, joint_state.velocity) + cart_wrench = np.matmul( + np.linalg.pinv(np.transpose(current_jacobian)), joint_state.effort + ) + + self.joint_data.at[index, "Vx"] = cart_vel[0] + self.joint_data.at[index, "Vy"] = cart_vel[1] + self.joint_data.at[index, "Vz"] = cart_vel[2] + 
self.joint_data.at[index, "Va"] = cart_vel[3] + self.joint_data.at[index, "Vb"] = cart_vel[4] + self.joint_data.at[index, "Vc"] = cart_vel[5] + + # # Overriding Wrench Vals + # self.joint_data.at[index, "Fx"] = cart_wrench[0] + # self.joint_data.at[index, "Fy"] = cart_wrench[1] + # self.joint_data.at[index, "Fz"] = cart_wrench[2] + # self.joint_data.at[index, "Tx"] = cart_wrench[3] + # self.joint_data.at[index, "Ty"] = cart_wrench[4] + # self.joint_data.at[index, "Tz"] = cart_wrench[5] + + self.joint_data.to_csv(self.joint_data_file_path, index=False) + + +def main(args=None): + rclpy.init(args=args) + + # with open('config.json', 'r') as config_file: + # config = json.load(config_file) + + # processed_folder_path = config['processed_csv_folder_path'] + # csv_name = config['replay_csv_name'] + csv_name = "2013-01-17_00-42-46.csv" + processed_folder_path = os.path.join( + MAIN_DIR, "data_collector", "data_collector", "processed_data" + ) + csv_folder = os.path.join(processed_folder_path, os.path.splitext(csv_name)[0]) + joint_data_file_path = os.path.join(csv_folder, "recorded_data.csv") + + joint_cartesian_converter = JointCartesianConverter(joint_data_file_path) + joint_cartesian_converter.convert() + + rclpy.shutdown() diff --git a/data_collector/data_collector/kuka_data_recorder.py b/data_collector/data_collector/kuka_data_recorder.py new file mode 100644 index 00000000..923525af --- /dev/null +++ b/data_collector/data_collector/kuka_data_recorder.py @@ -0,0 +1,248 @@ +import rclpy +import os +import pandas as pd +import csv +import math +import json +from cv_bridge import CvBridge +import cv2 +from rclpy.node import Node +from rclpy.executors import MultiThreadedExecutor +from rclpy.action import ActionClient +from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint +from control_msgs.action import FollowJointTrajectory +from sensor_msgs.msg import Image, JointState +from geometry_msgs.msg import WrenchStamped +from message_filters import 
Subscriber, ApproximateTimeSynchronizer + +MAIN_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + + +class KukaDataRecorder(Node): + def __init__(self, data_folder, joint_states_file): + super().__init__("kuka_data_recorder") + + self.data_folder = data_folder + self.joint_states_file = joint_states_file + self.save_data_folder = os.path.join( + self.data_folder, os.path.splitext(self.joint_states_file)[0] + ) + os.makedirs(self.save_data_folder, exist_ok=True) + + # Create two folders for the images + self.imageA_folder = os.path.join(self.save_data_folder, "imageA") + self.imageB_folder = os.path.join(self.save_data_folder, "imageB") + os.makedirs(self.imageA_folder, exist_ok=True) + os.makedirs(self.imageB_folder, exist_ok=True) + + # topic_callback_group = MutuallyExclusiveCallbackGroup() + # client_callback_group = MutuallyExclusiveCallbackGroup() + + # Initialize everything for trajectory replaying + self.feedback = None + self._action_client = ActionClient( + self, + FollowJointTrajectory, + "/lbr/joint_trajectory_controller/follow_joint_trajectory", + ) + self.joint_names = ["A1", "A2", "A3", "A4", "A5", "A6", "A7"] + self.joint_trajectories = self.readJoinStatesFromCsv( + os.path.join(self.data_folder, self.joint_states_file) + ) + # This can be done through goal sending + self.initializeRobotPose() + + # Create a synchronized callback for all the information + self.imageA_topic = "/camera1/camera1/color/image_raw" + self.imageB_topic = "/camera2/camera2/color/image_raw" + self.joint_state_topic = "/lbr/joint_states" + self.wrench_topic = "/lbr/force_torque_broadcaster/wrench" + + self.imageA_sub = Subscriber(self, Image, self.imageA_topic) + self.imageB_sub = Subscriber(self, Image, self.imageB_topic) + self.joint_state_sub = Subscriber(self, JointState, self.joint_state_topic) + self.wrench_sub = Subscriber(self, WrenchStamped, self.wrench_topic) + + self.synchronizer = ApproximateTimeSynchronizer( + [self.imageA_sub, 
self.imageB_sub, self.joint_state_sub, self.wrench_sub], + queue_size=100, + slop=0.05, # NOTE: 0.05 gives less choppy transitions in images. + ) + + self.cvbridge = CvBridge() + + self.synchronizer.registerCallback(self.recordSyncData) + self.get_logger().info("Data Synchronizer Node Initialized") + + self.dataframe = [] + + # This needs to be done through moveit planner since the points need to be interpolated to a trajectory + self.executeTrajectory() + + def recordSyncData(self, imageA_msg, imageB_msg, joint_state_msg, wrench_msg): + # The timestamp of imageA_msg will be used to name the columns + timestamp = f"{imageA_msg.header.stamp.sec}_{imageA_msg.header.stamp.nanosec}" + + self.get_logger().info(f"Recording data at timestamp {timestamp}") + + # Save the joint state + data_row = {"timestamp": timestamp} + + for i in range(7): + joint_id = joint_state_msg.name[i][-1] + data_row[f"P{joint_id}"] = joint_state_msg.position[i] + data_row[f"V{joint_id}"] = joint_state_msg.velocity[i] + data_row[f"E{joint_id}"] = joint_state_msg.effort[i] + + # Save the wrench + data_row["Fx"] = wrench_msg.wrench.force.x + data_row["Fy"] = wrench_msg.wrench.force.y + data_row["Fz"] = wrench_msg.wrench.force.z + data_row["Tx"] = wrench_msg.wrench.torque.x + data_row["Ty"] = wrench_msg.wrench.torque.y + data_row["Tz"] = wrench_msg.wrench.torque.z + + self.dataframe.append(data_row) + + # Save the images + imageA_filename = os.path.join(self.imageA_folder, f"{timestamp}_A.jpg") + imageB_filename = os.path.join(self.imageB_folder, f"{timestamp}_B.jpg") + image_A = self.cvbridge.imgmsg_to_cv2(imageA_msg, desired_encoding="bgr8") + cv2.imwrite(imageA_filename, image_A) + image_B = self.cvbridge.imgmsg_to_cv2(imageB_msg, desired_encoding="bgr8") + cv2.imwrite(imageB_filename, image_B) + + def readJoinStatesFromCsv(self, file_path): + joint_states = [] + with open(file_path, mode="r") as file: + csv_reader = csv.DictReader(file) + for row in csv_reader: + joint_states.append( + [ + 
math.radians(float(row["A1"])), + math.radians(float(row["A2"])), + math.radians(float(row["A3"])), + math.radians(float(row["A4"])), + math.radians(float(row["A5"])), + math.radians(float(row["A6"])), + math.radians(float(row["A7"])), + ] + ) + + return joint_states + + def initializeRobotPose(self): + # The robot needs to be initialized to the first position of the trajectory + # to ensure that there is no sudden movement when the trajectory is replayed + goal_msg = FollowJointTrajectory.Goal() + trajectory_msg = JointTrajectory() + trajectory_msg.joint_names = self.joint_names + + self.get_logger().info(f"Processing trajectory point init") + + point = JointTrajectoryPoint() + point.positions = self.joint_trajectories[0] + point.time_from_start.sec = 4 # Allow 4 seconds to reach the first trajectory point + + trajectory_msg.points.append(point) + goal_msg.trajectory = trajectory_msg + self._action_client.wait_for_server() + + send_goal_future = self._action_client.send_goal_async( + goal_msg, feedback_callback=self.feedback_callback + ) + + rclpy.spin_until_future_complete(self, send_goal_future) + + goal_handle = send_goal_future.result() + + if not goal_handle.accepted: + self.get_logger().info(f"Goal for init point was rejected") + return + + self.get_logger().info(f"Goal for init point was accepted") + get_result_future = goal_handle.get_result_async() + rclpy.spin_until_future_complete(self, get_result_future) + result = get_result_future.result().result + self.get_logger().info(f"Result : {result}, Initial position reached") + print("Press Enter to continue ...") + input() + + def executeTrajectory(self): + goal_msg = FollowJointTrajectory.Goal() + trajectory_msg = JointTrajectory() + trajectory_msg.joint_names = self.joint_names + for i, joint_values in enumerate(self.joint_trajectories): + point = JointTrajectoryPoint() + point.positions = joint_values + point.time_from_start = rclpy.duration.Duration( + seconds=(i + 1) * 0.1 + ).to_msg() + trajectory_msg.points.append(point) + 
goal_msg.trajectory = trajectory_msg + self._action_client.wait_for_server() + + send_goal_future = self._action_client.send_goal_async( + goal_msg, feedback_callback=self.feedback_callback + ) + + send_goal_future.add_done_callback(self.goal_response_callback) + + def goal_response_callback(self, future): + goal_handle = future.result() + if not goal_handle.accepted: + self.get_logger().info(f"Goal for trajectory was rejected") + return + self.get_logger().info(f"Goal for trajectory was accepted") + get_result_future = goal_handle.get_result_async() + get_result_future.add_done_callback(self.result_callback) + + def result_callback(self, future): + result = future.result() + if result.result.error_code == FollowJointTrajectory.Result.SUCCESSFUL: + self.get_logger().info("WOHOOO") + self.shutdown_node() + else: + self.get_logger().info("OHH NO") + + def feedback_callback(self, feedback_msg): + self.feedback = feedback_msg.feedback + + def shutdown_node(self): + self.get_logger().info("Shutting down node now") + self.save_data() + self.destroy_node() + rclpy.shutdown() + + def save_data(self): + df = pd.DataFrame(self.dataframe) + save_path = os.path.join(self.save_data_folder, "recorded_data.csv") + df.to_csv(save_path, index=False) + self.get_logger().info("Saved data!") + + +def main(args=None): + + data_folder = os.path.join( + MAIN_DIR, "data_collector", "data_collector", "processed_data" + ) + joint_states_file = "2013-01-17_00-42-46.csv" + rclpy.init(args=args) + executor = MultiThreadedExecutor() + node = KukaDataRecorder( + data_folder=data_folder, joint_states_file=joint_states_file + ) + executor.add_node(node) + try: + + executor.spin() + except Exception as e: + node.get_logger().error(f"An error occurred: {e}") + finally: + if rclpy.ok(): + node.destroy_node() + rclpy.shutdown() + + +if __name__ == "__main__": + main() diff --git a/data_collector/data_collector/parse_robot_log.py b/data_collector/data_collector/parse_robot_log.py new file mode 100644 
index 00000000..bdc17506 --- /dev/null +++ b/data_collector/data_collector/parse_robot_log.py @@ -0,0 +1,128 @@ +import pandas as pd +import json +import re +import os +import glob + +MAIN_DIR = os.path.dirname(os.path.realpath(__file__)) + + +def parse_robot_data(file_path): + """ + Parses the robot data file, renames joint columns to J1-J7, + extracts and renames time, force, and torque values. + + Parameters: + file_path (str): Path to the data file. + + Returns: + pd.DataFrame: A DataFrame containing time, renamed joint values, + and renamed force/torque data. + """ + # Step 1: Read the header line starting with '%' + with open(file_path, "r") as file: + for line in file: + if line.startswith("%"): + header = line.strip().lstrip("%").strip().split() + break + else: + raise ValueError("No header line starting with '%' found in the file.") + + # Step 2: Load the data into a DataFrame + df = pd.read_csv( + file_path, + delim_whitespace=True, # Assuming the data is space-separated + comment="%", # Skip any lines starting with '%' + names=header, # Use the extracted header + skiprows=1, # Skip the header line + ) + + # Step 3: Identify and rename joint columns + joint_pattern = re.compile(r"axisQMsr_LBR_iiwa_14_R820_1\[(\d+)\]") + joint_columns = [col for col in header if joint_pattern.match(col)] + + if len(joint_columns) != 7: + raise ValueError(f"Expected 7 joint columns, found {len(joint_columns)}.") + + # Sort joint columns based on their index and rename them to J1-J7 + joint_columns_sorted = sorted( + joint_columns, key=lambda x: int(joint_pattern.match(x).group(1)) + ) + joint_rename_map = { + col: f"A{idx+1}" for idx, col in enumerate(joint_columns_sorted) + } + df.rename(columns=joint_rename_map, inplace=True) + + # Step 4: Extract and combine time columns + if "ZeitInSec" not in df.columns or "ZeitInNanoSec" not in df.columns: + raise ValueError( + "Required time columns 'ZeitInSec' and/or 'ZeitInNanoSec' are missing." 
+ ) + + # Combine 'ZeitInSec' and 'ZeitInNanoSec' into a single 'time' column in seconds + df["time"] = df["ZeitInSec"] + df["ZeitInNanoSec"] * 1e-9 + + # Drop the original time columns as they are now combined + df.drop(columns=["ZeitInSec", "ZeitInNanoSec"], inplace=True) + + # Step 5: Extract and rename force and torque columns + force_columns = {"cartForce1_X": "Fx", "cartForce1_Y": "Fy", "cartForce1_Z": "Fz"} + torque_columns = { + "cartTorque1_TauX": "Tx", + "cartTorque1_TauY": "Ty", + "cartTorque1_TauZ": "Tz", + } + + # Verify that the required force and torque columns exist + missing_force_columns = [ + col for col in force_columns.keys() if col not in df.columns + ] + missing_torque_columns = [ + col for col in torque_columns.keys() if col not in df.columns + ] + missing_columns = missing_force_columns + missing_torque_columns + if missing_columns: + raise ValueError( + f"The following required columns are missing in the data: {missing_columns}" + ) + + # Rename force and torque columns + df.rename(columns=force_columns, inplace=True) + df.rename(columns=torque_columns, inplace=True) + + # Step 6: Select the relevant columns + selected_columns = ( + ["time"] + + list(joint_rename_map.values()) + + list(force_columns.values()) + + list(torque_columns.values()) + ) + df_selected = df[selected_columns] + + return df_selected + + +def main(): + + # with open('config.json', 'r') as config_file: + # config = json.load(config_file) + + # log_folder_path = config['log_folder_path'] + # save_df_path = config['processed_csv_folder_path'] + log_folder_path = os.path.join(MAIN_DIR, "11_dec_logs") + save_df_path = os.path.join(MAIN_DIR, "processed_data") + os.makedirs(save_df_path, exist_ok=True) + + log_files = glob.glob(os.path.join(log_folder_path, "*.log")) + + for file_path in log_files: + print(file_path) + returned_df = parse_robot_data(file_path=file_path) + save_file_path = os.path.join( + save_df_path, os.path.splitext(os.path.basename(file_path))[0] + ".csv" 
+ ) + returned_df.to_csv(save_file_path, index=False) + + +if __name__ == "__main__": + main() diff --git a/data_collector/package.xml b/data_collector/package.xml new file mode 100644 index 00000000..494d0fa0 --- /dev/null +++ b/data_collector/package.xml @@ -0,0 +1,38 @@ + + + + data_collector + 0.0.0 + TODO: Package description + nisara + Apache-2.0 + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + rclpy + sensor_msgs + trajectory_msgs + control_msgs + geometry_msgs + message_filters + numpy + pandas + cv2 + cv_bridge + + + + ament_python + + diff --git a/data_collector/resource/data_collector b/data_collector/resource/data_collector new file mode 100644 index 00000000..e69de29b diff --git a/data_collector/setup.cfg b/data_collector/setup.cfg new file mode 100644 index 00000000..f0a7afc8 --- /dev/null +++ b/data_collector/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/data_collector +[install] +install_scripts=$base/lib/data_collector diff --git a/data_collector/setup.py b/data_collector/setup.py new file mode 100644 index 00000000..43107e8c --- /dev/null +++ b/data_collector/setup.py @@ -0,0 +1,27 @@ +from setuptools import find_packages, setup + +package_name = 'data_collector' + +setup( + name=package_name, + version='0.0.0', + packages=find_packages(exclude=['test']), + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ], + install_requires=['setuptools', 'pandas', 'numpy'], + zip_safe=True, + maintainer='nisara', + maintainer_email='sarawgi.nikita@gmail.com', + description='TODO: Package description', + license='Apache-2.0', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'kuka_data_recorder = data_collector.kuka_data_recorder:main', + 'joint_cartesian_converter = data_collector.joint_cartesian_converter:main' + ], + }, +) diff --git a/examples/async_peg_insert_drq/async_drq_randomized.py 
b/examples/async_peg_insert_drq/async_drq_randomized.py index 746368ee..07c6f177 100644 --- a/examples/async_peg_insert_drq/async_drq_randomized.py +++ b/examples/async_peg_insert_drq/async_drq_randomized.py @@ -1,6 +1,12 @@ #!/usr/bin/env python3 import sys -sys.path.append("/home/cam/miniconda3/envs/serl-rros/lib/python3.10/site-packages") +import os +import getpass +username = getpass.getuser() +env = "serl" +print("Ensure to change anaconda3 or miniconda3 and change your environment name") +print("Conda Environment name is: ", env) +sys.path.append("/home/"+username+"/anaconda3/envs/"+env+"/lib/python3.10/site-packages") import rclpy import rclpy.duration from rclpy.executors import MultiThreadedExecutor @@ -53,13 +59,15 @@ # pass FLAGS = flags.FLAGS -flags.DEFINE_string("env", "FrankaEnv-Vision-v0", "Name of environment.") +flags.DEFINE_string("env", "KukaPegInsert-Vision-v0", "Name of environment.") flags.DEFINE_string("agent", "drq", "Name of agent.") flags.DEFINE_string("exp_name", None, "Name of the experiment for wandb logging.") flags.DEFINE_integer("max_traj_length", 100, "Maximum length of trajectory.") flags.DEFINE_integer("seed", 42, "Random seed.") flags.DEFINE_bool("save_model", False, "Whether to save model.") flags.DEFINE_integer("critic_actor_ratio", 4, "critic to actor update ratio.") +flags.DEFINE_boolean("load_checkpoint", False, "Whether to start from previous checkpoint or not.") +flags.DEFINE_string("load_checkpoint_path", None, "Checkpoint to start training from.") flags.DEFINE_integer("batch_size", 256, "Batch size for training the policy.") flags.DEFINE_integer("max_steps", 1000000, "Maximum number of training steps.") @@ -71,9 +79,10 @@ flags.DEFINE_integer("log_period", 10, "Logging period.") flags.DEFINE_integer("eval_period", 2000, "Evaluation period.") +flags.DEFINE_integer("loaded_checkpoint_step", 1000, "Loaded Checkpoint Step.") # flag to indicate if this is a leaner or a actor -flags.DEFINE_boolean("learner", True, "Is this a 
learner or a trainer.") +flags.DEFINE_boolean("learner", False, "Is this a learner or a trainer.") flags.DEFINE_boolean("actor", False, "Is this a learner or a trainer.") flags.DEFINE_boolean("render", False, "Render the environment.") flags.DEFINE_string("ip", "localhost", "IP address of the learner.") @@ -120,7 +129,7 @@ def actor(agent: DrQAgent, data_store, env, sampling_rng): ) agent = agent.replace(state=ckpt) - for episode in range(FLAGS.eval_n_trajs): + for episode in tqdm.tqdm(range(FLAGS.eval_n_trajs), "Evaluation Trajectory"): obs, _ = env.reset() done = False start_time = time.time() @@ -232,12 +241,12 @@ def learner(rng, agent: DrQAgent, replay_buffer): The learner loop, which runs when "--learner" is set to True. """ # set up wandb and logging - wandb_logger = None - # wandb_logger = make_wandb_logger( - # project="serl_dev", - # description=FLAGS.exp_name or FLAGS.env, - # debug=FLAGS.debug, - # ) + # wandb_logger = None + wandb_logger = make_wandb_logger( + project="serl_testing", + description=FLAGS.exp_name or FLAGS.env, + debug=FLAGS.debug, + ) # To track the step in the training loop update_steps = 0 @@ -339,6 +348,7 @@ def main(_): executor.add_node(robot_interface_node) robot_interface_node.get_logger().info("Robot interface node started.") # executor.spin_once() + assert FLAGS.batch_size % num_devices == 0 # seed rng = jax.random.PRNGKey(FLAGS.seed) @@ -373,6 +383,16 @@ def main(_): image_keys=image_keys, encoder_type=FLAGS.encoder_type, ) + + if(FLAGS.load_checkpoint): + print(f"Loading Checkpoint from Previous Run:{FLAGS.load_checkpoint_path}") + + ckpt = checkpoints.restore_checkpoint( + FLAGS.load_checkpoint_path, + agent.state, + step=FLAGS.loaded_checkpoint_step, + ) + agent = agent.replace(state=ckpt) print("Agent created") # replicate agent across devices @@ -392,19 +412,20 @@ def main(_): image_keys=image_keys, ) print("Replay buffer initialized") - # demo_buffer = MemoryEfficientReplayBufferDataStore( - # env.observation_space, - # 
env.action_space, - # capacity=10000, - # image_keys=image_keys, - # ) - # import pickle as pkl - - # with open(FLAGS.demo_path, "rb") as f: - # trajs = pkl.load(f) - # for traj in trajs: - # demo_buffer.insert(traj) - # print(f"demo buffer size: {len(demo_buffer)}") + print("Checkpoint Path: ", FLAGS.checkpoint_path) + demo_buffer = MemoryEfficientReplayBufferDataStore( + env.observation_space, + env.action_space, + capacity=10000, + image_keys=image_keys, + ) + import pickle as pkl + + with open(FLAGS.demo_path, "rb") as f: + trajs = pkl.load(f) + for traj in trajs: + demo_buffer.insert(traj) + print(f"demo buffer size: {len(demo_buffer)}") # learner loop print_green("starting learner loop") diff --git a/examples/async_peg_insert_drq/run_actor.sh b/examples/async_peg_insert_drq/run_actor.sh index 3850c8c0..230473ba 100644 --- a/examples/async_peg_insert_drq/run_actor.sh +++ b/examples/async_peg_insert_drq/run_actor.sh @@ -1,7 +1,8 @@ export XLA_PYTHON_CLIENT_PREALLOCATE=false && \ export XLA_PYTHON_CLIENT_MEM_FRACTION=.1 && \ +export XLA_PYTHON_CLIENT_ALLOCATOR=platform &&\ python3 async_drq_randomized.py "$@" \ - --actor \ + --actor 1\ --render \ --env KukaPegInsert-Vision-v0 \ --exp_name=serl_dev_drq_rlpd10demos_peg_insert_random_resnet \ @@ -9,4 +10,10 @@ python3 async_drq_randomized.py "$@" \ --random_steps 0 \ --training_starts 200 \ --encoder_type resnet-pretrained \ - --demo_path peg_insert_20_demos_2023-12-25_16-13-25.pkl \ + --demo_path /home/omey/nisara/expert_data_collector/processed_replay/processed_replay/2013-01-01_01-09-43/recorded_data.pkl \ + --eval_checkpoint_step 0 \ + --loaded_checkpoint_step 1000 \ + --eval_n_trajs 4 \ + --load_checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_12 \ + # --load_checkpoint 0 \ + # --checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints/ diff --git a/examples/async_peg_insert_drq/run_learner.sh b/examples/async_peg_insert_drq/run_learner.sh index 
a1516d13..942cae1f 100644 --- a/examples/async_peg_insert_drq/run_learner.sh +++ b/examples/async_peg_insert_drq/run_learner.sh @@ -1,7 +1,8 @@ export XLA_PYTHON_CLIENT_PREALLOCATE=false && \ export XLA_PYTHON_CLIENT_MEM_FRACTION=.2 && \ +export XLA_PYTHON_CLIENT_ALLOCATOR=platform &&\ python3 async_drq_randomized.py "$@" \ - --learner \ + --learner 1\ --env KukaPegInsert-Vision-v0 \ --exp_name=serl_dev_drq_rlpd10demos_peg_insert_random_resnet_097 \ --seed 0 \ @@ -11,6 +12,10 @@ python3 async_drq_randomized.py "$@" \ --batch_size 256 \ --eval_period 2000 \ --encoder_type resnet-pretrained \ - --demo_path peg_insert_20_demos_2023-12-25_16-13-25.pkl \ + --demo_path /home/omey/nisara/expert_data_collector/processed_replay/processed_replay/2013-01-01_01-09-43/recorded_data.pkl \ --checkpoint_period 1000 \ - --checkpoint_path /home/undergrad/code/serl_dev/examples/async_peg_insert_drq/5x5_20degs_20demos_rand_peg_insert_097 + --loaded_checkpoint_step 1000 \ + --load_checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_12/ \ + --checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints/ + # --load_checkpoint 0 \ + diff --git a/kuka_server/kuka_server/robot_interface.py b/kuka_server/kuka_server/robot_interface.py index 4a119b2a..3a95c3c6 100644 --- a/kuka_server/kuka_server/robot_interface.py +++ b/kuka_server/kuka_server/robot_interface.py @@ -17,6 +17,7 @@ from control_msgs.action import FollowJointTrajectory import os import sys +import socket from moveit_msgs.msg import ( RobotState, RobotTrajectory, @@ -34,6 +35,7 @@ import copy import optas + class RobotInterfaceNode(Node): timeout_sec_ = 5.0 @@ -49,7 +51,8 @@ class RobotInterfaceNode(Node): fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" WRENCH_TOPIC = "/lbr/force_torque_broadcaster/wrench" robot_desc_topic_ = "robot_description" - + port_id = 30200 + robot_ip = "192.168.10.122" base_ = "link_0" ###Changed for compatibility with latest 
lbr stack end_effector_ = "link_ee" @@ -64,31 +67,42 @@ def __init__(self) -> None: self.robot_state = {} self.wrench_sub = self.create_subscription( - WrenchStamped, - self.WRENCH_TOPIC, - self.wrench_callback, - 10 + WrenchStamped, self.WRENCH_TOPIC, self.wrench_callback, 10 + ) + + self.state_sub = self.create_subscription( + LBRState, self.lbr_state_topic_, self.state_callback, 100 ) self._init_jacobian() + self.session_ok = True - self.ik_client_ = self.create_client(GetPositionIK, self.ik_srv_name_, callback_group=self.ik_client_callback) + self.ik_client_ = self.create_client( + GetPositionIK, self.ik_srv_name_, callback_group=self.ik_client_callback + ) if not self.ik_client_.wait_for_service(timeout_sec=self.timeout_sec_): self.get_logger().error("IK service not available.") exit(1) - self.fk_client_ = self.create_client(GetPositionFK, self.fk_srv_name_, callback_group=self.fk_client_callback) + self.fk_client_ = self.create_client( + GetPositionFK, self.fk_srv_name_, callback_group=self.fk_client_callback + ) if not self.fk_client_.wait_for_service(timeout_sec=self.timeout_sec_): self.get_logger().error("FK service not available.") exit(1) - self.plan_client_ = self.create_client(GetMotionPlan, self.plan_srv_name_, callback_group=self.plan_client_callback) + self.plan_client_ = self.create_client( + GetMotionPlan, self.plan_srv_name_, callback_group=self.plan_client_callback + ) if not self.plan_client_.wait_for_service(timeout_sec=self.timeout_sec_): self.get_logger().error("Plan service not available.") exit(1) self.execute_client_ = ActionClient( - self, ExecuteTrajectory, self.execute_action_name_, callback_group=self.execute_client_callback + self, + ExecuteTrajectory, + self.execute_action_name_, + callback_group=self.execute_client_callback, ) if not self.execute_client_.wait_for_server(timeout_sec=self.timeout_sec_): self.get_logger().error("Execute action not available.") @@ -104,6 +118,18 @@ def __init__(self) -> None: 
self.get_logger().error("FRI Execute action not available.") exit(1) + def state_callback(self, state: LBRState): + self.session_state = state.session_state + external_torque = state.external_torque + # if(np.any(np.abs(external_torque)>10) or self.session_state!=4): + if self.session_state != 4: + self.get_logger().info( + f"Torque Exceeded or State Change to {self.session_state}" + ) + self.session_ok = False + + return + def get_ik(self, target_pose: Pose) -> JointState | None: request = GetPositionIK.Request() @@ -126,18 +152,17 @@ def get_ik(self, target_pose: Pose) -> JointState | None: return None return response.solution.joint_state - + def wrench_callback(self, msg): self.latest_wrench = convert_wrench_to_numpy(msg) # self.get_logger().info(f"Received wrench data: {self.latest_wrench}") return - - + def _init_jacobian(self): print("Initializing Jacobian") self._robot_description = self._retrieve_parameter( - "robot_state_publisher/get_parameters", "robot_description" + "/lbr/robot_state_publisher/get_parameters", "robot_description" ).string_value self._robot = optas.RobotModel( urdf_string=self._robot_description, time_derivs=[0, 1] @@ -148,6 +173,7 @@ def _init_jacobian(self): ) return + def get_fk(self) -> Pose | None: current_joint_state = self.get_joint_state() if current_joint_state is None: @@ -158,7 +184,6 @@ def get_fk(self) -> Pose | None: current_robot_state.joint_state = current_joint_state request = GetPositionFK.Request() - self.get_logger().info(f"{self.namespace_}/{self.base_}") request.header.frame_id = f"{self.namespace_}/{self.base_}" request.header.stamp = self.get_clock().now().to_msg() @@ -171,22 +196,24 @@ def get_fk(self) -> Pose | None: if future.result() is None: self.get_logger().error("Failed to get FK solution") return None - + response = future.result() if response.error_code.val != MoveItErrorCodes.SUCCESS: self.get_logger().error( f"Failed to get FK solution: {response.error_code.val}" ) return None - + return 
response.pose_stamped[0].pose, current_joint_state - def get_fk_lbr(self, commanded:bool = False) -> Pose | None: - lbr_state_set, lbr_state = wait_for_message(LBRState, self, self.lbr_state_topic_) + def get_fk_lbr(self, commanded: bool = False) -> Pose | None: + lbr_state_set, lbr_state = wait_for_message( + LBRState, self, self.lbr_state_topic_ + ) joint_position = lbr_state.measured_joint_position.tolist() if commanded: joint_position = lbr_state.commanded_joint_position.tolist() - + joint_position[2], joint_position[3] = joint_position[3], joint_position[2] current_robot_state = RobotState() @@ -207,14 +234,14 @@ def get_fk_lbr(self, commanded:bool = False) -> Pose | None: if future.result() is None: self.get_logger().error("Failed to get FK solution") return None - + response = future.result() if response.error_code.val != MoveItErrorCodes.SUCCESS: self.get_logger().error( f"Failed to get FK solution: {response.error_code.val}" ) return None - + return response.pose_stamped[0].pose def sum_of_square_diff( @@ -223,7 +250,7 @@ def sum_of_square_diff( return np.sum( np.square(np.subtract(joint_state_1.position, joint_state_2.position)) ) - + def _retrieve_parameter(self, service: str, parameter_name: str) -> ParameterValue: parameter_client = self.create_client(GetParameters, service) while not parameter_client.wait_for_service(timeout_sec=1.0): @@ -239,12 +266,11 @@ def _retrieve_parameter(self, service: str, parameter_name: str) -> ParameterVal if future.result() is None: self.get_logger().error(f"Failed to retrieve '{parameter_name}'.") return None - self.get_logger().info(f"Received '{parameter_name}' from '{service}'.") return future.result().values[0] def get_current_state(self): ##Return a dictionary with all the corresponding state variables - #Pose, Velocity, Force, Torque, Jacobian, Joint Angles, Joint Velocity, Gripper Pose + # Pose, Velocity, Force, Torque, Jacobian, Joint Angles, Joint Velocity, Gripper Pose self.robot_state["pose"] = np.zeros((7,)) 
self.robot_state["vel"] = np.zeros((6,)) self.robot_state["force"] = np.zeros((3,)) @@ -254,31 +280,71 @@ def get_current_state(self): ##TCP pose current_ee_geom_pose, current_joint_state = self.get_fk() - self.robot_state["pose"] = np.array([current_ee_geom_pose.position.x,current_ee_geom_pose.position.y,current_ee_geom_pose.position.z, - current_ee_geom_pose.orientation.x,current_ee_geom_pose.orientation.y,current_ee_geom_pose.orientation.z,current_ee_geom_pose.orientation.w]) - + self.robot_state["pose"] = np.array( + [ + current_ee_geom_pose.position.x, + current_ee_geom_pose.position.y, + current_ee_geom_pose.position.z, + current_ee_geom_pose.orientation.x, + current_ee_geom_pose.orientation.y, + current_ee_geom_pose.orientation.z, + current_ee_geom_pose.orientation.w, + ] + ) + ##Getting Joint Angles - joint_angles = current_joint_state.position + joint_angles = [ + current_joint_state.position[0], + current_joint_state.position[1], + current_joint_state.position[3], + current_joint_state.position[2], + current_joint_state.position[4], + current_joint_state.position[5], + current_joint_state.position[6], + ] + self.robot_state["q"] = joint_angles - joint_velocity = current_joint_state.velocity + joint_velocity = [ + current_joint_state.velocity[0], + current_joint_state.velocity[1], + current_joint_state.velocity[3], + current_joint_state.velocity[2], + current_joint_state.velocity[4], + current_joint_state.velocity[5], + current_joint_state.velocity[6], + ] + self.robot_state["dq"] = joint_velocity ##Getting Force and Wrench current_wrench = copy.deepcopy(self.latest_wrench) self.robot_state["force"] = current_wrench[:3] self.robot_state["torque"] = current_wrench[3:] - - self.robot_state["jacobian"] = self.jacobian_func(joint_angles) - - self.robot_state["vel"] = np.dot(self.robot_state["jacobian"],joint_velocity) - + self.robot_state["vel"] = np.dot(self.robot_state["jacobian"], joint_velocity) return self.robot_state - def move_to_pose(self, 
pose:np.ndarray): - print("Moving to Pose: ", pose) + def is_socket_open(self, host, port): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(1) # Timeout after 1 second + try: + sock.connect((host, port)) + return True # If the connection succeeds, the socket is open + except (socket.timeout, socket.error): + return False # If there’s an error, the socket is likely closed + finally: + sock.close() # Always close the socket after checking + + def move_to_pose(self, pose: np.ndarray, reset_pose: np.ndarray): + + if not self.session_ok: + print("ERROR in LBR FRI Restart the node") + input("Press Enter When Done Resetting") + input("Are you sure everything is good? Press Enter Again") + self.session_ok = True + target_pose = Pose() - + target_pose.position.x = float(pose[0]) target_pose.position.y = float(pose[1]) target_pose.position.z = float(pose[2]) @@ -289,8 +355,7 @@ def move_to_pose(self, pose:np.ndarray): target_pose.orientation.z = float(pose[5]) target_pose.orientation.w = float(pose[6]) - - traj = self.get_motion_plan(target_pose, True) + traj = self.get_motion_plan(target_pose, True, scaling_factor=0.04) if traj: client = self.get_motion_execute_client() goal = ExecuteTrajectory.Goal() @@ -298,14 +363,13 @@ def move_to_pose(self, pose:np.ndarray): future = client.send_goal_async(goal) rclpy.spin_until_future_complete(self, future) - + goal_handle = future.result() if not goal_handle.accepted: self.get_logger().error("Failed to execute trajectory") else: self.get_logger().info("Trajectory accepted") - result_future = goal_handle.get_result_async() expect_duration = traj.joint_trajectory.points[-1].time_from_start @@ -314,14 +378,9 @@ def move_to_pose(self, pose:np.ndarray): time.sleep(0.01) self.get_logger().info("Trajectory executed") - - self.get_logger().info("Current pose: " + str(self.get_fk()[0]) ) - return - - def get_best_ik(self, target_pose: Pose, attempts: int = 100) -> JointState | None: current_joint_state = 
self.get_joint_state() if current_joint_state is None: @@ -353,11 +412,15 @@ def get_joint_state(self) -> JointState: if not current_joint_state_set: self.get_logger().error("Failed to get current joint state") return None - + return current_joint_state def get_motion_plan( - self, target_pose: Pose, linear: bool = False, scaling_factor: float = .1, attempts: int = 10 + self, + target_pose: Pose, + linear: bool = False, + scaling_factor: float = 0.1, + attempts: int = 10, ) -> RobotTrajectory | None: current_pose = self.get_fk()[0] if current_pose is None: @@ -420,12 +483,12 @@ def get_motion_plan( ) else: return response.motion_plan_response.trajectory - + return None def get_motion_execute_client(self) -> ActionClient: return self.execute_client_ - + def get_fri_motion_execute(self, traj: RobotTrajectory, short_wait=False) -> bool: joint_trajectory_goal = FollowJointTrajectory.Goal() joint_trajectory_goal.trajectory = traj.joint_trajectory @@ -443,11 +506,21 @@ def get_fri_motion_execute(self, traj: RobotTrajectory, short_wait=False) -> boo execution_finished = False expected_timout = time.time() + 30 - if short_wait: - expected_timout = time.time() + 2.0 + if short_wait: + expected_timout = time.time() + 2.0 while not execution_finished and time.time() < expected_timout: _, lbr_state = wait_for_message(LBRState, self, self.lbr_state_topic_) - if np.max(np.abs(np.subtract(lbr_state.measured_joint_position, traj.joint_trajectory.points[-1].positions))) < 0.0002: + if ( + np.max( + np.abs( + np.subtract( + lbr_state.measured_joint_position, + traj.joint_trajectory.points[-1].positions, + ) + ) + ) + < 0.0002 + ): execution_finished = True time.sleep(0.01) diff --git a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py index dcfed4fa..37ff6a55 100644 --- a/serl_robot_infra/kuka_env/envs/kuka_env.py +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -1,9 +1,12 @@ """Gym Interface for Franka""" import sys import time 
-time.sleep(2) -print("MAKE SURE TO CHANGE THE PATHS") -sys.path.append("/home/cam/miniconda3/envs/serl-rros/lib/python3.10/site-packages") +import getpass +username = getpass.getuser() +env = "serl" +print("Ensure to change anaconda3 or miniconda3 and change your environment name") +print("Conda Environment name is: ", env) +sys.path.append("/home/"+username+"/anaconda3/envs/"+env+"/lib/python3.10/site-packages") import numpy as np import gym @@ -205,7 +208,7 @@ def clip_safety_box(self, pose: np.ndarray) -> np.ndarray: def step(self, action: np.ndarray) -> tuple: """standard gym step function.""" print("In step function") - time.sleep(2) + start_time = time.time() action = np.clip(action, self.action_space.low, self.action_space.high) xyz_delta = action[:3] @@ -243,14 +246,14 @@ def step(self, action: np.ndarray) -> tuple: def compute_reward(self, obs, gripper_action_effective = None) -> bool: """We are using a sparse reward function.""" current_pose = obs["state"]["tcp_pose"] - print("In Reward function") - time.sleep(2) + # convert from quat to euler first euler_angles = quat_2_euler(current_pose[3:]) euler_angles = np.abs(euler_angles) current_pose = np.hstack([current_pose[:3], euler_angles]) delta = np.abs(current_pose - self._TARGET_POSE) if np.all(delta < self._REWARD_THRESHOLD): + print("Received reward 1!!!!") reward = 1 else: # print(f'Goal not reached, the difference is {delta}, the desired threshold is {_REWARD_THRESHOLD}') @@ -264,9 +267,9 @@ def compute_reward(self, obs, gripper_action_effective = None) -> bool: def crop_image(self, name, image) -> np.ndarray: """Crop realsense images to be a square.""" if name == "wrist_1": - return image[:, 80:560, :] + return image[:, 80:650, :] elif name == "wrist_2": - return image[:, 80:560, :] + return image[:, 80:650, :] else: return ValueError(f"Camera {name} not recognized in cropping") @@ -316,9 +319,7 @@ def go_to_rest(self, joint_reset=False): """ # Change to precision mode for reset # 
requests.post(self.url + "update_param", json=self.config.PRECISION_PARAM) - print("In Go to rest") - time.sleep(2) - + # Perform Carteasian reset if self.randomreset: # randomize reset position in xy plane reset_pose = self.resetpos.copy() @@ -330,10 +331,10 @@ def go_to_rest(self, joint_reset=False): -self.random_rz_range, self.random_rz_range ) reset_pose[3:] = euler_2_quat(euler_random) - self.interpolate_move(reset_pose, timeout=1.5) + self._send_pos_command(reset_pose) else: reset_pose = self.resetpos.copy() - self.interpolate_move(reset_pose, timeout=1.5) + self._send_pos_command(reset_pose) # Change to compliance mode # requests.post(self.url + "update_param", json=self.config.COMPLIANCE_PARAM) @@ -350,7 +351,6 @@ def reset(self, joint_reset=False, **kwargs): joint_reset = True self.go_to_rest(joint_reset=joint_reset) - self._recover() self.curr_path_length = 0 self._update_currpos() @@ -402,9 +402,8 @@ def _recover(self): def _send_pos_command(self, pos: np.ndarray): """Internal function to send position command to the robot.""" - self._recover() arr = np.array(pos).astype(np.float32) - self.robot_interface_node.move_to_pose(arr) + self.robot_interface_node.move_to_pose(arr, self.resetpos) print("Done moving the robot") def _send_gripper_command(self, pos: float, mode="binary"): @@ -436,7 +435,6 @@ def _update_currpos(self): Internal function to get the latest state of the robot and its gripper. 
""" ps = self.robot_interface_node.get_current_state() - print("Ps: ", ps) self.currpos[:] = np.array(ps["pose"], dtype=np.float32) self.currvel[:] = np.array(ps["vel"], dtype=np.float32) diff --git a/serl_robot_infra/kuka_env/envs/peg_env/config.py b/serl_robot_infra/kuka_env/envs/peg_env/config.py index 196b9ef0..ca306a78 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/config.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/config.py @@ -12,16 +12,16 @@ class PegEnvConfig(DefaultEnvConfig): } TARGET_POSE = np.array( [ - 0.5906439143742067, - 0.07771711953459341, - 0.337835826958042, - 3.1099675, - 0.0146619, - -0.0078615, + 0.69243, + -0.01238, + 0.2242, + np.deg2rad(179.03), + np.deg2rad(1.41), + np.deg2rad(176.33), ] ) RESET_POSE = TARGET_POSE + np.array([0.0, 0.0, 0.2, 0.0, 0.0, 0.0]) - REWARD_THRESHOLD: np.ndarray = np.array([0.01, 0.01, 0.01, 0.2, 0.2, 0.2]) + REWARD_THRESHOLD: np.ndarray = np.array([0.002, 0.002, 0.006, np.deg2rad(2), np.deg2rad(2), np.deg2rad(2)]) APPLY_GRIPPER_PENALTY = False ACTION_SCALE = np.array([0.02, 0.1, 1]) RANDOM_RESET = False @@ -42,7 +42,7 @@ class PegEnvConfig(DefaultEnvConfig): [ TARGET_POSE[0] + RANDOM_XY_RANGE, TARGET_POSE[1] + RANDOM_XY_RANGE, - TARGET_POSE[2] + 0.1, + TARGET_POSE[2] + 0.05, TARGET_POSE[3] + 0.01, TARGET_POSE[4] + 0.01, TARGET_POSE[5] + RANDOM_RZ_RANGE, diff --git a/serl_robot_infra/kuka_env/envs/relative_env.py b/serl_robot_infra/kuka_env/envs/relative_env.py index c8857e4d..bec6b120 100644 --- a/serl_robot_infra/kuka_env/envs/relative_env.py +++ b/serl_robot_infra/kuka_env/envs/relative_env.py @@ -40,9 +40,6 @@ def __init__(self, env: Env, include_relative_pose=True): def step(self, action: np.ndarray): # action is assumed to be (x, y, z, rx, ry, rz, gripper) # Transform action from end-effector frame to base frame - import time - print("sleeping for 2 seconds in step") - time.sleep(2) transformed_action = self.transform_action(action) obs, reward, done, truncated, info = 
self.env.step(transformed_action) From 8182483ad5eddfbb8286741fb4f594bc72c41004 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Fri, 7 Feb 2025 12:45:04 -0800 Subject: [PATCH 06/24] Removing the stale data_collector folder --- data_collector/LICENSE | 202 -------------- data_collector/data_collector/__init__.py | 0 .../data_collector/arduino-python.py | 41 --- data_collector/data_collector/config.json | 5 - data_collector/data_collector/create_pkl.py | 161 ------------ .../joint_cartesian_converter.py | 244 ----------------- .../data_collector/kuka_data_recorder.py | 248 ------------------ .../data_collector/parse_robot_log.py | 128 --------- data_collector/package.xml | 38 --- data_collector/resource/data_collector | 0 data_collector/setup.cfg | 4 - data_collector/setup.py | 27 -- 12 files changed, 1098 deletions(-) delete mode 100644 data_collector/LICENSE delete mode 100644 data_collector/data_collector/__init__.py delete mode 100644 data_collector/data_collector/arduino-python.py delete mode 100644 data_collector/data_collector/config.json delete mode 100644 data_collector/data_collector/create_pkl.py delete mode 100644 data_collector/data_collector/joint_cartesian_converter.py delete mode 100644 data_collector/data_collector/kuka_data_recorder.py delete mode 100644 data_collector/data_collector/parse_robot_log.py delete mode 100644 data_collector/package.xml delete mode 100644 data_collector/resource/data_collector delete mode 100644 data_collector/setup.cfg delete mode 100644 data_collector/setup.py diff --git a/data_collector/LICENSE b/data_collector/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/data_collector/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/data_collector/data_collector/__init__.py b/data_collector/data_collector/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/data_collector/data_collector/arduino-python.py b/data_collector/data_collector/arduino-python.py deleted file mode 100644 index 9947c7db..00000000 --- a/data_collector/data_collector/arduino-python.py +++ /dev/null @@ -1,41 +0,0 @@ -###################### -import serial -import time -import subprocess -import signal -import sys - -# Use the correct serial port based on what you found, either '/dev/ttyACM0' or '/dev/ttyUSB0' -arduino = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=.1) - -def send_command(command): - arduino.write(command.encode()) # Send the command to the Arduino - time.sleep(0.05) # Small delay to ensure the command is sent properly - -def start_two_cameras_script(): - # Launch the two_cameras.py script - subprocess.Popen(["python", "/home/lm-2023/jeon_team_ws/control_kuka/lbr_fri_ws-dev-socket/data_collection/data_collection/two_cameras_live.py"]) - -def signal_handler(sig, frame): - print("Signal received, sending 'X' to microcontroller...") - send_command('X') # Send the 'X' command when the process is terminated - sys.exit(0) # Exit the program - -# Register the signal handler for termination signals -signal.signal(signal.SIGINT, signal_handler) # Handles Ctrl+C -signal.signal(signal.SIGTERM, signal_handler) # Handles termination signal - -# Example usage -while True: - command = input("Enter command (X: Press Both Buttons, Y: Release Both Buttons and Start two_cameras.py): ").strip().upper() - - if command == 'X': - send_command('X') # Press both buttonsy - - elif command == 'Y': - send_command('Y') # Release both buttons - #start_two_cameras_script() # Start the two_cameras.py script - - else: - print("Invalid command. 
Please enter X or Y.") - diff --git a/data_collector/data_collector/config.json b/data_collector/data_collector/config.json deleted file mode 100644 index 6f95cbb0..00000000 --- a/data_collector/data_collector/config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "log_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay", - "processed_csv_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay/processed", - "replay_csv_name": "2013-01-01_01-10-52.csv" -} \ No newline at end of file diff --git a/data_collector/data_collector/create_pkl.py b/data_collector/data_collector/create_pkl.py deleted file mode 100644 index 05b211c8..00000000 --- a/data_collector/data_collector/create_pkl.py +++ /dev/null @@ -1,161 +0,0 @@ -## Format of the pkl file -# The pkl file is a dictionary with the following keys: -# dict( -# observations=obs, -# actions=actions, -# next_observations=next_obs, -# rewards=rew, -# masks=1.0 - done, -# dones=done, -# ) -# where -# observations : { -# "state": gym.spaces.Box(-np.inf, np.inf, shape=(7 + 6 + 3 + 3,)), -# "wrist_1": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), -# "wrist_2": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), -# } -# actions = gym.spaces.Box( -# np.ones((7,), dtype=np.float32) * -1, -# np.ones((7,), dtype=np.float32), -# ) -# state : concatenation of tcp_poses (7), tcp_velocities (6), tcp_forces (3), tcp_torques (3) -# image names are in the format {timestamp}_A.png and {timestamp}_B.png - -import numpy as np -import pandas as pd -import pickle -import os -import json -from PIL import Image - -TARGET_POSE = np.array([ - 0.5906439143742067, - 0.07771711953459341, - 0.337835826958042, - 3.1099675, - 0.0146619, - -0.0078615, -]) - -REWARD_THRESHOLD = np.array([0.01, 0.01, 0.01, 0.2, 0.2, 0.2]) - -def convertToPickle(recorded_data_file_path, imageA_folder_path, imageB_folder_path): - - recorded_data = pd.read_csv(recorded_data_file_path) - - 
transitions = [] - - observations = [] - actions = [] - next_observations = [] - rewards = [] - masks = [] - dones = [] - - done = 0.0 - - for i in range(len(recorded_data) - 1): - - current_row = recorded_data.iloc[i] - next_row = recorded_data.iloc[i + 1] - - curr_time = current_row["timestamp"] - next_time = next_row["timestamp"] - - img_size = (128, 128) - - curr_wrist_1_image = Image.open(os.path.join(imageA_folder_path, f"{curr_time}_A.jpg")).resize(img_size) - curr_wrist_2_image = Image.open(os.path.join(imageB_folder_path, f"{curr_time}_B.jpg")).resize(img_size) - - next_wrist_1_image = Image.open(os.path.join(imageA_folder_path, f"{next_time}_A.jpg")).resize(img_size) - next_wrist_2_image = Image.open(os.path.join(imageB_folder_path, f"{next_time}_B.jpg")).resize(img_size) - - - # print(np.array(curr_wrist_1_image).shape) - observation = { - "state": np.concatenate([ - current_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values, - current_row[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, - current_row[['Fx', 'Fy', 'Fz']].values, - current_row[['Tx', 'Ty', 'Tz']].values - ]), - "wrist_1": np.array(curr_wrist_1_image), - "wrist_2": np.array(curr_wrist_2_image) - } - - action = next_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - current_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - - next_observation = { - "state": np.concatenate([ - next_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values, - next_row[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, - next_row[['Fx', 'Fy', 'Fz']].values, - next_row[['Tx', 'Ty', 'Tz']].values - ]), - "wrist_1": np.array(next_wrist_1_image), - "wrist_2": np.array(next_wrist_2_image) - } - - if done != 1.0: - tcp_pose = current_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values - if np.all(np.abs(tcp_pose - TARGET_POSE) <= REWARD_THRESHOLD): - reward = 1.0 - done = 1.0 - else: - reward = 0.0 - else: - reward = 0.0 - - - data_dict = { - "observations": observation, - "actions": action, - "next_observations": 
next_observation, - "rewards": reward, - "masks": 1.0 - done, - "dones": done - } - - transitions.append(data_dict) - - # observations.append(observation) - # actions.append(action) - # next_observations.append(next_observation) - # rewards.append(reward) - # masks.append(1.0 - done) - # dones.append(done) - - curr_wrist_1_image.close() - curr_wrist_2_image.close() - next_wrist_1_image.close() - next_wrist_2_image.close() - - return transitions - -def main(): - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - processed_folder_path = config['processed_csv_folder_path'] - output_pkl_file_path = os.path.join(processed_folder_path, 'recorded_data.pkl') - all_transitions = [] - - for folder_name in os.listdir(processed_folder_path): - folder_path = os.path.join(processed_folder_path, folder_name) - if os.path.isdir(folder_path): - recorded_data_file_path = os.path.join(folder_path, 'recorded_data.csv') - imageA_folder_path = os.path.join(folder_path, 'imageA') - imageB_folder_path = os.path.join(folder_path, 'imageB') - if os.path.exists(recorded_data_file_path) and os.path.exists(imageA_folder_path) and os.path.exists(imageB_folder_path): - transitions = convertToPickle(recorded_data_file_path, imageA_folder_path, imageB_folder_path) - all_transitions.extend(transitions) - - with open(output_pkl_file_path, 'wb') as f: - pickle.dump(all_transitions, f) - - print(f"Data saved to {output_pkl_file_path}") - - -if __name__ == "__main__": - main() - diff --git a/data_collector/data_collector/joint_cartesian_converter.py b/data_collector/data_collector/joint_cartesian_converter.py deleted file mode 100644 index 0c2bd7f9..00000000 --- a/data_collector/data_collector/joint_cartesian_converter.py +++ /dev/null @@ -1,244 +0,0 @@ -import time -import sys - -env = "serl" -print("Ensure to change anaconda3 or miniconda3 and change your environment name") -print("Conda Environment name is: ", env) -sys.path.append( - "/home/" + "omey" + 
"/anaconda3/envs/" + env + "/lib/python3.10/site-packages" -) -import rclpy -import rclpy.duration -from rclpy.node import Node -from rclpy.callback_groups import MutuallyExclusiveCallbackGroup -from rcl_interfaces.msg import ParameterValue -from rcl_interfaces.srv import GetParameters - -import numpy as np -import pandas as pd -import json -import os -from scipy.spatial.transform import Rotation as R - -from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped -import rclpy.task -from sensor_msgs.msg import JointState - -from moveit_msgs.msg import ( - RobotState, - MoveItErrorCodes, -) -from moveit_msgs.srv import GetPositionFK -import optas - -import os - -MAIN_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) - - -class JointCartesianConverter(Node): - - move_group_name_ = "arm" - namespace_ = "lbr" - - joint_state_topic_ = "joint_states" - lbr_state_topic_ = "state" - pose_topic_ = "pose" - plan_srv_name_ = "plan_kinematic_path" - ik_srv_name_ = "compute_ik" - fk_srv_name_ = "compute_fk" - execute_action_name_ = "execute_trajectory" - fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" - - base_ = "link_0" - end_effector_ = "link_ee" - - def __init__(self, joint_data_file_path): - super().__init__("joint_cartesian_converter") - self._init_jacobian() - - self.joint_data_file_path = joint_data_file_path - self.fk_client_callback = MutuallyExclusiveCallbackGroup() - self.fk_client_ = self.create_client( - GetPositionFK, - f"{self.namespace_}/{self.fk_srv_name_}", - callback_group=self.fk_client_callback, - ) - if not self.fk_client_.wait_for_service(timeout_sec=5.0): - self.get_logger().error("FK service not available.") - exit(1) - - self.joint_data = pd.read_csv(self.joint_data_file_path) - - def _retrieve_parameter(self, service: str, parameter_name: str) -> ParameterValue: - parameter_client = self.create_client(GetParameters, service) - while not 
parameter_client.wait_for_service(timeout_sec=1.0): - if not rclpy.ok(): - self.get_logger().error( - "Interrupted while waiting for the service. Exiting." - ) - return None - self.get_logger().info(f"Waiting for '{service}' service...") - request = GetParameters.Request(names=[parameter_name]) - future = parameter_client.call_async(request=request) - rclpy.spin_until_future_complete(self, future) - if future.result() is None: - self.get_logger().error(f"Failed to retrieve '{parameter_name}'.") - return None - return future.result().values[0] - - def _init_jacobian(self): - print("Initializing Jacobian") - - self._robot_description = self._retrieve_parameter( - "/lbr/robot_state_publisher/get_parameters", "robot_description" - ).string_value - self._robot = optas.RobotModel( - urdf_string=self._robot_description, time_derivs=[0, 1] - ) - self.jacobian_func = self._robot.get_link_geometric_jacobian_function( - link=self.end_effector_, base_link=self.base_, numpy_output=True - ) - return - - def quat_2_euler(self, quat): - """calculates and returns: yaw, pitch, roll from given quaternion""" - return R.from_quat(quat).as_euler("xyz") - - def convert(self): - - # Convert each row in the dataframe to cartesian coordinates and save in the dataframe - for index, row in self.joint_data.iterrows(): - - joint_state = JointState() - joint_state.name = ["A1", "A2", "A3", "A4", "A5", "A6", "A7"] - joint_state.position = [ - row["P1"], - row["P2"], - row["P3"], - row["P4"], - row["P5"], - row["P6"], - row["P7"], - ] - joint_state.velocity = [ - row["V1"], - row["V2"], - row["V3"], - row["V4"], - row["V5"], - row["V6"], - row["V7"], - ] - joint_state.effort = [ - row["E1"], - row["E2"], - row["E3"], - row["E4"], - row["E5"], - row["E6"], - row["E7"], - ] - joint_state.header.stamp = self.get_clock().now().to_msg() - joint_state.header.frame_id = f"{self.namespace_}/{self.base_}" - current_robot_state = RobotState() - current_robot_state.joint_state = joint_state - - request = 
GetPositionFK.Request() - - request.header.frame_id = f"{self.namespace_}/{self.base_}" - request.header.stamp = self.get_clock().now().to_msg() - - request.fk_link_names.append(self.end_effector_) - request.robot_state = current_robot_state - - future = self.fk_client_.call_async(request) - - rclpy.spin_until_future_complete(self, future) - if future.result() is None: - self.get_logger().error("Failed to get FK solution") - return None - - response = future.result() - if response.error_code.val != MoveItErrorCodes.SUCCESS: - self.get_logger().error( - f"Failed to get FK solution: {response.error_code.val}" - ) - return None - - pose = response.pose_stamped[0].pose - euler_pose = self.quat_2_euler( - np.array( - [ - pose.orientation.x, - pose.orientation.y, - pose.orientation.z, - pose.orientation.w, - ] - ) - ) - self.joint_data.at[index, "X"] = pose.position.x - self.joint_data.at[index, "Y"] = pose.position.y - self.joint_data.at[index, "Z"] = pose.position.z - self.joint_data.at[index, "QX"] = pose.orientation.x - self.joint_data.at[index, "QY"] = pose.orientation.y - self.joint_data.at[index, "QZ"] = pose.orientation.z - self.joint_data.at[index, "QW"] = pose.orientation.w - self.joint_data.at[index, "Roll"] = euler_pose[0] - self.joint_data.at[index, "Pitch"] = euler_pose[1] - self.joint_data.at[index, "Yaw"] = euler_pose[2] - - # Compute the end_effector velocity - current_joint_angles = [ - joint_state.position[0], - joint_state.position[1], - joint_state.position[2], - joint_state.position[3], - joint_state.position[4], - joint_state.position[5], - joint_state.position[6], - ] - current_jacobian = self.jacobian_func(current_joint_angles) - - cart_vel = np.dot(current_jacobian, joint_state.velocity) - cart_wrench = np.matmul( - np.linalg.pinv(np.transpose(current_jacobian)), joint_state.effort - ) - - self.joint_data.at[index, "Vx"] = cart_vel[0] - self.joint_data.at[index, "Vy"] = cart_vel[1] - self.joint_data.at[index, "Vz"] = cart_vel[2] - 
self.joint_data.at[index, "Va"] = cart_vel[3] - self.joint_data.at[index, "Vb"] = cart_vel[4] - self.joint_data.at[index, "Vc"] = cart_vel[5] - - # # Overriding Wrench Vals - # self.joint_data.at[index, "Fx"] = cart_wrench[0] - # self.joint_data.at[index, "Fy"] = cart_wrench[1] - # self.joint_data.at[index, "Fz"] = cart_wrench[2] - # self.joint_data.at[index, "Tx"] = cart_wrench[3] - # self.joint_data.at[index, "Ty"] = cart_wrench[4] - # self.joint_data.at[index, "Tz"] = cart_wrench[5] - - self.joint_data.to_csv(self.joint_data_file_path, index=False) - - -def main(args=None): - rclpy.init(args=args) - - # with open('config.json', 'r') as config_file: - # config = json.load(config_file) - - # processed_folder_path = config['processed_csv_folder_path'] - # csv_name = config['replay_csv_name'] - csv_name = "2013-01-17_00-42-46.csv" - processed_folder_path = os.path.join( - MAIN_DIR, "data_collector", "data_collector", "processed_data" - ) - csv_folder = os.path.join(processed_folder_path, os.path.splitext(csv_name)[0]) - joint_data_file_path = os.path.join(csv_folder, "recorded_data.csv") - - joint_cartesian_converter = JointCartesianConverter(joint_data_file_path) - joint_cartesian_converter.convert() - - rclpy.shutdown() diff --git a/data_collector/data_collector/kuka_data_recorder.py b/data_collector/data_collector/kuka_data_recorder.py deleted file mode 100644 index 923525af..00000000 --- a/data_collector/data_collector/kuka_data_recorder.py +++ /dev/null @@ -1,248 +0,0 @@ -import rclpy -import os -import pandas as pd -import csv -import math -import json -from cv_bridge import CvBridge -import cv2 -from rclpy.node import Node -from rclpy.executors import MultiThreadedExecutor -from rclpy.action import ActionClient -from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint -from control_msgs.action import FollowJointTrajectory -from sensor_msgs.msg import Image, JointState -from geometry_msgs.msg import WrenchStamped -from message_filters import 
Subscriber, ApproximateTimeSynchronizer - -MAIN_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) - - -class KukaDataRecorder(Node): - def __init__(self, data_folder, joint_states_file): - super().__init__("kuka_data_recorder") - - self.data_folder = data_folder - self.joint_states_file = joint_states_file - self.save_data_folder = os.path.join( - self.data_folder, os.path.splitext(self.joint_states_file)[0] - ) - os.makedirs(self.save_data_folder, exist_ok=True) - - # Create two folders for the images - self.imageA_folder = os.path.join(self.save_data_folder, "imageA") - self.imageB_folder = os.path.join(self.save_data_folder, "imageB") - os.makedirs(self.imageA_folder, exist_ok=True) - os.makedirs(self.imageB_folder, exist_ok=True) - - # topic_callback_group = MutuallyExclusiveCallbackGroup() - # client_callback_group = MutuallyExclusiveCallbackGroup() - - # Initialize everything for trajectory replaying - self.feedback = None - self._action_client = ActionClient( - self, - FollowJointTrajectory, - "/lbr/joint_trajectory_controller/follow_joint_trajectory", - ) - self.joint_names = ["A1", "A2", "A3", "A4", "A5", "A6", "A7"] - self.joint_trajectories = self.readJoinStatesFromCsv( - os.path.join(self.data_folder, self.joint_states_file) - ) - # This can be done through goal sending - self.initializeRobotPose() - - # Create a synchronized callback for all the information - self.imageA_topic = "/camera1/camera1/color/image_raw" - self.imageB_topic = "/camera2/camera2/color/image_raw" - self.joint_state_topic = "/lbr/joint_states" - self.wrench_topic = "/lbr/force_torque_broadcaster/wrench" - - self.imageA_sub = Subscriber(self, Image, self.imageA_topic) - self.imageB_sub = Subscriber(self, Image, self.imageB_topic) - self.joint_state_sub = Subscriber(self, JointState, self.joint_state_topic) - self.wrench_sub = Subscriber(self, WrenchStamped, self.wrench_topic) - - self.synchronizer = ApproximateTimeSynchronizer( - [self.imageA_sub, 
self.imageB_sub, self.joint_state_sub, self.wrench_sub], - queue_size=100, - slop=0.05, # NOTE: 0.05 gives less choppy transitions in images. - ) - - self.cvbridge = CvBridge() - - self.synchronizer.registerCallback(self.recordSyncData) - self.get_logger().info("Data Synchronizer Node Initialized") - - self.dataframe = [] - - # This needs to be done through moveit planner since the points need to be interpolated to a trajectory - self.executeTrajectory() - - def recordSyncData(self, imageA_msg, imageB_msg, joint_state_msg, wrench_msg): - # The timestamp of imageA_msg will be used to name the columns - timestamp = f"{imageA_msg.header.stamp.sec}_{imageA_msg.header.stamp.nanosec}" - - self.get_logger().info(f"Recording data at timestamp {timestamp}") - - # Save the joint state - data_row = {"timestamp": timestamp} - - for i in range(7): - joint_id = joint_state_msg.name[i][-1] - data_row[f"P{joint_id}"] = joint_state_msg.position[i] - data_row[f"V{joint_id}"] = joint_state_msg.velocity[i] - data_row[f"E{joint_id}"] = joint_state_msg.effort[i] - - # Save the wrench - data_row["Fx"] = wrench_msg.wrench.force.x - data_row["Fy"] = wrench_msg.wrench.force.y - data_row["Fz"] = wrench_msg.wrench.force.z - data_row["Tx"] = wrench_msg.wrench.torque.x - data_row["Ty"] = wrench_msg.wrench.torque.y - data_row["Tz"] = wrench_msg.wrench.torque.z - - self.dataframe.append(data_row) - - # Save the images - imageA_filename = os.path.join(self.imageA_folder, f"{timestamp}_A.jpg") - imageB_filename = os.path.join(self.imageB_folder, f"{timestamp}_B.jpg") - image_A = self.cvbridge.imgmsg_to_cv2(imageA_msg, desired_encoding="bgr8") - cv2.imwrite(imageA_filename, image_A) - image_B = self.cvbridge.imgmsg_to_cv2(imageB_msg, desired_encoding="bgr8") - cv2.imwrite(imageB_filename, image_B) - - def readJoinStatesFromCsv(self, file_path): - joint_states = [] - with open(file_path, mode="r") as file: - csv_reader = csv.DictReader(file) - for row in csv_reader: - joint_states.append( - [ - 
math.radians(float(row["A1"])), - math.radians(float(row["A2"])), - math.radians(float(row["A3"])), - math.radians(float(row["A4"])), - math.radians(float(row["A5"])), - math.radians(float(row["A6"])), - math.radians(float(row["A7"])), - ] - ) - - return joint_states - - def initializeRobotPose(self): - # The robot needs to be initialized to the first position of the trajectory - # to ensure that there is no sudden movement when the trajectory is replayed - goal_msg = FollowJointTrajectory.Goal() - trajectory_msg = JointTrajectory() - trajectory_msg.joint_names = self.joint_names - - self.get_logger().info(f"Processing trajectory point init") - - point = JointTrajectoryPoint() - point.positions = self.joint_trajectories[0] - point.time_from_start.sec = 4 # Set the seconds part to 0 - - trajectory_msg.points.append(point) - goal_msg.trajectory = trajectory_msg - self._action_client.wait_for_server() - - send_goal_future = self._action_client.send_goal_async( - goal_msg, feedback_callback=self.feedback_callback - ) - - rclpy.spin_until_future_complete(self, send_goal_future) - - goal_handle = send_goal_future.result() - - if not goal_handle.accepted: - self.get_logger().info(f"Goal for init point was rejected") - return - - self.get_logger().info(f"Goal for init point was accepted") - get_result_future = goal_handle.get_result_async() - rclpy.spin_until_future_complete(self, get_result_future) - result = get_result_future.result().result - self.get_logger().info(f"Result : {result}, Initial position reached") - print("Press Enter to continue ...") - input() - - def executeTrajectory(self): - goal_msg = FollowJointTrajectory.Goal() - trajectory_msg = JointTrajectory() - trajectory_msg.joint_names = self.joint_names - for i, joint_values in enumerate(self.joint_trajectories): - point = JointTrajectoryPoint() - point.positions = joint_values - point.time_from_start = rclpy.duration.Duration( - seconds=(i + 1) * 0.1 - ).to_msg() - trajectory_msg.points.append(point) - 
goal_msg.trajectory = trajectory_msg - self._action_client.wait_for_server() - - send_goal_future = self._action_client.send_goal_async( - goal_msg, feedback_callback=self.feedback_callback - ) - - send_goal_future.add_done_callback(self.goal_response_callback) - - def goal_response_callback(self, future): - goal_handle = future.result() - if not goal_handle.accepted: - self.get_logger().info(f"Goal for trajectory was rejected") - return - self.get_logger().info(f"Goal for trajectory was accepted") - get_result_future = goal_handle.get_result_async() - get_result_future.add_done_callback(self.result_callback) - - def result_callback(self, future): - result = future.result() - if result.result.error_code == FollowJointTrajectory.Result.SUCCESSFUL: - self.get_logger().info("WOHOOO") - self.shutdown_node() - else: - self.get_logger().info("OHH NO") - - def feedback_callback(self, feedback_msg): - self.feedback = feedback_msg.feedback - - def shutdown_node(self): - self.get_logger().info("Shutting down node now") - self.save_data() - self.destroy_node() - rclpy.shutdown() - - def save_data(self): - df = pd.DataFrame(self.dataframe) - save_path = os.path.join(self.save_data_folder, "recorded_data.csv") - df.to_csv(save_path, index=False) - self.get_logger().info("Saved data!") - - -def main(args=None): - - data_folder = os.path.join( - MAIN_DIR, "data_collector", "data_collector", "processed_data" - ) - joint_states_file = "2013-01-17_00-42-46.csv" - rclpy.init(args=args) - executor = MultiThreadedExecutor() - node = KukaDataRecorder( - data_folder=data_folder, joint_states_file=joint_states_file - ) - executor.add_node(node) - try: - - executor.spin() - except Exception as e: - node.get_logger().error(f"An error occurred: {e}") - finally: - if rclpy.ok(): - node.destroy_node() - rclpy.shutdown() - - -if __name__ == "__main__": - main() diff --git a/data_collector/data_collector/parse_robot_log.py b/data_collector/data_collector/parse_robot_log.py deleted file mode 
100644 index bdc17506..00000000 --- a/data_collector/data_collector/parse_robot_log.py +++ /dev/null @@ -1,128 +0,0 @@ -import pandas as pd -import json -import re -import os -import glob - -MAIN_DIR = os.path.dirname(os.path.realpath(__file__)) - - -def parse_robot_data(file_path): - """ - Parses the robot data file, renames joint columns to J1-J7, - extracts and renames time, force, and torque values. - - Parameters: - file_path (str): Path to the data file. - - Returns: - pd.DataFrame: A DataFrame containing time, renamed joint values, - and renamed force/torque data. - """ - # Step 1: Read the header line starting with '%' - with open(file_path, "r") as file: - for line in file: - if line.startswith("%"): - header = line.strip().lstrip("%").strip().split() - break - else: - raise ValueError("No header line starting with '%' found in the file.") - - # Step 2: Load the data into a DataFrame - df = pd.read_csv( - file_path, - delim_whitespace=True, # Assuming the data is space-separated - comment="%", # Skip any lines starting with '%' - names=header, # Use the extracted header - skiprows=1, # Skip the header line - ) - - # Step 3: Identify and rename joint columns - joint_pattern = re.compile(r"axisQMsr_LBR_iiwa_14_R820_1\[(\d+)\]") - joint_columns = [col for col in header if joint_pattern.match(col)] - - if len(joint_columns) != 7: - raise ValueError(f"Expected 7 joint columns, found {len(joint_columns)}.") - - # Sort joint columns based on their index and rename them to J1-J7 - joint_columns_sorted = sorted( - joint_columns, key=lambda x: int(joint_pattern.match(x).group(1)) - ) - joint_rename_map = { - col: f"A{idx+1}" for idx, col in enumerate(joint_columns_sorted) - } - df.rename(columns=joint_rename_map, inplace=True) - - # Step 4: Extract and combine time columns - if "ZeitInSec" not in df.columns or "ZeitInNanoSec" not in df.columns: - raise ValueError( - "Required time columns 'ZeitInSec' and/or 'ZeitInNanoSec' are missing." 
- ) - - # Combine 'ZeitInSec' and 'ZeitInNanoSec' into a single 'time' column in seconds - df["time"] = df["ZeitInSec"] + df["ZeitInNanoSec"] * 1e-9 - - # Drop the original time columns as they are now combined - df.drop(columns=["ZeitInSec", "ZeitInNanoSec"], inplace=True) - - # Step 5: Extract and rename force and torque columns - force_columns = {"cartForce1_X": "Fx", "cartForce1_Y": "Fy", "cartForce1_Z": "Fz"} - torque_columns = { - "cartTorque1_TauX": "Tx", - "cartTorque1_TauY": "Ty", - "cartTorque1_TauZ": "Tz", - } - - # Verify that the required force and torque columns exist - missing_force_columns = [ - col for col in force_columns.keys() if col not in df.columns - ] - missing_torque_columns = [ - col for col in torque_columns.keys() if col not in df.columns - ] - missing_columns = missing_force_columns + missing_torque_columns - if missing_columns: - raise ValueError( - f"The following required columns are missing in the data: {missing_columns}" - ) - - # Rename force and torque columns - df.rename(columns=force_columns, inplace=True) - df.rename(columns=torque_columns, inplace=True) - - # Step 6: Select the relevant columns - selected_columns = ( - ["time"] - + list(joint_rename_map.values()) - + list(force_columns.values()) - + list(torque_columns.values()) - ) - df_selected = df[selected_columns] - - return df_selected - - -def main(): - - # with open('config.json', 'r') as config_file: - # config = json.load(config_file) - - # log_folder_path = config['log_folder_path'] - # save_df_path = config['processed_csv_folder_path'] - log_folder_path = os.path.join(MAIN_DIR, "11_dec_logs") - save_df_path = os.path.join(MAIN_DIR, "processed_data") - os.makedirs(save_df_path, exist_ok=True) - - log_files = glob.glob(os.path.join(log_folder_path, "*.log")) - - for file_path in log_files: - print(file_path) - returned_df = parse_robot_data(file_path=file_path) - save_file_path = os.path.join( - save_df_path, os.path.splitext(os.path.basename(file_path))[0] + ".csv" 
- ) - returned_df.to_csv(save_file_path, index=False) - - -if __name__ == "__main__": - main() diff --git a/data_collector/package.xml b/data_collector/package.xml deleted file mode 100644 index 494d0fa0..00000000 --- a/data_collector/package.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - data_collector - 0.0.0 - TODO: Package description - nisara - Apache-2.0 - - ament_copyright - ament_flake8 - ament_pep257 - python3-pytest - - rclpy - sensor_msgs - trajectory_msgs - control_msgs - geometry_msgs - message_filters - numpy - pandas - cv2 - cv_bridge - - - - ament_python - - diff --git a/data_collector/resource/data_collector b/data_collector/resource/data_collector deleted file mode 100644 index e69de29b..00000000 diff --git a/data_collector/setup.cfg b/data_collector/setup.cfg deleted file mode 100644 index f0a7afc8..00000000 --- a/data_collector/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/data_collector -[install] -install_scripts=$base/lib/data_collector diff --git a/data_collector/setup.py b/data_collector/setup.py deleted file mode 100644 index 43107e8c..00000000 --- a/data_collector/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -from setuptools import find_packages, setup - -package_name = 'data_collector' - -setup( - name=package_name, - version='0.0.0', - packages=find_packages(exclude=['test']), - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools', 'pandas', 'numpy'], - zip_safe=True, - maintainer='nisara', - maintainer_email='sarawgi.nikita@gmail.com', - description='TODO: Package description', - license='Apache-2.0', - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - 'kuka_data_recorder = data_collector.kuka_data_recorder:main', - 'joint_cartesian_converter = data_collector.joint_cartesian_converter:main' - ], - }, -) From bbaffae892d4bf99bcf6998e61655da060a2d90f Mon Sep 17 00:00:00 2001 
From: Nikita Sarawgi Date: Fri, 7 Feb 2025 12:45:12 -0800 Subject: [PATCH 07/24] Squashed 'data_collector/' content from commit 7d3b98f git-subtree-dir: data_collector git-subtree-split: 7d3b98f7341e655cf167ef05889314eb7a6e8188 --- data_collector/LICENSE | 202 ++++++++++++++ data_collector/data_collector/__init__.py | 0 .../data_collector/arduino-python.py | 41 +++ data_collector/data_collector/config.json | 17 ++ data_collector/data_collector/create_pkl.py | 231 ++++++++++++++++ .../joint_cartesian_converter.py | 127 +++++++++ .../data_collector/kuka_data_recorder.py | 249 ++++++++++++++++++ .../data_collector/parse_robot_log.py | 110 ++++++++ .../data_collector/transformations.py | 37 +++ data_collector/package.xml | 38 +++ data_collector/setup.cfg | 4 + data_collector/setup.py | 27 ++ 12 files changed, 1083 insertions(+) create mode 100644 data_collector/LICENSE create mode 100644 data_collector/data_collector/__init__.py create mode 100644 data_collector/data_collector/arduino-python.py create mode 100644 data_collector/data_collector/config.json create mode 100644 data_collector/data_collector/create_pkl.py create mode 100644 data_collector/data_collector/joint_cartesian_converter.py create mode 100644 data_collector/data_collector/kuka_data_recorder.py create mode 100644 data_collector/data_collector/parse_robot_log.py create mode 100644 data_collector/data_collector/transformations.py create mode 100644 data_collector/package.xml create mode 100644 data_collector/setup.cfg create mode 100644 data_collector/setup.py diff --git a/data_collector/LICENSE b/data_collector/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/data_collector/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/data_collector/data_collector/__init__.py b/data_collector/data_collector/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/data_collector/data_collector/arduino-python.py b/data_collector/data_collector/arduino-python.py new file mode 100644 index 00000000..9947c7db --- /dev/null +++ b/data_collector/data_collector/arduino-python.py @@ -0,0 +1,41 @@ +###################### +import serial +import time +import subprocess +import signal +import sys + +# Use the correct serial port based on what you found, either '/dev/ttyACM0' or '/dev/ttyUSB0' +arduino = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=.1) + +def send_command(command): + arduino.write(command.encode()) # Send the command to the Arduino + time.sleep(0.05) # Small delay to ensure the command is sent properly + +def start_two_cameras_script(): + # Launch the two_cameras.py script + subprocess.Popen(["python", "/home/lm-2023/jeon_team_ws/control_kuka/lbr_fri_ws-dev-socket/data_collection/data_collection/two_cameras_live.py"]) + +def signal_handler(sig, frame): + print("Signal received, sending 'X' to microcontroller...") + send_command('X') # Send the 'X' command when the process is terminated + sys.exit(0) # Exit the program + +# Register the signal handler for termination signals +signal.signal(signal.SIGINT, signal_handler) # Handles Ctrl+C +signal.signal(signal.SIGTERM, signal_handler) # Handles termination signal + +# Example usage +while True: + command = input("Enter command (X: Press Both Buttons, Y: Release Both Buttons and Start two_cameras.py): ").strip().upper() + + if command == 'X': + send_command('X') # Press both buttonsy + + elif command == 'Y': + send_command('Y') # Release both buttons + #start_two_cameras_script() # Start the two_cameras.py script + + else: + print("Invalid command. 
Please enter X or Y.") + diff --git a/data_collector/data_collector/config.json b/data_collector/data_collector/config.json new file mode 100644 index 00000000..3c828d65 --- /dev/null +++ b/data_collector/data_collector/config.json @@ -0,0 +1,17 @@ +{ + "log_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay", + "processed_csv_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay/processed", + "replay_csv_name": "2013-01-01_01-10-52.csv", + + "ACTION_SCALE": [0.02, 0.1, 1], + "TARGET_POSE": [ + 0.5906439143742067, + 0.07771711953459341, + 0.337835826958042, + 3.1099675, + 0.0146619, + -0.0078615 + ], + "REWARD_THRESHOLD": [0.01, 0.01, 0.01, 0.2, 0.2, 0.2] + +} \ No newline at end of file diff --git a/data_collector/data_collector/create_pkl.py b/data_collector/data_collector/create_pkl.py new file mode 100644 index 00000000..d383215b --- /dev/null +++ b/data_collector/data_collector/create_pkl.py @@ -0,0 +1,231 @@ +''' +FORMAT OF THE CREATED PKL FILE: + The pkl file is a dictionary with the following keys: + dict( + observations=obs, + actions=actions, + next_observations=next_obs, + rewards=rew, + masks=1.0 - done, + dones=done, + ) + where + observations : { + "state": gym.spaces.Box(-np.inf, np.inf, shape=(7 + 6 + 3 + 3,)), + "wrist_1": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), + "wrist_2": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), + } + actions = gym.spaces.Box( + np.ones((7,), dtype=np.float32) * -1, + np.ones((7,), dtype=np.float32), + ) + The action space's last element is actually for the gripper, but we are not using it. 
+ It should be x,y,z,roll,pitch,yaw + state : concatenation of tcp_poses (7), tcp_velocities (6), tcp_forces (3), tcp_torques (3) + image names are in the format {timestamp}_A.png and {timestamp}_B.png +''' + + +import numpy as np +import pandas as pd +import pickle +import os +import json +from PIL import Image +from scipy.spatial.transform import Rotation as R +from transformations import construct_adjoint_matrix, construct_homogeneous_matrix + + + + +class SingleTrajectoryConverter: + def __init__(self, recorded_data_file_path, imageA_folder_path, imageB_folder_path, action_scale, target_pose, reward_threshold): + + self.recorded_data_file_path = recorded_data_file_path + self.imageA_folder_path = imageA_folder_path + self.imageB_folder_path = imageB_folder_path + self.action_scale = action_scale + self.target_pose = target_pose + self.reward_threshold = reward_threshold + + self.recorded_data = pd.read_csv(recorded_data_file_path) + + self.adjoint_matrix = None + self.T_r_o_inv = None + + self.transitions = [] + + def transform_observation(self, obs): + """ + Transform observations from spatial(base) frame into body(end-effector) frame + using the adjoint matrix + """ + adjoint_inv = np.linalg.inv(self.adjoint_matrix) + obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']] = adjoint_inv @ obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values + + T_b_o = construct_homogeneous_matrix(obs[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values) + T_b_r = self.T_r_o_inv @ T_b_o + + # Reconstruct transformed tcp_pose vector + p_b_r = T_b_r[:3, 3] + theta_b_r = R.from_matrix(T_b_r[:3, :3]).as_quat() + obs[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']] = np.concatenate((p_b_r, theta_b_r)) + + return obs + + def calculate_next_action(self, curr_pos, next_pos): + action = np.zeros(7) + action[:3] = (next_pos[:3] - curr_pos[:3]) / self.action_scale[0] + action[3:6] = (R.from_quat(next_pos[3:7]) * R.from_quat(curr_pos[3:7]).inv()).as_euler("xyz") / self.action_scale[1] + action[6] = 0.0 + return 
action + + def convertToPickle(self): + + observations = [] + actions = [] + next_observations = [] + rewards = [] + masks = [] + dones = [] + + done = 0.0 + + first_observation = self.recorded_data.iloc[0] + # Construct the adjoint matrix and T_r_o_inv using the first observation + # T_r_o_inv remains constant but the adjoint matrix updates every iteration + curr_pos = first_observation[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values + self.adjoint_matrix = construct_adjoint_matrix(curr_pos) + self.T_r_o_inv = np.linalg.inv(construct_homogeneous_matrix(curr_pos)) + next_transformed_obs = self.transform_observation(first_observation) + + + for i in range(1, len(self.recorded_data) - 1): + + current_row = self.recorded_data.iloc[i-1] + next_row = self.recorded_data.iloc[i] + + curr_time = current_row["timestamp"] + next_time = next_row["timestamp"] + + img_size = (128, 128) + + curr_wrist_1_image = Image.open(os.path.join(self.imageA_folder_path, f"{curr_time}_A.jpg")).resize(img_size) + curr_wrist_2_image = Image.open(os.path.join(self.imageB_folder_path, f"{curr_time}_B.jpg")).resize(img_size) + + next_wrist_1_image = Image.open(os.path.join(self.imageA_folder_path, f"{next_time}_A.jpg")).resize(img_size) + next_wrist_2_image = Image.open(os.path.join(self.imageB_folder_path, f"{next_time}_B.jpg")).resize(img_size) + + curr_obs_no_transform_pose = current_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values + next_obs_no_transform_pose = next_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values + action = self.calculate_next_action(curr_obs_no_transform_pose, next_obs_no_transform_pose) + + self.adjoint_matrix = construct_adjoint_matrix(curr_obs_no_transform_pose) + + curr_transformed_obs = next_transformed_obs + next_transformed_obs = self.transform_observation(next_row) + + curr_euler_angles = R.from_quat(curr_transformed_obs[['P4', 'P5', 'P6', 'P7']].values).as_euler('xyz') + next_euler_angles = R.from_quat(next_transformed_obs[['P4', 'P5', 'P6', 
'P7']].values).as_euler('xyz') + + observation = { + "state": np.concatenate([ + curr_transformed_obs[['P1', 'P2', 'P3']].values, + curr_euler_angles, + curr_transformed_obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, + curr_transformed_obs[['Fx', 'Fy', 'Fz']].values, + curr_transformed_obs[['Tx', 'Ty', 'Tz']].values + ]), + "wrist_1": np.array(curr_wrist_1_image), + "wrist_2": np.array(curr_wrist_2_image) + } + + next_observation = { + "state": np.concatenate([ + next_transformed_obs[['P1', 'P2', 'P3']].values, + next_euler_angles, + next_transformed_obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, + next_transformed_obs[['Fx', 'Fy', 'Fz']].values, + next_transformed_obs[['Tx', 'Ty', 'Tz']].values + ]), + "wrist_1": np.array(next_wrist_1_image), + "wrist_2": np.array(next_wrist_2_image) + } + + if done != 1.0: + tcp_pose = current_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values + tcp_pose[3:] = np.abs(tcp_pose[3:]) + if np.all(tcp_pose - self.target_pose <= self.reward_threshold): + reward = 1.0 + done = 1.0 + else: + reward = 0.0 + else: + reward = 0.0 + + + data_dict = { + "observations": observation, + "actions": action, + "next_observations": next_observation, + "rewards": reward, + "masks": 1.0 - done, + "dones": done + } + + self.transitions.append(data_dict) + + # observations.append(observation) + # actions.append(action) + # next_observations.append(next_observation) + # rewards.append(reward) + # masks.append(1.0 - done) + # dones.append(done) + + curr_wrist_1_image.close() + curr_wrist_2_image.close() + next_wrist_1_image.close() + next_wrist_2_image.close() + + return self.transitions + +def main(): + + with open('config.json', 'r') as config_file: + config = json.load(config_file) + processed_folder_path = config['processed_csv_folder_path'] + output_pkl_file_path = os.path.join(processed_folder_path, 'recorded_data.pkl') + + action_scale = np.array(config['ACTION_SCALE']) + target_pose = np.array(config['TARGET_POSE']) + reward_threshold = 
import json
import os

import numpy as np
import pandas as pd
from scipy.spatial.transform import Rotation as R

import rclpy
import rclpy.duration
import rclpy.task
from rclpy.node import Node
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup

from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped
from sensor_msgs.msg import JointState
from moveit_msgs.msg import (
    RobotState,
    MoveItErrorCodes,
)
from moveit_msgs.srv import GetPositionFK


class JointCartesianConverter(Node):
    """Augment a recorded joint-state CSV with Cartesian end-effector poses.

    For every row of ``recorded_data.csv`` a MoveIt forward-kinematics (FK)
    service call is made and the resulting pose (position, quaternion and
    xyz Euler angles) is written back into the same CSV file in place.
    """

    move_group_name_ = "arm"
    namespace_ = "lbr"

    joint_state_topic_ = "joint_states"
    lbr_state_topic_ = "state"
    pose_topic_ = "pose"
    plan_srv_name_ = "plan_kinematic_path"
    ik_srv_name_ = "compute_ik"
    fk_srv_name_ = "compute_fk"
    execute_action_name_ = "execute_trajectory"
    fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory"

    base_ = "link_0"
    end_effector_ = "link_ee"

    def __init__(self, joint_data_file_path):
        """Create the node and connect to the FK service.

        Args:
            joint_data_file_path: Path to the CSV holding columns P1..P7,
                V1..V7 and E1..E7 for each recorded sample.
        """
        super().__init__("joint_cartesian_converter")

        self.joint_data_file_path = joint_data_file_path
        self.fk_client_callback = MutuallyExclusiveCallbackGroup()
        self.fk_client_ = self.create_client(
            GetPositionFK,
            f"{self.namespace_}/{self.fk_srv_name_}",
            callback_group=self.fk_client_callback,
        )
        if not self.fk_client_.wait_for_service(timeout_sec=5.0):
            # Without FK there is nothing useful this tool can do.
            self.get_logger().error("FK service not available.")
            exit(1)

        self.joint_data = pd.read_csv(self.joint_data_file_path)

    def quat_2_euler(self, quat):
        """Return the xyz Euler angles (roll, pitch, yaw) of ``quat`` given
        as (x, y, z, w)."""
        return R.from_quat(quat).as_euler("xyz")

    def convert(self):
        """Call FK for every CSV row and store the pose columns in place.

        Returns ``None`` early (leaving the CSV untouched) if any FK call
        fails; otherwise rewrites ``self.joint_data_file_path`` with the
        added X/Y/Z, QX..QW and Roll/Pitch/Yaw columns.
        """
        for index, row in self.joint_data.iterrows():
            joint_state = JointState()
            # NOTE: the KUKA driver publishes joints in the order
            # A1, A2, A4, A3, A5, A6, A7 -- the name list mirrors that order
            # so positions P1..P7 line up with the right joint names.
            joint_state.name = ["A1", "A2", "A4", "A3", "A5", "A6", "A7"]
            joint_state.position = [row["P1"], row["P2"], row["P3"], row["P4"], row["P5"], row["P6"], row["P7"]]
            joint_state.velocity = [row["V1"], row["V2"], row["V3"], row["V4"], row["V5"], row["V6"], row["V7"]]
            joint_state.effort = [row["E1"], row["E2"], row["E3"], row["E4"], row["E5"], row["E6"], row["E7"]]
            joint_state.header.stamp = self.get_clock().now().to_msg()
            joint_state.header.frame_id = f"{self.namespace_}/{self.base_}"
            current_robot_state = RobotState()
            current_robot_state.joint_state = joint_state

            request = GetPositionFK.Request()
            request.header.frame_id = f"{self.namespace_}/{self.base_}"
            request.header.stamp = self.get_clock().now().to_msg()
            request.fk_link_names.append(self.end_effector_)
            request.robot_state = current_robot_state

            future = self.fk_client_.call_async(request)
            rclpy.spin_until_future_complete(self, future)
            if future.result() is None:
                self.get_logger().error("Failed to get FK solution")
                return None

            response = future.result()
            if response.error_code.val != MoveItErrorCodes.SUCCESS:
                self.get_logger().error(
                    f"Failed to get FK solution: {response.error_code.val}"
                )
                return None

            pose = response.pose_stamped[0].pose
            euler_pose = self.quat_2_euler(np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]))
            self.joint_data.at[index, "X"] = pose.position.x
            self.joint_data.at[index, "Y"] = pose.position.y
            self.joint_data.at[index, "Z"] = pose.position.z
            self.joint_data.at[index, "QX"] = pose.orientation.x
            self.joint_data.at[index, "QY"] = pose.orientation.y
            self.joint_data.at[index, "QZ"] = pose.orientation.z
            self.joint_data.at[index, "QW"] = pose.orientation.w
            self.joint_data.at[index, "Roll"] = euler_pose[0]
            self.joint_data.at[index, "Pitch"] = euler_pose[1]
            self.joint_data.at[index, "Yaw"] = euler_pose[2]

        # TODO: compute the end-effector velocity as well.
        self.joint_data.to_csv(self.joint_data_file_path, index=False)


def main(args=None):
    """Entry point: locate the recorded CSV from config.json and convert it."""
    rclpy.init(args=args)

    with open('config.json', 'r') as config_file:
        config = json.load(config_file)

    processed_folder_path = config['processed_csv_folder_path']
    csv_name = config['replay_csv_name']
    # Bug fix: the per-trajectory folder is derived from the CSV *name*; the
    # original code referenced ``csv_folder`` before assignment (NameError).
    csv_folder = os.path.join(processed_folder_path, os.path.splitext(csv_name)[0])
    joint_data_file_path = os.path.join(csv_folder, 'recorded_data.csv')

    joint_cartesian_converter = JointCartesianConverter(joint_data_file_path)
    joint_cartesian_converter.convert()

    joint_cartesian_converter.destroy_node()
    rclpy.shutdown()
import csv
import json
import math
import os

import cv2
import pandas as pd
import rclpy
from cv_bridge import CvBridge
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from rclpy.action import ActionClient
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from control_msgs.action import FollowJointTrajectory
from sensor_msgs.msg import Image, JointState
from geometry_msgs.msg import WrenchStamped
from message_filters import Subscriber, ApproximateTimeSynchronizer


class KukaDataRecorder(Node):
    """Replay a recorded joint trajectory on the KUKA arm while logging
    time-synchronized camera images, joint states and wrench measurements.

    The recorded samples are written to ``<data_folder>/<csv stem>/``:
    ``recorded_data.csv`` plus one JPEG per sample in ``imageA``/``imageB``.
    """

    def __init__(self, data_folder, joint_states_file):
        """Set up output folders, move to the trajectory start, then replay.

        Args:
            data_folder: Root folder containing ``processed/<csv>`` inputs.
            joint_states_file: Name of the CSV (columns A1..A7, degrees).
        """
        super().__init__("kuka_data_recorder")

        self.data_folder = data_folder
        self.joint_states_file = joint_states_file
        self.save_data_folder = os.path.join(
            self.data_folder, os.path.splitext(self.joint_states_file)[0])
        os.makedirs(self.save_data_folder, exist_ok=True)
        # One folder per camera for the recorded frames.
        self.imageA_folder = os.path.join(self.save_data_folder, "imageA")
        self.imageB_folder = os.path.join(self.save_data_folder, "imageB")
        os.makedirs(self.imageA_folder, exist_ok=True)
        os.makedirs(self.imageB_folder, exist_ok=True)

        # Trajectory replay setup.
        self.feedback = None
        self._action_client = ActionClient(
            self, FollowJointTrajectory,
            '/lbr/joint_trajectory_controller/follow_joint_trajectory')
        self.joint_names = ["A1", "A2", "A3", "A4", "A5", "A6", "A7"]
        self.joint_trajectories = self.readJoinStatesFromCsv(
            os.path.join(self.data_folder, "processed", self.joint_states_file))
        # Move to the first trajectory point (blocking) so the replay does
        # not start with a sudden jump.
        self.initializeRobotPose()

        # Synchronized recording of both cameras, joint state and wrench.
        self.imageA_topic = "/camera1/camera1/color/image_raw"
        self.imageB_topic = "/camera2/camera2/color/image_raw"
        self.joint_state_topic = "/lbr/joint_states"
        self.wrench_topic = "/lbr/force_torque_broadcaster/wrench"

        self.imageA_sub = Subscriber(self, Image, self.imageA_topic)
        self.imageB_sub = Subscriber(self, Image, self.imageB_topic)
        self.joint_state_sub = Subscriber(self, JointState, self.joint_state_topic)
        self.wrench_sub = Subscriber(self, WrenchStamped, self.wrench_topic)

        self.synchronizer = ApproximateTimeSynchronizer(
            [self.imageA_sub, self.imageB_sub, self.joint_state_sub, self.wrench_sub],
            queue_size=100,
            slop=0.01,
        )

        self.cvbridge = CvBridge()

        self.synchronizer.registerCallback(self.recordSyncData)
        self.get_logger().info("Data Synchronizer Node Initialized")

        # Rows are accumulated as dicts and turned into a DataFrame on save.
        self.dataframe = []

        # Start the (asynchronous) trajectory replay; recording happens in
        # the synchronizer callback while the goal executes.
        self.executeTrajectory()

    def recordSyncData(self, imageA_msg, imageB_msg, joint_state_msg, wrench_msg):
        """Store one synchronized sample (joint state, wrench, both images)."""
        # The timestamp of imageA_msg names both the CSV row and the images.
        timestamp = f"{imageA_msg.header.stamp.sec}_{imageA_msg.header.stamp.nanosec}"

        self.get_logger().info(f"Recording data at timestamp {timestamp}")

        data_row = {'timestamp': timestamp}

        for i in range(7):
            # IMPORTANT: KUKA publishes joints in the order A1, A2, A4, A3,
            # A5, A6, A7 (note the swapped 3rd/4th entries) -- swap indices
            # 3 and 4 so columns end up in A1..A7 order.
            j = i
            if i == 3:
                j = 4
            if i == 4:
                j = 3
            data_row[f'P{j+1}'] = joint_state_msg.position[i]
            data_row[f'V{j+1}'] = joint_state_msg.velocity[i]
            data_row[f'E{j+1}'] = joint_state_msg.effort[i]

        # Cartesian force/torque measured at the wrist.
        data_row['Fx'] = wrench_msg.wrench.force.x
        data_row['Fy'] = wrench_msg.wrench.force.y
        data_row['Fz'] = wrench_msg.wrench.force.z
        data_row['Tx'] = wrench_msg.wrench.torque.x
        data_row['Ty'] = wrench_msg.wrench.torque.y
        data_row['Tz'] = wrench_msg.wrench.torque.z

        self.dataframe.append(data_row)

        # Save both camera frames next to the CSV data.
        imageA_filename = os.path.join(self.imageA_folder, f"{timestamp}_A.jpg")
        imageB_filename = os.path.join(self.imageB_folder, f"{timestamp}_B.jpg")
        image_A = self.cvbridge.imgmsg_to_cv2(imageA_msg, desired_encoding='bgr8')
        cv2.imwrite(imageA_filename, image_A)
        image_B = self.cvbridge.imgmsg_to_cv2(imageB_msg, desired_encoding='bgr8')
        cv2.imwrite(imageB_filename, image_B)

    def readJoinStatesFromCsv(self, file_path):
        """Read joint targets (columns A1..A7, degrees) and return them as a
        list of 7-element lists in radians."""
        joint_states = []
        with open(file_path, mode='r') as file:
            csv_reader = csv.DictReader(file)
            for row in csv_reader:
                joint_states.append([math.radians(float(row['A1'])), math.radians(float(row['A2'])), math.radians(float(row['A3'])),
                                     math.radians(float(row['A4'])), math.radians(float(row['A5'])), math.radians(float(row['A6'])), math.radians(float(row['A7']))])
        self.get_logger().debug(f"First trajectory point: {joint_states[0]}")
        return joint_states

    def initializeRobotPose(self):
        """Move the robot to the first trajectory point and wait for the user.

        Blocks until the motion finishes so the replay does not start with a
        sudden jump, then waits for Enter before continuing.
        """
        goal_msg = FollowJointTrajectory.Goal()
        trajectory_msg = JointTrajectory()
        trajectory_msg.joint_names = self.joint_names

        self.get_logger().info(f"Processing trajectory point init")

        point = JointTrajectoryPoint()
        point.positions = self.joint_trajectories[0]
        point.time_from_start.sec = 4  # allow 4 s to reach the start pose

        trajectory_msg.points.append(point)
        goal_msg.trajectory = trajectory_msg
        self._action_client.wait_for_server()

        send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback)
        rclpy.spin_until_future_complete(self, send_goal_future)
        goal_handle = send_goal_future.result()

        if not goal_handle.accepted:
            self.get_logger().info(f"Goal for init point was rejected")
            return

        self.get_logger().info(f"Goal for init point was accepted")
        get_result_future = goal_handle.get_result_async()
        rclpy.spin_until_future_complete(self, get_result_future)
        result = get_result_future.result().result
        self.get_logger().info(f"Result : {result}, Initial position reached")
        print("Press Enter to continue ...")
        input()

    def executeTrajectory(self):
        """Send the full trajectory as one goal; results arrive via callbacks.

        Each point is spaced 0.1 s apart. The goal is sent asynchronously so
        the executor can keep servicing the recording callbacks while the
        robot moves.
        """
        goal_msg = FollowJointTrajectory.Goal()
        trajectory_msg = JointTrajectory()
        trajectory_msg.joint_names = self.joint_names
        for i, joint_values in enumerate(self.joint_trajectories):
            point = JointTrajectoryPoint()
            point.positions = joint_values
            point.time_from_start = rclpy.duration.Duration(seconds=(i + 1) * 0.1).to_msg()
            trajectory_msg.points.append(point)
        goal_msg.trajectory = trajectory_msg
        self._action_client.wait_for_server()

        send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback)
        send_goal_future.add_done_callback(self.goal_response_callback)

    def goal_response_callback(self, future):
        """Chain to the result callback once the trajectory goal is accepted."""
        goal_handle = future.result()
        if not goal_handle.accepted:
            self.get_logger().info(f"Goal for trajectory was rejected")
            return
        self.get_logger().info(f"Goal for trajectory was accepted")
        get_result_future = goal_handle.get_result_async()
        get_result_future.add_done_callback(self.result_callback)

    def result_callback(self, future):
        """Shut down (saving data) when the trajectory finished successfully."""
        result = future.result()
        if result.result.error_code == FollowJointTrajectory.Result.SUCCESSFUL:
            self.get_logger().info("WOHOOO")
            self.shutdown_node()
        else:
            self.get_logger().info("OHH NO")

    def feedback_callback(self, feedback_msg):
        """Keep the latest action feedback for inspection."""
        self.feedback = feedback_msg.feedback

    def shutdown_node(self):
        """Persist the recorded data, then tear the node and rclpy down."""
        self.get_logger().info("Shutting down node now")
        self.save_data()
        self.destroy_node()
        rclpy.shutdown()

    def save_data(self):
        """Write all recorded rows to ``recorded_data.csv``."""
        df = pd.DataFrame(self.dataframe)
        save_path = os.path.join(self.save_data_folder, 'recorded_data.csv')
        df.to_csv(save_path, index=False)
        self.get_logger().info("Saved data!")


def main(args=None):
    """Entry point: read config.json, spin the recorder until done."""
    with open('config.json', 'r') as config_file:
        config = json.load(config_file)

    data_folder = config['log_folder_path']
    joint_states_file = config['replay_csv_name']
    rclpy.init(args=args)
    executor = MultiThreadedExecutor()
    node = KukaDataRecorder(data_folder=data_folder, joint_states_file=joint_states_file)
    executor.add_node(node)
    try:
        executor.spin()
    except Exception as e:
        node.get_logger().error(f"An error occurred: {e}")
    finally:
        # shutdown_node() may already have cleaned up on success; only tear
        # down here if rclpy is still running.
        if rclpy.ok():
            node.destroy_node()
            rclpy.shutdown()


if __name__ == "__main__":
    main()
+ """ + # Step 1: Read the header line starting with '%' + with open(file_path, 'r') as file: + for line in file: + if line.startswith('%'): + header = line.strip().lstrip('%').strip().split() + break + else: + raise ValueError("No header line starting with '%' found in the file.") + + # Step 2: Load the data into a DataFrame + df = pd.read_csv( + file_path, + delim_whitespace=True, # Assuming the data is space-separated + comment='%', # Skip any lines starting with '%' + names=header, # Use the extracted header + skiprows=1 # Skip the header line + ) + + # Step 3: Identify and rename joint columns + joint_pattern = re.compile(r'axisQMsr_LBR_iiwa_14_R820_1\[(\d+)\]') + joint_columns = [col for col in header if joint_pattern.match(col)] + + if len(joint_columns) != 7: + raise ValueError(f"Expected 7 joint columns, found {len(joint_columns)}.") + + # Sort joint columns based on their index and rename them to J1-J7 + joint_columns_sorted = sorted( + joint_columns, + key=lambda x: int(joint_pattern.match(x).group(1)) + ) + joint_rename_map = {col: f'A{idx+1}' for idx, col in enumerate(joint_columns_sorted)} + df.rename(columns=joint_rename_map, inplace=True) + + # Step 4: Extract and combine time columns + if 'ZeitInSec' not in df.columns or 'ZeitInNanoSec' not in df.columns: + raise ValueError("Required time columns 'ZeitInSec' and/or 'ZeitInNanoSec' are missing.") + + # Combine 'ZeitInSec' and 'ZeitInNanoSec' into a single 'time' column in seconds + df['time'] = df['ZeitInSec'] + df['ZeitInNanoSec'] * 1e-9 + + # Drop the original time columns as they are now combined + df.drop(columns=['ZeitInSec', 'ZeitInNanoSec'], inplace=True) + + # Step 5: Extract and rename force and torque columns + force_columns = { + 'cartForce1_X': 'Fx', + 'cartForce1_Y': 'Fy', + 'cartForce1_Z': 'Fz' + } + torque_columns = { + 'cartTorque1_TauX': 'Tx', + 'cartTorque1_TauY': 'Ty', + 'cartTorque1_TauZ': 'Tz' + } + + # Verify that the required force and torque columns exist + 
missing_force_columns = [col for col in force_columns.keys() if col not in df.columns] + missing_torque_columns = [col for col in torque_columns.keys() if col not in df.columns] + missing_columns = missing_force_columns + missing_torque_columns + if missing_columns: + raise ValueError(f"The following required columns are missing in the data: {missing_columns}") + + # Rename force and torque columns + df.rename(columns=force_columns, inplace=True) + df.rename(columns=torque_columns, inplace=True) + + # Step 6: Select the relevant columns + selected_columns = ['time'] + list(joint_rename_map.values()) + list(force_columns.values()) + list(torque_columns.values()) + df_selected = df[selected_columns] + + return df_selected + + +def main(): + + with open('config.json', 'r') as config_file: + config = json.load(config_file) + + log_folder_path = config['log_folder_path'] + save_df_path = config['processed_csv_folder_path'] + + os.makedirs(save_df_path, exist_ok=True) + + log_files = glob.glob(os.path.join(log_folder_path, "*.log")) + + for file_path in log_files: + returned_df = parse_robot_data(file_path=file_path) + save_file_path = os.path.join(save_df_path, os.path.splitext(os.path.basename(file_path))[0] + '.csv') + returned_df.to_csv(save_file_path, index=False) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/data_collector/data_collector/transformations.py b/data_collector/data_collector/transformations.py new file mode 100644 index 00000000..52a55527 --- /dev/null +++ b/data_collector/data_collector/transformations.py @@ -0,0 +1,37 @@ +from scipy.spatial.transform import Rotation as R +import numpy as np + + +def construct_adjoint_matrix(tcp_pose): + """ + Construct the adjoint matrix for a spatial velocity vector + :args: tcp_pose: (x, y, z, qx, qy, qz, qw) + """ + rotation = R.from_quat(tcp_pose[3:]).as_matrix() + translation = np.array(tcp_pose[:3]) + skew_matrix = np.array( + [ + [0, -translation[2], translation[1]], + 
def construct_adjoint_matrix(tcp_pose):
    """
    Construct the adjoint matrix for a spatial velocity vector
    :args: tcp_pose: (x, y, z, qx, qy, qz, qw)
    """
    p = np.asarray(tcp_pose[:3], dtype=float)
    rot = R.from_quat(tcp_pose[3:]).as_matrix()
    # Skew-symmetric matrix [p]x such that [p]x @ v == cross(p, v).
    p_hat = np.array(
        [
            [0.0, -p[2], p[1]],
            [p[2], 0.0, -p[0]],
            [-p[1], p[0], 0.0],
        ]
    )
    # Block layout: rotation on the diagonal, [p]x R in the lower-left.
    return np.block(
        [
            [rot, np.zeros((3, 3))],
            [p_hat @ rot, rot],
        ]
    )


def construct_homogeneous_matrix(tcp_pose):
    """
    Construct the homogeneous transformation matrix from given pose.
    args: tcp_pose: (x, y, z, qx, qy, qz, qw)
    """
    T = np.eye(4)
    T[:3, :3] = R.from_quat(tcp_pose[3:]).as_matrix()
    T[:3, 3] = tcp_pose[:3]
    return T
maintainer_email='sarawgi.nikita@gmail.com', + description='TODO: Package description', + license='Apache-2.0', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'kuka_data_recorder = data_collector.kuka_data_recorder:main', + 'joint_cartesian_converter = data_collector.joint_cartesian_converter:main' + ], + }, +) From 4744ba16d861632ebfd0e722854a0ce777e28b00 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Fri, 7 Feb 2025 12:55:01 -0800 Subject: [PATCH 08/24] Undoing the incorrect nesting of data_collector --- data_collector/data_collector/LICENSE | 202 -------------- .../data_collector/data_collector/__init__.py | 0 .../data_collector/arduino-python.py | 41 --- .../data_collector/data_collector/config.json | 17 -- .../data_collector/create_pkl.py | 231 ---------------- .../joint_cartesian_converter.py | 127 --------- .../data_collector/kuka_data_recorder.py | 249 ------------------ .../data_collector/parse_robot_log.py | 110 -------- .../data_collector/transformations.py | 37 --- data_collector/data_collector/package.xml | 38 --- data_collector/data_collector/setup.cfg | 4 - data_collector/data_collector/setup.py | 27 -- 12 files changed, 1083 deletions(-) delete mode 100644 data_collector/data_collector/LICENSE delete mode 100644 data_collector/data_collector/data_collector/__init__.py delete mode 100644 data_collector/data_collector/data_collector/arduino-python.py delete mode 100644 data_collector/data_collector/data_collector/config.json delete mode 100644 data_collector/data_collector/data_collector/create_pkl.py delete mode 100644 data_collector/data_collector/data_collector/joint_cartesian_converter.py delete mode 100644 data_collector/data_collector/data_collector/kuka_data_recorder.py delete mode 100644 data_collector/data_collector/data_collector/parse_robot_log.py delete mode 100644 data_collector/data_collector/data_collector/transformations.py delete mode 100644 data_collector/data_collector/package.xml delete mode 100644 
data_collector/data_collector/setup.cfg delete mode 100644 data_collector/data_collector/setup.py diff --git a/data_collector/data_collector/LICENSE b/data_collector/data_collector/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/data_collector/data_collector/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/data_collector/data_collector/data_collector/__init__.py b/data_collector/data_collector/data_collector/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/data_collector/data_collector/data_collector/arduino-python.py b/data_collector/data_collector/data_collector/arduino-python.py deleted file mode 100644 index 9947c7db..00000000 --- a/data_collector/data_collector/data_collector/arduino-python.py +++ /dev/null @@ -1,41 +0,0 @@ -###################### -import serial -import time -import subprocess -import signal -import sys - -# Use the correct serial port based on what you found, either '/dev/ttyACM0' or '/dev/ttyUSB0' -arduino = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=.1) - -def send_command(command): - arduino.write(command.encode()) # Send the command to the Arduino - time.sleep(0.05) # Small delay to ensure the command is sent properly - -def start_two_cameras_script(): - # Launch the two_cameras.py script - subprocess.Popen(["python", "/home/lm-2023/jeon_team_ws/control_kuka/lbr_fri_ws-dev-socket/data_collection/data_collection/two_cameras_live.py"]) - -def signal_handler(sig, frame): - print("Signal received, sending 'X' to microcontroller...") - send_command('X') # Send the 'X' command when the process is terminated - sys.exit(0) # Exit the program - -# Register the signal handler for termination signals -signal.signal(signal.SIGINT, signal_handler) # Handles Ctrl+C -signal.signal(signal.SIGTERM, signal_handler) # Handles termination signal - -# Example usage -while True: - command = input("Enter command (X: Press Both Buttons, Y: Release Both Buttons and Start two_cameras.py): ").strip().upper() - - if command == 'X': - send_command('X') # Press both buttonsy - - elif command == 'Y': - send_command('Y') # Release both buttons - #start_two_cameras_script() # Start the two_cameras.py script - - else: - print("Invalid command. 
Please enter X or Y.") - diff --git a/data_collector/data_collector/data_collector/config.json b/data_collector/data_collector/data_collector/config.json deleted file mode 100644 index 3c828d65..00000000 --- a/data_collector/data_collector/data_collector/config.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "log_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay", - "processed_csv_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay/processed", - "replay_csv_name": "2013-01-01_01-10-52.csv", - - "ACTION_SCALE": [0.02, 0.1, 1], - "TARGET_POSE": [ - 0.5906439143742067, - 0.07771711953459341, - 0.337835826958042, - 3.1099675, - 0.0146619, - -0.0078615 - ], - "REWARD_THRESHOLD": [0.01, 0.01, 0.01, 0.2, 0.2, 0.2] - -} \ No newline at end of file diff --git a/data_collector/data_collector/data_collector/create_pkl.py b/data_collector/data_collector/data_collector/create_pkl.py deleted file mode 100644 index d383215b..00000000 --- a/data_collector/data_collector/data_collector/create_pkl.py +++ /dev/null @@ -1,231 +0,0 @@ -''' -FORMAT OF THE CREATED PKL FILE: - The pkl file is a dictionary with the following keys: - dict( - observations=obs, - actions=actions, - next_observations=next_obs, - rewards=rew, - masks=1.0 - done, - dones=done, - ) - where - observations : { - "state": gym.spaces.Box(-np.inf, np.inf, shape=(7 + 6 + 3 + 3,)), - "wrist_1": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), - "wrist_2": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), - } - actions = gym.spaces.Box( - np.ones((7,), dtype=np.float32) * -1, - np.ones((7,), dtype=np.float32), - ) - The action space's last element is actually for the gripper, but we are not using it. 
- It should be x,y,z,roll,pitch,yaw - state : concatenation of tcp_poses (7), tcp_velocities (6), tcp_forces (3), tcp_torques (3) - image names are in the format {timestamp}_A.png and {timestamp}_B.png -''' - - -import numpy as np -import pandas as pd -import pickle -import os -import json -from PIL import Image -from scipy.spatial.transform import Rotation as R -from transformations import construct_adjoint_matrix, construct_homogeneous_matrix - - - - -class SingleTrajectoryConverter: - def __init__(self, recorded_data_file_path, imageA_folder_path, imageB_folder_path, action_scale, target_pose, reward_threshold): - - self.recorded_data_file_path = recorded_data_file_path - self.imageA_folder_path = imageA_folder_path - self.imageB_folder_path = imageB_folder_path - self.action_scale = action_scale - self.target_pose = target_pose - self.reward_threshold = reward_threshold - - self.recorded_data = pd.read_csv(recorded_data_file_path) - - self.adjoint_matrix = None - self.T_r_o_inv = None - - self.transitions = [] - - def transform_observation(self, obs): - """ - Transform observations from spatial(base) frame into body(end-effector) frame - using the adjoint matrix - """ - adjoint_inv = np.linalg.inv(self.adjoint_matrix) - obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']] = adjoint_inv @ obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values - - T_b_o = construct_homogeneous_matrix(obs[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values) - T_b_r = self.T_r_o_inv @ T_b_o - - # Reconstruct transformed tcp_pose vector - p_b_r = T_b_r[:3, 3] - theta_b_r = R.from_matrix(T_b_r[:3, :3]).as_quat() - obs[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']] = np.concatenate((p_b_r, theta_b_r)) - - return obs - - def calculate_next_action(self, curr_pos, next_pos): - action = np.zeros(7) - action[:3] = (next_pos[:3] - curr_pos[:3]) / self.action_scale[0] - action[3:6] = (R.from_quat(next_pos[3:7]) * R.from_quat(curr_pos[3:7]).inv()).as_euler("xyz") / self.action_scale[1] - action[6] = 0.0 - return 
action - - def convertToPickle(self): - - observations = [] - actions = [] - next_observations = [] - rewards = [] - masks = [] - dones = [] - - done = 0.0 - - first_observation = self.recorded_data.iloc[0] - # Construct the adjoint matrix and T_r_o_inv using the first observation - # T_r_o_inv remains constant but the adjoint matrix updates every iteration - curr_pos = first_observation[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - self.adjoint_matrix = construct_adjoint_matrix(curr_pos) - self.T_r_o_inv = np.linalg.inv(construct_homogeneous_matrix(curr_pos)) - next_transformed_obs = self.transform_observation(first_observation) - - - for i in range(1, len(self.recorded_data) - 1): - - current_row = self.recorded_data.iloc[i-1] - next_row = self.recorded_data.iloc[i] - - curr_time = current_row["timestamp"] - next_time = next_row["timestamp"] - - img_size = (128, 128) - - curr_wrist_1_image = Image.open(os.path.join(self.imageA_folder_path, f"{curr_time}_A.jpg")).resize(img_size) - curr_wrist_2_image = Image.open(os.path.join(self.imageB_folder_path, f"{curr_time}_B.jpg")).resize(img_size) - - next_wrist_1_image = Image.open(os.path.join(self.imageA_folder_path, f"{next_time}_A.jpg")).resize(img_size) - next_wrist_2_image = Image.open(os.path.join(self.imageB_folder_path, f"{next_time}_B.jpg")).resize(img_size) - - curr_obs_no_transform_pose = current_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - next_obs_no_transform_pose = next_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - action = self.calculate_next_action(curr_obs_no_transform_pose, next_obs_no_transform_pose) - - self.adjoint_matrix = construct_adjoint_matrix(curr_obs_no_transform_pose) - - curr_transformed_obs = next_transformed_obs - next_transformed_obs = self.transform_observation(next_row) - - curr_euler_angles = R.from_quat(curr_transformed_obs[['P4', 'P5', 'P6', 'P7']].values).as_euler('xyz') - next_euler_angles = R.from_quat(next_transformed_obs[['P4', 'P5', 'P6', 
'P7']].values).as_euler('xyz') - - observation = { - "state": np.concatenate([ - curr_transformed_obs[['P1', 'P2', 'P3']].values, - curr_euler_angles, - curr_transformed_obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, - curr_transformed_obs[['Fx', 'Fy', 'Fz']].values, - curr_transformed_obs[['Tx', 'Ty', 'Tz']].values - ]), - "wrist_1": np.array(curr_wrist_1_image), - "wrist_2": np.array(curr_wrist_2_image) - } - - next_observation = { - "state": np.concatenate([ - next_transformed_obs[['P1', 'P2', 'P3']].values, - next_euler_angles, - next_transformed_obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, - next_transformed_obs[['Fx', 'Fy', 'Fz']].values, - next_transformed_obs[['Tx', 'Ty', 'Tz']].values - ]), - "wrist_1": np.array(next_wrist_1_image), - "wrist_2": np.array(next_wrist_2_image) - } - - if done != 1.0: - tcp_pose = current_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values - tcp_pose[3:] = np.abs(tcp_pose[3:]) - if np.all(tcp_pose - self.target_pose <= self.reward_threshold): - reward = 1.0 - done = 1.0 - else: - reward = 0.0 - else: - reward = 0.0 - - - data_dict = { - "observations": observation, - "actions": action, - "next_observations": next_observation, - "rewards": reward, - "masks": 1.0 - done, - "dones": done - } - - self.transitions.append(data_dict) - - # observations.append(observation) - # actions.append(action) - # next_observations.append(next_observation) - # rewards.append(reward) - # masks.append(1.0 - done) - # dones.append(done) - - curr_wrist_1_image.close() - curr_wrist_2_image.close() - next_wrist_1_image.close() - next_wrist_2_image.close() - - return self.transitions - -def main(): - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - processed_folder_path = config['processed_csv_folder_path'] - output_pkl_file_path = os.path.join(processed_folder_path, 'recorded_data.pkl') - - action_scale = np.array(config['ACTION_SCALE']) - target_pose = np.array(config['TARGET_POSE']) - reward_threshold = 
np.array(config['REWARD_THRESHOLD']) - - - all_transitions = [] - - for folder_name in os.listdir(processed_folder_path): - folder_path = os.path.join(processed_folder_path, folder_name) - if os.path.isdir(folder_path): - recorded_data_file_path = os.path.join(folder_path, 'recorded_data.csv') - imageA_folder_path = os.path.join(folder_path, 'imageA') - imageB_folder_path = os.path.join(folder_path, 'imageB') - if os.path.exists(recorded_data_file_path) and os.path.exists(imageA_folder_path) and os.path.exists(imageB_folder_path): - singleTrajectoryConverter = SingleTrajectoryConverter( - recorded_data_file_path, - imageA_folder_path, - imageB_folder_path, - action_scale, - target_pose, - reward_threshold) - transitions = singleTrajectoryConverter.convertToPickle() - all_transitions.extend(transitions) - - with open(output_pkl_file_path, 'wb') as f: - pickle.dump(all_transitions, f) - - print(f"Data saved to {output_pkl_file_path}") - - -if __name__ == "__main__": - main() - diff --git a/data_collector/data_collector/data_collector/joint_cartesian_converter.py b/data_collector/data_collector/data_collector/joint_cartesian_converter.py deleted file mode 100644 index 72d8c3d2..00000000 --- a/data_collector/data_collector/data_collector/joint_cartesian_converter.py +++ /dev/null @@ -1,127 +0,0 @@ -import time - -import rclpy -import rclpy.duration -from rclpy.node import Node -from rclpy.callback_groups import MutuallyExclusiveCallbackGroup - -import numpy as np -import pandas as pd -import json -import os -from scipy.spatial.transform import Rotation as R - -from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped -import rclpy.task -from sensor_msgs.msg import JointState - -from moveit_msgs.msg import ( - RobotState, - MoveItErrorCodes, -) -from moveit_msgs.srv import GetPositionFK - - - -class JointCartesianConverter(Node): - - move_group_name_ = "arm" - namespace_ = "lbr" - - joint_state_topic_ = "joint_states" - lbr_state_topic_ = "state" - pose_topic_ 
= "pose" - plan_srv_name_ = "plan_kinematic_path" - ik_srv_name_ = "compute_ik" - fk_srv_name_ = "compute_fk" - execute_action_name_ = "execute_trajectory" - fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" - - base_ = "link_0" - end_effector_ = "link_ee" - - def __init__(self, joint_data_file_path): - super().__init__("joint_cartesian_converter") - - self.joint_data_file_path = joint_data_file_path - self.fk_client_callback = MutuallyExclusiveCallbackGroup() - self.fk_client_ = self.create_client(GetPositionFK, f"{self.namespace_}/{self.fk_srv_name_}", callback_group=self.fk_client_callback) - if not self.fk_client_.wait_for_service(timeout_sec=5.0): - self.get_logger().error("FK service not available.") - exit(1) - - self.joint_data = pd.read_csv(self.joint_data_file_path) - - def quat_2_euler(self, quat): - """calculates and returns: yaw, pitch, roll from given quaternion""" - return R.from_quat(quat).as_euler("xyz") - - def convert(self): - - # Convert each row in the dataframe to cartesian coordinates and save in the dataframe - for index, row in self.joint_data.iterrows(): - - joint_state = JointState() - joint_state.name = ["A1", "A2", "A4", "A3", "A5", "A6", "A7"] - joint_state.position = [row["P1"], row["P2"], row["P3"], row["P4"], row["P5"], row["P6"], row["P7"]] - joint_state.velocity = [row["V1"], row["V2"], row["V3"], row["V4"], row["V5"], row["V6"], row["V7"]] - joint_state.effort = [row["E1"], row["E2"], row["E3"], row["E4"], row["E5"], row["E6"], row["E7"]] - joint_state.header.stamp = self.get_clock().now().to_msg() - joint_state.header.frame_id = f"{self.namespace_}/{self.base_}" - current_robot_state = RobotState() - current_robot_state.joint_state = joint_state - - request = GetPositionFK.Request() - - request.header.frame_id = f"{self.namespace_}/{self.base_}" - request.header.stamp = self.get_clock().now().to_msg() - - request.fk_link_names.append(self.end_effector_) - request.robot_state = current_robot_state - 
- future = self.fk_client_.call_async(request) - - rclpy.spin_until_future_complete(self, future) - if future.result() is None: - self.get_logger().error("Failed to get FK solution") - return None - - response = future.result() - if response.error_code.val != MoveItErrorCodes.SUCCESS: - self.get_logger().error( - f"Failed to get FK solution: {response.error_code.val}" - ) - return None - - pose = response.pose_stamped[0].pose - euler_pose = self.quat_2_euler(np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])) - self.joint_data.at[index, "X"] = pose.position.x - self.joint_data.at[index, "Y"] = pose.position.y - self.joint_data.at[index, "Z"] = pose.position.z - self.joint_data.at[index, "QX"] = pose.orientation.x - self.joint_data.at[index, "QY"] = pose.orientation.y - self.joint_data.at[index, "QZ"] = pose.orientation.z - self.joint_data.at[index, "QW"] = pose.orientation.w - self.joint_data.at[index, "Roll"] = euler_pose[0] - self.joint_data.at[index, "Pitch"] = euler_pose[1] - self.joint_data.at[index, "Yaw"] = euler_pose[2] - - # Compute the end_effector velocity - - self.joint_data.to_csv(self.joint_data_file_path, index=False) - -def main(args=None): - rclpy.init(args=args) - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - - processed_folder_path = config['processed_csv_folder_path'] - csv_name = config['replay_csv_name'] - csv_folder = os.path.join(processed_folder_path, os.path.splitext(csv_folder)[0]) - joint_data_file_path = os.path.join(csv_folder, 'recorded_data.csv') - - joint_cartesian_converter = JointCartesianConverter(joint_data_file_path) - joint_cartesian_converter.convert() - - rclpy.shutdown() \ No newline at end of file diff --git a/data_collector/data_collector/data_collector/kuka_data_recorder.py b/data_collector/data_collector/data_collector/kuka_data_recorder.py deleted file mode 100644 index d8e560ca..00000000 --- 
a/data_collector/data_collector/data_collector/kuka_data_recorder.py +++ /dev/null @@ -1,249 +0,0 @@ -import rclpy -import os -import pandas as pd -import csv -import math -import json -from cv_bridge import CvBridge -import cv2 -from rclpy.node import Node -from rclpy.executors import MultiThreadedExecutor -from rclpy.action import ActionClient -from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint -from control_msgs.action import FollowJointTrajectory -from sensor_msgs.msg import Image, JointState -from geometry_msgs.msg import WrenchStamped -from message_filters import Subscriber, ApproximateTimeSynchronizer - -class KukaDataRecorder(Node): - def __init__(self, data_folder, joint_states_file): - super().__init__("kuka_data_recorder") - - self.data_folder = data_folder - self.joint_states_file = joint_states_file - self.save_data_folder = os.path.join(self.data_folder, os.path.splitext(self.joint_states_file)[0]) - os.makedirs(self.save_data_folder, exist_ok=True) - # Create two folders for the images - self.imageA_folder = os.path.join(self.save_data_folder, "imageA") - self.imageB_folder = os.path.join(self.save_data_folder, "imageB") - os.makedirs(self.imageA_folder, exist_ok=True) - os.makedirs(self.imageB_folder, exist_ok=True) - - # topic_callback_group = MutuallyExclusiveCallbackGroup() - # client_callback_group = MutuallyExclusiveCallbackGroup() - - # Initialize everything for trajectory replaying - self.feedback = None - self._action_client = ActionClient(self, FollowJointTrajectory, '/lbr/joint_trajectory_controller/follow_joint_trajectory') - self.joint_names = ["A1", "A2", "A3", "A4", "A5", "A6", "A7"] - self.joint_trajectories = self.readJoinStatesFromCsv(os.path.join(self.data_folder, "processed", self.joint_states_file)) - # This can be done through goal sending - self.initializeRobotPose() - - # Create a synchronized callback for all the information - self.imageA_topic = "/camera1/camera1/color/image_raw" - self.imageB_topic = 
"/camera2/camera2/color/image_raw" - self.joint_state_topic = "/lbr/joint_states" - self.wrench_topic = "/lbr/force_torque_broadcaster/wrench" - - self.imageA_sub = Subscriber(self, Image, self.imageA_topic) - self.imageB_sub = Subscriber(self, Image, self.imageB_topic) - self.joint_state_sub = Subscriber(self, JointState, self.joint_state_topic) - self.wrench_sub = Subscriber(self, WrenchStamped, self.wrench_topic) - - self.synchronizer = ApproximateTimeSynchronizer( - [self.imageA_sub, self.imageB_sub, self.joint_state_sub, self.wrench_sub], - queue_size=100, - slop=0.01, - ) - - self.cvbridge = CvBridge() - - self.synchronizer.registerCallback(self.recordSyncData) - self.get_logger().info("Data Synchronizer Node Initialized") - - # self.dataframe = pd.DataFrame(columns=[ - # 'timestamp', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', - # 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', - # 'E1', 'E2', 'E3', 'E4', 'E5', 'E6', 'E7', 'Fx', 'Fy', 'Fz', 'Tx', 'Ty', 'Tz' - # ]) - self.dataframe = [] - - # This needs to be done through moveit planner since the points need to be interpolated to a trajectory - self.executeTrajectory() - - - def recordSyncData(self, imageA_msg, imageB_msg, joint_state_msg, wrench_msg): - # The timestamp of imageA_msg will be used to name the columns - timestamp = f"{imageA_msg.header.stamp.sec}_{imageA_msg.header.stamp.nanosec}" - - self.get_logger().info(f"Recording data at timestamp {timestamp}") - - # Save the joint state - data_row = {'timestamp': timestamp} - - for i in range(7): - # imp imp: Remember that kuka publishes A1, A2, A4, A3, A5, A6, A7 (NOTE THE ORDER) - j = i - if i == 3: - j = 4 - if i == 4: - j = 3 - data_row[f'P{j+1}'] = joint_state_msg.position[i] - data_row[f'V{j+1}'] = joint_state_msg.velocity[i] - data_row[f'E{j+1}'] = joint_state_msg.effort[i] - - # Save the wrench - data_row['Fx'] = wrench_msg.wrench.force.x - data_row['Fy'] = wrench_msg.wrench.force.y - data_row['Fz'] = wrench_msg.wrench.force.z - data_row['Tx'] = 
wrench_msg.wrench.torque.x - data_row['Ty'] = wrench_msg.wrench.torque.y - data_row['Tz'] = wrench_msg.wrench.torque.z - - self.dataframe.append(data_row) - - # Save the images - imageA_filename = os.path.join(self.imageA_folder, f"{timestamp}_A.jpg") - imageB_filename = os.path.join(self.imageB_folder, f"{timestamp}_B.jpg") - image_A = self.cvbridge.imgmsg_to_cv2(imageA_msg, desired_encoding='bgr8') - cv2.imwrite(imageA_filename, image_A) - image_B = self.cvbridge.imgmsg_to_cv2(imageB_msg, desired_encoding='bgr8') - cv2.imwrite(imageB_filename, image_B) - - - def readJoinStatesFromCsv(self, file_path): - joint_states = [] - with open(file_path, mode='r') as file: - csv_reader = csv.DictReader(file) - for row in csv_reader: - joint_states.append([math.radians(float(row['A1'])), math.radians(float(row['A2'])), math.radians(float(row['A3'])), - math.radians(float(row['A4'])), math.radians(float(row['A5'])), math.radians(float(row['A6'])), math.radians(float(row['A7']))]) - print(joint_states[0]) - return joint_states - - def initializeRobotPose(self): - # The robot needs to be initialized to the first position of the trajectory - # to ensure that there is no sudden movement when the trajectory is replayed - goal_msg = FollowJointTrajectory.Goal() - trajectory_msg = JointTrajectory() - trajectory_msg.joint_names = self.joint_names - - self.get_logger().info(f"Processing trajectory point init") - - point = JointTrajectoryPoint() - point.positions = self.joint_trajectories[0] - point.time_from_start.sec = 4 # Set the seconds part to 0 - - trajectory_msg.points.append(point) - goal_msg.trajectory = trajectory_msg - self._action_client.wait_for_server() - - send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback) - - rclpy.spin_until_future_complete(self, send_goal_future) - - goal_handle = send_goal_future.result() - - if not goal_handle.accepted: - self.get_logger().info(f"Goal for init point was rejected") - return - - 
self.get_logger().info(f"Goal for init point was accepted") - get_result_future = goal_handle.get_result_async() - rclpy.spin_until_future_complete(self, get_result_future) - result = get_result_future.result().result - self.get_logger().info(f"Result : {result}, Initial position reached") - print("Press Enter to continue ...") - input() - - def executeTrajectory(self): - goal_msg = FollowJointTrajectory.Goal() - trajectory_msg = JointTrajectory() - trajectory_msg.joint_names = self.joint_names - for i, joint_values in enumerate(self.joint_trajectories): - point = JointTrajectoryPoint() - point.positions = joint_values - point.time_from_start = rclpy.duration.Duration(seconds=(i+1) * 0.1).to_msg() - trajectory_msg.points.append(point) - goal_msg.trajectory = trajectory_msg - self._action_client.wait_for_server() - - send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback) - - # rclpy.spin_until_future_complete(self, send_goal_future) - - # goal_handle = send_goal_future.result() - - # if not goal_handle.accepted: - # self.get_logger().info(f"Goal for trajectory was rejected") - # return - - # self.get_logger().info(f"Goal for trajectory was accepted") - # get_result_future = goal_handle.get_result_async() - # rclpy.spin_until_future_complete(self, get_result_future) - # result = get_result_future.result().result - # self.get_logger().info(f"Result : {result}, Trajectory executed?") - - send_goal_future.add_done_callback(self.goal_response_callback) - - def goal_response_callback(self, future): - goal_handle = future.result() - if not goal_handle.accepted: - self.get_logger().info(f"Goal for trajectory was rejected") - return - self.get_logger().info(f"Goal for trajectory was accepted") - get_result_future = goal_handle.get_result_async() - get_result_future.add_done_callback(self.result_callback) - - def result_callback(self, future): - result = future.result() - if result.result.error_code == 
FollowJointTrajectory.Result.SUCCESSFUL: - self.get_logger().info("WOHOOO") - self.shutdown_node() - else: - self.get_logger().info("OHH NO") - - def feedback_callback(self, feedback_msg): - self.feedback = feedback_msg.feedback - - def shutdown_node(self): - self.get_logger().info("Shutting down node now") - self.save_data() - self.destroy_node() - rclpy.shutdown() - - def save_data(self): - df = pd.DataFrame(self.dataframe) - save_path = os.path.join(self.save_data_folder, 'recorded_data.csv') - df.to_csv(save_path, index=False) - self.get_logger().info("Saved data!") - -def main(args=None): - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - - data_folder = config['log_folder_path'] - joint_states_file = config['replay_csv_name'] - rclpy.init(args=args) - executor = MultiThreadedExecutor() - node = KukaDataRecorder(data_folder=data_folder, joint_states_file=joint_states_file) - executor.add_node(node) - try: - - executor.spin() - except Exception as e: - node.get_logger().error(f"An error occurred: {e}") - finally: - # if hasattr(node, 'dataframe'): - # df = pd.DataFrame(node.dataframe) - # save_path = os.path.join(node.data_folder, 'recorded_data.csv') - # df.to_csv(save_path, index=False) - # node.get_logger().info(f"Recorded data saved!") - if rclpy.ok(): - node.destroy_node() - rclpy.shutdown() - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/data_collector/data_collector/data_collector/parse_robot_log.py b/data_collector/data_collector/data_collector/parse_robot_log.py deleted file mode 100644 index 2b11adcd..00000000 --- a/data_collector/data_collector/data_collector/parse_robot_log.py +++ /dev/null @@ -1,110 +0,0 @@ -import pandas as pd -import json -import re -import os -import glob - -def parse_robot_data(file_path): - """ - Parses the robot data file, renames joint columns to J1-J7, - extracts and renames time, force, and torque values. 
- - Parameters: - file_path (str): Path to the data file. - - Returns: - pd.DataFrame: A DataFrame containing time, renamed joint values, - and renamed force/torque data. - """ - # Step 1: Read the header line starting with '%' - with open(file_path, 'r') as file: - for line in file: - if line.startswith('%'): - header = line.strip().lstrip('%').strip().split() - break - else: - raise ValueError("No header line starting with '%' found in the file.") - - # Step 2: Load the data into a DataFrame - df = pd.read_csv( - file_path, - delim_whitespace=True, # Assuming the data is space-separated - comment='%', # Skip any lines starting with '%' - names=header, # Use the extracted header - skiprows=1 # Skip the header line - ) - - # Step 3: Identify and rename joint columns - joint_pattern = re.compile(r'axisQMsr_LBR_iiwa_14_R820_1\[(\d+)\]') - joint_columns = [col for col in header if joint_pattern.match(col)] - - if len(joint_columns) != 7: - raise ValueError(f"Expected 7 joint columns, found {len(joint_columns)}.") - - # Sort joint columns based on their index and rename them to J1-J7 - joint_columns_sorted = sorted( - joint_columns, - key=lambda x: int(joint_pattern.match(x).group(1)) - ) - joint_rename_map = {col: f'A{idx+1}' for idx, col in enumerate(joint_columns_sorted)} - df.rename(columns=joint_rename_map, inplace=True) - - # Step 4: Extract and combine time columns - if 'ZeitInSec' not in df.columns or 'ZeitInNanoSec' not in df.columns: - raise ValueError("Required time columns 'ZeitInSec' and/or 'ZeitInNanoSec' are missing.") - - # Combine 'ZeitInSec' and 'ZeitInNanoSec' into a single 'time' column in seconds - df['time'] = df['ZeitInSec'] + df['ZeitInNanoSec'] * 1e-9 - - # Drop the original time columns as they are now combined - df.drop(columns=['ZeitInSec', 'ZeitInNanoSec'], inplace=True) - - # Step 5: Extract and rename force and torque columns - force_columns = { - 'cartForce1_X': 'Fx', - 'cartForce1_Y': 'Fy', - 'cartForce1_Z': 'Fz' - } - torque_columns = 
{ - 'cartTorque1_TauX': 'Tx', - 'cartTorque1_TauY': 'Ty', - 'cartTorque1_TauZ': 'Tz' - } - - # Verify that the required force and torque columns exist - missing_force_columns = [col for col in force_columns.keys() if col not in df.columns] - missing_torque_columns = [col for col in torque_columns.keys() if col not in df.columns] - missing_columns = missing_force_columns + missing_torque_columns - if missing_columns: - raise ValueError(f"The following required columns are missing in the data: {missing_columns}") - - # Rename force and torque columns - df.rename(columns=force_columns, inplace=True) - df.rename(columns=torque_columns, inplace=True) - - # Step 6: Select the relevant columns - selected_columns = ['time'] + list(joint_rename_map.values()) + list(force_columns.values()) + list(torque_columns.values()) - df_selected = df[selected_columns] - - return df_selected - - -def main(): - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - - log_folder_path = config['log_folder_path'] - save_df_path = config['processed_csv_folder_path'] - - os.makedirs(save_df_path, exist_ok=True) - - log_files = glob.glob(os.path.join(log_folder_path, "*.log")) - - for file_path in log_files: - returned_df = parse_robot_data(file_path=file_path) - save_file_path = os.path.join(save_df_path, os.path.splitext(os.path.basename(file_path))[0] + '.csv') - returned_df.to_csv(save_file_path, index=False) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/data_collector/data_collector/data_collector/transformations.py b/data_collector/data_collector/data_collector/transformations.py deleted file mode 100644 index 52a55527..00000000 --- a/data_collector/data_collector/data_collector/transformations.py +++ /dev/null @@ -1,37 +0,0 @@ -from scipy.spatial.transform import Rotation as R -import numpy as np - - -def construct_adjoint_matrix(tcp_pose): - """ - Construct the adjoint matrix for a spatial velocity vector - :args: tcp_pose: 
(x, y, z, qx, qy, qz, qw) - """ - rotation = R.from_quat(tcp_pose[3:]).as_matrix() - translation = np.array(tcp_pose[:3]) - skew_matrix = np.array( - [ - [0, -translation[2], translation[1]], - [translation[2], 0, -translation[0]], - [-translation[1], translation[0], 0], - ] - ) - adjoint_matrix = np.zeros((6, 6)) - adjoint_matrix[:3, :3] = rotation - adjoint_matrix[3:, 3:] = rotation - adjoint_matrix[3:, :3] = skew_matrix @ rotation - return adjoint_matrix - - -def construct_homogeneous_matrix(tcp_pose): - """ - Construct the homogeneous transformation matrix from given pose. - args: tcp_pose: (x, y, z, qx, qy, qz, qw) - """ - rotation = R.from_quat(tcp_pose[3:]).as_matrix() - translation = np.array(tcp_pose[:3]) - T = np.zeros((4, 4)) - T[:3, :3] = rotation - T[:3, 3] = translation - T[3, 3] = 1 - return T diff --git a/data_collector/data_collector/package.xml b/data_collector/data_collector/package.xml deleted file mode 100644 index 494d0fa0..00000000 --- a/data_collector/data_collector/package.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - data_collector - 0.0.0 - TODO: Package description - nisara - Apache-2.0 - - ament_copyright - ament_flake8 - ament_pep257 - python3-pytest - - rclpy - sensor_msgs - trajectory_msgs - control_msgs - geometry_msgs - message_filters - numpy - pandas - cv2 - cv_bridge - - - - ament_python - - diff --git a/data_collector/data_collector/setup.cfg b/data_collector/data_collector/setup.cfg deleted file mode 100644 index f0a7afc8..00000000 --- a/data_collector/data_collector/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/data_collector -[install] -install_scripts=$base/lib/data_collector diff --git a/data_collector/data_collector/setup.py b/data_collector/data_collector/setup.py deleted file mode 100644 index 43107e8c..00000000 --- a/data_collector/data_collector/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -from setuptools import find_packages, setup - -package_name = 'data_collector' - -setup( - name=package_name, 
- version='0.0.0', - packages=find_packages(exclude=['test']), - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools', 'pandas', 'numpy'], - zip_safe=True, - maintainer='nisara', - maintainer_email='sarawgi.nikita@gmail.com', - description='TODO: Package description', - license='Apache-2.0', - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - 'kuka_data_recorder = data_collector.kuka_data_recorder:main', - 'joint_cartesian_converter = data_collector.joint_cartesian_converter:main' - ], - }, -) From ad9e61f5b0c5ddf0980c6f985aeea7c7313d0c37 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Fri, 7 Feb 2025 14:01:47 -0800 Subject: [PATCH 09/24] Squashed 'data_collector/' content from commit b8263e9 git-subtree-dir: data_collector git-subtree-split: b8263e9f3ae287665bdbcfe08d225ebe90702f20 --- LICENSE | 202 ++++++++++++++++ data_collector/__init__.py | 0 data_collector/arduino-python.py | 41 ++++ data_collector/config.json | 17 ++ data_collector/create_pkl.py | 231 ++++++++++++++++++ data_collector/joint_cartesian_converter.py | 127 ++++++++++ data_collector/kuka_data_recorder.py | 249 ++++++++++++++++++++ data_collector/parse_robot_log.py | 110 +++++++++ data_collector/transformations.py | 37 +++ package.xml | 38 +++ setup.cfg | 4 + setup.py | 27 +++ 12 files changed, 1083 insertions(+) create mode 100644 LICENSE create mode 100644 data_collector/__init__.py create mode 100644 data_collector/arduino-python.py create mode 100644 data_collector/config.json create mode 100644 data_collector/create_pkl.py create mode 100644 data_collector/joint_cartesian_converter.py create mode 100644 data_collector/kuka_data_recorder.py create mode 100644 data_collector/parse_robot_log.py create mode 100644 data_collector/transformations.py create mode 100644 package.xml create mode 100644 setup.cfg create mode 100644 setup.py diff --git 
a/LICENSE b/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/data_collector/__init__.py b/data_collector/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/data_collector/arduino-python.py b/data_collector/arduino-python.py new file mode 100644 index 00000000..9947c7db --- /dev/null +++ b/data_collector/arduino-python.py @@ -0,0 +1,41 @@ +###################### +import serial +import time +import subprocess +import signal +import sys + +# Use the correct serial port based on what you found, either '/dev/ttyACM0' or '/dev/ttyUSB0' +arduino = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=.1) + +def send_command(command): + arduino.write(command.encode()) # Send the command to the Arduino + time.sleep(0.05) # Small delay to ensure the command is sent properly + +def start_two_cameras_script(): + # Launch the two_cameras.py script + subprocess.Popen(["python", "/home/lm-2023/jeon_team_ws/control_kuka/lbr_fri_ws-dev-socket/data_collection/data_collection/two_cameras_live.py"]) + +def signal_handler(sig, frame): + print("Signal received, sending 'X' to microcontroller...") + send_command('X') # Send the 'X' command when the process is terminated + sys.exit(0) # Exit the program + +# Register the signal handler for termination signals +signal.signal(signal.SIGINT, signal_handler) # Handles Ctrl+C +signal.signal(signal.SIGTERM, signal_handler) # Handles termination signal + +# Example usage +while True: + command = input("Enter command (X: Press Both Buttons, Y: Release Both Buttons and Start two_cameras.py): ").strip().upper() + + if command == 'X': + send_command('X') # Press both buttonsy + + elif command == 'Y': + send_command('Y') # Release both buttons + #start_two_cameras_script() # Start the two_cameras.py script + + else: + print("Invalid command. 
Please enter X or Y.") + diff --git a/data_collector/config.json b/data_collector/config.json new file mode 100644 index 00000000..3c828d65 --- /dev/null +++ b/data_collector/config.json @@ -0,0 +1,17 @@ +{ + "log_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay", + "processed_csv_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay/processed", + "replay_csv_name": "2013-01-01_01-10-52.csv", + + "ACTION_SCALE": [0.02, 0.1, 1], + "TARGET_POSE": [ + 0.5906439143742067, + 0.07771711953459341, + 0.337835826958042, + 3.1099675, + 0.0146619, + -0.0078615 + ], + "REWARD_THRESHOLD": [0.01, 0.01, 0.01, 0.2, 0.2, 0.2] + +} \ No newline at end of file diff --git a/data_collector/create_pkl.py b/data_collector/create_pkl.py new file mode 100644 index 00000000..d383215b --- /dev/null +++ b/data_collector/create_pkl.py @@ -0,0 +1,231 @@ +''' +FORMAT OF THE CREATED PKL FILE: + The pkl file is a dictionary with the following keys: + dict( + observations=obs, + actions=actions, + next_observations=next_obs, + rewards=rew, + masks=1.0 - done, + dones=done, + ) + where + observations : { + "state": gym.spaces.Box(-np.inf, np.inf, shape=(7 + 6 + 3 + 3,)), + "wrist_1": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), + "wrist_2": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), + } + actions = gym.spaces.Box( + np.ones((7,), dtype=np.float32) * -1, + np.ones((7,), dtype=np.float32), + ) + The action space's last element is actually for the gripper, but we are not using it. 
+ It should be x,y,z,roll,pitch,yaw + state : concatenation of tcp_poses (7), tcp_velocities (6), tcp_forces (3), tcp_torques (3) + image names are in the format {timestamp}_A.png and {timestamp}_B.png +''' + + +import numpy as np +import pandas as pd +import pickle +import os +import json +from PIL import Image +from scipy.spatial.transform import Rotation as R +from transformations import construct_adjoint_matrix, construct_homogeneous_matrix + + + + +class SingleTrajectoryConverter: + def __init__(self, recorded_data_file_path, imageA_folder_path, imageB_folder_path, action_scale, target_pose, reward_threshold): + + self.recorded_data_file_path = recorded_data_file_path + self.imageA_folder_path = imageA_folder_path + self.imageB_folder_path = imageB_folder_path + self.action_scale = action_scale + self.target_pose = target_pose + self.reward_threshold = reward_threshold + + self.recorded_data = pd.read_csv(recorded_data_file_path) + + self.adjoint_matrix = None + self.T_r_o_inv = None + + self.transitions = [] + + def transform_observation(self, obs): + """ + Transform observations from spatial(base) frame into body(end-effector) frame + using the adjoint matrix + """ + adjoint_inv = np.linalg.inv(self.adjoint_matrix) + obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']] = adjoint_inv @ obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values + + T_b_o = construct_homogeneous_matrix(obs[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values) + T_b_r = self.T_r_o_inv @ T_b_o + + # Reconstruct transformed tcp_pose vector + p_b_r = T_b_r[:3, 3] + theta_b_r = R.from_matrix(T_b_r[:3, :3]).as_quat() + obs[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']] = np.concatenate((p_b_r, theta_b_r)) + + return obs + + def calculate_next_action(self, curr_pos, next_pos): + action = np.zeros(7) + action[:3] = (next_pos[:3] - curr_pos[:3]) / self.action_scale[0] + action[3:6] = (R.from_quat(next_pos[3:7]) * R.from_quat(curr_pos[3:7]).inv()).as_euler("xyz") / self.action_scale[1] + action[6] = 0.0 + return 
action + + def convertToPickle(self): + + observations = [] + actions = [] + next_observations = [] + rewards = [] + masks = [] + dones = [] + + done = 0.0 + + first_observation = self.recorded_data.iloc[0] + # Construct the adjoint matrix and T_r_o_inv using the first observation + # T_r_o_inv remains constant but the adjoint matrix updates every iteration + curr_pos = first_observation[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values + self.adjoint_matrix = construct_adjoint_matrix(curr_pos) + self.T_r_o_inv = np.linalg.inv(construct_homogeneous_matrix(curr_pos)) + next_transformed_obs = self.transform_observation(first_observation) + + + for i in range(1, len(self.recorded_data) - 1): + + current_row = self.recorded_data.iloc[i-1] + next_row = self.recorded_data.iloc[i] + + curr_time = current_row["timestamp"] + next_time = next_row["timestamp"] + + img_size = (128, 128) + + curr_wrist_1_image = Image.open(os.path.join(self.imageA_folder_path, f"{curr_time}_A.jpg")).resize(img_size) + curr_wrist_2_image = Image.open(os.path.join(self.imageB_folder_path, f"{curr_time}_B.jpg")).resize(img_size) + + next_wrist_1_image = Image.open(os.path.join(self.imageA_folder_path, f"{next_time}_A.jpg")).resize(img_size) + next_wrist_2_image = Image.open(os.path.join(self.imageB_folder_path, f"{next_time}_B.jpg")).resize(img_size) + + curr_obs_no_transform_pose = current_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values + next_obs_no_transform_pose = next_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values + action = self.calculate_next_action(curr_obs_no_transform_pose, next_obs_no_transform_pose) + + self.adjoint_matrix = construct_adjoint_matrix(curr_obs_no_transform_pose) + + curr_transformed_obs = next_transformed_obs + next_transformed_obs = self.transform_observation(next_row) + + curr_euler_angles = R.from_quat(curr_transformed_obs[['P4', 'P5', 'P6', 'P7']].values).as_euler('xyz') + next_euler_angles = R.from_quat(next_transformed_obs[['P4', 'P5', 'P6', 
'P7']].values).as_euler('xyz') + + observation = { + "state": np.concatenate([ + curr_transformed_obs[['P1', 'P2', 'P3']].values, + curr_euler_angles, + curr_transformed_obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, + curr_transformed_obs[['Fx', 'Fy', 'Fz']].values, + curr_transformed_obs[['Tx', 'Ty', 'Tz']].values + ]), + "wrist_1": np.array(curr_wrist_1_image), + "wrist_2": np.array(curr_wrist_2_image) + } + + next_observation = { + "state": np.concatenate([ + next_transformed_obs[['P1', 'P2', 'P3']].values, + next_euler_angles, + next_transformed_obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, + next_transformed_obs[['Fx', 'Fy', 'Fz']].values, + next_transformed_obs[['Tx', 'Ty', 'Tz']].values + ]), + "wrist_1": np.array(next_wrist_1_image), + "wrist_2": np.array(next_wrist_2_image) + } + + if done != 1.0: + tcp_pose = current_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values + tcp_pose[3:] = np.abs(tcp_pose[3:]) + if np.all(tcp_pose - self.target_pose <= self.reward_threshold): + reward = 1.0 + done = 1.0 + else: + reward = 0.0 + else: + reward = 0.0 + + + data_dict = { + "observations": observation, + "actions": action, + "next_observations": next_observation, + "rewards": reward, + "masks": 1.0 - done, + "dones": done + } + + self.transitions.append(data_dict) + + # observations.append(observation) + # actions.append(action) + # next_observations.append(next_observation) + # rewards.append(reward) + # masks.append(1.0 - done) + # dones.append(done) + + curr_wrist_1_image.close() + curr_wrist_2_image.close() + next_wrist_1_image.close() + next_wrist_2_image.close() + + return self.transitions + +def main(): + + with open('config.json', 'r') as config_file: + config = json.load(config_file) + processed_folder_path = config['processed_csv_folder_path'] + output_pkl_file_path = os.path.join(processed_folder_path, 'recorded_data.pkl') + + action_scale = np.array(config['ACTION_SCALE']) + target_pose = np.array(config['TARGET_POSE']) + reward_threshold = 
np.array(config['REWARD_THRESHOLD']) + + + all_transitions = [] + + for folder_name in os.listdir(processed_folder_path): + folder_path = os.path.join(processed_folder_path, folder_name) + if os.path.isdir(folder_path): + recorded_data_file_path = os.path.join(folder_path, 'recorded_data.csv') + imageA_folder_path = os.path.join(folder_path, 'imageA') + imageB_folder_path = os.path.join(folder_path, 'imageB') + if os.path.exists(recorded_data_file_path) and os.path.exists(imageA_folder_path) and os.path.exists(imageB_folder_path): + singleTrajectoryConverter = SingleTrajectoryConverter( + recorded_data_file_path, + imageA_folder_path, + imageB_folder_path, + action_scale, + target_pose, + reward_threshold) + transitions = singleTrajectoryConverter.convertToPickle() + all_transitions.extend(transitions) + + with open(output_pkl_file_path, 'wb') as f: + pickle.dump(all_transitions, f) + + print(f"Data saved to {output_pkl_file_path}") + + +if __name__ == "__main__": + main() + diff --git a/data_collector/joint_cartesian_converter.py b/data_collector/joint_cartesian_converter.py new file mode 100644 index 00000000..72d8c3d2 --- /dev/null +++ b/data_collector/joint_cartesian_converter.py @@ -0,0 +1,127 @@ +import time + +import rclpy +import rclpy.duration +from rclpy.node import Node +from rclpy.callback_groups import MutuallyExclusiveCallbackGroup + +import numpy as np +import pandas as pd +import json +import os +from scipy.spatial.transform import Rotation as R + +from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped +import rclpy.task +from sensor_msgs.msg import JointState + +from moveit_msgs.msg import ( + RobotState, + MoveItErrorCodes, +) +from moveit_msgs.srv import GetPositionFK + + + +class JointCartesianConverter(Node): + + move_group_name_ = "arm" + namespace_ = "lbr" + + joint_state_topic_ = "joint_states" + lbr_state_topic_ = "state" + pose_topic_ = "pose" + plan_srv_name_ = "plan_kinematic_path" + ik_srv_name_ = "compute_ik" + fk_srv_name_ 
= "compute_fk" + execute_action_name_ = "execute_trajectory" + fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" + + base_ = "link_0" + end_effector_ = "link_ee" + + def __init__(self, joint_data_file_path): + super().__init__("joint_cartesian_converter") + + self.joint_data_file_path = joint_data_file_path + self.fk_client_callback = MutuallyExclusiveCallbackGroup() + self.fk_client_ = self.create_client(GetPositionFK, f"{self.namespace_}/{self.fk_srv_name_}", callback_group=self.fk_client_callback) + if not self.fk_client_.wait_for_service(timeout_sec=5.0): + self.get_logger().error("FK service not available.") + exit(1) + + self.joint_data = pd.read_csv(self.joint_data_file_path) + + def quat_2_euler(self, quat): + """calculates and returns: yaw, pitch, roll from given quaternion""" + return R.from_quat(quat).as_euler("xyz") + + def convert(self): + + # Convert each row in the dataframe to cartesian coordinates and save in the dataframe + for index, row in self.joint_data.iterrows(): + + joint_state = JointState() + joint_state.name = ["A1", "A2", "A4", "A3", "A5", "A6", "A7"] + joint_state.position = [row["P1"], row["P2"], row["P3"], row["P4"], row["P5"], row["P6"], row["P7"]] + joint_state.velocity = [row["V1"], row["V2"], row["V3"], row["V4"], row["V5"], row["V6"], row["V7"]] + joint_state.effort = [row["E1"], row["E2"], row["E3"], row["E4"], row["E5"], row["E6"], row["E7"]] + joint_state.header.stamp = self.get_clock().now().to_msg() + joint_state.header.frame_id = f"{self.namespace_}/{self.base_}" + current_robot_state = RobotState() + current_robot_state.joint_state = joint_state + + request = GetPositionFK.Request() + + request.header.frame_id = f"{self.namespace_}/{self.base_}" + request.header.stamp = self.get_clock().now().to_msg() + + request.fk_link_names.append(self.end_effector_) + request.robot_state = current_robot_state + + future = self.fk_client_.call_async(request) + + rclpy.spin_until_future_complete(self, 
future) + if future.result() is None: + self.get_logger().error("Failed to get FK solution") + return None + + response = future.result() + if response.error_code.val != MoveItErrorCodes.SUCCESS: + self.get_logger().error( + f"Failed to get FK solution: {response.error_code.val}" + ) + return None + + pose = response.pose_stamped[0].pose + euler_pose = self.quat_2_euler(np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])) + self.joint_data.at[index, "X"] = pose.position.x + self.joint_data.at[index, "Y"] = pose.position.y + self.joint_data.at[index, "Z"] = pose.position.z + self.joint_data.at[index, "QX"] = pose.orientation.x + self.joint_data.at[index, "QY"] = pose.orientation.y + self.joint_data.at[index, "QZ"] = pose.orientation.z + self.joint_data.at[index, "QW"] = pose.orientation.w + self.joint_data.at[index, "Roll"] = euler_pose[0] + self.joint_data.at[index, "Pitch"] = euler_pose[1] + self.joint_data.at[index, "Yaw"] = euler_pose[2] + + # Compute the end_effector velocity + + self.joint_data.to_csv(self.joint_data_file_path, index=False) + +def main(args=None): + rclpy.init(args=args) + + with open('config.json', 'r') as config_file: + config = json.load(config_file) + + processed_folder_path = config['processed_csv_folder_path'] + csv_name = config['replay_csv_name'] + csv_folder = os.path.join(processed_folder_path, os.path.splitext(csv_folder)[0]) + joint_data_file_path = os.path.join(csv_folder, 'recorded_data.csv') + + joint_cartesian_converter = JointCartesianConverter(joint_data_file_path) + joint_cartesian_converter.convert() + + rclpy.shutdown() \ No newline at end of file diff --git a/data_collector/kuka_data_recorder.py b/data_collector/kuka_data_recorder.py new file mode 100644 index 00000000..d8e560ca --- /dev/null +++ b/data_collector/kuka_data_recorder.py @@ -0,0 +1,249 @@ +import rclpy +import os +import pandas as pd +import csv +import math +import json +from cv_bridge import CvBridge +import cv2 +from 
rclpy.node import Node +from rclpy.executors import MultiThreadedExecutor +from rclpy.action import ActionClient +from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint +from control_msgs.action import FollowJointTrajectory +from sensor_msgs.msg import Image, JointState +from geometry_msgs.msg import WrenchStamped +from message_filters import Subscriber, ApproximateTimeSynchronizer + +class KukaDataRecorder(Node): + def __init__(self, data_folder, joint_states_file): + super().__init__("kuka_data_recorder") + + self.data_folder = data_folder + self.joint_states_file = joint_states_file + self.save_data_folder = os.path.join(self.data_folder, os.path.splitext(self.joint_states_file)[0]) + os.makedirs(self.save_data_folder, exist_ok=True) + # Create two folders for the images + self.imageA_folder = os.path.join(self.save_data_folder, "imageA") + self.imageB_folder = os.path.join(self.save_data_folder, "imageB") + os.makedirs(self.imageA_folder, exist_ok=True) + os.makedirs(self.imageB_folder, exist_ok=True) + + # topic_callback_group = MutuallyExclusiveCallbackGroup() + # client_callback_group = MutuallyExclusiveCallbackGroup() + + # Initialize everything for trajectory replaying + self.feedback = None + self._action_client = ActionClient(self, FollowJointTrajectory, '/lbr/joint_trajectory_controller/follow_joint_trajectory') + self.joint_names = ["A1", "A2", "A3", "A4", "A5", "A6", "A7"] + self.joint_trajectories = self.readJoinStatesFromCsv(os.path.join(self.data_folder, "processed", self.joint_states_file)) + # This can be done through goal sending + self.initializeRobotPose() + + # Create a synchronized callback for all the information + self.imageA_topic = "/camera1/camera1/color/image_raw" + self.imageB_topic = "/camera2/camera2/color/image_raw" + self.joint_state_topic = "/lbr/joint_states" + self.wrench_topic = "/lbr/force_torque_broadcaster/wrench" + + self.imageA_sub = Subscriber(self, Image, self.imageA_topic) + self.imageB_sub = 
Subscriber(self, Image, self.imageB_topic) + self.joint_state_sub = Subscriber(self, JointState, self.joint_state_topic) + self.wrench_sub = Subscriber(self, WrenchStamped, self.wrench_topic) + + self.synchronizer = ApproximateTimeSynchronizer( + [self.imageA_sub, self.imageB_sub, self.joint_state_sub, self.wrench_sub], + queue_size=100, + slop=0.01, + ) + + self.cvbridge = CvBridge() + + self.synchronizer.registerCallback(self.recordSyncData) + self.get_logger().info("Data Synchronizer Node Initialized") + + # self.dataframe = pd.DataFrame(columns=[ + # 'timestamp', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', + # 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', + # 'E1', 'E2', 'E3', 'E4', 'E5', 'E6', 'E7', 'Fx', 'Fy', 'Fz', 'Tx', 'Ty', 'Tz' + # ]) + self.dataframe = [] + + # This needs to be done through moveit planner since the points need to be interpolated to a trajectory + self.executeTrajectory() + + + def recordSyncData(self, imageA_msg, imageB_msg, joint_state_msg, wrench_msg): + # The timestamp of imageA_msg will be used to name the columns + timestamp = f"{imageA_msg.header.stamp.sec}_{imageA_msg.header.stamp.nanosec}" + + self.get_logger().info(f"Recording data at timestamp {timestamp}") + + # Save the joint state + data_row = {'timestamp': timestamp} + + for i in range(7): + # imp imp: Remember that kuka publishes A1, A2, A4, A3, A5, A6, A7 (NOTE THE ORDER) + j = i + if i == 3: + j = 4 + if i == 4: + j = 3 + data_row[f'P{j+1}'] = joint_state_msg.position[i] + data_row[f'V{j+1}'] = joint_state_msg.velocity[i] + data_row[f'E{j+1}'] = joint_state_msg.effort[i] + + # Save the wrench + data_row['Fx'] = wrench_msg.wrench.force.x + data_row['Fy'] = wrench_msg.wrench.force.y + data_row['Fz'] = wrench_msg.wrench.force.z + data_row['Tx'] = wrench_msg.wrench.torque.x + data_row['Ty'] = wrench_msg.wrench.torque.y + data_row['Tz'] = wrench_msg.wrench.torque.z + + self.dataframe.append(data_row) + + # Save the images + imageA_filename = os.path.join(self.imageA_folder, 
f"{timestamp}_A.jpg") + imageB_filename = os.path.join(self.imageB_folder, f"{timestamp}_B.jpg") + image_A = self.cvbridge.imgmsg_to_cv2(imageA_msg, desired_encoding='bgr8') + cv2.imwrite(imageA_filename, image_A) + image_B = self.cvbridge.imgmsg_to_cv2(imageB_msg, desired_encoding='bgr8') + cv2.imwrite(imageB_filename, image_B) + + + def readJoinStatesFromCsv(self, file_path): + joint_states = [] + with open(file_path, mode='r') as file: + csv_reader = csv.DictReader(file) + for row in csv_reader: + joint_states.append([math.radians(float(row['A1'])), math.radians(float(row['A2'])), math.radians(float(row['A3'])), + math.radians(float(row['A4'])), math.radians(float(row['A5'])), math.radians(float(row['A6'])), math.radians(float(row['A7']))]) + print(joint_states[0]) + return joint_states + + def initializeRobotPose(self): + # The robot needs to be initialized to the first position of the trajectory + # to ensure that there is no sudden movement when the trajectory is replayed + goal_msg = FollowJointTrajectory.Goal() + trajectory_msg = JointTrajectory() + trajectory_msg.joint_names = self.joint_names + + self.get_logger().info(f"Processing trajectory point init") + + point = JointTrajectoryPoint() + point.positions = self.joint_trajectories[0] + point.time_from_start.sec = 4 # Set the seconds part to 0 + + trajectory_msg.points.append(point) + goal_msg.trajectory = trajectory_msg + self._action_client.wait_for_server() + + send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback) + + rclpy.spin_until_future_complete(self, send_goal_future) + + goal_handle = send_goal_future.result() + + if not goal_handle.accepted: + self.get_logger().info(f"Goal for init point was rejected") + return + + self.get_logger().info(f"Goal for init point was accepted") + get_result_future = goal_handle.get_result_async() + rclpy.spin_until_future_complete(self, get_result_future) + result = get_result_future.result().result + 
self.get_logger().info(f"Result : {result}, Initial position reached") + print("Press Enter to continue ...") + input() + + def executeTrajectory(self): + goal_msg = FollowJointTrajectory.Goal() + trajectory_msg = JointTrajectory() + trajectory_msg.joint_names = self.joint_names + for i, joint_values in enumerate(self.joint_trajectories): + point = JointTrajectoryPoint() + point.positions = joint_values + point.time_from_start = rclpy.duration.Duration(seconds=(i+1) * 0.1).to_msg() + trajectory_msg.points.append(point) + goal_msg.trajectory = trajectory_msg + self._action_client.wait_for_server() + + send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback) + + # rclpy.spin_until_future_complete(self, send_goal_future) + + # goal_handle = send_goal_future.result() + + # if not goal_handle.accepted: + # self.get_logger().info(f"Goal for trajectory was rejected") + # return + + # self.get_logger().info(f"Goal for trajectory was accepted") + # get_result_future = goal_handle.get_result_async() + # rclpy.spin_until_future_complete(self, get_result_future) + # result = get_result_future.result().result + # self.get_logger().info(f"Result : {result}, Trajectory executed?") + + send_goal_future.add_done_callback(self.goal_response_callback) + + def goal_response_callback(self, future): + goal_handle = future.result() + if not goal_handle.accepted: + self.get_logger().info(f"Goal for trajectory was rejected") + return + self.get_logger().info(f"Goal for trajectory was accepted") + get_result_future = goal_handle.get_result_async() + get_result_future.add_done_callback(self.result_callback) + + def result_callback(self, future): + result = future.result() + if result.result.error_code == FollowJointTrajectory.Result.SUCCESSFUL: + self.get_logger().info("WOHOOO") + self.shutdown_node() + else: + self.get_logger().info("OHH NO") + + def feedback_callback(self, feedback_msg): + self.feedback = feedback_msg.feedback + + def 
shutdown_node(self): + self.get_logger().info("Shutting down node now") + self.save_data() + self.destroy_node() + rclpy.shutdown() + + def save_data(self): + df = pd.DataFrame(self.dataframe) + save_path = os.path.join(self.save_data_folder, 'recorded_data.csv') + df.to_csv(save_path, index=False) + self.get_logger().info("Saved data!") + +def main(args=None): + + with open('config.json', 'r') as config_file: + config = json.load(config_file) + + data_folder = config['log_folder_path'] + joint_states_file = config['replay_csv_name'] + rclpy.init(args=args) + executor = MultiThreadedExecutor() + node = KukaDataRecorder(data_folder=data_folder, joint_states_file=joint_states_file) + executor.add_node(node) + try: + + executor.spin() + except Exception as e: + node.get_logger().error(f"An error occurred: {e}") + finally: + # if hasattr(node, 'dataframe'): + # df = pd.DataFrame(node.dataframe) + # save_path = os.path.join(node.data_folder, 'recorded_data.csv') + # df.to_csv(save_path, index=False) + # node.get_logger().info(f"Recorded data saved!") + if rclpy.ok(): + node.destroy_node() + rclpy.shutdown() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/data_collector/parse_robot_log.py b/data_collector/parse_robot_log.py new file mode 100644 index 00000000..2b11adcd --- /dev/null +++ b/data_collector/parse_robot_log.py @@ -0,0 +1,110 @@ +import pandas as pd +import json +import re +import os +import glob + +def parse_robot_data(file_path): + """ + Parses the robot data file, renames joint columns to J1-J7, + extracts and renames time, force, and torque values. + + Parameters: + file_path (str): Path to the data file. + + Returns: + pd.DataFrame: A DataFrame containing time, renamed joint values, + and renamed force/torque data. 
+ """ + # Step 1: Read the header line starting with '%' + with open(file_path, 'r') as file: + for line in file: + if line.startswith('%'): + header = line.strip().lstrip('%').strip().split() + break + else: + raise ValueError("No header line starting with '%' found in the file.") + + # Step 2: Load the data into a DataFrame + df = pd.read_csv( + file_path, + delim_whitespace=True, # Assuming the data is space-separated + comment='%', # Skip any lines starting with '%' + names=header, # Use the extracted header + skiprows=1 # Skip the header line + ) + + # Step 3: Identify and rename joint columns + joint_pattern = re.compile(r'axisQMsr_LBR_iiwa_14_R820_1\[(\d+)\]') + joint_columns = [col for col in header if joint_pattern.match(col)] + + if len(joint_columns) != 7: + raise ValueError(f"Expected 7 joint columns, found {len(joint_columns)}.") + + # Sort joint columns based on their index and rename them to J1-J7 + joint_columns_sorted = sorted( + joint_columns, + key=lambda x: int(joint_pattern.match(x).group(1)) + ) + joint_rename_map = {col: f'A{idx+1}' for idx, col in enumerate(joint_columns_sorted)} + df.rename(columns=joint_rename_map, inplace=True) + + # Step 4: Extract and combine time columns + if 'ZeitInSec' not in df.columns or 'ZeitInNanoSec' not in df.columns: + raise ValueError("Required time columns 'ZeitInSec' and/or 'ZeitInNanoSec' are missing.") + + # Combine 'ZeitInSec' and 'ZeitInNanoSec' into a single 'time' column in seconds + df['time'] = df['ZeitInSec'] + df['ZeitInNanoSec'] * 1e-9 + + # Drop the original time columns as they are now combined + df.drop(columns=['ZeitInSec', 'ZeitInNanoSec'], inplace=True) + + # Step 5: Extract and rename force and torque columns + force_columns = { + 'cartForce1_X': 'Fx', + 'cartForce1_Y': 'Fy', + 'cartForce1_Z': 'Fz' + } + torque_columns = { + 'cartTorque1_TauX': 'Tx', + 'cartTorque1_TauY': 'Ty', + 'cartTorque1_TauZ': 'Tz' + } + + # Verify that the required force and torque columns exist + 
missing_force_columns = [col for col in force_columns.keys() if col not in df.columns] + missing_torque_columns = [col for col in torque_columns.keys() if col not in df.columns] + missing_columns = missing_force_columns + missing_torque_columns + if missing_columns: + raise ValueError(f"The following required columns are missing in the data: {missing_columns}") + + # Rename force and torque columns + df.rename(columns=force_columns, inplace=True) + df.rename(columns=torque_columns, inplace=True) + + # Step 6: Select the relevant columns + selected_columns = ['time'] + list(joint_rename_map.values()) + list(force_columns.values()) + list(torque_columns.values()) + df_selected = df[selected_columns] + + return df_selected + + +def main(): + + with open('config.json', 'r') as config_file: + config = json.load(config_file) + + log_folder_path = config['log_folder_path'] + save_df_path = config['processed_csv_folder_path'] + + os.makedirs(save_df_path, exist_ok=True) + + log_files = glob.glob(os.path.join(log_folder_path, "*.log")) + + for file_path in log_files: + returned_df = parse_robot_data(file_path=file_path) + save_file_path = os.path.join(save_df_path, os.path.splitext(os.path.basename(file_path))[0] + '.csv') + returned_df.to_csv(save_file_path, index=False) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/data_collector/transformations.py b/data_collector/transformations.py new file mode 100644 index 00000000..52a55527 --- /dev/null +++ b/data_collector/transformations.py @@ -0,0 +1,37 @@ +from scipy.spatial.transform import Rotation as R +import numpy as np + + +def construct_adjoint_matrix(tcp_pose): + """ + Construct the adjoint matrix for a spatial velocity vector + :args: tcp_pose: (x, y, z, qx, qy, qz, qw) + """ + rotation = R.from_quat(tcp_pose[3:]).as_matrix() + translation = np.array(tcp_pose[:3]) + skew_matrix = np.array( + [ + [0, -translation[2], translation[1]], + [translation[2], 0, -translation[0]], + 
[-translation[1], translation[0], 0], + ] + ) + adjoint_matrix = np.zeros((6, 6)) + adjoint_matrix[:3, :3] = rotation + adjoint_matrix[3:, 3:] = rotation + adjoint_matrix[3:, :3] = skew_matrix @ rotation + return adjoint_matrix + + +def construct_homogeneous_matrix(tcp_pose): + """ + Construct the homogeneous transformation matrix from given pose. + args: tcp_pose: (x, y, z, qx, qy, qz, qw) + """ + rotation = R.from_quat(tcp_pose[3:]).as_matrix() + translation = np.array(tcp_pose[:3]) + T = np.zeros((4, 4)) + T[:3, :3] = rotation + T[:3, 3] = translation + T[3, 3] = 1 + return T diff --git a/package.xml b/package.xml new file mode 100644 index 00000000..494d0fa0 --- /dev/null +++ b/package.xml @@ -0,0 +1,38 @@ + + + + data_collector + 0.0.0 + TODO: Package description + nisara + Apache-2.0 + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + rclpy + sensor_msgs + trajectory_msgs + control_msgs + geometry_msgs + message_filters + numpy + pandas + cv2 + cv_bridge + + + + ament_python + + diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..f0a7afc8 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/data_collector +[install] +install_scripts=$base/lib/data_collector diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..43107e8c --- /dev/null +++ b/setup.py @@ -0,0 +1,27 @@ +from setuptools import find_packages, setup + +package_name = 'data_collector' + +setup( + name=package_name, + version='0.0.0', + packages=find_packages(exclude=['test']), + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ], + install_requires=['setuptools', 'pandas', 'numpy'], + zip_safe=True, + maintainer='nisara', + maintainer_email='sarawgi.nikita@gmail.com', + description='TODO: Package description', + license='Apache-2.0', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'kuka_data_recorder = 
data_collector.kuka_data_recorder:main', + 'joint_cartesian_converter = data_collector.joint_cartesian_converter:main' + ], + }, +) From 13f087d1c885d33decdc2df543c7d7b462e52a3d Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Tue, 4 Mar 2025 17:53:58 -0800 Subject: [PATCH 10/24] Updating lbr FRI submodule to a new one --- lbr_fri_ros2_stack | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lbr_fri_ros2_stack b/lbr_fri_ros2_stack index cca24ae8..8d550710 160000 --- a/lbr_fri_ros2_stack +++ b/lbr_fri_ros2_stack @@ -1 +1 @@ -Subproject commit cca24ae8a17d5644a414380d7ac420d37aec6686 +Subproject commit 8d550710146511d7aa54f3fcc38b981d4a3d369f From 8a07d89336b4619118d7b296e2d0fe715e6698ad Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Tue, 4 Mar 2025 17:59:17 -0800 Subject: [PATCH 11/24] updating lbr_fri_ros2_stack submodule in gitmodules --- .gitmodules | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 2cbdf96b..9176dcd1 100644 --- a/.gitmodules +++ b/.gitmodules @@ -6,4 +6,5 @@ url = https://github.com/lbr-stack/lbr_fri_idl.git [submodule "lbr_fri_ros2_stack"] path = lbr_fri_ros2_stack - url = https://github.com/lbr-stack/lbr_fri_ros2_stack.git + url = https://github.com/RROS-Lab/serl_lbr_fri_ros2_stack.git + branch=serl-kuka From e0a207fec4be5f3b3a2cecc73fa4819969db51c4 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Wed, 5 Mar 2025 13:39:40 -0800 Subject: [PATCH 12/24] Updating submodule for fri_idl to rros-lab --- lbr_fri_idl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lbr_fri_idl b/lbr_fri_idl index e422bff2..7e32dc9d 160000 --- a/lbr_fri_idl +++ b/lbr_fri_idl @@ -1 +1 @@ -Subproject commit e422bff2b3d70ec47853145b35645980df623a60 +Subproject commit 7e32dc9d100e9ef7b675f79aaea1ad74f30cc426 From e34521f8c7fcb816e24752736edf1135a19d3f39 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Wed, 5 Mar 2025 13:42:01 -0800 Subject: [PATCH 13/24] Updating .gitmodules for 
lbr_fri_idl --- .gitmodules | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 9176dcd1..fe76ae72 100644 --- a/.gitmodules +++ b/.gitmodules @@ -3,7 +3,8 @@ url = git@github.com:lbr-stack/fri.git [submodule "lbr_fri_idl"] path = lbr_fri_idl - url = https://github.com/lbr-stack/lbr_fri_idl.git + url = https://github.com/RROS-Lab/serl_lbr_fri_idl.git + branch=serl-kuka [submodule "lbr_fri_ros2_stack"] path = lbr_fri_ros2_stack url = https://github.com/RROS-Lab/serl_lbr_fri_ros2_stack.git From 2ef56c77d520d9f0bf0f87f26e64eb7cecdb649c Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Wed, 5 Mar 2025 14:23:54 -0800 Subject: [PATCH 14/24] remove broken subtree reference --- data_collector/LICENSE | 202 -------------- data_collector/data_collector/__init__.py | 0 .../data_collector/arduino-python.py | 41 --- data_collector/data_collector/config.json | 17 -- data_collector/data_collector/create_pkl.py | 231 ---------------- .../joint_cartesian_converter.py | 127 --------- .../data_collector/kuka_data_recorder.py | 249 ------------------ .../data_collector/parse_robot_log.py | 110 -------- .../data_collector/transformations.py | 37 --- data_collector/package.xml | 38 --- data_collector/setup.cfg | 4 - data_collector/setup.py | 27 -- 12 files changed, 1083 deletions(-) delete mode 100644 data_collector/LICENSE delete mode 100644 data_collector/data_collector/__init__.py delete mode 100644 data_collector/data_collector/arduino-python.py delete mode 100644 data_collector/data_collector/config.json delete mode 100644 data_collector/data_collector/create_pkl.py delete mode 100644 data_collector/data_collector/joint_cartesian_converter.py delete mode 100644 data_collector/data_collector/kuka_data_recorder.py delete mode 100644 data_collector/data_collector/parse_robot_log.py delete mode 100644 data_collector/data_collector/transformations.py delete mode 100644 data_collector/package.xml delete mode 100644 data_collector/setup.cfg 
delete mode 100644 data_collector/setup.py diff --git a/data_collector/LICENSE b/data_collector/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/data_collector/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/data_collector/data_collector/__init__.py b/data_collector/data_collector/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/data_collector/data_collector/arduino-python.py b/data_collector/data_collector/arduino-python.py deleted file mode 100644 index 9947c7db..00000000 --- a/data_collector/data_collector/arduino-python.py +++ /dev/null @@ -1,41 +0,0 @@ -###################### -import serial -import time -import subprocess -import signal -import sys - -# Use the correct serial port based on what you found, either '/dev/ttyACM0' or '/dev/ttyUSB0' -arduino = serial.Serial(port='/dev/ttyACM0', baudrate=9600, timeout=.1) - -def send_command(command): - arduino.write(command.encode()) # Send the command to the Arduino - time.sleep(0.05) # Small delay to ensure the command is sent properly - -def start_two_cameras_script(): - # Launch the two_cameras.py script - subprocess.Popen(["python", "/home/lm-2023/jeon_team_ws/control_kuka/lbr_fri_ws-dev-socket/data_collection/data_collection/two_cameras_live.py"]) - -def signal_handler(sig, frame): - print("Signal received, sending 'X' to microcontroller...") - send_command('X') # Send the 'X' command when the process is terminated - sys.exit(0) # Exit the program - -# Register the signal handler for termination signals -signal.signal(signal.SIGINT, signal_handler) # Handles Ctrl+C -signal.signal(signal.SIGTERM, signal_handler) # Handles termination signal - -# Example usage -while True: - command = input("Enter command (X: Press Both Buttons, Y: Release Both Buttons and Start two_cameras.py): ").strip().upper() - - if command == 'X': - send_command('X') # Press both buttonsy - - elif command == 'Y': - send_command('Y') # Release both buttons - #start_two_cameras_script() # Start the two_cameras.py script - - else: - print("Invalid command. 
Please enter X or Y.") - diff --git a/data_collector/data_collector/config.json b/data_collector/data_collector/config.json deleted file mode 100644 index 3c828d65..00000000 --- a/data_collector/data_collector/config.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "log_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay", - "processed_csv_folder_path": "/home/omey/nisara/expert_data_collector/processed_replay/processed_replay/processed", - "replay_csv_name": "2013-01-01_01-10-52.csv", - - "ACTION_SCALE": [0.02, 0.1, 1], - "TARGET_POSE": [ - 0.5906439143742067, - 0.07771711953459341, - 0.337835826958042, - 3.1099675, - 0.0146619, - -0.0078615 - ], - "REWARD_THRESHOLD": [0.01, 0.01, 0.01, 0.2, 0.2, 0.2] - -} \ No newline at end of file diff --git a/data_collector/data_collector/create_pkl.py b/data_collector/data_collector/create_pkl.py deleted file mode 100644 index d383215b..00000000 --- a/data_collector/data_collector/create_pkl.py +++ /dev/null @@ -1,231 +0,0 @@ -''' -FORMAT OF THE CREATED PKL FILE: - The pkl file is a dictionary with the following keys: - dict( - observations=obs, - actions=actions, - next_observations=next_obs, - rewards=rew, - masks=1.0 - done, - dones=done, - ) - where - observations : { - "state": gym.spaces.Box(-np.inf, np.inf, shape=(7 + 6 + 3 + 3,)), - "wrist_1": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), - "wrist_2": gym.spaces.Box(0, 255, shape=(128, 128, 3), dtype=np.uint8), - } - actions = gym.spaces.Box( - np.ones((7,), dtype=np.float32) * -1, - np.ones((7,), dtype=np.float32), - ) - The action space's last element is actually for the gripper, but we are not using it. 
- It should be x,y,z,roll,pitch,yaw - state : concatenation of tcp_poses (7), tcp_velocities (6), tcp_forces (3), tcp_torques (3) - image names are in the format {timestamp}_A.png and {timestamp}_B.png -''' - - -import numpy as np -import pandas as pd -import pickle -import os -import json -from PIL import Image -from scipy.spatial.transform import Rotation as R -from transformations import construct_adjoint_matrix, construct_homogeneous_matrix - - - - -class SingleTrajectoryConverter: - def __init__(self, recorded_data_file_path, imageA_folder_path, imageB_folder_path, action_scale, target_pose, reward_threshold): - - self.recorded_data_file_path = recorded_data_file_path - self.imageA_folder_path = imageA_folder_path - self.imageB_folder_path = imageB_folder_path - self.action_scale = action_scale - self.target_pose = target_pose - self.reward_threshold = reward_threshold - - self.recorded_data = pd.read_csv(recorded_data_file_path) - - self.adjoint_matrix = None - self.T_r_o_inv = None - - self.transitions = [] - - def transform_observation(self, obs): - """ - Transform observations from spatial(base) frame into body(end-effector) frame - using the adjoint matrix - """ - adjoint_inv = np.linalg.inv(self.adjoint_matrix) - obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']] = adjoint_inv @ obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values - - T_b_o = construct_homogeneous_matrix(obs[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values) - T_b_r = self.T_r_o_inv @ T_b_o - - # Reconstruct transformed tcp_pose vector - p_b_r = T_b_r[:3, 3] - theta_b_r = R.from_matrix(T_b_r[:3, :3]).as_quat() - obs[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']] = np.concatenate((p_b_r, theta_b_r)) - - return obs - - def calculate_next_action(self, curr_pos, next_pos): - action = np.zeros(7) - action[:3] = (next_pos[:3] - curr_pos[:3]) / self.action_scale[0] - action[3:6] = (R.from_quat(next_pos[3:7]) * R.from_quat(curr_pos[3:7]).inv()).as_euler("xyz") / self.action_scale[1] - action[6] = 0.0 - return 
action - - def convertToPickle(self): - - observations = [] - actions = [] - next_observations = [] - rewards = [] - masks = [] - dones = [] - - done = 0.0 - - first_observation = self.recorded_data.iloc[0] - # Construct the adjoint matrix and T_r_o_inv using the first observation - # T_r_o_inv remains constant but the adjoint matrix updates every iteration - curr_pos = first_observation[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - self.adjoint_matrix = construct_adjoint_matrix(curr_pos) - self.T_r_o_inv = np.linalg.inv(construct_homogeneous_matrix(curr_pos)) - next_transformed_obs = self.transform_observation(first_observation) - - - for i in range(1, len(self.recorded_data) - 1): - - current_row = self.recorded_data.iloc[i-1] - next_row = self.recorded_data.iloc[i] - - curr_time = current_row["timestamp"] - next_time = next_row["timestamp"] - - img_size = (128, 128) - - curr_wrist_1_image = Image.open(os.path.join(self.imageA_folder_path, f"{curr_time}_A.jpg")).resize(img_size) - curr_wrist_2_image = Image.open(os.path.join(self.imageB_folder_path, f"{curr_time}_B.jpg")).resize(img_size) - - next_wrist_1_image = Image.open(os.path.join(self.imageA_folder_path, f"{next_time}_A.jpg")).resize(img_size) - next_wrist_2_image = Image.open(os.path.join(self.imageB_folder_path, f"{next_time}_B.jpg")).resize(img_size) - - curr_obs_no_transform_pose = current_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - next_obs_no_transform_pose = next_row[['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']].values - action = self.calculate_next_action(curr_obs_no_transform_pose, next_obs_no_transform_pose) - - self.adjoint_matrix = construct_adjoint_matrix(curr_obs_no_transform_pose) - - curr_transformed_obs = next_transformed_obs - next_transformed_obs = self.transform_observation(next_row) - - curr_euler_angles = R.from_quat(curr_transformed_obs[['P4', 'P5', 'P6', 'P7']].values).as_euler('xyz') - next_euler_angles = R.from_quat(next_transformed_obs[['P4', 'P5', 'P6', 
'P7']].values).as_euler('xyz') - - observation = { - "state": np.concatenate([ - curr_transformed_obs[['P1', 'P2', 'P3']].values, - curr_euler_angles, - curr_transformed_obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, - curr_transformed_obs[['Fx', 'Fy', 'Fz']].values, - curr_transformed_obs[['Tx', 'Ty', 'Tz']].values - ]), - "wrist_1": np.array(curr_wrist_1_image), - "wrist_2": np.array(curr_wrist_2_image) - } - - next_observation = { - "state": np.concatenate([ - next_transformed_obs[['P1', 'P2', 'P3']].values, - next_euler_angles, - next_transformed_obs[['V1', 'V2', 'V3', 'V4', 'V5', 'V6']].values, - next_transformed_obs[['Fx', 'Fy', 'Fz']].values, - next_transformed_obs[['Tx', 'Ty', 'Tz']].values - ]), - "wrist_1": np.array(next_wrist_1_image), - "wrist_2": np.array(next_wrist_2_image) - } - - if done != 1.0: - tcp_pose = current_row[['X', 'Y', 'Z', 'Roll', 'Pitch', 'Yaw']].values - tcp_pose[3:] = np.abs(tcp_pose[3:]) - if np.all(tcp_pose - self.target_pose <= self.reward_threshold): - reward = 1.0 - done = 1.0 - else: - reward = 0.0 - else: - reward = 0.0 - - - data_dict = { - "observations": observation, - "actions": action, - "next_observations": next_observation, - "rewards": reward, - "masks": 1.0 - done, - "dones": done - } - - self.transitions.append(data_dict) - - # observations.append(observation) - # actions.append(action) - # next_observations.append(next_observation) - # rewards.append(reward) - # masks.append(1.0 - done) - # dones.append(done) - - curr_wrist_1_image.close() - curr_wrist_2_image.close() - next_wrist_1_image.close() - next_wrist_2_image.close() - - return self.transitions - -def main(): - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - processed_folder_path = config['processed_csv_folder_path'] - output_pkl_file_path = os.path.join(processed_folder_path, 'recorded_data.pkl') - - action_scale = np.array(config['ACTION_SCALE']) - target_pose = np.array(config['TARGET_POSE']) - reward_threshold = 
np.array(config['REWARD_THRESHOLD']) - - - all_transitions = [] - - for folder_name in os.listdir(processed_folder_path): - folder_path = os.path.join(processed_folder_path, folder_name) - if os.path.isdir(folder_path): - recorded_data_file_path = os.path.join(folder_path, 'recorded_data.csv') - imageA_folder_path = os.path.join(folder_path, 'imageA') - imageB_folder_path = os.path.join(folder_path, 'imageB') - if os.path.exists(recorded_data_file_path) and os.path.exists(imageA_folder_path) and os.path.exists(imageB_folder_path): - singleTrajectoryConverter = SingleTrajectoryConverter( - recorded_data_file_path, - imageA_folder_path, - imageB_folder_path, - action_scale, - target_pose, - reward_threshold) - transitions = singleTrajectoryConverter.convertToPickle() - all_transitions.extend(transitions) - - with open(output_pkl_file_path, 'wb') as f: - pickle.dump(all_transitions, f) - - print(f"Data saved to {output_pkl_file_path}") - - -if __name__ == "__main__": - main() - diff --git a/data_collector/data_collector/joint_cartesian_converter.py b/data_collector/data_collector/joint_cartesian_converter.py deleted file mode 100644 index 72d8c3d2..00000000 --- a/data_collector/data_collector/joint_cartesian_converter.py +++ /dev/null @@ -1,127 +0,0 @@ -import time - -import rclpy -import rclpy.duration -from rclpy.node import Node -from rclpy.callback_groups import MutuallyExclusiveCallbackGroup - -import numpy as np -import pandas as pd -import json -import os -from scipy.spatial.transform import Rotation as R - -from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped -import rclpy.task -from sensor_msgs.msg import JointState - -from moveit_msgs.msg import ( - RobotState, - MoveItErrorCodes, -) -from moveit_msgs.srv import GetPositionFK - - - -class JointCartesianConverter(Node): - - move_group_name_ = "arm" - namespace_ = "lbr" - - joint_state_topic_ = "joint_states" - lbr_state_topic_ = "state" - pose_topic_ = "pose" - plan_srv_name_ = 
"plan_kinematic_path" - ik_srv_name_ = "compute_ik" - fk_srv_name_ = "compute_fk" - execute_action_name_ = "execute_trajectory" - fri_execute_action_name_ = "joint_trajectory_controller/follow_joint_trajectory" - - base_ = "link_0" - end_effector_ = "link_ee" - - def __init__(self, joint_data_file_path): - super().__init__("joint_cartesian_converter") - - self.joint_data_file_path = joint_data_file_path - self.fk_client_callback = MutuallyExclusiveCallbackGroup() - self.fk_client_ = self.create_client(GetPositionFK, f"{self.namespace_}/{self.fk_srv_name_}", callback_group=self.fk_client_callback) - if not self.fk_client_.wait_for_service(timeout_sec=5.0): - self.get_logger().error("FK service not available.") - exit(1) - - self.joint_data = pd.read_csv(self.joint_data_file_path) - - def quat_2_euler(self, quat): - """calculates and returns: yaw, pitch, roll from given quaternion""" - return R.from_quat(quat).as_euler("xyz") - - def convert(self): - - # Convert each row in the dataframe to cartesian coordinates and save in the dataframe - for index, row in self.joint_data.iterrows(): - - joint_state = JointState() - joint_state.name = ["A1", "A2", "A4", "A3", "A5", "A6", "A7"] - joint_state.position = [row["P1"], row["P2"], row["P3"], row["P4"], row["P5"], row["P6"], row["P7"]] - joint_state.velocity = [row["V1"], row["V2"], row["V3"], row["V4"], row["V5"], row["V6"], row["V7"]] - joint_state.effort = [row["E1"], row["E2"], row["E3"], row["E4"], row["E5"], row["E6"], row["E7"]] - joint_state.header.stamp = self.get_clock().now().to_msg() - joint_state.header.frame_id = f"{self.namespace_}/{self.base_}" - current_robot_state = RobotState() - current_robot_state.joint_state = joint_state - - request = GetPositionFK.Request() - - request.header.frame_id = f"{self.namespace_}/{self.base_}" - request.header.stamp = self.get_clock().now().to_msg() - - request.fk_link_names.append(self.end_effector_) - request.robot_state = current_robot_state - - future = 
self.fk_client_.call_async(request) - - rclpy.spin_until_future_complete(self, future) - if future.result() is None: - self.get_logger().error("Failed to get FK solution") - return None - - response = future.result() - if response.error_code.val != MoveItErrorCodes.SUCCESS: - self.get_logger().error( - f"Failed to get FK solution: {response.error_code.val}" - ) - return None - - pose = response.pose_stamped[0].pose - euler_pose = self.quat_2_euler(np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])) - self.joint_data.at[index, "X"] = pose.position.x - self.joint_data.at[index, "Y"] = pose.position.y - self.joint_data.at[index, "Z"] = pose.position.z - self.joint_data.at[index, "QX"] = pose.orientation.x - self.joint_data.at[index, "QY"] = pose.orientation.y - self.joint_data.at[index, "QZ"] = pose.orientation.z - self.joint_data.at[index, "QW"] = pose.orientation.w - self.joint_data.at[index, "Roll"] = euler_pose[0] - self.joint_data.at[index, "Pitch"] = euler_pose[1] - self.joint_data.at[index, "Yaw"] = euler_pose[2] - - # Compute the end_effector velocity - - self.joint_data.to_csv(self.joint_data_file_path, index=False) - -def main(args=None): - rclpy.init(args=args) - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - - processed_folder_path = config['processed_csv_folder_path'] - csv_name = config['replay_csv_name'] - csv_folder = os.path.join(processed_folder_path, os.path.splitext(csv_folder)[0]) - joint_data_file_path = os.path.join(csv_folder, 'recorded_data.csv') - - joint_cartesian_converter = JointCartesianConverter(joint_data_file_path) - joint_cartesian_converter.convert() - - rclpy.shutdown() \ No newline at end of file diff --git a/data_collector/data_collector/kuka_data_recorder.py b/data_collector/data_collector/kuka_data_recorder.py deleted file mode 100644 index d8e560ca..00000000 --- a/data_collector/data_collector/kuka_data_recorder.py +++ /dev/null @@ -1,249 +0,0 @@ 
-import rclpy -import os -import pandas as pd -import csv -import math -import json -from cv_bridge import CvBridge -import cv2 -from rclpy.node import Node -from rclpy.executors import MultiThreadedExecutor -from rclpy.action import ActionClient -from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint -from control_msgs.action import FollowJointTrajectory -from sensor_msgs.msg import Image, JointState -from geometry_msgs.msg import WrenchStamped -from message_filters import Subscriber, ApproximateTimeSynchronizer - -class KukaDataRecorder(Node): - def __init__(self, data_folder, joint_states_file): - super().__init__("kuka_data_recorder") - - self.data_folder = data_folder - self.joint_states_file = joint_states_file - self.save_data_folder = os.path.join(self.data_folder, os.path.splitext(self.joint_states_file)[0]) - os.makedirs(self.save_data_folder, exist_ok=True) - # Create two folders for the images - self.imageA_folder = os.path.join(self.save_data_folder, "imageA") - self.imageB_folder = os.path.join(self.save_data_folder, "imageB") - os.makedirs(self.imageA_folder, exist_ok=True) - os.makedirs(self.imageB_folder, exist_ok=True) - - # topic_callback_group = MutuallyExclusiveCallbackGroup() - # client_callback_group = MutuallyExclusiveCallbackGroup() - - # Initialize everything for trajectory replaying - self.feedback = None - self._action_client = ActionClient(self, FollowJointTrajectory, '/lbr/joint_trajectory_controller/follow_joint_trajectory') - self.joint_names = ["A1", "A2", "A3", "A4", "A5", "A6", "A7"] - self.joint_trajectories = self.readJoinStatesFromCsv(os.path.join(self.data_folder, "processed", self.joint_states_file)) - # This can be done through goal sending - self.initializeRobotPose() - - # Create a synchronized callback for all the information - self.imageA_topic = "/camera1/camera1/color/image_raw" - self.imageB_topic = "/camera2/camera2/color/image_raw" - self.joint_state_topic = "/lbr/joint_states" - self.wrench_topic = 
"/lbr/force_torque_broadcaster/wrench" - - self.imageA_sub = Subscriber(self, Image, self.imageA_topic) - self.imageB_sub = Subscriber(self, Image, self.imageB_topic) - self.joint_state_sub = Subscriber(self, JointState, self.joint_state_topic) - self.wrench_sub = Subscriber(self, WrenchStamped, self.wrench_topic) - - self.synchronizer = ApproximateTimeSynchronizer( - [self.imageA_sub, self.imageB_sub, self.joint_state_sub, self.wrench_sub], - queue_size=100, - slop=0.01, - ) - - self.cvbridge = CvBridge() - - self.synchronizer.registerCallback(self.recordSyncData) - self.get_logger().info("Data Synchronizer Node Initialized") - - # self.dataframe = pd.DataFrame(columns=[ - # 'timestamp', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', - # 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', - # 'E1', 'E2', 'E3', 'E4', 'E5', 'E6', 'E7', 'Fx', 'Fy', 'Fz', 'Tx', 'Ty', 'Tz' - # ]) - self.dataframe = [] - - # This needs to be done through moveit planner since the points need to be interpolated to a trajectory - self.executeTrajectory() - - - def recordSyncData(self, imageA_msg, imageB_msg, joint_state_msg, wrench_msg): - # The timestamp of imageA_msg will be used to name the columns - timestamp = f"{imageA_msg.header.stamp.sec}_{imageA_msg.header.stamp.nanosec}" - - self.get_logger().info(f"Recording data at timestamp {timestamp}") - - # Save the joint state - data_row = {'timestamp': timestamp} - - for i in range(7): - # imp imp: Remember that kuka publishes A1, A2, A4, A3, A5, A6, A7 (NOTE THE ORDER) - j = i - if i == 3: - j = 4 - if i == 4: - j = 3 - data_row[f'P{j+1}'] = joint_state_msg.position[i] - data_row[f'V{j+1}'] = joint_state_msg.velocity[i] - data_row[f'E{j+1}'] = joint_state_msg.effort[i] - - # Save the wrench - data_row['Fx'] = wrench_msg.wrench.force.x - data_row['Fy'] = wrench_msg.wrench.force.y - data_row['Fz'] = wrench_msg.wrench.force.z - data_row['Tx'] = wrench_msg.wrench.torque.x - data_row['Ty'] = wrench_msg.wrench.torque.y - data_row['Tz'] = 
wrench_msg.wrench.torque.z - - self.dataframe.append(data_row) - - # Save the images - imageA_filename = os.path.join(self.imageA_folder, f"{timestamp}_A.jpg") - imageB_filename = os.path.join(self.imageB_folder, f"{timestamp}_B.jpg") - image_A = self.cvbridge.imgmsg_to_cv2(imageA_msg, desired_encoding='bgr8') - cv2.imwrite(imageA_filename, image_A) - image_B = self.cvbridge.imgmsg_to_cv2(imageB_msg, desired_encoding='bgr8') - cv2.imwrite(imageB_filename, image_B) - - - def readJoinStatesFromCsv(self, file_path): - joint_states = [] - with open(file_path, mode='r') as file: - csv_reader = csv.DictReader(file) - for row in csv_reader: - joint_states.append([math.radians(float(row['A1'])), math.radians(float(row['A2'])), math.radians(float(row['A3'])), - math.radians(float(row['A4'])), math.radians(float(row['A5'])), math.radians(float(row['A6'])), math.radians(float(row['A7']))]) - print(joint_states[0]) - return joint_states - - def initializeRobotPose(self): - # The robot needs to be initialized to the first position of the trajectory - # to ensure that there is no sudden movement when the trajectory is replayed - goal_msg = FollowJointTrajectory.Goal() - trajectory_msg = JointTrajectory() - trajectory_msg.joint_names = self.joint_names - - self.get_logger().info(f"Processing trajectory point init") - - point = JointTrajectoryPoint() - point.positions = self.joint_trajectories[0] - point.time_from_start.sec = 4 # Set the seconds part to 0 - - trajectory_msg.points.append(point) - goal_msg.trajectory = trajectory_msg - self._action_client.wait_for_server() - - send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback) - - rclpy.spin_until_future_complete(self, send_goal_future) - - goal_handle = send_goal_future.result() - - if not goal_handle.accepted: - self.get_logger().info(f"Goal for init point was rejected") - return - - self.get_logger().info(f"Goal for init point was accepted") - get_result_future = 
goal_handle.get_result_async() - rclpy.spin_until_future_complete(self, get_result_future) - result = get_result_future.result().result - self.get_logger().info(f"Result : {result}, Initial position reached") - print("Press Enter to continue ...") - input() - - def executeTrajectory(self): - goal_msg = FollowJointTrajectory.Goal() - trajectory_msg = JointTrajectory() - trajectory_msg.joint_names = self.joint_names - for i, joint_values in enumerate(self.joint_trajectories): - point = JointTrajectoryPoint() - point.positions = joint_values - point.time_from_start = rclpy.duration.Duration(seconds=(i+1) * 0.1).to_msg() - trajectory_msg.points.append(point) - goal_msg.trajectory = trajectory_msg - self._action_client.wait_for_server() - - send_goal_future = self._action_client.send_goal_async(goal_msg, feedback_callback=self.feedback_callback) - - # rclpy.spin_until_future_complete(self, send_goal_future) - - # goal_handle = send_goal_future.result() - - # if not goal_handle.accepted: - # self.get_logger().info(f"Goal for trajectory was rejected") - # return - - # self.get_logger().info(f"Goal for trajectory was accepted") - # get_result_future = goal_handle.get_result_async() - # rclpy.spin_until_future_complete(self, get_result_future) - # result = get_result_future.result().result - # self.get_logger().info(f"Result : {result}, Trajectory executed?") - - send_goal_future.add_done_callback(self.goal_response_callback) - - def goal_response_callback(self, future): - goal_handle = future.result() - if not goal_handle.accepted: - self.get_logger().info(f"Goal for trajectory was rejected") - return - self.get_logger().info(f"Goal for trajectory was accepted") - get_result_future = goal_handle.get_result_async() - get_result_future.add_done_callback(self.result_callback) - - def result_callback(self, future): - result = future.result() - if result.result.error_code == FollowJointTrajectory.Result.SUCCESSFUL: - self.get_logger().info("WOHOOO") - self.shutdown_node() - 
else: - self.get_logger().info("OHH NO") - - def feedback_callback(self, feedback_msg): - self.feedback = feedback_msg.feedback - - def shutdown_node(self): - self.get_logger().info("Shutting down node now") - self.save_data() - self.destroy_node() - rclpy.shutdown() - - def save_data(self): - df = pd.DataFrame(self.dataframe) - save_path = os.path.join(self.save_data_folder, 'recorded_data.csv') - df.to_csv(save_path, index=False) - self.get_logger().info("Saved data!") - -def main(args=None): - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - - data_folder = config['log_folder_path'] - joint_states_file = config['replay_csv_name'] - rclpy.init(args=args) - executor = MultiThreadedExecutor() - node = KukaDataRecorder(data_folder=data_folder, joint_states_file=joint_states_file) - executor.add_node(node) - try: - - executor.spin() - except Exception as e: - node.get_logger().error(f"An error occurred: {e}") - finally: - # if hasattr(node, 'dataframe'): - # df = pd.DataFrame(node.dataframe) - # save_path = os.path.join(node.data_folder, 'recorded_data.csv') - # df.to_csv(save_path, index=False) - # node.get_logger().info(f"Recorded data saved!") - if rclpy.ok(): - node.destroy_node() - rclpy.shutdown() - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/data_collector/data_collector/parse_robot_log.py b/data_collector/data_collector/parse_robot_log.py deleted file mode 100644 index 2b11adcd..00000000 --- a/data_collector/data_collector/parse_robot_log.py +++ /dev/null @@ -1,110 +0,0 @@ -import pandas as pd -import json -import re -import os -import glob - -def parse_robot_data(file_path): - """ - Parses the robot data file, renames joint columns to J1-J7, - extracts and renames time, force, and torque values. - - Parameters: - file_path (str): Path to the data file. - - Returns: - pd.DataFrame: A DataFrame containing time, renamed joint values, - and renamed force/torque data. 
- """ - # Step 1: Read the header line starting with '%' - with open(file_path, 'r') as file: - for line in file: - if line.startswith('%'): - header = line.strip().lstrip('%').strip().split() - break - else: - raise ValueError("No header line starting with '%' found in the file.") - - # Step 2: Load the data into a DataFrame - df = pd.read_csv( - file_path, - delim_whitespace=True, # Assuming the data is space-separated - comment='%', # Skip any lines starting with '%' - names=header, # Use the extracted header - skiprows=1 # Skip the header line - ) - - # Step 3: Identify and rename joint columns - joint_pattern = re.compile(r'axisQMsr_LBR_iiwa_14_R820_1\[(\d+)\]') - joint_columns = [col for col in header if joint_pattern.match(col)] - - if len(joint_columns) != 7: - raise ValueError(f"Expected 7 joint columns, found {len(joint_columns)}.") - - # Sort joint columns based on their index and rename them to J1-J7 - joint_columns_sorted = sorted( - joint_columns, - key=lambda x: int(joint_pattern.match(x).group(1)) - ) - joint_rename_map = {col: f'A{idx+1}' for idx, col in enumerate(joint_columns_sorted)} - df.rename(columns=joint_rename_map, inplace=True) - - # Step 4: Extract and combine time columns - if 'ZeitInSec' not in df.columns or 'ZeitInNanoSec' not in df.columns: - raise ValueError("Required time columns 'ZeitInSec' and/or 'ZeitInNanoSec' are missing.") - - # Combine 'ZeitInSec' and 'ZeitInNanoSec' into a single 'time' column in seconds - df['time'] = df['ZeitInSec'] + df['ZeitInNanoSec'] * 1e-9 - - # Drop the original time columns as they are now combined - df.drop(columns=['ZeitInSec', 'ZeitInNanoSec'], inplace=True) - - # Step 5: Extract and rename force and torque columns - force_columns = { - 'cartForce1_X': 'Fx', - 'cartForce1_Y': 'Fy', - 'cartForce1_Z': 'Fz' - } - torque_columns = { - 'cartTorque1_TauX': 'Tx', - 'cartTorque1_TauY': 'Ty', - 'cartTorque1_TauZ': 'Tz' - } - - # Verify that the required force and torque columns exist - 
missing_force_columns = [col for col in force_columns.keys() if col not in df.columns] - missing_torque_columns = [col for col in torque_columns.keys() if col not in df.columns] - missing_columns = missing_force_columns + missing_torque_columns - if missing_columns: - raise ValueError(f"The following required columns are missing in the data: {missing_columns}") - - # Rename force and torque columns - df.rename(columns=force_columns, inplace=True) - df.rename(columns=torque_columns, inplace=True) - - # Step 6: Select the relevant columns - selected_columns = ['time'] + list(joint_rename_map.values()) + list(force_columns.values()) + list(torque_columns.values()) - df_selected = df[selected_columns] - - return df_selected - - -def main(): - - with open('config.json', 'r') as config_file: - config = json.load(config_file) - - log_folder_path = config['log_folder_path'] - save_df_path = config['processed_csv_folder_path'] - - os.makedirs(save_df_path, exist_ok=True) - - log_files = glob.glob(os.path.join(log_folder_path, "*.log")) - - for file_path in log_files: - returned_df = parse_robot_data(file_path=file_path) - save_file_path = os.path.join(save_df_path, os.path.splitext(os.path.basename(file_path))[0] + '.csv') - returned_df.to_csv(save_file_path, index=False) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/data_collector/data_collector/transformations.py b/data_collector/data_collector/transformations.py deleted file mode 100644 index 52a55527..00000000 --- a/data_collector/data_collector/transformations.py +++ /dev/null @@ -1,37 +0,0 @@ -from scipy.spatial.transform import Rotation as R -import numpy as np - - -def construct_adjoint_matrix(tcp_pose): - """ - Construct the adjoint matrix for a spatial velocity vector - :args: tcp_pose: (x, y, z, qx, qy, qz, qw) - """ - rotation = R.from_quat(tcp_pose[3:]).as_matrix() - translation = np.array(tcp_pose[:3]) - skew_matrix = np.array( - [ - [0, -translation[2], translation[1]], - 
[translation[2], 0, -translation[0]], - [-translation[1], translation[0], 0], - ] - ) - adjoint_matrix = np.zeros((6, 6)) - adjoint_matrix[:3, :3] = rotation - adjoint_matrix[3:, 3:] = rotation - adjoint_matrix[3:, :3] = skew_matrix @ rotation - return adjoint_matrix - - -def construct_homogeneous_matrix(tcp_pose): - """ - Construct the homogeneous transformation matrix from given pose. - args: tcp_pose: (x, y, z, qx, qy, qz, qw) - """ - rotation = R.from_quat(tcp_pose[3:]).as_matrix() - translation = np.array(tcp_pose[:3]) - T = np.zeros((4, 4)) - T[:3, :3] = rotation - T[:3, 3] = translation - T[3, 3] = 1 - return T diff --git a/data_collector/package.xml b/data_collector/package.xml deleted file mode 100644 index 494d0fa0..00000000 --- a/data_collector/package.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - data_collector - 0.0.0 - TODO: Package description - nisara - Apache-2.0 - - ament_copyright - ament_flake8 - ament_pep257 - python3-pytest - - rclpy - sensor_msgs - trajectory_msgs - control_msgs - geometry_msgs - message_filters - numpy - pandas - cv2 - cv_bridge - - - - ament_python - - diff --git a/data_collector/setup.cfg b/data_collector/setup.cfg deleted file mode 100644 index f0a7afc8..00000000 --- a/data_collector/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/data_collector -[install] -install_scripts=$base/lib/data_collector diff --git a/data_collector/setup.py b/data_collector/setup.py deleted file mode 100644 index 43107e8c..00000000 --- a/data_collector/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -from setuptools import find_packages, setup - -package_name = 'data_collector' - -setup( - name=package_name, - version='0.0.0', - packages=find_packages(exclude=['test']), - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools', 'pandas', 'numpy'], - zip_safe=True, - maintainer='nisara', - 
maintainer_email='sarawgi.nikita@gmail.com', - description='TODO: Package description', - license='Apache-2.0', - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - 'kuka_data_recorder = data_collector.kuka_data_recorder:main', - 'joint_cartesian_converter = data_collector.joint_cartesian_converter:main' - ], - }, -) From c8fb5377a47feb3d5202d77944bbcdee040a245f Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Wed, 5 Mar 2025 14:31:25 -0800 Subject: [PATCH 15/24] Added new submodule --- .gitmodules | 4 ++++ data_collector | 1 + 2 files changed, 5 insertions(+) create mode 160000 data_collector diff --git a/.gitmodules b/.gitmodules index fe76ae72..e3d46646 100644 --- a/.gitmodules +++ b/.gitmodules @@ -9,3 +9,7 @@ path = lbr_fri_ros2_stack url = https://github.com/RROS-Lab/serl_lbr_fri_ros2_stack.git branch=serl-kuka +[submodule "data_collector"] + path = data_collector + url = https://github.com/nikitasarawgi/expert_data_collector.git + branch = data-collector-only diff --git a/data_collector b/data_collector new file mode 160000 index 00000000..b8263e9f --- /dev/null +++ b/data_collector @@ -0,0 +1 @@ +Subproject commit b8263e9f3ae287665bdbcfe08d225ebe90702f20 From a1c3add26f813f395c0add6e4839256b6b208c10 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Wed, 5 Mar 2025 14:51:15 -0800 Subject: [PATCH 16/24] Updating the data_collector submodule --- data_collector | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_collector b/data_collector index b8263e9f..744ed656 160000 --- a/data_collector +++ b/data_collector @@ -1 +1 @@ -Subproject commit b8263e9f3ae287665bdbcfe08d225ebe90702f20 +Subproject commit 744ed65614de860e3b5637eb65cad4f8b65b285e From ce21ae9dca891beeacc627ce803a584bcbdae454 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Wed, 5 Mar 2025 14:51:53 -0800 Subject: [PATCH 17/24] Adding serl files for kuka --- .gitignore | 8 +- .../async_drq_randomized.py | 34 +-- examples/async_peg_insert_drq/run_actor.sh | 4 
+- examples/async_peg_insert_drq/run_learner.sh | 4 +- kuka_server/kuka_server/data.txt | 55 +++++ kuka_server/kuka_server/robot_interface.py | 10 +- kuka_server/kuka_server/utils.py | 31 +-- serl_robot_infra/kuka_env/envs/kuka_env.py | 108 ++++++--- .../kuka_env/envs/peg_env/config.py | 33 +-- serl_robot_infra/kuka_env/envs/wrappers.py | 221 ++++++++++++++++++ serl_robot_infra/kuka_env/utils/rotations.py | 11 + 11 files changed, 432 insertions(+), 87 deletions(-) create mode 100644 kuka_server/kuka_server/data.txt create mode 100644 serl_robot_infra/kuka_env/envs/wrappers.py create mode 100644 serl_robot_infra/kuka_env/utils/rotations.py diff --git a/.gitignore b/.gitignore index 06005b52..242d87db 100644 --- a/.gitignore +++ b/.gitignore @@ -187,5 +187,9 @@ wandb/ realsense-ros/ .vscode/ -data_collector/data_collector/processed_data/ -data_collector/data_collector/11_dec_logs/ \ No newline at end of file +data_collector/data_collector/**/*.csv +data_collector/data_collector/**/*.jpg +data_collector/data_collector/**/*.jpeg +data_collector/data_collector/**/*.png + +.~lock.* \ No newline at end of file diff --git a/examples/async_peg_insert_drq/async_drq_randomized.py b/examples/async_peg_insert_drq/async_drq_randomized.py index 07c6f177..f1d547fd 100644 --- a/examples/async_peg_insert_drq/async_drq_randomized.py +++ b/examples/async_peg_insert_drq/async_drq_randomized.py @@ -2,11 +2,14 @@ import sys import os import getpass + username = getpass.getuser() env = "serl" print("Ensure to change anaconda3 or miniconda3 and change your environment name") print("Conda Environment name is: ", env) -sys.path.append("/home/"+username+"/anaconda3/envs/"+env+"/lib/python3.10/site-packages") +sys.path.append( + "/home/" + username + "/anaconda3/envs/" + env + "/lib/python3.10/site-packages" +) import rclpy import rclpy.duration from rclpy.executors import MultiThreadedExecutor @@ -41,15 +44,17 @@ from serl_launcher.wrappers.serl_obs_wrappers import SERLObsWrapper import os + 
MAIN_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) sys.path.append(MAIN_DIR) from serl_robot_infra.kuka_env.envs.relative_env import RelativeFrame -from serl_robot_infra.franka_env.envs.wrappers import ( +from serl_robot_infra.kuka_env.envs.wrappers import ( Quat2EulerWrapper, ) import serl_robot_infra.franka_env import serl_robot_infra.kuka_env + # sys.argv = sys.argv[:1] # # `app.run` calls `sys.exit` @@ -66,7 +71,9 @@ flags.DEFINE_integer("seed", 42, "Random seed.") flags.DEFINE_bool("save_model", False, "Whether to save model.") flags.DEFINE_integer("critic_actor_ratio", 4, "critic to actor update ratio.") -flags.DEFINE_boolean("load_checkpoint", False, "Whether to start from previous checkpoint or not.") +flags.DEFINE_boolean( + "load_checkpoint", False, "Whether to start from previous checkpoint or not." +) flags.DEFINE_string("load_checkpoint_path", None, "Checkpoint to start training from.") flags.DEFINE_integer("batch_size", 256, "Batch size for training the policy.") @@ -194,6 +201,8 @@ def update_params(params): deterministic=False, ) actions = np.asarray(jax.device_get(actions)) + # nisara : Comment + # print("Actions in Training Loop: ", actions) # Step environment with timer.context("step_env"): @@ -348,7 +357,7 @@ def main(_): executor.add_node(robot_interface_node) robot_interface_node.get_logger().info("Robot interface node started.") # executor.spin_once() - + assert FLAGS.batch_size % num_devices == 0 # seed rng = jax.random.PRNGKey(FLAGS.seed) @@ -359,7 +368,7 @@ def main(_): FLAGS.env, fake_env=FLAGS.learner, save_video=FLAGS.eval_checkpoint_step, - robot_interface_node=robot_interface_node + robot_interface_node=robot_interface_node, ) print("Environment initialized") # env = GripperCloseEnv(env) @@ -383,10 +392,10 @@ def main(_): image_keys=image_keys, encoder_type=FLAGS.encoder_type, ) - - if(FLAGS.load_checkpoint): + + if FLAGS.load_checkpoint: print(f"Loading Checkpoint from Previous 
Run:{FLAGS.load_checkpoint_path}") - + ckpt = checkpoints.restore_checkpoint( FLAGS.load_checkpoint_path, agent.state, @@ -429,11 +438,7 @@ def main(_): # learner loop print_green("starting learner loop") - learner( - sampling_rng, - agent, - replay_buffer - ) + learner(sampling_rng, agent, replay_buffer) elif FLAGS.actor: print("Initializing Actor Node") @@ -446,9 +451,6 @@ def main(_): else: raise NotImplementedError("Must be either a learner or an actor") - - - rclpy.shutdown() diff --git a/examples/async_peg_insert_drq/run_actor.sh b/examples/async_peg_insert_drq/run_actor.sh index 230473ba..a1998d2c 100644 --- a/examples/async_peg_insert_drq/run_actor.sh +++ b/examples/async_peg_insert_drq/run_actor.sh @@ -10,10 +10,10 @@ python3 async_drq_randomized.py "$@" \ --random_steps 0 \ --training_starts 200 \ --encoder_type resnet-pretrained \ - --demo_path /home/omey/nisara/expert_data_collector/processed_replay/processed_replay/2013-01-01_01-09-43/recorded_data.pkl \ + --demo_path /home/omey/SERL/src/data_collector/data_collector/feb_28_data/kuka_csv_logs/all_success_final.pkl \ --eval_checkpoint_step 0 \ --loaded_checkpoint_step 1000 \ --eval_n_trajs 4 \ --load_checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_12 \ + --checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_new/ \ # --load_checkpoint 0 \ - # --checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints/ diff --git a/examples/async_peg_insert_drq/run_learner.sh b/examples/async_peg_insert_drq/run_learner.sh index 942cae1f..ef0bd81c 100644 --- a/examples/async_peg_insert_drq/run_learner.sh +++ b/examples/async_peg_insert_drq/run_learner.sh @@ -12,10 +12,10 @@ python3 async_drq_randomized.py "$@" \ --batch_size 256 \ --eval_period 2000 \ --encoder_type resnet-pretrained \ - --demo_path /home/omey/nisara/expert_data_collector/processed_replay/processed_replay/2013-01-01_01-09-43/recorded_data.pkl \ + --demo_path 
/home/omey/SERL/src/data_collector/data_collector/feb_28_data/kuka_csv_logs/all_success_final.pkl \ --checkpoint_period 1000 \ --loaded_checkpoint_step 1000 \ --load_checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_12/ \ - --checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints/ + --checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_new/ \ # --load_checkpoint 0 \ diff --git a/kuka_server/kuka_server/data.txt b/kuka_server/kuka_server/data.txt new file mode 100644 index 00000000..956382df --- /dev/null +++ b/kuka_server/kuka_server/data.txt @@ -0,0 +1,55 @@ +difference: [np.float64(0.0019766595082607585) np.float64(0.0004628820030692024) + np.float64(0.028018749190857797) 0.02504313037427308 0.003815908558840551 + 0.010258544890220822] + +Processed folder: OM_Feb8_SERL_2013-01-02_05-27-28 + +NOT INSERTED + + + + + +difference: [np.float64(0.001436364561131831) np.float64(0.0004622432071985996) + np.float64(0.006333442177054199) 0.017690793548203132 0.03313624476328898 + 0.007315268674972231] + +Processed folder: OM_Feb8_SERL_2013-01-02_05-24-36 + +INSERTED + + + + + +difference: [np.float64(0.0015521302069487763) np.float64(0.00042343143758170193) + np.float64(0.006008299968794095) 0.030791564054494902 0.00497890869768973 + 0.007003545883071727] + +Processed folder: OM_Feb8_SERL_2013-01-02_05-25-44 + +INSERTED + + + + + +difference: [np.float64(0.0029457311018545873) np.float64(0.0009184325443422001) + np.float64(0.028379779133595004) 0.021396361779027595 + 0.014063390831159997 0.002100298832244274] + +Processed folder: OM_Feb8_SERL_2013-01-02_05-24-46 + +NOT INSERTED + + + + + +difference: [np.float64(0.003487982539743917) np.float64(0.0008351758736137027) + np.float64(0.028138092827257696) 0.01969423339553389 0.004430696798094312 + 0.00665583903938094] + +Processed folder: OM_Feb8_SERL_2013-01-02_05-26-50 + +NOT INSERTED \ No newline at end of file diff --git 
a/kuka_server/kuka_server/robot_interface.py b/kuka_server/kuka_server/robot_interface.py index 3a95c3c6..82a1f131 100644 --- a/kuka_server/kuka_server/robot_interface.py +++ b/kuka_server/kuka_server/robot_interface.py @@ -132,10 +132,10 @@ def state_callback(self, state: LBRState): def get_ik(self, target_pose: Pose) -> JointState | None: request = GetPositionIK.Request() - request.ik_request.group_name = self.move_group_name_ tf_prefix = self.get_namespace()[1:] request.ik_request.pose_stamped.header.frame_id = f"{tf_prefix}/{self.base_}" + request.ik_request.pose_stamped.header.stamp = self.get_clock().now().to_msg() request.ik_request.pose_stamped.pose = target_pose request.ik_request.avoid_collisions = True @@ -204,6 +204,10 @@ def get_fk(self) -> Pose | None: ) return None + # print( + # "In get_fk, printing the current pose after fk: ", + # response.pose_stamped[0].pose, + # ) return response.pose_stamped[0].pose, current_joint_state def get_fk_lbr(self, commanded: bool = False) -> Pose | None: @@ -389,7 +393,7 @@ def get_best_ik(self, target_pose: Pose, attempts: int = 100) -> JointState | No best_cost = np.inf best_joint_state = None - + # self.get_logger().info(f"Computing Best IK for: {target_pose}") for _ in range(attempts): joint_state = self.get_ik(target_pose) if joint_state is None: @@ -431,12 +435,14 @@ def get_motion_plan( # return RobotTrajectory() current_joint_state = self.get_joint_state() + # self.get_logger().info(f"Current joint state: {current_joint_state}") if current_joint_state is None: self.get_logger().error("Failed to get joint state") return None current_robot_state = RobotState() current_robot_state.joint_state.position = current_joint_state.position + current_robot_state.joint_state.name = current_joint_state.name target_joint_state = self.get_best_ik(target_pose) if target_joint_state is None: diff --git a/kuka_server/kuka_server/utils.py b/kuka_server/kuka_server/utils.py index 0ddfb955..bf0a3c44 100644 --- 
a/kuka_server/kuka_server/utils.py +++ b/kuka_server/kuka_server/utils.py @@ -1,14 +1,15 @@ from scipy.spatial.transform import Rotation as R import numpy as np -def euler_to_quat(euler_angles_abc, degrees = True): - q = R.from_euler('xyz', [euler_angles_abc[2], euler_angles_abc[1], euler_angles_abc[0]], degrees=degrees).as_quat() + +def euler_to_quat(euler_angles_abc, degrees=False): + q = R.from_euler("ZYX", euler_angles_abc, degrees=degrees).as_quat() return q -def euler_to_rotmat(euler_angles_abc, degrees = True): - rot_mat = R.from_euler('xyz', [euler_angles_abc[2], euler_angles_abc[1], euler_angles_abc[0]], degrees=degrees).as_matrix() +def euler_to_rotmat(euler_angles_abc, degrees=False): + rot_mat = R.from_euler("ZYX", euler_angles_abc, degrees=degrees).as_matrix() return rot_mat @@ -26,9 +27,10 @@ def quat_to_rotmat(quat): return rotmat -def quat_to_euler(quat): - euler = R.from_quat(quat).as_euler("xyz") +def quat_to_euler(quat, degrees=False): + + euler = R.from_quat(quat).as_euler("ZYX", degrees=degrees) return euler @@ -37,15 +39,16 @@ def convert_wrench_to_numpy(msg): torque = msg.wrench.torque return np.array([force.x, force.y, force.z, torque.x, torque.y, torque.z]) -def xyzabc_to_se3(xyzabc, degrees = True): + +def xyzabc_to_se3(xyzabc, degrees=False): tranformation_mat = np.eye(4) - tranformation_mat[0,3] = xyzabc[0] - tranformation_mat[1,3] = xyzabc[1] - if(len(xyzabc) == 6): - tranformation_mat[2,3] = xyzabc[2] - tranformation_mat[:3,:3] = euler_to_rotmat(xyzabc[3:], degrees) + tranformation_mat[0, 3] = xyzabc[0] + tranformation_mat[1, 3] = xyzabc[1] + if len(xyzabc) == 6: + tranformation_mat[2, 3] = xyzabc[2] + tranformation_mat[:3, :3] = euler_to_rotmat(xyzabc[3:], degrees) else: - tranformation_mat[:3,:3] = euler_to_rotmat(xyzabc[2:], degrees) + tranformation_mat[:3, :3] = euler_to_rotmat(xyzabc[2:], degrees) - return tranformation_mat \ No newline at end of file + return tranformation_mat diff --git 
a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py index 37ff6a55..2160a674 100644 --- a/serl_robot_infra/kuka_env/envs/kuka_env.py +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -1,12 +1,16 @@ """Gym Interface for Franka""" + import sys import time import getpass + username = getpass.getuser() env = "serl" print("Ensure to change anaconda3 or miniconda3 and change your environment name") print("Conda Environment name is: ", env) -sys.path.append("/home/"+username+"/anaconda3/envs/"+env+"/lib/python3.10/site-packages") +sys.path.append( + "/home/" + username + "/anaconda3/envs/" + env + "/lib/python3.10/site-packages" +) import numpy as np import gym @@ -19,16 +23,16 @@ from datetime import datetime from collections import OrderedDict from typing import Dict + sys.path.append("/home/cam/omey_ws/serl-rros/src/") from serl_robot_infra.franka_env.camera.video_capture import VideoCapture from serl_robot_infra.franka_env.camera.rs_capture import RSCapture -from serl_robot_infra.franka_env.utils.rotations import euler_2_quat, quat_2_euler +from serl_robot_infra.kuka_env.utils.rotations import euler_2_quat, quat_2_euler # from kuka_server.kuka_server.robot_interface import RobotInterfaceNode - class ImageDisplayer(threading.Thread): def __init__(self, queue): threading.Thread.__init__(self) @@ -88,9 +92,9 @@ def __init__( save_video=False, config: DefaultEnvConfig = None, max_episode_length=100, - robot_interface_node=None + robot_interface_node=None, ): - print("Initializing KukaEnv") + print("Initializing KukaEnv") self.robot_interface_node = robot_interface_node self.action_scale = config.ACTION_SCALE self._TARGET_POSE = config.TARGET_POSE @@ -98,6 +102,7 @@ def __init__( self.url = config.ROBOT_IP self.config = config self.max_episode_length = max_episode_length + # self.max_episode_length = 5 # convert last 3 elements from euler to quat, from size (6,) to (7,) self.resetpos = np.concatenate( @@ -142,7 +147,7 @@ def 
__init__( np.ones((7,), dtype=np.float32) * -1, np.ones((7,), dtype=np.float32), ) - + self.observation_space = gym.spaces.Dict( { "state": gym.spaces.Dict( @@ -156,7 +161,7 @@ def __init__( } ), "images": gym.spaces.Dict( - { + { "wrist_1": gym.spaces.Box( 0, 255, shape=(128, 128, 3), dtype=np.uint8 ), @@ -186,48 +191,68 @@ def clip_safety_box(self, pose: np.ndarray) -> np.ndarray: pose[:3] = np.clip( pose[:3], self.xyz_bounding_box.low, self.xyz_bounding_box.high ) - euler = Rotation.from_quat(pose[3:]).as_euler("xyz") + euler = Rotation.from_quat(pose[3:]).as_euler("ZYX") # Clip first euler angle separately due to discontinuity from pi to -pi - sign = np.sign(euler[0]) - euler[0] = sign * ( + sign = np.sign(euler[2]) + euler[2] = sign * ( np.clip( - np.abs(euler[0]), - self.rpy_bounding_box.low[0], - self.rpy_bounding_box.high[0], + np.abs(euler[2]), + self.rpy_bounding_box.low[2], + self.rpy_bounding_box.high[2], ) ) - euler[1:] = np.clip( - euler[1:], self.rpy_bounding_box.low[1:], self.rpy_bounding_box.high[1:] + euler[:2] = np.clip( + euler[:2], self.rpy_bounding_box.low[:2], self.rpy_bounding_box.high[:2] ) - pose[3:] = Rotation.from_euler("xyz", euler).as_quat() + pose[3:] = Rotation.from_euler("ZYX", euler).as_quat() return pose def step(self, action: np.ndarray) -> tuple: """standard gym step function.""" print("In step function") - + start_time = time.time() + # nisara : Comment + # print("Action before clipping: ", action) action = np.clip(action, self.action_space.low, self.action_space.high) xyz_delta = action[:3] + # nisara : Comment + # print("Action after clipping ==xyz_delta==: ", xyz_delta) self.nextpos = self.currpos.copy() + # nisara : Comment + # print("Current position in step: ", self.nextpos) self.nextpos[:3] = self.nextpos[:3] + xyz_delta * self.action_scale[0] # GET ORIENTATION FROM ACTION - self.nextpos[3:] = ( - Rotation.from_euler("xyz", action[3:6] * self.action_scale[1]) - * Rotation.from_quat(self.currpos[3:]) + self.nextpos[3:] = 
Rotation.from_matrix( + ( + np.matmul( + Rotation.from_quat(self.currpos[3:]).as_matrix(), + Rotation.from_euler( + "ZYX", action[3:6] * self.action_scale[1] + ).as_matrix(), + ) + ) ).as_quat() + nextPos_euler = Rotation.from_quat(self.nextpos[3:]).as_euler( + "ZYX", degrees=True + ) + # nisara : Comment + # print("Next position's euler: ", nextPos_euler) + # print("Next position in step: ", self.nextpos) + ##Remove gripper action NOTE: Omey - if(self.use_gripper): + if self.use_gripper: gripper_action = action[6] * self.action_scale[2] gripper_action_effective = self._send_gripper_command(gripper_action) - + self._send_pos_command(self.clip_safety_box(self.nextpos)) + # self._send_pos_command(self.nextpos) self.curr_path_length += 1 dt = time.time() - start_time @@ -235,7 +260,7 @@ def step(self, action: np.ndarray) -> tuple: self._update_currpos() ob = self._get_obs() - if(self.use_gripper): + if self.use_gripper: reward = self.compute_reward(ob, gripper_action_effective) else: reward = self.compute_reward(ob) @@ -243,10 +268,10 @@ def step(self, action: np.ndarray) -> tuple: done = self.curr_path_length >= self.max_episode_length or reward == 1 return ob, reward, done, False, {} - def compute_reward(self, obs, gripper_action_effective = None) -> bool: + def compute_reward(self, obs, gripper_action_effective=None) -> bool: """We are using a sparse reward function.""" current_pose = obs["state"]["tcp_pose"] - + # convert from quat to euler first euler_angles = quat_2_euler(current_pose[3:]) euler_angles = np.abs(euler_angles) @@ -319,7 +344,10 @@ def go_to_rest(self, joint_reset=False): """ # Change to precision mode for reset # requests.post(self.url + "update_param", json=self.config.PRECISION_PARAM) - + + # nisara : Comment + # print("In the function go_to_rest to reset the pose") + # Perform Carteasian reset if self.randomreset: # randomize reset position in xy plane reset_pose = self.resetpos.copy() @@ -380,7 +408,7 @@ def init_cameras(self, 
name_serial_dict=None): self.close_cameras() self.cap = OrderedDict() - + for cam_name, cam_serial in name_serial_dict.items(): cap = VideoCapture( RSCapture(name=cam_name, serial_number=cam_serial, depth=False) @@ -402,10 +430,21 @@ def _recover(self): def _send_pos_command(self, pos: np.ndarray): """Internal function to send position command to the robot.""" + # nisara : Comment + # print("Current pose in _send_pos_command: ", self.currpos) + arr = np.array(pos).astype(np.float32) + + # nisara : Comment + # arr_euler = Rotation.from_quat(arr[3:]).as_euler("ZYX", degrees=True) + # print("In euler, send_pos_command: ", arr_euler) + # print("In the function to send position command to move to pos: ", arr) + # print("In the same function, reset pose: ", self.resetpos) + # print("\n") + self.robot_interface_node.move_to_pose(arr, self.resetpos) print("Done moving the robot") - + def _send_gripper_command(self, pos: float, mode="binary"): """Internal function to send gripper command to the robot.""" if mode == "binary": @@ -440,12 +479,14 @@ def _update_currpos(self): self.currforce[:] = np.array(ps["force"], dtype=np.float32) self.currtorque[:] = np.array(ps["torque"], dtype=np.float32) - self.currjacobian[:] = np.reshape(np.array(ps["jacobian"], dtype=np.float32), (6, 7)) + self.currjacobian[:] = np.reshape( + np.array(ps["jacobian"], dtype=np.float32), (6, 7) + ) self.q[:] = np.array(ps["q"], dtype=np.float32) self.dq[:] = np.array(ps["dq"], dtype=np.float32) - - if(self.use_gripper): + + if self.use_gripper: self.curr_gripper_pos = np.array(ps["gripper_pos"]) def _get_obs(self) -> dict: @@ -459,8 +500,5 @@ def _get_obs(self) -> dict: return copy.deepcopy(dict(images=images, state=state_observation)) -if __name__ == '__main__': +if __name__ == "__main__": env = gym.make("KukaEnv") - - - \ No newline at end of file diff --git a/serl_robot_infra/kuka_env/envs/peg_env/config.py b/serl_robot_infra/kuka_env/envs/peg_env/config.py index ca306a78..9d42b7bc 100644 --- 
a/serl_robot_infra/kuka_env/envs/peg_env/config.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/config.py @@ -7,21 +7,26 @@ class PegEnvConfig(DefaultEnvConfig): ROBOT_IP: str = "192.168.10.122" REALSENSE_CAMERAS = { - "wrist_1": "840412060409", - "wrist_2": "932122060300", + "wrist_1": "932122060300", + "wrist_2": "023322060732", } TARGET_POSE = np.array( + [0.76991, -0.03989, 0.08642, -1.5308283, 0.01884956, -3.11663445] + ) + # nisara - P2 + RESET_POSE = np.array( [ - 0.69243, - -0.01238, - 0.2242, - np.deg2rad(179.03), - np.deg2rad(1.41), - np.deg2rad(176.33), + 0.76791, + -0.03337, + 0.12004, + np.deg2rad(-92.54), + np.deg2rad(0.54), + np.deg2rad(179.84), ] ) - RESET_POSE = TARGET_POSE + np.array([0.0, 0.0, 0.2, 0.0, 0.0, 0.0]) - REWARD_THRESHOLD: np.ndarray = np.array([0.002, 0.002, 0.006, np.deg2rad(2), np.deg2rad(2), np.deg2rad(2)]) + REWARD_THRESHOLD: np.ndarray = np.array( + [0.002, 0.002, 0.01, 0.0349066, 0.0349066, 0.0349066] + ) APPLY_GRIPPER_PENALTY = False ACTION_SCALE = np.array([0.02, 0.1, 1]) RANDOM_RESET = False @@ -33,9 +38,9 @@ class PegEnvConfig(DefaultEnvConfig): TARGET_POSE[0] - RANDOM_XY_RANGE, TARGET_POSE[1] - RANDOM_XY_RANGE, TARGET_POSE[2], - TARGET_POSE[3] - 0.01, + TARGET_POSE[3] - RANDOM_RZ_RANGE, TARGET_POSE[4] - 0.01, - TARGET_POSE[5] - RANDOM_RZ_RANGE, + TARGET_POSE[5] - 0.01, ] ) ABS_POSE_LIMIT_HIGH = np.array( @@ -43,9 +48,9 @@ class PegEnvConfig(DefaultEnvConfig): TARGET_POSE[0] + RANDOM_XY_RANGE, TARGET_POSE[1] + RANDOM_XY_RANGE, TARGET_POSE[2] + 0.05, - TARGET_POSE[3] + 0.01, + TARGET_POSE[3] + RANDOM_RZ_RANGE, TARGET_POSE[4] + 0.01, - TARGET_POSE[5] + RANDOM_RZ_RANGE, + TARGET_POSE[5] + 0.01, ] ) COMPLIANCE_PARAM = { diff --git a/serl_robot_infra/kuka_env/envs/wrappers.py b/serl_robot_infra/kuka_env/envs/wrappers.py new file mode 100644 index 00000000..321b813f --- /dev/null +++ b/serl_robot_infra/kuka_env/envs/wrappers.py @@ -0,0 +1,221 @@ +import time +from gym import Env, spaces +import gym +import numpy as np +from 
gym.spaces import Box +import copy +from franka_env.spacemouse.spacemouse_expert import SpaceMouseExpert +from kuka_env.utils.rotations import quat_2_euler + +sigmoid = lambda x: 1 / (1 + np.exp(-x)) + + +class FWBWFrontCameraBinaryRewardClassifierWrapper(gym.Wrapper): + """ + This wrapper uses the front camera images to compute the reward, + which is not part of the RL policy's observation space. This is used for the + forward backward reset-free bin picking task, where there are two classifiers, + one for classifying success + failure for the forward and one for the + backward task. Here we also use these two classifiers to decide which + task to transition into next at the end of the episode to maximize the + learning efficiency. + """ + + def __init__(self, env: Env, fw_reward_classifier_func, bw_reward_classifier_func): + # check if env.task_id exists + assert hasattr(env, "task_id"), "fwbw env must have task_idx attribute" + assert hasattr(env, "task_graph"), "fwbw env must have a task_graph method" + + super().__init__(env) + self.reward_classifier_funcs = [ + fw_reward_classifier_func, + bw_reward_classifier_func, + ] + + def task_graph(self, obs): + """ + predict the next task to transition into based on the current observation + if the current task is not successful, stay in the current task + else transition to the next task + """ + success = self.compute_reward(obs) + if success: + return (self.task_id + 1) % 2 + return self.task_id + + def compute_reward(self, obs): + reward = self.reward_classifier_funcs[self.task_id](obs).item() + return (sigmoid(reward) >= 0.5) * 1 + + def step(self, action): + obs, rew, done, truncated, info = self.env.step(action) + success = self.compute_reward(self.env.get_front_cam_obs()) + rew += success + done = done or success + return obs, rew, done, truncated, info + + +class FrontCameraBinaryRewardClassifierWrapper(gym.Wrapper): + """ + This wrapper uses the front camera images to compute the reward, + which is not part 
of the observation space + """ + + def __init__(self, env: Env, reward_classifier_func): + super().__init__(env) + self.reward_classifier_func = reward_classifier_func + + def compute_reward(self, obs): + if self.reward_classifier_func is not None: + logit = self.reward_classifier_func(obs).item() + return (sigmoid(logit) >= 0.5) * 1 + return 0 + + def step(self, action): + obs, rew, done, truncated, info = self.env.step(action) + success = self.compute_reward(self.env.get_front_cam_obs()) + rew += success + done = done or success + return obs, rew, done, truncated, info + + +class BinaryRewardClassifierWrapper(gym.Wrapper): + """ + Compute reward with custom binary reward classifier fn + """ + + def __init__(self, env: Env, reward_classifier_func): + super().__init__(env) + self.reward_classifier_func = reward_classifier_func + + def compute_reward(self, obs): + if self.reward_classifier_func is not None: + logit = self.reward_classifier_func(obs).item() + return (sigmoid(logit) >= 0.5) * 1 + return 0 + + def step(self, action): + obs, rew, done, truncated, info = self.env.step(action) + success = self.compute_reward(obs) + rew += success + done = done or success + return obs, rew, done, truncated, info + + +class ZOnlyWrapper(gym.ObservationWrapper): + """ + Removal of X and Y coordinates + """ + + def __init__(self, env: Env): + super().__init__(env) + self.observation_space["state"] = spaces.Box(-np.inf, np.inf, shape=(14,)) + + def observation(self, observation): + observation["state"] = np.concatenate( + ( + observation["state"][:4], + np.array(observation["state"][6])[..., None], + observation["state"][10:], + ), + axis=-1, + ) + return observation + + +class Quat2EulerWrapper(gym.ObservationWrapper): + """ + Convert the quaternion representation of the tcp pose to euler angles + """ + + def __init__(self, env: Env): + super().__init__(env) + # from xyz + quat to xyz + euler + self.observation_space["state"]["tcp_pose"] = spaces.Box( + -np.inf, np.inf, 
shape=(6,) + ) + + def observation(self, observation): + # convert tcp pose from quat to euler + tcp_pose = observation["state"]["tcp_pose"] + observation["state"]["tcp_pose"] = np.concatenate( + (tcp_pose[:3], quat_2_euler(tcp_pose[3:])) + ) + return observation + + +class GripperCloseEnv(gym.ActionWrapper): + """ + Use this wrapper to task that requires the gripper to be closed + """ + + def __init__(self, env): + super().__init__(env) + ub = self.env.action_space + assert ub.shape == (7,) + self.action_space = Box(ub.low[:6], ub.high[:6]) + + def action(self, action: np.ndarray) -> np.ndarray: + new_action = np.zeros((7,), dtype=np.float32) + new_action[:6] = action.copy() + return new_action + + def step(self, action): + new_action = self.action(action) + obs, rew, done, truncated, info = self.env.step(new_action) + if "intervene_action" in info: + info["intervene_action"] = info["intervene_action"][:6] + return obs, rew, done, truncated, info + + +class SpacemouseIntervention(gym.ActionWrapper): + def __init__(self, env): + super().__init__(env) + + self.gripper_enabled = True + if self.action_space.shape == (6,): + self.gripper_enabled = False + + self.expert = SpaceMouseExpert() + self.last_intervene = 0 + self.left, self.right = False, False + + def action(self, action: np.ndarray) -> np.ndarray: + """ + Input: + - action: policy action + Output: + - action: spacemouse action if nonezero; else, policy action + """ + expert_a, buttons = self.expert.get_action() + self.left, self.right = tuple(buttons) + + if np.linalg.norm(expert_a) > 0.001: + self.last_intervene = time.time() + + if self.gripper_enabled: + if self.left: # close gripper + gripper_action = np.random.uniform(-1, -0.9, size=(1,)) + self.last_intervene = time.time() + elif self.right: # open gripper + gripper_action = np.random.uniform(0.9, 1, size=(1,)) + self.last_intervene = time.time() + else: + gripper_action = np.zeros((1,)) + expert_a = np.concatenate((expert_a, gripper_action), axis=0) + 
+ if time.time() - self.last_intervene < 0.5: + return expert_a, True + + return action, False + + def step(self, action): + + new_action, replaced = self.action(action) + + obs, rew, done, truncated, info = self.env.step(new_action) + if replaced: + info["intervene_action"] = new_action + info["left"] = self.left + info["right"] = self.right + return obs, rew, done, truncated, info diff --git a/serl_robot_infra/kuka_env/utils/rotations.py b/serl_robot_infra/kuka_env/utils/rotations.py new file mode 100644 index 00000000..3dd40147 --- /dev/null +++ b/serl_robot_infra/kuka_env/utils/rotations.py @@ -0,0 +1,11 @@ +from scipy.spatial.transform import Rotation as R + + +def quat_2_euler(quat): + """calculates and returns: yaw, pitch, roll from given quaternion""" + return R.from_quat(quat).as_euler("ZYX") + + +def euler_2_quat(xyz_as_abc): + q = R.from_euler("ZYX", xyz_as_abc).as_quat() + return q From f3ca7456f1fa846167173ef97666ff6cd28bdae1 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Thu, 13 Mar 2025 13:45:28 -0700 Subject: [PATCH 18/24] Updating the data_collector submodule with changes to pkl file for SERL --- data_collector | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_collector b/data_collector index 744ed656..fc8f0bae 160000 --- a/data_collector +++ b/data_collector @@ -1 +1 @@ -Subproject commit 744ed65614de860e3b5637eb65cad4f8b65b285e +Subproject commit fc8f0bae54984ef851454516bfa8a01dbeb86d6b From af0cb6b4357a49fab9ef6dd4899ee45db0d18f12 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Thu, 13 Mar 2025 13:51:55 -0700 Subject: [PATCH 19/24] Updating submodule for pickle protocol changes --- data_collector | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_collector b/data_collector index fc8f0bae..4ae59dfe 160000 --- a/data_collector +++ b/data_collector @@ -1 +1 @@ -Subproject commit fc8f0bae54984ef851454516bfa8a01dbeb86d6b +Subproject commit 4ae59dfe1259f73d6622be8ce96358638060ee17 From 
fce667eda2bc4249d27bb20cf556711ecbc2bbf4 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Thu, 13 Mar 2025 14:36:34 -0700 Subject: [PATCH 20/24] Mostly changes to logging --- .gitignore | 5 ++++- .../async_peg_insert_drq/async_drq_randomized.py | 2 +- examples/async_peg_insert_drq/run_actor.sh | 10 +++++----- examples/async_peg_insert_drq/run_learner.sh | 10 +++++----- kuka_server/kuka_server/robot_interface.py | 15 +++++++++++---- serl_robot_infra/kuka_env/envs/kuka_env.py | 14 ++++++++------ 6 files changed, 34 insertions(+), 22 deletions(-) diff --git a/.gitignore b/.gitignore index 242d87db..9db332de 100644 --- a/.gitignore +++ b/.gitignore @@ -192,4 +192,7 @@ data_collector/data_collector/**/*.jpg data_collector/data_collector/**/*.jpeg data_collector/data_collector/**/*.png -.~lock.* \ No newline at end of file +.~lock.* + +examples/async_peg_insert_drq/checkpoints*/** +examples/async_peg_insert_drq/log*/** \ No newline at end of file diff --git a/examples/async_peg_insert_drq/async_drq_randomized.py b/examples/async_peg_insert_drq/async_drq_randomized.py index f1d547fd..3c378c81 100644 --- a/examples/async_peg_insert_drq/async_drq_randomized.py +++ b/examples/async_peg_insert_drq/async_drq_randomized.py @@ -431,7 +431,7 @@ def main(_): import pickle as pkl with open(FLAGS.demo_path, "rb") as f: - trajs = pkl.load(f) + trajs = pkl.load(f, fix_imports=True) for traj in trajs: demo_buffer.insert(traj) print(f"demo buffer size: {len(demo_buffer)}") diff --git a/examples/async_peg_insert_drq/run_actor.sh b/examples/async_peg_insert_drq/run_actor.sh index a1998d2c..1d075b82 100644 --- a/examples/async_peg_insert_drq/run_actor.sh +++ b/examples/async_peg_insert_drq/run_actor.sh @@ -10,10 +10,10 @@ python3 async_drq_randomized.py "$@" \ --random_steps 0 \ --training_starts 200 \ --encoder_type resnet-pretrained \ - --demo_path /home/omey/SERL/src/data_collector/data_collector/feb_28_data/kuka_csv_logs/all_success_final.pkl \ + --demo_path 
/home/rp/SERL/src/examples/async_peg_insert_drq/demo_data/updated_final.pkl \ --eval_checkpoint_step 0 \ - --loaded_checkpoint_step 1000 \ + --loaded_checkpoint_step 19000 \ --eval_n_trajs 4 \ - --load_checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_12 \ - --checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_new/ \ - # --load_checkpoint 0 \ + --load_checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/checkpoints \ + --checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/checkpoints_new/ \ + --load_checkpoint 1 \ diff --git a/examples/async_peg_insert_drq/run_learner.sh b/examples/async_peg_insert_drq/run_learner.sh index ef0bd81c..747f12bd 100644 --- a/examples/async_peg_insert_drq/run_learner.sh +++ b/examples/async_peg_insert_drq/run_learner.sh @@ -12,10 +12,10 @@ python3 async_drq_randomized.py "$@" \ --batch_size 256 \ --eval_period 2000 \ --encoder_type resnet-pretrained \ - --demo_path /home/omey/SERL/src/data_collector/data_collector/feb_28_data/kuka_csv_logs/all_success_final.pkl \ + --demo_path /home/rp/SERL/src/examples/async_peg_insert_drq/demo_data/updated_final.pkl \ --checkpoint_period 1000 \ - --loaded_checkpoint_step 1000 \ - --load_checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_12/ \ - --checkpoint_path /home/omey/SERL/src/examples/async_peg_insert_drq/checkpoints_new/ \ - # --load_checkpoint 0 \ + --loaded_checkpoint_step 43000 \ + --load_checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/checkpoints/ \ + --checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/checkpoints_new/ \ + --load_checkpoint 1 \ diff --git a/kuka_server/kuka_server/robot_interface.py b/kuka_server/kuka_server/robot_interface.py index 82a1f131..51f667bd 100644 --- a/kuka_server/kuka_server/robot_interface.py +++ b/kuka_server/kuka_server/robot_interface.py @@ -372,7 +372,7 @@ def move_to_pose(self, pose: np.ndarray, reset_pose: np.ndarray): if 
not goal_handle.accepted: self.get_logger().error("Failed to execute trajectory") else: - self.get_logger().info("Trajectory accepted") + self.get_logger().debug("Trajectory accepted") result_future = goal_handle.get_result_async() @@ -381,7 +381,7 @@ def move_to_pose(self, pose: np.ndarray, reset_pose: np.ndarray): while not result_future.done() and time.time() < expect_time: time.sleep(0.01) - self.get_logger().info("Trajectory executed") + self.get_logger().debug("Trajectory executed") return @@ -393,7 +393,9 @@ def get_best_ik(self, target_pose: Pose, attempts: int = 100) -> JointState | No best_cost = np.inf best_joint_state = None + # nisara : Comment # self.get_logger().info(f"Computing Best IK for: {target_pose}") + for _ in range(attempts): joint_state = self.get_ik(target_pose) if joint_state is None: @@ -416,6 +418,9 @@ def get_joint_state(self) -> JointState: if not current_joint_state_set: self.get_logger().error("Failed to get current joint state") return None + # nisara : Comment + # if current_joint_state is None: + # print("in get_joint_state function, Current Joint State: ", current_joint_state) return current_joint_state @@ -444,6 +449,8 @@ def get_motion_plan( current_robot_state.joint_state.position = current_joint_state.position current_robot_state.joint_state.name = current_joint_state.name + # nisara : Comment + # print("Printing target pose before it goes to get_best_ik: ", target_pose) target_joint_state = self.get_best_ik(target_pose) if target_joint_state is None: self.get_logger().error("Failed to get target joint state") @@ -505,7 +512,7 @@ def get_fri_motion_execute(self, traj: RobotTrajectory, short_wait=False) -> boo if not goal_handle.accepted: self.get_logger().error("Trajectory goal rejected") return False - self.get_logger().info("Trajectory goal accepted") + self.get_logger().debug("Trajectory goal accepted") result_future = goal_handle.get_result_async() rclpy.spin_until_future_complete(self, result_future) @@ -531,7 +538,7 @@ 
def get_fri_motion_execute(self, traj: RobotTrajectory, short_wait=False) -> boo time.sleep(0.01) - self.get_logger().info("Trajectory executed") + self.get_logger().debug("Trajectory executed") return True diff --git a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py index 2160a674..2ad018f3 100644 --- a/serl_robot_infra/kuka_env/envs/kuka_env.py +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -9,7 +9,7 @@ print("Ensure to change anaconda3 or miniconda3 and change your environment name") print("Conda Environment name is: ", env) sys.path.append( - "/home/" + username + "/anaconda3/envs/" + env + "/lib/python3.10/site-packages" + "/home/" + username + "/miniconda3/envs/" + env + "/lib/python3.10/site-packages" ) import numpy as np @@ -24,7 +24,7 @@ from collections import OrderedDict from typing import Dict -sys.path.append("/home/cam/omey_ws/serl-rros/src/") +sys.path.append("/home/rp/SERL/src/") from serl_robot_infra.franka_env.camera.video_capture import VideoCapture from serl_robot_infra.franka_env.camera.rs_capture import RSCapture from serl_robot_infra.kuka_env.utils.rotations import euler_2_quat, quat_2_euler @@ -212,7 +212,7 @@ def clip_safety_box(self, pose: np.ndarray) -> np.ndarray: def step(self, action: np.ndarray) -> tuple: """standard gym step function.""" - print("In step function") + # print("In step function") start_time = time.time() # nisara : Comment @@ -239,9 +239,9 @@ def step(self, action: np.ndarray) -> tuple: ) ).as_quat() - nextPos_euler = Rotation.from_quat(self.nextpos[3:]).as_euler( - "ZYX", degrees=True - ) + # nextPos_euler = Rotation.from_quat(self.nextpos[3:]).as_euler( + # "ZYX", degrees=True + # ) # nisara : Comment # print("Next position's euler: ", nextPos_euler) # print("Next position in step: ", self.nextpos) @@ -362,6 +362,8 @@ def go_to_rest(self, joint_reset=False): self._send_pos_command(reset_pose) else: reset_pose = self.resetpos.copy() + # nisara : Comment + # print("In reset 
pose, sending the reset pose: ", reset_pose) self._send_pos_command(reset_pose) # Change to compliance mode From 67d1875b53b620238a9026c8aaea07f3a3dc81d3 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Mon, 17 Mar 2025 18:38:52 -0700 Subject: [PATCH 21/24] Updating gitignore --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 9db332de..c0071699 100644 --- a/.gitignore +++ b/.gitignore @@ -195,4 +195,5 @@ data_collector/data_collector/**/*.png .~lock.* examples/async_peg_insert_drq/checkpoints*/** -examples/async_peg_insert_drq/log*/** \ No newline at end of file +examples/async_peg_insert_drq/log*/** +examples/async_peg_insert_drq/demo_data*/**/** From 3227fd50477b17a194ad3479439226838617171a Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Mon, 17 Mar 2025 18:39:18 -0700 Subject: [PATCH 22/24] Updating submodule data_collector with fixes --- data_collector | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_collector b/data_collector index 4ae59dfe..b511fd4e 160000 --- a/data_collector +++ b/data_collector @@ -1 +1 @@ -Subproject commit 4ae59dfe1259f73d6622be8ce96358638060ee17 +Subproject commit b511fd4ef702111fd36de7fba38a192ced0e7f98 From 30eabfb088935a21292ba8e4ebf0f5b826b6d2cd Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Mon, 17 Mar 2025 18:40:14 -0700 Subject: [PATCH 23/24] New system changes + IMPORTANT reward calc changes --- .../async_drq_randomized.py | 2 +- examples/async_peg_insert_drq/run_actor.sh | 10 +++--- examples/async_peg_insert_drq/run_learner.sh | 10 +++--- kuka_server/kuka_server/robot_interface.py | 17 ++++++++-- serl_robot_infra/kuka_env/envs/kuka_env.py | 32 ++++++++++++------- .../kuka_env/envs/peg_env/config.py | 2 +- 6 files changed, 47 insertions(+), 26 deletions(-) diff --git a/examples/async_peg_insert_drq/async_drq_randomized.py b/examples/async_peg_insert_drq/async_drq_randomized.py index 3c378c81..2dddc552 100644 --- 
a/examples/async_peg_insert_drq/async_drq_randomized.py +++ b/examples/async_peg_insert_drq/async_drq_randomized.py @@ -428,7 +428,7 @@ def main(_): capacity=10000, image_keys=image_keys, ) - import pickle as pkl + import pickle5 as pkl with open(FLAGS.demo_path, "rb") as f: trajs = pkl.load(f, fix_imports=True) diff --git a/examples/async_peg_insert_drq/run_actor.sh b/examples/async_peg_insert_drq/run_actor.sh index 1d075b82..83f74db3 100644 --- a/examples/async_peg_insert_drq/run_actor.sh +++ b/examples/async_peg_insert_drq/run_actor.sh @@ -10,10 +10,10 @@ python3 async_drq_randomized.py "$@" \ --random_steps 0 \ --training_starts 200 \ --encoder_type resnet-pretrained \ - --demo_path /home/rp/SERL/src/examples/async_peg_insert_drq/demo_data/updated_final.pkl \ + --demo_path /home/rp/SERL/src/examples/async_peg_insert_drq/demo_data/relaxed_reward_success.pkl \ --eval_checkpoint_step 0 \ - --loaded_checkpoint_step 19000 \ + --loaded_checkpoint_step 30000 \ --eval_n_trajs 4 \ - --load_checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/checkpoints \ - --checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/checkpoints_new/ \ - --load_checkpoint 1 \ + --load_checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/relaxed_reward/checkpoints/ \ + --checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/relaxed_reward/checkpoints_new/ \ + # --load_checkpoint 0 \ diff --git a/examples/async_peg_insert_drq/run_learner.sh b/examples/async_peg_insert_drq/run_learner.sh index 747f12bd..0ed99b65 100644 --- a/examples/async_peg_insert_drq/run_learner.sh +++ b/examples/async_peg_insert_drq/run_learner.sh @@ -12,10 +12,10 @@ python3 async_drq_randomized.py "$@" \ --batch_size 256 \ --eval_period 2000 \ --encoder_type resnet-pretrained \ - --demo_path /home/rp/SERL/src/examples/async_peg_insert_drq/demo_data/updated_final.pkl \ + --demo_path /home/rp/SERL/src/examples/async_peg_insert_drq/demo_data/relaxed_reward_success.pkl \ 
--checkpoint_period 1000 \ - --loaded_checkpoint_step 43000 \ - --load_checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/checkpoints/ \ - --checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/checkpoints_new/ \ - --load_checkpoint 1 \ + --loaded_checkpoint_step 1000 \ + --load_checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/relaxed_reward/checkpoints/ \ + --checkpoint_path /home/rp/SERL/src/examples/async_peg_insert_drq/relaxed_reward/checkpoints_new/ \ + # --load_checkpoint 0 \ diff --git a/kuka_server/kuka_server/robot_interface.py b/kuka_server/kuka_server/robot_interface.py index 51f667bd..28bcf16c 100644 --- a/kuka_server/kuka_server/robot_interface.py +++ b/kuka_server/kuka_server/robot_interface.py @@ -423,6 +423,15 @@ def get_joint_state(self) -> JointState: # print("in get_joint_state function, Current Joint State: ", current_joint_state) return current_joint_state + + # https://github.com/moveit/moveit/issues/3583#issuecomment-2035540755 + def adjust_trajectory_timings(self, trajectory): + epsilon = 0.001 # Small time increment to adjust duplicate timings + for i in range(1, len(trajectory.joint_trajectory.points)): + if trajectory.joint_trajectory.points[i].time_from_start == trajectory.joint_trajectory.points[i - 1].time_from_start: + print("THIS IS TRUE HERE") + trajectory.joint_trajectory.points[i].time_from_start += rclpy.duration.Duration(seconds=epsilon) + return trajectory def get_motion_plan( self, @@ -482,7 +491,8 @@ def get_motion_plan( request.motion_plan_request.pipeline_id = "ompl" request.motion_plan_request.planner_id = "APSConfigDefault" - for _ in range(attempts): + for i in range(attempts): + # print(f"Attempt {i} to generate a trajectory") plan_future = self.plan_client_.call_async(request) rclpy.spin_until_future_complete(self, plan_future) @@ -495,7 +505,10 @@ def get_motion_plan( f"Failed to get motion plan: {response.motion_plan_response.error_code.val}" ) else: - return 
response.motion_plan_response.trajectory + traj = response.motion_plan_response.trajectory + # # Adjust timings as mentioned above + # traj = self.adjust_trajectory_timings(traj) + return traj return None diff --git a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py index 2ad018f3..eab0cee7 100644 --- a/serl_robot_infra/kuka_env/envs/kuka_env.py +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -223,7 +223,7 @@ def step(self, action: np.ndarray) -> tuple: # print("Action after clipping ==xyz_delta==: ", xyz_delta) self.nextpos = self.currpos.copy() - # nisara : Comment + # # nisara : Comment # print("Current position in step: ", self.nextpos) self.nextpos[:3] = self.nextpos[:3] + xyz_delta * self.action_scale[0] @@ -239,10 +239,10 @@ def step(self, action: np.ndarray) -> tuple: ) ).as_quat() + # # nisara : Comment # nextPos_euler = Rotation.from_quat(self.nextpos[3:]).as_euler( # "ZYX", degrees=True # ) - # nisara : Comment # print("Next position's euler: ", nextPos_euler) # print("Next position in step: ", self.nextpos) @@ -275,8 +275,11 @@ def compute_reward(self, obs, gripper_action_effective=None) -> bool: # convert from quat to euler first euler_angles = quat_2_euler(current_pose[3:]) euler_angles = np.abs(euler_angles) + # nisara: IMPORTANT: Convert the euler angles of target pose to absolute values too + target_pose = self._TARGET_POSE.copy() + target_pose[3:] = np.abs(target_pose[3:]) current_pose = np.hstack([current_pose[:3], euler_angles]) - delta = np.abs(current_pose - self._TARGET_POSE) + delta = np.abs(current_pose - target_pose) if np.all(delta < self._REWARD_THRESHOLD): print("Received reward 1!!!!") reward = 1 @@ -305,7 +308,8 @@ def get_im(self) -> Dict[str, np.ndarray]: for key, cap in self.cap.items(): try: rgb = cap.read() - cropped_rgb = self.crop_image(key, rgb) + cropped_rgb = self.crop_image(key, rgb) + cropped_rgb = np.array(cropped_rgb) resized = cv2.resize( cropped_rgb, 
self.observation_space["images"][key].shape[:2][::-1] ) @@ -432,20 +436,24 @@ def _recover(self): def _send_pos_command(self, pos: np.ndarray): """Internal function to send position command to the robot.""" - # nisara : Comment - # print("Current pose in _send_pos_command: ", self.currpos) + + # # nisara : Comment + # np.set_printoptions(precision=3, suppress=True) + # curr_pos_euler = Rotation.from_quat(self.currpos[3:]).as_euler("ZYX", degrees=True) + # print_curr_pos = np.concatenate([self.currpos[:3] * 1000, curr_pos_euler]) + # print("Current pose in _send_pos_command: ", print_curr_pos) arr = np.array(pos).astype(np.float32) - # nisara : Comment - # arr_euler = Rotation.from_quat(arr[3:]).as_euler("ZYX", degrees=True) - # print("In euler, send_pos_command: ", arr_euler) - # print("In the function to send position command to move to pos: ", arr) - # print("In the same function, reset pose: ", self.resetpos) - # print("\n") + # # nisara : Comment + # arr_euler = Rotation.from_quat(arr[3:]).as_euler("ZYX", degrees=True) + # print_arr_pos = np.concatenate([arr[:3] * 1000, arr_euler]) + # print("In the function to send position command to move to pos: ", print_arr_pos) + # # print("In the same function, reset pose: ", self.resetpos) self.robot_interface_node.move_to_pose(arr, self.resetpos) print("Done moving the robot") + # print("\n") def _send_gripper_command(self, pos: float, mode="binary"): """Internal function to send gripper command to the robot.""" diff --git a/serl_robot_infra/kuka_env/envs/peg_env/config.py b/serl_robot_infra/kuka_env/envs/peg_env/config.py index 9d42b7bc..7e88ab3a 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/config.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/config.py @@ -25,7 +25,7 @@ class PegEnvConfig(DefaultEnvConfig): ] ) REWARD_THRESHOLD: np.ndarray = np.array( - [0.002, 0.002, 0.01, 0.0349066, 0.0349066, 0.0349066] + [0.002, 0.002, 0.025, 0.0349066, 0.0349066, 0.0349066] ) APPLY_GRIPPER_PENALTY = False ACTION_SCALE = 
np.array([0.02, 0.1, 1]) From 4de15a9f93e1a63d5c34eb095b015a58fb38dc31 Mon Sep 17 00:00:00 2001 From: Nikita Sarawgi Date: Sun, 31 Aug 2025 14:09:05 -0700 Subject: [PATCH 24/24] project sync 8/31 --- .gitignore | 1 + .../async_drq_randomized.py | 41 +-- .../rect_peg/SERL_rectangle v1.stl | Bin 0 -> 84684 bytes examples/async_peg_insert_drq/run_actor.sh | 18 +- examples/async_peg_insert_drq/run_learner.sh | 16 +- kuka_server/kuka_server/robot_interface.py | 76 ++++- serl_robot_infra/kuka_env/__init__.py | 4 +- serl_robot_infra/kuka_env/envs/kuka_env.py | 274 ++++++++++++++---- .../kuka_env/envs/peg_env/config.py | 71 ++++- .../kuka_env/envs/peg_env/kuka_peg_insert.py | 3 +- serl_robot_infra/kuka_env/utils/rotations.py | 4 +- 11 files changed, 388 insertions(+), 120 deletions(-) create mode 100644 examples/async_peg_insert_drq/rect_peg/SERL_rectangle v1.stl diff --git a/.gitignore b/.gitignore index c0071699..be6b8f02 100644 --- a/.gitignore +++ b/.gitignore @@ -197,3 +197,4 @@ data_collector/data_collector/**/*.png examples/async_peg_insert_drq/checkpoints*/** examples/async_peg_insert_drq/log*/** examples/async_peg_insert_drq/demo_data*/**/** +examples/async_peg_insert_drq/experiments*/**/** diff --git a/examples/async_peg_insert_drq/async_drq_randomized.py b/examples/async_peg_insert_drq/async_drq_randomized.py index 2dddc552..10c84b1e 100644 --- a/examples/async_peg_insert_drq/async_drq_randomized.py +++ b/examples/async_peg_insert_drq/async_drq_randomized.py @@ -67,7 +67,10 @@ flags.DEFINE_string("env", "KukaPegInsert-Vision-v0", "Name of environment.") flags.DEFINE_string("agent", "drq", "Name of agent.") flags.DEFINE_string("exp_name", None, "Name of the experiment for wandb logging.") -flags.DEFINE_integer("max_traj_length", 100, "Maximum length of trajectory.") + +## TODO :: NISARA :: change max trajectory length to at least 300 (or see what the new data wants) +flags.DEFINE_integer("max_traj_length", 200, "Maximum length of trajectory.") + 
flags.DEFINE_integer("seed", 42, "Random seed.") flags.DEFINE_bool("save_model", False, "Whether to save model.") flags.DEFINE_integer("critic_actor_ratio", 4, "critic to actor update ratio.") @@ -141,9 +144,12 @@ def actor(agent: DrQAgent, data_store, env, sampling_rng): done = False start_time = time.time() while not done: + sampling_rng, key = jax.random.split(sampling_rng) actions = agent.sample_actions( observations=jax.device_put(obs), - argmax=True, + # argmax=True, + seed=key, + # deterministic=False, ) actions = np.asarray(jax.device_get(actions)) @@ -245,7 +251,7 @@ def update_params(params): ############################################################################## -def learner(rng, agent: DrQAgent, replay_buffer): +def learner(rng, agent: DrQAgent, replay_buffer, demo_buffer): """ The learner loop, which runs when "--learner" is set to True. """ @@ -293,18 +299,18 @@ def stats_callback(type: str, payload: dict) -> dict: # 50/50 sampling from RLPD, half from demo and half from online experience replay_iterator = replay_buffer.get_iterator( sample_args={ - "batch_size": FLAGS.batch_size, + "batch_size": FLAGS.batch_size // 2, + "pack_obs_and_next_obs": True, + }, + device=sharding.replicate(), + ) + demo_iterator = demo_buffer.get_iterator( + sample_args={ + "batch_size": FLAGS.batch_size // 2, "pack_obs_and_next_obs": True, }, device=sharding.replicate(), ) - # demo_iterator = demo_buffer.get_iterator( - # sample_args={ - # "batch_size": FLAGS.batch_size // 2, - # "pack_obs_and_next_obs": True, - # }, - # device=sharding.replicate(), - # ) # wait till the replay buffer is filled with enough data timer = Timer() @@ -314,8 +320,8 @@ def stats_callback(type: str, payload: dict) -> dict: for critic_step in range(FLAGS.critic_actor_ratio - 1): with timer.context("sample_replay_buffer"): batch = next(replay_iterator) - # demo_batch = next(demo_iterator) - # batch = concat_batches(batch, demo_batch, axis=0) + demo_batch = next(demo_iterator) + batch = 
concat_batches(batch, demo_batch, axis=0) with timer.context("train_critics"): agent, critics_info = agent.update_critics( @@ -324,8 +330,8 @@ def stats_callback(type: str, payload: dict) -> dict: with timer.context("train"): batch = next(replay_iterator) - # demo_batch = next(demo_iterator) - # batch = concat_batches(batch, demo_batch, axis=0) + demo_batch = next(demo_iterator) + batch = concat_batches(batch, demo_batch, axis=0) agent, update_info = agent.update_high_utd(batch, utd_ratio=1) # publish the updated network @@ -340,7 +346,7 @@ def stats_callback(type: str, payload: dict) -> dict: if FLAGS.checkpoint_period and update_steps % FLAGS.checkpoint_period == 0: assert FLAGS.checkpoint_path is not None checkpoints.save_checkpoint( - FLAGS.checkpoint_path, agent.state, step=update_steps, keep=100 + FLAGS.checkpoint_path, agent.state, step=update_steps, keep=200 ) update_steps += 1 @@ -364,6 +370,7 @@ def main(_): print("Initializing environment") # create env and load dataset print("Value of learner: ", FLAGS.learner) + env = gym.make( FLAGS.env, fake_env=FLAGS.learner, @@ -438,7 +445,7 @@ def main(_): # learner loop print_green("starting learner loop") - learner(sampling_rng, agent, replay_buffer) + learner(sampling_rng, agent=agent, replay_buffer=replay_buffer, demo_buffer=demo_buffer) elif FLAGS.actor: print("Initializing Actor Node") diff --git a/examples/async_peg_insert_drq/rect_peg/SERL_rectangle v1.stl b/examples/async_peg_insert_drq/rect_peg/SERL_rectangle v1.stl new file mode 100644 index 0000000000000000000000000000000000000000..be805c84987e5f95b3d01de4eceed05adef3fea5 GIT binary patch literal 84684 zcmb82f6Q)KRo~B4{6X;#f5&LWadgns5v17GYPrvSu0lvU0rqpY!yapZt_3{G=IU*IxY0 zp19}Qh6q~3?|vE~ zDIccNJ5q4yzZF1icsnPpU3?53;mM@EtS6G+HH4y{qbY& ze7tFpxaTb&bU!UUp}SHZl$Q^?|C1)7=sT|c!Iyo_@x{kmLSqoRD_VL&8mbG`$(AVk zj%z>tU3VRy^730lg9I%-Aq}-cwMJ=>sPr9b$7t;!&_C|Mo$#*IN-igE1Fg1;d@OY* z3%qVa>E4K_oG&0A*jEi&&X@9fU|;D4t%SrAKK=2xme&LOszIi2HjU8|tzF$(UN=uf 
zwJ{R9D_V+vU|+x2U7<%!1ih-?NP`3|Js}N@4CsS5?~8t5U!#OHNYK&~EM4Wia(grC zgJ}_HCFoJC!JGHhdN#yV=k%(y+7;R^`UhL$12(RY{-KXuoKKi@5VTFI_?7^F|LX^RLFs*jQoLA{GF5w++2#j!y zMVMAAVa}^`8JFCK`9lQns)%!UwJlu|mF66!eDJQSM0xKdq|w(0@0~NJG)UAuc#e<; zMmUVr^Bi8|?nVP+BI7ouH#x`r!MmF4=3GAJ>oR{+J}|a3(qme8#hh0TXRrFb2T==E=oQD(2_j~a0@ZNJy(BSJma{w+n^gE29Qh!bP}*og>r*Qj*PYX1A2 zah!x3v;bxw_*$XV2`gtFS)RmO(ql`ZiSB5KXODy=>kZPztusdTm&qi;k+?{cq7 zt3IL+x&|$k&X#EOmWaOXmY%>^;u<6>oilZ4#OUKhjEQdP32DR}bw#a&M5S|142?nj z#>?Mvf6Vl5=?Q7XT>I3&^0hxZ5k+SN5E_HfU9q*3sI=PE)NYuELPY`}r_4{qPM8- zAfEQ^-*O9yn&EuiPfuP(DCd1Xc<)RrA45Z>3qhCe2}PKn=!6=kvQMa^LfCD>IK`fV(Ip!pmxQa zmsiHSe(#m18xi!Xv|0&sPD`aT>Nh z%qa~LRa49@32F2r^*o2yICfrxaU0V&%6a7@pDe6C9-Rj7Pz3W)_sd9+=|@K}_7M@g z8N0{+(Z{^mMm1aAT1T=P;c_p;@}bd3W;p+y5{P60!f0 zmY#^QWT(8u{znovu0-0Oard_W5gN+12(=X(anjNg(okJki$5WX_BnBH`yZh}!ba+} z^n^6j3e{3{S0pO!5kT|;l@$rLb`q6VE4iHTh$HS+Z5R1C)U|we4lkceq6{|#^5JuB zdQ~aJd_L+!Yck>GaY7PRHzfY*b3Re~A4$-w((yFmEiv{#qGfobA8DVRqaBm5>d*>g zTI#waVm==|Y9fmEXR7G6(m)^7N@`kqLK z%^tihtEzM6gK4!Z=Df2KW{cjX$D&RNZWpEJ&dG-4(@A&OR=4~_Fg&Z2fDl$C4LwX%H7D_i0vMAVvlRa#?v zv|ZPrrPA4sjlK~P?aI9>t@?;Q=o++CI$NUATO#_lTY5rw6(gK$kf?Od)S*El#zeRD zgfwDS(|Nct<2X@t&WWKxB1Y;P5YmXbcANy%jO;|w83BX_iRvFYhnpAgO6}@$A|no? z)pq-Q>ried}8r?XFbiWgqBL@icV;-U6ofp3AtCbe6>WjU0N!gD@A^X=ZWeoIU5*(=ylW2 zdGAcCKKi?2KB%#+^J*Es|DmTlzmuvPjSQm(HTuv}Y2P={vzpO2BC2KNdWjJl88}Ck z7iXMY{fM$UN7S4+pL?!+Jnr|Oe1CuM+k6ZSo1O2kyW+i5Lp8{QX6mUE3)5?Fs`f?ic_AP&-?rRsk>CJu$| zU6H6Vgoc6hUD2y%(JfKWXL<5$uiRcrc$t{jaoz1nx+yDOD+u>ush=<14WT{yM#HX^ z77|tp#}S>MXe2=|E9v9?)Gv*cQ`b%q)#-`!s|w! z8Vy=VM9v$6782H`kL#Y-wybC&VSVLTJ6LS(w2-j=c&z;>610%8{(r39B@(Q`=uw_q zqxYkv=hCHxMD*sStVqx+Mu~=?B}SjWz+I7uk+0E+8a%4)M%{Qi%FWX!InVwE`4bC?N-xW|U;j79r@rpHLW2Y? 
zJt2)>`_V7kSz&mDOrp}u@=G7~{m0v%@x7rzf|j0;#zR-G>~6jNGhKs3rI+PDJn#FC zcYg7|4h<5t^n^4%{$(Gxd*~CN0(izdXvSu2uB1Jp7ZdJ^u4|-5(kxXz2-Qs3dIm zcMTGiwug>qyc!xH4z%<{0;Zq&Ke zM-sI3gfy&`TtvG%kf`*sy!vy#9&MKdEj=L(YwZ`&KMo`+y)0k;zg~nsNP?E0kcRbI z)wli~NK|@Re*5khU@Rd)OHW9{MxToquMQ+Cy)3_T%ky&Nn+aNaLK-$wU)b2LQS?Bf z(q>t{>Nt(x9Z@ zSb9PlQG+{`2#HFs?dtho`?Am=p?V&co{&bgMAslu>9v1c`}LGbEz0#_4^^I@!-_HasJpm1m!bw!x$M(J5?X2O1t~)F}fwJK$)6x^tU=&WG(mo#VZFCI{5;oSR zr6;7Jk{EG2iAwv}o>!WPgN?Om>50fkM&TqX?PGhENX}|1-Ghy_Y3YfmkBq`eR9gAG z4|N`Mt!vQI6ViwX;DjjJ$M$TYGht(GT6#hn5y`j)iAwv}o;__QY^+U7Pe>yovMAjm ziuSQRM~s=Uu{JF|Aq~duBr5G=dyafFVPkt*dO{kE+euVfb^ez9G`1sdFH28ABjWZV z>bh$_#=Meu<%F&~EIom;@_CR%rPunfHJRw=jN4T{!_pJdh&tb?L`YP6ZCAD?6B;B` z&%@Fa(ulU}8YC*c_77W=2@Mizufx(4(uh9j8YC*M9NxFLHJQ*LK}%0aBSr?-AW`Xc zys|Z!&>*4lV_14Z8jQk8RC*nEZA~ULNN5}!mY$G?N@B$ABr3hmALB|hq49iJdLr_X zaXX1huk+w2k(|}MbTuyxOHV|7WZX`o(#q%EsPmX>qXx^;6ViwX;Do5_u5rm|p);X4 zVOV-X8WG9328l|q@zv;QGog5ASb9Pl5s|qDiAt}r-54=uLUG%$^n^4Rx09&!8t;yg zZzdFf4ogo+gK_(BzV?Uq-}0xoT_|p^^s>D0&wp^gEN}b){l19X|LJ{iqGd~b;JFVP z0gZ=@xP^q$!z*YjPiT1DPD@XqtUPWfQR!uQ*WZ3{|0_>*#1wx^AoPFWkrIPo{&cLLDwKrY1R2p z?LYaJ&j}3@wDg2DVq|a)5|vh2J+yz$pJBdm!%V+%f&qD*mftH?#d}Q2CqSDLq`5*EQlvT`8D%}GuJrVVhaXX1h ztImHJbslrAYtYgY(ufG)geZDhp752whPF$BmY$GCL^7^HqS7j>ccBlGprt3I5fNFG zZe6SBW%+*({U*i|614P$G#Iy&sPwY@@jra8#^c@E&q>hI6VhPZPNLGP^Y_kUd&cdw z^aM0K3MWzN%!id{jP25JBChL}oSs$ShW#vSak6U^|8c~Bg^h;h&@Jt2+7f7bPO5|z&RBQ!|Fc-hD|&3(y&&N>+LZkJ5lM3OG1N0#0fVb zq!E#f-xY~UXMA;${UcZCBi?aKPe{Xht*-AHBr2V;U1*SqxXmp+A&rPEee@ww>1By1 zGc-uh(i75P-2VKR{GIEs{TJ759KU&AY0KsE?CUT5^SAl$XM&cVfQComBr3fuqM!b} z|1@hjK}%1dtUPWfQR!uQ>0_>3|AFs#XJ|xOIYCQLNF!>{%6h(5(e}*w^v}5d6Hoik zp+SO{o{&bgUDqH{>1BD(XFcxv+n@DcLW2Y?Jt2+gQLaIv(#qjGFCKj5yF-HnEj=NP z7#UoHM5UMIqrdHNasN-gCp1XV(i75%5zaM8RC-xn@&`Zs@cVyVv58^^614P$G*l8} zy`4m*UE@k=fH;^}T6!Y#k#YNm2-?ePS7jA*lyZKsdrwQzB%(etZYNP`-S<0pRp&9+ zx&|#hAq{IKI44urD%#ub)oQyWtSzObC!`UvkKYxEN_!u?T78g&^@+6fgfy(z>iVuh zqSD^~uhv*X!uobvdO{izS!(n-kf^keiC1e(Bw=GzT6#hnjN3_6T6O;HtJBz?aXT$N 
z0S%ABNmP2x$Cy|0uAIJt2)~iLODS(rf>C+z*4qIxIaQjp$LXL88*i;X8Lvc-g-T z4HC5UgfwDga19cbUPqrNeZdch1__NH!_pJdh!M^;NK|?qcmM0B{7`6+&^S0OJs}O1 zgmpjHAW`Xc{uoy}As!xb^n^4bvK*sN5=F1^?il%I!p5kK!h1p*jN5G&QRgX- zig7BfI)97(u0JPJmY#q{#O+1Yb@RNBYv)}#q3hN;nL(34S!o^|=kzllE1l<-LL*Ao z3ESH|juSMcM?xA==NHl-QEA^xJ@)5hY;Q9yJt2)~yRJc^(s_<7@3O;1Q8 z`k-r&sI+qE=ix$w1T8%wjTjkRgG8l$FZI};ld-+cwDg2D7=@Flbe>O)e2}oc&Eq&h z(-YEANsPFiM5Xg=WN3i!)9OQ`Cn6six7%)L-c_aZylCYk3ESJuvg(PbkBr+%R9g4# z=Uzi2=33R@ftH?-MnnK7MA0_xuBUWEg9I%-A&rP+T!TcV^Bi($kg&bY?1Mcajflux zgG8nC40LFau)WP3OL{^YjKWD&I?siN1_|3s%`vejq`|nIM5R^de!e|4NYK&~(1^Ib zu2uA!k1?<0T{)ra4ogpWQR#KO8Yetv zLgUA<^n^4Rg_EfCI_{2>Dl?&Ra9Dam8Y+nqx09&!I)98S&4kADVd;s;N5<_WD!tBw zqeOC6^BUB=G%P(4^^tKqiApP<{=Qn&`5d>Gr6;6etpss<5p~@)E*UL!CKM+OOHW86 zA{oy)iAt~W)#zz6p?GIldO{iz!$s*9QS=(yjS*ue6t@jaPe_AtJBdoKG3prkW}@Qg zoA*5-4aV&@s_E3^ezCZ{(yDVm-|oMk30isr8WFeGwTjO3KC^}sx~}EWEj@v<@+h1{ zrSsfUXfSTKu~z4n+|m=$h&sPeiIAvtp1%qW5;oTA{FPgJLK@L_U4ulWeQeihvCtr4 zV{KY`LK@KrU4ulWl|w)E78)dI=?Q7faeG~>=sedK8YFD2)ww>m^n^4Rg_Edso=*%7 z5;oTAe4<-=LK-TG5x0}5be^*e4G@05eQ5MVBKFX>m zqCPTix6wJTRcYO~pL>mb#O&-EwDg2DA_6!eiq7-9p+UmN+HAW$A&rQAT!TcVeQeL3 zHWN11rllvO5iwkpZV^TM*q$TCOxRePmY$FXqi_zC#1o+ zokXSAcz29^Goko%Sb9PljN9$>-*KF8ue6_mJlgs8Pggu&#CpE{y=mDJH@)> zKi_U=64TNX(qI%$qSAf_^4Onmw=;=p=?Q75j7Ho}qSAf_GOsid2RoCPmY#@wWZX`o z(l)=n|Di0AoTF5O2U>a}>LcTJ5|#EdkXcVT*SdzCVoXaDytvKo;DM-^n^4Rx09%}pMlH~V!bwy*>mxLxtelAQ zaZ67~gK;~FN@u$Y4H8k$Zs`eWFm5MN>FgiZ*RmoJ?bR(kAq~duBr2W#JTyo|zjI4Z zNP}@ZiAv{q6&fUB{BTQ8NP}@ZiAv|V8yX~H9CS-hNMrGz^_&ceO6U9$8YE&ocS}!1 zJ~D16QR$oqLnCIjowQn2IF>}tg-WyHjc07+t>7BKYqU5e?Jqn6iose9)**rbe{K_HJpg^K5pp= zl$FQrBr2WfmO>*+*NHesLdaG^nhmY$FX<8~62&Qp4!K_brCxuqwh!ML47rSp7ZXpo5W zf^O*vX{aPd+)kp>dCoF4K=}D~I|J#Ko``&8+-_c1Ktwvvi&j38uzS~e(V@{0Q6Cw% zlc==r+t0m*M$ENpB?nr1LK@aert|G~p3<*Xbe`W04HDLtbbj{+gft?O@w*~XY43wN zZ5$dTtWTt+C!}G$R@Zk85|z#~(4j%X`nJwfyQL?j!6=+WrSn{PXppcmO6S7e(i75P z+)kp>mdo{gduWiLr6-`_aXX1hulX4BO5T+dy6&*_1j@>za1xbX>%-nRn9IrumCvyB 
zgfyZCcPbGQm0sJGy*CjWBvjAC(i75%mgpKJD!uj(d;cReNT|IIOHW86dX#IBsI+qE z@1BGP30itW8Zk1s28l|qrPujmT5V*B?+IyGYtJa0M5WjGYV@?3us)$T3tgip zq+z{QCF`Y2qS9+@H%5$^u)eJ~BweE?q`|nIM5WhwcZ_^9VPjNA;XNUZ|MBiuT>Rt5 ze29;tU-+Ysd*p40kGiV3z0#`l4}YY-Co_ou`^6voDYR^fcf9Bg@jaO!9x~$lNhm$M zg1){!M-9TH547}z^6{eoc=yHMc1TguXpo?#C#3Pf>%Z~h zqu%iZMfwL4mA1c%^Q^}{a{J*oLW2Y?Jt2+d*)O|z@S9)Y8YC*cEINP_@d{A1_@evLK=5I@vASM`^;Z)4HA`JmS6kiKRw>_;6Dis614P$G`{C; z|KQ^HzVv^)28l{9%a4BJAKCTC`yV7|=?Q5(`+0xw;w4Y|&F3ae(+2FZD^37r6;8ELI3S(7xz8?7S|wA z>1Fw?Z~Eoq?|$D;g$4;)dO{j^{`%)#{O$*y=o%y{t+l1M9rbS=OGwbt6VlN2MX5eW zRC-yI2VG&+AVEt{NJHhM+)IN*rI$r@p^{iNNYK&~(olV?7NtR=(#xVYr5ar|NYK&~ z(onlqYm^3wN}I*{#@aHq-cEv+o{)z6pL&%vNL1SXa*FkF^{_P`BxvagX=ohL=pYRe zm0lK&RT?Q)4HC5UgfuiBYLt@(iApbv#$1hfs|E>LdO{i+*EL#8gG8m5MRSKn?p1>X zEj=L(pPy`=k_L%Nn}*GSnt@i0iGZaiq@j6Mv!`p2$h5^*HkWHAT{TG1Qg=c^8j2Sb z1xSNLrI*Fxq3enWRt*xg^n^4lwt7g>j5J78dRgA_qEE1s35v1QKS4_+-r$$++Op>Uym+sZ74=bxAtbEea6Vk96yb^V;x=o_e-mb1z zyCPxroR*%DhPA{iim|Ld)JBu2wD*sF^^b}8!;eTyPe{Xh)RpLi>T5-0+WYyw`Z)>f zcWEh_gfwhqxDsQD#*8E??c>$H#w!vwex#)*q+ui6l^7E>_9aniA9wdP?vk)^FfBbH z4I8Pi#MrJeJc&xHroMGw^9Ko9dO{jD?6lgM^L7Y3T`R*tot^#3T(8mG{v{z1ZKm9+GPG;Ds_dBkKI zBr5Ie4A*Q1y1r_Vu-P#!Js}O7XLpL2ln)Y>_Vto$Hj{=137h59(i76KcwrZ7G7E`H zFN?(_*DN9k4HC5UgfuLQ*(qXDK1ftrmJh$C|E^_4f|j0uhOd#4sB~&yoixU(nsVrt zod}?=?Q5>OLPqqmCiQ3pW3by z(O%us6ViwtbwU)KeKRyjM89)OPe>z12G<}_=^SH1gG7uUZs`eW#0cjaBr2U_WoVFy zanLP2A&nTRU4ulWbIcD75;2~;r6;5jGtN%!ibSPz?g|YOF)z8LC!`TGr)!X?bk4z{ zK_cc?xAcTGA_8y?5|z$aA~Z-uoZyz8kVZr@u0f*G854yDiHLXH(i75%h|D!eR666t z&>#_Un_GH98WCB#28l{%yc-%MBK~wsPe>yoWY-{3>5SV$gG9v9Zs`eWM5ON;Br2^| z?av>fL4uZ^kVZVQxCV(z=W|qOkcj6RxAcTG;>pW3NK`tX2W>9YIs=J#K66V?NF$yg zU4ulW^C>knNW^odTY5qo@g(dTBr2WH=b=F&o|oOy6Viw$de0BEKjfq%S4cyWb z(ukFbRf9yPbG;-qNW}WZs?ieCh!qysAW`XD_X!OWu@2*wo{&bY6uAb8O6U4jXpo5Y zB)9ZLXlRYhHAqxC*WpSd3A^rf#XM@&JrQN4wZ4&$B#O?}!O&3TItVMD>xy}#(G${8 zJgtaHBL;~|d%L=>$Y$+>BoxgLOHW9{p9}2yKpG?}?fpaRyP+`=>&mZNdO{j{zEQ;F z8YD8E>(rq^!up+7uvd+ikcOVq6fsGIM5S}RJv2zz_@R|}xAcTG^t`EvNg5<7o%;qc 
z?vk)^P&*24=?Q7*xmgjDG)Poh7T^Du~~Ng5<7o%?E`LBiq$?YOz6C#0cuB}Giq zAW`Yu9}Eo=7Vl{1&@DY74ZFVfAYFq*rE{M%G)Pz;v;*pvo{)x>llD?wgG8lszcw^T zSY2o**DXCE4XbbM3A+Y~O6R_FXppcrr5$Ov^n^65-D+>!HAqz2pBawUGDCxeJrAU% zC!}HhPkZ$Kj69F;`Td7dFONZ44VlMZg_32E4P zs52Aec`%8h^IS$~kgzdVCpO&D6VkA8U1vXBgG8nCd`W1KuvtYXPu$WI($M~jM@-fS zNmM$|$y{CMK@v6x>I98jdO{jD&+06W=YvG0^Sn=Jkg&O2CxP736VkBRUuTNO3QH12 z`?^n_nVJcUNz&32URJg*xcJXuch_BCe!T0hCmdBKIte(=Z`<$pj}!#olq(UxD^C?{+3=Az^Q?=hrvS6ZEoo(erzR zNgS-mY&@wu;T_tW{`s}XB&@`&JYXU52VeFz#}^-OnRD(MBC!^t@TspnzVGdiZ8S*G%ih(?HkPI9Ij4n0y-}ya`8wUZHI@92#P*gbzDfc-_cfL(oDZ%CI45ArbX-_4KZ2ArWn% zA!s2H?QJF&<(w80(Wd9pJt627{iY#kArZZ~A!s2Hqr@cyy<${q2wF(QXjjC+vTZ+y z9SM5H=vu@I7(ok(7_~1U=oK?V5rY0%!b$N%@rt^DJvj#0Btgq}X>hnCEC+KDUKfjAL z6SR=9KEJo9uhAeuFB@0(7WEZjb)g?08B(2IHjd@*=eweXgpH4Ti~1T367;eW{7mW6 zLc+%Ry+wVE1_^rEypnBf&Ic_dY_`K+SY69$Eh`fAvZ!xg-`*j?>)On@x2VtWo%DQH zw2-h_dtcuvq6RG_EHc>F*ON%lLc*dC{Jm5Xw2-g}XJ22MB0&oYi=y`Rtrrrskg!P2 z%541AX%e)Mi1cxamJIZ$FRw+f(7re&Xdw~#YY19ML>V>&EhM6zuAkl&EhM5XGz2Xq zqP;Z)EhM5%H>FF0UeRwFf)*0dn;U`_5;00#LeMKlrG}t|M2vQq5cG=ixFKjE5u^4c z1ifNrxP+ir%v}vZ3yGNFE+Oa@GipQ7LL%n*O9**NYKlozWf%!Jb&oz%<<;EBH8g}c6)pMx^@Ll#}ZmdSQ+NG<7W*L^orfA zM@}_pAz^Ld_=T@}Q*&1&=oPzJ4M7Wu*qdqyT1Z$g$glrzb6#&8(aU;s{!IcW4vYUB zb}UiWjh-3C7wu;aT1e{K?2esK`-UQo}-G8fc@lCky}W_^G6W}2Q~z~tUv16r-&6W zf)*0-ymJXbuXqZZ3A?kyjux)BQ+sYD92qzo#25c(4adiT)g@kq)wflE^SG15&{g3Y*%o-%<6(_424O&R39gpue%o-%C*IBosf`Uk7gLoCLk%WL2X<3ke(T^4lqMK1k3@quuyQ$$5fa zaTaU7D<4H^Az|tKuF0&iA<`?(V$B-f+G*i+HO`MOq0Aa2=%u;jq&_?!BOa40l6U1AB&I?XwX7}k#lNzyW-an_4V)Z9ddiK!>-@0e$YaqzU#f6 zqe#$;Ckp1fqJ>0!uhAm5(6Bb8pTmv>z3QaG_O56l5#OC`G)T~^PF!p?Xdw~b&%B!X z@LVV#BE7m?1ik94$@Z>jArW6Et-Cs`w{_T&pjVxl*=o>2BEGWPXpo>+o&DM7 zoE8%HS2uLZVt-n?BTKLrgBB9f8-E#W?&l=vrBWDw=W8Zd2ds1Z zD=0dP)6_XFB;w4()zel&f?l?2sFNIx1}!9PWfEVNUrSd1olkNEab0&3+Eypq(xrt& zy1ik9p$RIrDB+_qIvUD^^7s4O&R(>E-N> zd;VP+dd13Zqd^M^J@1^|KhHgSdg>M*VpXIx@~%kGE7ndLf)*07!qO15kkD?;_)CU!os*!KcAduGS3FP9ODFLy0%`7w z782S$>orKwOK%id1kz~GLPEQMea=bHOK(Ql8w-sFEhMxzX{&0N5cINnKK~-)T)Nbt 
zm+i>tyC#hWEhH>5$iK)qYmlIqay{M)nF(4*Sk$K#*5?;T~3e!2VmY|o# zaQYHTQy;XDP;K^4UL@#c(V)JB(rC~^!Xm&kr(Q_V%c4Pj38m4Xg@i?bXU^h~pqE91 z`VvZ`K?@0s0MDGPAwe&T?ery-tEck^EhH>vJaZO@1idT*JaZO@b)b=9JZ-Vu=GION z343}uQ-idS&}i2`d1*!-Elv@@XyJ^B=DVVW1S7SXuwIn=fYu-Nme^5s5YM=to(Wn= z*odL`*e)UHCGCFCixv{rANB0qXwX8!MpwPTcL_l+YYVx$z}?irhl4lam9fK?@0+ZSd98;jVd*Eu4KNR;lW)2DGDvgj%Te;F&l`g9N?W zbq3SPbvVBk3AHyHF-pU(ub=hI#IV@NkauDu#-rGyCt+hrju>7LIK?@0cPEh~2grJv=mFl%c96aNeb6%Hs7&{Qn6ALXQVrQe#AVII#m1#6+ArU(? zjRpyN#g0&?fv*&hh@GZR0|dQdXRFbmg+%O_H5w%76+3i|1}!9F=dU!@Dz`c(L9f_d zEFxP833}C6Dz*_aEhMzgV7srSk$1Hr(#y23+L}HI39jzU>@lSVOc`!(;!fMhgjzL}UFjHLM@{X{x*yy`>DDV(fK95hn14p`^02Grz( z780?7G`B?8AVIHKTWU0DA;I^J&X*N)YpW9ZjqjPDg@j6^KO;CXt0pqH&{G&l-Xj60yg22|=&e$1B46hJJiEBfqg_EiofKt*EKwPiGujNLU*? zv(7+*Ua^YTXwX8!+St7Maqw(dK1k5Z@{r$Kn`@9364olUI@sJ533|nfUJ-d$By|iLr&x*}m(L%!NKi;m{mK6zl#rj;MK?@0M`SF&}R)Yk+Vy&;!SXb`cLc;oE zyzO+N0fJt!KG$f_Lc+$Cc!O)JL4sa3j>Y?GTY?r6vDVkrISG2%h#C=cXdE;GYiy^5 zgpIoqw{P!?781I{_08$61_^rE+F3kDZ8c~ip^{$T;oWMGpqH%y#?#$agBB8MG3z_L zTMZKQiqn)$eb7QeE#FpEi#T|7SbdP7m$cWX?Jgg~LP9-w{c>cNk3rB&Ww@>yY&B>h zp^?Z=W;XdCK`*t1b@gMbK?@0ukG5vrXpo?n+O(bOC}J(#Xzjy7!qQq7FCy!M*P@sG zUgz|Sphj9q*xkl{ZIkn~kg!tFccU5&UW;B<>ap9q)u4rh)uz6f)o75Qm$k&$6W(gj zLc&_5_7EBk67-7wgodDng!L}%G|Yt6qW38Kt=_O-|GN0_lJ>gp)8%7WcwHMMwA0XN z@LKdz8LlfuTMb%B*l4GnhDL(~z0?-g6_%|AEhKEzj&E6P30uuRu3s0@*SOz%yy>m? z+F!ubPyI#kIi{wCL|n1uZ0O zU0m<{G#Vu6WozX6(siRj3kh5CJldDP8VwTkvQ>3`f4kA3g@jgH`Wc4=z3ME^w(ZhF zLaRpo%t?Y?bq;8&K?@06fj%?tlAu?eXWDAeLZZ(5-H4!xmLgW*_jp+wl03AC6b_**2~s!Zf|o=3yFA(u4(Ng=%w|tSHI@2 zHs@s_Vc#h`Gcxd6^wN6S`<|hl{cS#IAz@!nJ2PI9pqJLmdJS4g*f-tIjJqW0r4_cm ztY{%&Uz$5Je~_S;*3Qn=d09x<_wO{LHuDFsMXyH}S(2!4F>h;-*P@rc{P%(9KKKKt zbxsS3h=cbxCg>Gk{@a}rw2-h8iPO>Be2}15d@HcgpoN6hYqDF ze+faa*zIo!T1ePTt(A!)4vYUBE=bTT_U?;V0gFXxArbraMWhA^dd1#-5vf573F%o} zatT2%mBM( zA?T%9dwmmW8}-pbBKF&^DYtW-lc1NLMb@)8TY?r6^}T}|5%jXvZhdQV&iO(0!M4O! 
z8GAZ2;WbDL3Dwg{Wa%0t=oNc9jRq|wRI?|ufoqVUm+hWtuc*YYtOctrciBNP=D*W9IRSYp>Q4bJr?PIv#cDnV^M)BDH=$iUhsl zq+_E&3kk)K>xtj(U6G)dwVrsVdP~qkLUHhV;`c@by%e2WeKh%?2EDX$u9813U0O)! zDPw&PdV5zS=oKd&8x2}W=xM3fAVDu{Z}Cp`_O56lp{KsStVqyH&!Q*wanP8k5qY!* z?F}i8SBbWS{Z{>>u0@0t5~pS-S~ebzVDPc%T#D^9mI8nlqmv(NhG{Z@knz2aPN zqd^M^J^QTR0|BrL5H*mM8zmbf9(%e1fC zT1ubuwD7v>k7sk91ids$O!c9%@_bO8UOF9Qt4PgV(LzF_-PEp9g9N>FI>=Vs8Vy=V zXw;tiM{1Cum(KdwI$);(3kl6WQ$H^a5cJYnA6o%!G-x5AS#*qDIl_5;kf0Z%k0k8Q z=D36eW1^XeardZqrJ^-mlhJXPocf-MuP;sv}$D0Qd8%& zkgz8(eGTyvf?lzM-4OJmhDIKZ8jJnRIj4n$?Vjjsh)vE(&`TqzM$%IaT1eRLiN1!| zXpo?nW&(}kry8`7u>BT&4e=6!UbcIpuOT-5oEr4f9H{y0^sZp)?L+ zHRF(=SCslydCmQU77|gLjRpyNMeAuaXdw};@)Ckx(ejHpEdJxWmhoL2&FAAQq{|XL zZ7;Vu=d_TB9(*;4gHkG867-4@qtT#+M2tjNbzQ%!gHqBUL9ZD3W{ty$GRBIRqJgn4 z7EgVT=(=;xX(3^2HSw3swgfFCY^^5#lG&D^g@j^>vuk~MEqW<3=vN@AL9bX!M94#amKhF8EJIR$s%aguUyjSb9Qm{Ped*P>}PgzLB z&ekOay` JointState | None: current_joint_state = self.get_joint_state() if current_joint_state is None: @@ -393,8 +434,6 @@ def get_best_ik(self, target_pose: Pose, attempts: int = 100) -> JointState | No best_cost = np.inf best_joint_state = None - # nisara : Comment - # self.get_logger().info(f"Computing Best IK for: {target_pose}") for _ in range(attempts): joint_state = self.get_ik(target_pose) @@ -418,9 +457,7 @@ def get_joint_state(self) -> JointState: if not current_joint_state_set: self.get_logger().error("Failed to get current joint state") return None - # nisara : Comment - # if current_joint_state is None: - # print("in get_joint_state function, Current Joint State: ", current_joint_state) + return current_joint_state @@ -435,10 +472,11 @@ def adjust_trajectory_timings(self, trajectory): def get_motion_plan( self, - target_pose: Pose, + target_pose: Pose = None, linear: bool = False, scaling_factor: float = 0.1, attempts: int = 10, + target_joint_state_in: np.ndarray = None, ) -> RobotTrajectory | None: current_pose = self.get_fk()[0] if current_pose is None: @@ -449,7 +487,6 @@ def get_motion_plan( 
# return RobotTrajectory() current_joint_state = self.get_joint_state() - # self.get_logger().info(f"Current joint state: {current_joint_state}") if current_joint_state is None: self.get_logger().error("Failed to get joint state") return None @@ -458,13 +495,18 @@ def get_motion_plan( current_robot_state.joint_state.position = current_joint_state.position current_robot_state.joint_state.name = current_joint_state.name - # nisara : Comment - # print("Printing target pose before it goes to get_best_ik: ", target_pose) - target_joint_state = self.get_best_ik(target_pose) - if target_joint_state is None: - self.get_logger().error("Failed to get target joint state") - return None - + if target_joint_state_in is not None: + target_joint_state = JointState() + target_joint_state.position = [float(val) for val in target_joint_state_in] + target_joint_state.name = current_joint_state.name + # print("Printing target joint state: ", target_joint_state) + + else: + target_joint_state = self.get_best_ik(target_pose) + if target_joint_state is None: + self.get_logger().error("Failed to get target joint state") + return None + target_constraint = Constraints() for i in range(len(target_joint_state.position)): joint_constraint = JointConstraint() @@ -474,6 +516,8 @@ def get_motion_plan( joint_constraint.tolerance_below = 0.001 joint_constraint.weight = 1.0 target_constraint.joint_constraints.append(joint_constraint) + + request = GetMotionPlan.Request() request.motion_plan_request.group_name = self.move_group_name_ diff --git a/serl_robot_infra/kuka_env/__init__.py b/serl_robot_infra/kuka_env/__init__.py index e1e059e7..4e90e1a5 100644 --- a/serl_robot_infra/kuka_env/__init__.py +++ b/serl_robot_infra/kuka_env/__init__.py @@ -4,11 +4,11 @@ register( id="KukaEnv-Vision-v0", entry_point="kuka_env.envs:KukaEnv", - max_episode_steps=100, + max_episode_steps=200, ) register( id="KukaPegInsert-Vision-v0", entry_point="kuka_env.envs.peg_env:KukaPegInsert", - max_episode_steps=100, + 
max_episode_steps=200, ) diff --git a/serl_robot_infra/kuka_env/envs/kuka_env.py b/serl_robot_infra/kuka_env/envs/kuka_env.py index eab0cee7..3d85aa87 100644 --- a/serl_robot_infra/kuka_env/envs/kuka_env.py +++ b/serl_robot_infra/kuka_env/envs/kuka_env.py @@ -23,6 +23,9 @@ from datetime import datetime from collections import OrderedDict from typing import Dict +import logging +import os +import shutil sys.path.append("/home/rp/SERL/src/") from serl_robot_infra.franka_env.camera.video_capture import VideoCapture @@ -102,13 +105,16 @@ def __init__( self.url = config.ROBOT_IP self.config = config self.max_episode_length = max_episode_length - # self.max_episode_length = 5 + self.max_episode_length = 200 + self.task_reward = 1.0 # convert last 3 elements from euler to quat, from size (6,) to (7,) self.resetpos = np.concatenate( [config.RESET_POSE[:3], euler_2_quat(config.RESET_POSE[3:])] ) + self.reset_joint_pos = config.RESET_JOINT_POSITION.copy() + self.currpos = self.resetpos.copy() self.currvel = np.zeros((6,)) self.q = np.zeros((7,)) @@ -131,6 +137,33 @@ def __init__( self.save_video = save_video self.recording_frames = [] + # nisara : Saving some important information here + if not fake_env: + timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + self.exp_folder = os.path.join('experiments', f"exp_{timestamp}") + os.makedirs(self.exp_folder, exist_ok=True) + + # Save the parameters (config.py) used for the experiment + config_path = '/home/rp/SERL/src/serl_robot_infra/kuka_env/envs/peg_env/config.py' + parameters_path = os.path.join(self.exp_folder, "parameters.txt") + shutil.copy2(config_path, parameters_path) + + # Initialize a reward over the whole episode + self.episode_max_reward = 0 + self.save_reward_stats = True + self.reward_stats_file = os.path.join(self.exp_folder, "reward_stats.txt") + + # Setting logging + self.logger = logging.getLogger() + self.logger.handlers.pop(0) + self.logger.setLevel(logging.DEBUG) + formatter = 
logging.Formatter('%(asctime)s | %(levelname)s | %(message)s') + file_handler = logging.FileHandler(os.path.join(self.exp_folder, 'reward_log.log')) + file_handler.setLevel(logging.INFO) + file_handler.setFormatter(formatter) + self.logger.addHandler(file_handler) + self.logger.propagate = False + # boundary box self.xyz_bounding_box = gym.spaces.Box( config.ABS_POSE_LIMIT_LOW[:3], @@ -143,9 +176,10 @@ def __init__( dtype=np.float64, ) # Action/Observation Space + # TODO :: change to 6 self.action_space = gym.spaces.Box( - np.ones((7,), dtype=np.float32) * -1, - np.ones((7,), dtype=np.float32), + np.ones((6,), dtype=np.float32) * -1, + np.ones((6,), dtype=np.float32), ) self.observation_space = gym.spaces.Dict( @@ -186,6 +220,34 @@ def __init__( self.use_gripper = config.USE_GRIPPER print("Initialized Kuka") + # Initialization for the sdf based reward + self.mesh_file_path = config.MESH_FILE_PATH + self.num_sampled_points = 1000 + self.voxel_size = 0.001 + import trimesh + import pysdf + self.og_peg_mesh = trimesh.load(self.mesh_file_path) + # The given mesh file is not in meters, convert it + self.og_peg_mesh.apply_scale(0.001) + + self.target_transform = np.eye(4) + self.target_transform[:3, 3] = self._TARGET_POSE[:3] + self.target_transform[:3, :3] = Rotation.from_euler("ZYX", self._TARGET_POSE[3:]).as_matrix() + self.transformed_peg_mesh = self.og_peg_mesh.copy() + self.transformed_peg_mesh.apply_transform(self.target_transform) + + vertices = np.array(self.transformed_peg_mesh.vertices, dtype=np.float32) + faces = np.array(self.transformed_peg_mesh.faces, dtype=np.uint32) + + self.sdf = pysdf.SDF(vertices, faces, robust = True) + self.local_surface_points, _ = trimesh.sample.sample_surface_even(self.transformed_peg_mesh, self.num_sampled_points) + self.local_surface_points = np.array(self.local_surface_points) + self.transform_target_inverse = np.linalg.inv(self.target_transform) + + + + + def clip_safety_box(self, pose: np.ndarray) -> np.ndarray: """Clip the 
pose to be within the safety box.""" pose[:3] = np.clip( @@ -193,19 +255,44 @@ def clip_safety_box(self, pose: np.ndarray) -> np.ndarray: ) euler = Rotation.from_quat(pose[3:]).as_euler("ZYX") - # Clip first euler angle separately due to discontinuity from pi to -pi - sign = np.sign(euler[2]) - euler[2] = sign * ( + # Clip first two euler angles separately due to discontinuity from pi to -pi + # sign = np.sign(euler[2]) + # euler[2] = sign * ( + # np.clip( + # np.abs(euler[2]), + # self.rpy_bounding_box.low[2], + # self.rpy_bounding_box.high[2], + # ) + # ) + + # nisara : Clipping the B and C euler angles separately because they are at inflection point + + # A + euler[0] = ( + np.clip( + euler[0], + self.rpy_bounding_box.low[0], + self.rpy_bounding_box.high[0], + ) + ) + # B - The max range is -10 to +10 degrees + euler[1] = ( + np.clip( + euler[1], + self.rpy_bounding_box.low[1], + self.rpy_bounding_box.high[1], + ) + ) + # C - The max range is -170 to +170 degrees + sign_C = np.sign(euler[2]) + euler[2] = sign_C * ( np.clip( np.abs(euler[2]), - self.rpy_bounding_box.low[2], self.rpy_bounding_box.high[2], + np.abs(np.deg2rad(180)), ) ) - euler[:2] = np.clip( - euler[:2], self.rpy_bounding_box.low[:2], self.rpy_bounding_box.high[:2] - ) pose[3:] = Rotation.from_euler("ZYX", euler).as_quat() return pose @@ -215,12 +302,10 @@ def step(self, action: np.ndarray) -> tuple: # print("In step function") start_time = time.time() - # nisara : Comment - # print("Action before clipping: ", action) + self.logger.info(f"Action before clipping: {action}") action = np.clip(action, self.action_space.low, self.action_space.high) xyz_delta = action[:3] - # nisara : Comment - # print("Action after clipping ==xyz_delta==: ", xyz_delta) + self.logger.info(f"Action after clipping ==xyz_delta==: {xyz_delta}") self.nextpos = self.currpos.copy() # # nisara : Comment @@ -239,20 +324,19 @@ def step(self, action: np.ndarray) -> tuple: ) ).as_quat() - # # nisara : Comment - # nextPos_euler = 
Rotation.from_quat(self.nextpos[3:]).as_euler( - # "ZYX", degrees=True - # ) - # print("Next position's euler: ", nextPos_euler) - # print("Next position in step: ", self.nextpos) - ##Remove gripper action NOTE: Omey if self.use_gripper: gripper_action = action[6] * self.action_scale[2] gripper_action_effective = self._send_gripper_command(gripper_action) self._send_pos_command(self.clip_safety_box(self.nextpos)) - # self._send_pos_command(self.nextpos) + + # ## GO TO TARGET POSE FOR DEBUGGING + # tp = self._TARGET_POSE.copy() + # self.nextpos[:3] = tp[:3] + # self.nextpos[3:] = euler_2_quat(tp[3:]) + # self._send_pos_command(self.clip_safety_box(self.nextpos)) + # ## END DEBUGGING self.curr_path_length += 1 dt = time.time() - start_time @@ -265,7 +349,7 @@ def step(self, action: np.ndarray) -> tuple: else: reward = self.compute_reward(ob) - done = self.curr_path_length >= self.max_episode_length or reward == 1 + done = self.curr_path_length >= self.max_episode_length or reward == self.task_reward return ob, reward, done, False, {} def compute_reward(self, obs, gripper_action_effective=None) -> bool: @@ -273,23 +357,96 @@ def compute_reward(self, obs, gripper_action_effective=None) -> bool: current_pose = obs["state"]["tcp_pose"] # convert from quat to euler first + euler_angles_degrees = Rotation.from_quat(current_pose[3:]).as_euler("ZYX", degrees=True) euler_angles = quat_2_euler(current_pose[3:]) - euler_angles = np.abs(euler_angles) - # nisara: IMPORTANT: Convert the euler angles of target pose to absolute values too + + # nisara: IMPORTANT: Convert the target pose to absolute values too target_pose = self._TARGET_POSE.copy() - target_pose[3:] = np.abs(target_pose[3:]) + current_pose = np.hstack([current_pose[:3], euler_angles]) - delta = np.abs(current_pose - target_pose) - if np.all(delta < self._REWARD_THRESHOLD): - print("Received reward 1!!!!") - reward = 1 - else: - # print(f'Goal not reached, the difference is {delta}, the desired threshold is 
{_REWARD_THRESHOLD}') - reward = 0 + curr_pose_print = np.hstack([current_pose[:3], euler_angles_degrees]) + + give_reward = np.zeros(6, dtype=bool) + difference = np.zeros(6) + + difference[:3] = np.abs(current_pose[:3] - target_pose[:3]) + give_reward[:3] = difference[:3] <= self._REWARD_THRESHOLD[:3] + + # Calculation of B and C's delta is going to be different + # For B, the range is -5 to +5 degrees + # For C, the range is -175 to -180 and +175 to +180 degrees + + ## A - if self.config.APPLY_GRIPPER_PENALTY and gripper_action_effective: - reward -= self.config.GRIPPER_PENALTY + difference[3] = np.abs(current_pose[3] - target_pose[3]) + give_reward[3] = difference[3] <= self._REWARD_THRESHOLD[3] + ## B + difference[4] = np.abs(current_pose[4] - target_pose[4]) + if difference[4] <= self._REWARD_THRESHOLD[4]: + give_reward[4] = 1 + + ## C + difference[5] = np.pi - np.abs(current_pose[5]) + if difference[5] <= self._REWARD_THRESHOLD[5]: + give_reward[5] = 1 + + if np.all(give_reward): + print("RECEIVED COMPLETE REWARD 1.0!!!!") + self.logger.info("RECEIVED COMPLETE REWARD 1.0!!!!") + self.logger.info(f"Received reward 1.0 at \n\tcurrent pose: {curr_pose_print} and \n\tdiff: {difference}") + reward = self.task_reward + + else: + ## DO i add reward for 0.099 = z-value, reward = 0.98 + if current_pose[2] <= 0.099: + reward = 0.98 + self.logger.info("Received surface reward 0.98") + else: + # SDF BASED REWARD + tcp_transform = np.eye(4) + tcp_transform[:3, 3] = current_pose[:3] + tcp_transform[:3, :3] = Rotation.from_euler("ZYX", current_pose[3:]).as_matrix() + transform_target_current = np.linalg.inv(tcp_transform) @ self.target_transform + tcp_rot_matrix = transform_target_current[:3, :3] + tcp_t = transform_target_current[:3, 3] + global_surface_points = (tcp_rot_matrix @ self.local_surface_points.T).T + tcp_t + + # Calculate the signed distance field + sdf_values = np.array([self.sdf(point) for point in global_surface_points]) + + # Calculate the reward based on 
the sdf values + reward = np.sqrt(np.mean(np.square(sdf_values))) + reward = 1.0 - (reward / 0.1) # Normalize the reward + reward = np.clip(reward, -1.0, 1.0) # Ensure the reward is between -1 and 1 + self.logger.info(f"Goal not reached, the difference is \n\tcurrent pose: {curr_pose_print} and \n\tdiff (in rad): {difference} ") + + """ Commenting out the existing reward function to make place for sdf + else: + # if current_pose[2] < 0.10000: + # # Let's give more reward if z is even lower + # self.logger.info("Received surface reward 0.6") + # reward = 0.7 + # else: + # if current_pose[2] < 0.10300: + # self.logger.info("Received surface reward 0.4") + # reward = 0.4 + # else: + # reward = 0.0 + + ## Give reward based on euclidean distance + euclidean_distance = np.linalg.norm(np.abs(current_pose) - np.abs(target_pose)) + ## The range of th edistance is 0.06 and 0.018 in demo buffer so we follow the same thing here + reward = np.abs(1.0 - (euclidean_distance / 0.06)) + self.logger.info(f"Goal not reached, the difference is \n\tcurrent pose: {curr_pose_print} and \n\tdiff (in rad): {difference} ") + """ + self.logger.info(f"Reward: {reward}") + self.logger.info("\n") + + # if self.config.APPLY_GRIPPER_PENALTY and gripper_action_effective: + # reward -= self.config.GRIPPER_PENALTY + + self.episode_max_reward = max(self.episode_max_reward, reward) return reward def crop_image(self, name, image) -> np.ndarray: @@ -349,9 +506,6 @@ def go_to_rest(self, joint_reset=False): # Change to precision mode for reset # requests.post(self.url + "update_param", json=self.config.PRECISION_PARAM) - # nisara : Comment - # print("In the function go_to_rest to reset the pose") - # Perform Carteasian reset if self.randomreset: # randomize reset position in xy plane reset_pose = self.resetpos.copy() @@ -366,16 +520,39 @@ def go_to_rest(self, joint_reset=False): self._send_pos_command(reset_pose) else: reset_pose = self.resetpos.copy() - # nisara : Comment - # print("In reset pose, 
sending the reset pose: ", reset_pose) - self._send_pos_command(reset_pose) + + # self._send_pos_command(reset_pose) + + # Reset to a specified joint position and not cartesian position to avoid singularity + reset_joint_pose = self.reset_joint_pos.copy() + self._send_reset_joint_command(reset_joint_pose) # Change to compliance mode # requests.post(self.url + "update_param", json=self.config.COMPLIANCE_PARAM) + def _send_reset_joint_command(self, joint_pos: np.ndarray): + """ + Internal function to send joint position command to the robot. + Make sure that the joint angles are in radians and not degrees. + """ + pos = np.array(joint_pos).astype(np.float32) + self.robot_interface_node.move_to_joint_pos(pos) + + + def reset(self, joint_reset=False, **kwargs): # requests.post(self.url + "update_param", json=self.config.COMPLIANCE_PARAM) + # nisara: Have a maximum episode reward also and log that somewhere to maintain the increasing trend if any + + if self.save_reward_stats: + with open(self.reward_stats_file, "a") as f: + f.write(f"{self.episode_max_reward}\n") + self.logger.info(f"Max reward for the episode: {self.episode_max_reward}") + self.episode_max_reward = 0 + + self.logger.info(f"\nResetting the environment\n") + if self.save_video: self.save_video_recording() @@ -437,23 +614,22 @@ def _recover(self): def _send_pos_command(self, pos: np.ndarray): """Internal function to send position command to the robot.""" + np.set_printoptions(precision=5, suppress=True) + # # nisara : Comment - # np.set_printoptions(precision=3, suppress=True) # curr_pos_euler = Rotation.from_quat(self.currpos[3:]).as_euler("ZYX", degrees=True) # print_curr_pos = np.concatenate([self.currpos[:3] * 1000, curr_pos_euler]) # print("Current pose in _send_pos_command: ", print_curr_pos) - arr = np.array(pos).astype(np.float32) + # nisara : Comment + arr_euler = Rotation.from_quat(pos[3:]).as_euler("ZYX", degrees=True) + print_arr_pos = np.concatenate([pos[:3], arr_euler]) + 
self.logger.info(f"Sending position command to \n\tmove to pos: {print_arr_pos}") - # # nisara : Comment - # arr_euler = Rotation.from_quat(arr[3:]).as_euler("ZYX", degrees=True) - # print_arr_pos = np.concatenate([arr[:3] * 1000, arr_euler]) - # print("In the function to send position command to move to pos: ", print_arr_pos) - # # print("In the same function, reset pose: ", self.resetpos) + arr = np.array(pos).astype(np.float32) self.robot_interface_node.move_to_pose(arr, self.resetpos) print("Done moving the robot") - # print("\n") def _send_gripper_command(self, pos: float, mode="binary"): """Internal function to send gripper command to the robot.""" diff --git a/serl_robot_infra/kuka_env/envs/peg_env/config.py b/serl_robot_infra/kuka_env/envs/peg_env/config.py index 7e88ab3a..de19db07 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/config.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/config.py @@ -10,27 +10,62 @@ class PegEnvConfig(DefaultEnvConfig): "wrist_1": "932122060300", "wrist_2": "023322060732", } + MESH_FILE_PATH = "/home/rp/SERL/src/examples/async_peg_insert_drq/rect_peg/SERL_rectangle v1.stl" TARGET_POSE = np.array( - [0.76991, -0.03989, 0.08642, -1.5308283, 0.01884956, -3.11663445] + [ + 0.77049, + -0.04725, + 0.09370, + -1.5875515, + 0.009599311, + -3.13583307 + ] ) # nisara - P2 RESET_POSE = np.array( [ - 0.76791, - -0.03337, - 0.12004, - np.deg2rad(-92.54), - np.deg2rad(0.54), - np.deg2rad(179.84), + 0.77644, + -0.04044, + 0.10319, + -1.5510741, + 0.009599311, + -3.13583307 + ] + ) + # RESET JOINT POSITION + # MAKE SURE IT IS: A1, A2, A4, A3, A5, A6, A7 + # Radians + ## From the topic: + # RESET_JOINT_POSITION = np.array( + # [ + # -0.010134220158701582, + # 1.2094114255907469, + # -0.6516944804629864, + # -0.14664185220364415, + # 0.15364255700526772, + # 1.2824023060585676, + # -1.6960934354739192, + # ] + # ) + ## From the pendant: + RESET_JOINT_POSITION = np.array( + [ + -0.01012291, + 1.209513, + -0.65170594, + -0.146608, + 0.1534144, 
+ 1.2824679, + -1.696111 ] ) REWARD_THRESHOLD: np.ndarray = np.array( - [0.002, 0.002, 0.025, 0.0349066, 0.0349066, 0.0349066] + [0.0035, 0.0035, 0.001, 0.174533, 0.174533, 0.174533] ) APPLY_GRIPPER_PENALTY = False - ACTION_SCALE = np.array([0.02, 0.1, 1]) + ACTION_SCALE = np.array([0.005, 0.01, 1]) # action_scale[1] defines the scaling in radians RANDOM_RESET = False - RANDOM_XY_RANGE = 0.01 + RANDOM_XY_RANGE = 0.008 # only deviates 8mm from target pose RANDOM_RZ_RANGE = np.pi / 6 USE_GRIPPER: bool = False ABS_POSE_LIMIT_LOW = np.array( @@ -39,22 +74,26 @@ class PegEnvConfig(DefaultEnvConfig): TARGET_POSE[1] - RANDOM_XY_RANGE, TARGET_POSE[2], TARGET_POSE[3] - RANDOM_RZ_RANGE, - TARGET_POSE[4] - 0.01, - TARGET_POSE[5] - 0.01, + # Least value of B is -10 degrees + -0.174533, + # Least value of C is -170 degrees + -2.96706 ] ) ABS_POSE_LIMIT_HIGH = np.array( [ TARGET_POSE[0] + RANDOM_XY_RANGE, TARGET_POSE[1] + RANDOM_XY_RANGE, - TARGET_POSE[2] + 0.05, + TARGET_POSE[2] + 0.012, TARGET_POSE[3] + RANDOM_RZ_RANGE, - TARGET_POSE[4] + 0.01, - TARGET_POSE[5] + 0.01, + # Least value of B is +10 degrees + 0.174533, + # Least value of C is +170 degrees + 2.96706, ] ) COMPLIANCE_PARAM = { - "translational_stiffness": 2000, + "translational_stiffness": 1000, "translational_damping": 0.89, "rotational_stiffness": 150, "rotational_damping": 0.7, diff --git a/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py b/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py index 3fa1ca91..691e3935 100644 --- a/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py +++ b/serl_robot_infra/kuka_env/envs/peg_env/kuka_peg_insert.py @@ -26,8 +26,9 @@ def go_to_rest(self, joint_reset=False): # Move up to clear the slot self._update_currpos() reset_pose = copy.deepcopy(self.currpos) - reset_pose[2] += 0.10 + reset_pose[2] += 0.05 self._send_pos_command(reset_pose) # execute the go_to_rest method from the parent class + time.sleep(1) super().go_to_rest(joint_reset) diff --git 
a/serl_robot_infra/kuka_env/utils/rotations.py b/serl_robot_infra/kuka_env/utils/rotations.py index 3dd40147..a8487be6 100644 --- a/serl_robot_infra/kuka_env/utils/rotations.py +++ b/serl_robot_infra/kuka_env/utils/rotations.py @@ -1,9 +1,9 @@ from scipy.spatial.transform import Rotation as R -def quat_2_euler(quat): +def quat_2_euler(quat, degrees=False): """calculates and returns: yaw, pitch, roll from given quaternion""" - return R.from_quat(quat).as_euler("ZYX") + return R.from_quat(quat).as_euler("ZYX", degrees=degrees) def euler_2_quat(xyz_as_abc):