diff --git a/.github/workflows/continuous.yml b/.github/workflows/continuous.yml new file mode 100644 index 000000000..a009e4f1e --- /dev/null +++ b/.github/workflows/continuous.yml @@ -0,0 +1,25 @@ +name: Continuous + +on: + push: + branches: + - 'main' +jobs: + QC: + runs-on: ubuntu-20.04 + steps: + - name: Checkout Repository and Submodules + uses: actions/checkout@v4 + - name: Quality Control + uses: "./.github/workflows/qc" + Documentation: + runs-on: ubuntu-20.04 + env: + DOCS_DEPLOY_KEY: ${{ secrets.DOCS_DEPLOY_KEY }} + steps: + - name: Checkout Repository and Submodules + uses: actions/checkout@v4 + with: + path: "src/lasr-base" + - name: Build & Deploy Documentation + uses: "./src/lasr-base/.github/workflows/docs" \ No newline at end of file diff --git a/.github/workflows/docs/action.yml b/.github/workflows/docs/action.yml new file mode 100644 index 000000000..425d9f99b --- /dev/null +++ b/.github/workflows/docs/action.yml @@ -0,0 +1,48 @@ +name: Build & Deploy Documentation + +runs: + using: "composite" + steps: + + - name: Install ROS Noetic + shell: bash + run: | + sudo add-apt-repository universe + sudo add-apt-repository restricted + sudo add-apt-repository multiverse + + sudo apt update + + sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' + + sudo apt install curl # if you haven't already installed curl + curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | sudo apt-key add - + + sudo apt update + sudo apt install ros-noetic-ros-base python3-catkin-tools + + - name: Build documentation package + shell: bash + run: | + source /opt/ros/noetic/setup.bash + catkin build documentation + + - name: Use Node.js + uses: actions/setup-node@v3 + with: + node-version: 20 + + - name: Build documentation + shell: bash + run: | + source /opt/ros/noetic/setup.bash + source devel/setup.bash + rosrun documentation build.py + + - name: Deploy to GitHub Pages + uses: peaceiris/actions-gh-pages@v3 + if: github.event_name != 'pull_request' && github.ref_name == 'main' + with: + deploy_key: ${{ env.DOCS_DEPLOY_KEY }} + publish_dir: ./src/lasr-base/documentation/web/build + external_repository: lasr-at-home/docs \ No newline at end of file diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml deleted file mode 100644 index 4c38eb089..000000000 --- a/.github/workflows/documentation.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: Build & Deploy Documentation - -on: - push: - pull_request: - branches: [main] - -jobs: - build: - runs-on: ubuntu-20.04 - - steps: - - name: Checkout repository and submodules - uses: actions/checkout@v3 - with: - path: "src/lasr-base" - - - name: Install ROS Noetic - run: | - sudo add-apt-repository universe - sudo add-apt-repository restricted - sudo add-apt-repository multiverse - - sudo apt update - - sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' - - sudo apt install curl # if you haven't already installed curl - curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | sudo apt-key add - - - sudo apt update - sudo apt install ros-noetic-ros-base python3-catkin-tools - - - name: Build documentation package - run: | - source /opt/ros/noetic/setup.bash - catkin build documentation - - - name: Use Node.js - uses: actions/setup-node@v3 - with: - node-version: 20 - - - name: Build documentation - run: | - source /opt/ros/noetic/setup.bash - source 
devel/setup.bash - rosrun documentation build.py - - - name: Deploy to GitHub Pages - uses: peaceiris/actions-gh-pages@v3 - if: github.event_name != 'pull_request' && github.ref_name == 'main' - with: - deploy_key: ${{ secrets.DOCS_DEPLOY_KEY }} - publish_dir: ./src/lasr-base/documentation/web/build - external_repository: lasr-at-home/docs diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml new file mode 100644 index 000000000..481a655f1 --- /dev/null +++ b/.github/workflows/pr.yml @@ -0,0 +1,14 @@ +name: Pull Request + +on: + pull_request: + branches: + - '*' +jobs: + QC: + runs-on: ubuntu-20.04 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Quality Control + uses: "./.github/workflows/qc" \ No newline at end of file diff --git a/.github/workflows/qc/action.yml b/.github/workflows/qc/action.yml new file mode 100644 index 000000000..24a4be88c --- /dev/null +++ b/.github/workflows/qc/action.yml @@ -0,0 +1,12 @@ +name: Quality Control + +runs: + using: "composite" + steps: + + - name: Python Formatting + uses: psf/black@stable + with: + options: "--check --verbose" + jupyter: true + version: "24.3.0" \ No newline at end of file diff --git a/common/helpers/cv2_img/setup.py b/common/helpers/cv2_img/setup.py index 3d1bf6317..cbb0a95d5 100644 --- a/common/helpers/cv2_img/setup.py +++ b/common/helpers/cv2_img/setup.py @@ -3,9 +3,6 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup( - packages=['cv2_img'], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=["cv2_img"], package_dir={"": "src"}) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/helpers/cv2_img/src/cv2_img/__init__.py b/common/helpers/cv2_img/src/cv2_img/__init__.py index 822b2839a..962ef8bb6 100644 --- a/common/helpers/cv2_img/src/cv2_img/__init__.py +++ b/common/helpers/cv2_img/src/cv2_img/__init__.py @@ -20,7 +20,7 @@ def cv2_img_to_msg(img, stamp=None): msg.header.stamp = rospy.Time.now() if stamp is None else stamp msg.width = width msg.height = height - msg.encoding = 'bgr8' + msg.encoding = "bgr8" msg.is_bigendian = 1 msg.step = 3 * width msg.data = img.tobytes() @@ -37,13 +37,13 @@ def msg_to_pillow_img(msg: SensorImage): """ size = (msg.width, msg.height) - if msg.encoding in ['bgr8', '8UC3']: - img = Image.frombytes('RGB', size, msg.data, 'raw') + if msg.encoding in ["bgr8", "8UC3"]: + img = Image.frombytes("RGB", size, msg.data, "raw") # BGR => RGB img = Image.fromarray(np.array(img)[:, :, ::-1]) - elif msg.encoding == 'rgb8': - img = Image.frombytes('RGB', size, msg.data, 'raw') + elif msg.encoding == "rgb8": + img = Image.frombytes("RGB", size, msg.data, "raw") else: raise Exception("Unsupported format.") @@ -80,8 +80,7 @@ def extract_mask_region(frame, mask, expand_x=0.5, expand_y=0.5): # only requiring cv2 if we need it import cv2 - contours, _ = cv2.findContours( - mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) if contours: largest_contour = max(contours, key=cv2.contourArea) @@ -99,6 +98,6 @@ def extract_mask_region(frame, mask, expand_x=0.5, expand_y=0.5): new_w = min(frame.shape[1] - x, new_w) new_h = min(frame.shape[0] - y, new_h) - face_region = frame[y:y+int(new_h), x:x+int(new_w)] + face_region = frame[y : y + int(new_h), x : x + int(new_w)] return face_region return None diff --git a/common/helpers/numpy2message/setup.py 
b/common/helpers/numpy2message/setup.py index d79792c72..9674e9cb9 100644 --- a/common/helpers/numpy2message/setup.py +++ b/common/helpers/numpy2message/setup.py @@ -4,8 +4,7 @@ from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['numpy2message'], - package_dir={'': 'src'} + packages=["numpy2message"], package_dir={"": "src"} ) setup(**setup_args) diff --git a/common/helpers/numpy2message/src/numpy2message/__init__.py b/common/helpers/numpy2message/src/numpy2message/__init__.py index 7a330ca5f..328194092 100644 --- a/common/helpers/numpy2message/src/numpy2message/__init__.py +++ b/common/helpers/numpy2message/src/numpy2message/__init__.py @@ -8,7 +8,7 @@ def numpy2message(np_array: np.ndarray) -> list: return data, shape, dtype -def message2numpy(data:bytes, shape:list, dtype:str) -> np.ndarray: +def message2numpy(data: bytes, shape: list, dtype: str) -> np.ndarray: array_shape = tuple(shape) array_dtype = np.dtype(dtype) @@ -16,4 +16,3 @@ def message2numpy(data:bytes, shape:list, dtype:str) -> np.ndarray: deserialized_array = deserialized_array.reshape(array_shape) return deserialized_array - diff --git a/common/navigation/tiago_controllers/setup.py b/common/navigation/tiago_controllers/setup.py index dc87c2c70..2281d5bc1 100644 --- a/common/navigation/tiago_controllers/setup.py +++ b/common/navigation/tiago_controllers/setup.py @@ -5,8 +5,7 @@ # fetch values from package.xml setup_args = generate_distutils_setup( - packages=['tiago_controllers'], - package_dir={'': 'src'} + packages=["tiago_controllers"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/__init__.py b/common/navigation/tiago_controllers/src/tiago_controllers/__init__.py index f7a359cd5..7d9f18c34 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/__init__.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/__init__.py @@ -1,3 +1,2 @@ from .controllers.base_controller import BaseController from .controllers.head_controller import HeadController - diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/base_planner.py b/common/navigation/tiago_controllers/src/tiago_controllers/base_planner.py index b0a890f62..ecb2f0759 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/base_planner.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/base_planner.py @@ -2,7 +2,14 @@ import rospy import rosservice -from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, PoseStamped, Vector3 +from geometry_msgs.msg import ( + Pose, + PoseWithCovarianceStamped, + Point, + Quaternion, + PoseStamped, + Vector3, +) from std_msgs.msg import Header from nav_msgs.srv import GetPlan import numpy as np @@ -11,17 +18,21 @@ def make_plan(current_pose, x, y, tol=0.01, max_dist=None): - - if '/move_base/NavfnROS/make_plan' in rosservice.get_service_list(): - make_plan_service = '/move_base/NavfnROS/make_plan' - elif '/move_base/GlobalPlanner/make_plan' in rosservice.get_service_list(): - make_plan_service = '/move_base/GlobalPlanner/make_plan' + if "/move_base/NavfnROS/make_plan" in rosservice.get_service_list(): + make_plan_service = "/move_base/NavfnROS/make_plan" + elif "/move_base/GlobalPlanner/make_plan" in rosservice.get_service_list(): + make_plan_service = "/move_base/GlobalPlanner/make_plan" else: - rospy.loginfo('Failed to find the make_plan server') + 
rospy.loginfo("Failed to find the make_plan server") return False, None, None - - start = PoseStamped(header=Header(frame_id="map", stamp=rospy.Time.now()), pose=current_pose) - goal = PoseStamped(header=Header(frame_id="map", stamp=rospy.Time.now()), pose=Pose(position=Point(x, y, 0), orientation=current_pose.orientation)) + + start = PoseStamped( + header=Header(frame_id="map", stamp=rospy.Time.now()), pose=current_pose + ) + goal = PoseStamped( + header=Header(frame_id="map", stamp=rospy.Time.now()), + pose=Pose(position=Point(x, y, 0), orientation=current_pose.orientation), + ) rospy.wait_for_service(make_plan_service, timeout=5) make_plan_serv = rospy.ServiceProxy(make_plan_service, GetPlan) @@ -31,9 +42,9 @@ def make_plan(current_pose, x, y, tol=0.01, max_dist=None): dist = 0 # zips the poses like 1 and 2, 2 and 3, etc for current, next in zip(mp.poses, mp.poses[1:]): - dist+= euclidian_distance( + dist += euclidian_distance( (current.pose.position.x, current.pose.position.y), - (next.pose.position.x, next.pose.position.y) + (next.pose.position.x, next.pose.position.y), ) if dist > max_dist: return False, None, None @@ -43,7 +54,9 @@ def make_plan(current_pose, x, y, tol=0.01, max_dist=None): def draw_points(points): for x, y in points: - marker_pub = rospy.Publisher.publish("/visualization_marker", Marker, queue_size=2) + marker_pub = rospy.Publisher.publish( + "/visualization_marker", Marker, queue_size=2 + ) marker = Marker() marker.header.stamp = rospy.Time.now() marker.header.frame_id = "/map" @@ -77,8 +90,8 @@ def points_on_radius(x1, y1, radius=0.5): # print( "here is it: ", (x1 + 0.0, y1 - radius), " " , # (x1 - radius, y1 + 0.0), " ", # (x1 + radius, y1 + 0.0), " ", - # (x1 - radius, y1 - radius), " ", - # (x1 + radius, y1 - radius)) + # (x1 - radius, y1 - radius), " ", + # (x1 + radius, y1 - radius)) return [ (x1 + 0.0, y1 - radius), (x1 + 0.0, y1 + radius), @@ -90,37 +103,43 @@ def points_on_radius(x1, y1, radius=0.5): (x1 + radius, y1 - radius), ] + def euclidian_distance(p1, p2): x1, y1 = p1 x2, y2 = p2 a = np.array((x1, y1)) b = np.array((x2, y2)) - return np.linalg.norm(a-b) + return np.linalg.norm(a - b) + def plan_to_radius(current_pose, point, radius, tol=0.1): picked_points = [] points = None - for x,y in points_on_radius(*point, radius=radius): + for x, y in points_on_radius(*point, radius=radius): # print(f"the points are {x} , and {y}") success, point, points = make_plan(current_pose, x, y, tol) if success: - dist = euclidian_distance((current_pose.position.x, current_pose.position.y), (x,y)) + dist = euclidian_distance( + (current_pose.position.x, current_pose.position.y), (x, y) + ) picked_points.append([point, dist]) # draw_points(picked_points[0:]) - print("-"*30) + print("-" * 30) # if points: - # print(f"points of journey {points}") - # print(f"middle point of journey {points[round((len(points) - 1) / 2)].pose}") - picked_points = sorted(picked_points, key = lambda x: x[1]) + # print(f"points of journey {points}") + # print(f"middle point of journey {points[round((len(points) - 1) / 2)].pose}") + picked_points = sorted(picked_points, key=lambda x: x[1]) return [point[0] for point in picked_points] if picked_points else [] + NUMBER = 3 + def get_journey_points(current_pose, x, y, tol=0.01): success, point, points = make_plan(current_pose, x, y, tol) if len(points) > NUMBER + 2: - mid_points = [points[i] for i in range(points/NUMBER)] + mid_points = [points[i] for i in range(points / NUMBER)] else: mid_points = points return mid_points diff --git 
a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/arm_controller.py b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/arm_controller.py index 8f35a7e2a..6b9e4162a 100755 --- a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/arm_controller.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/arm_controller.py @@ -11,23 +11,36 @@ class ArmController: def __init__(self): - self._client = actionlib.SimpleActionClient('/arm_controller/follow_joint_trajectory', - FollowJointTrajectoryAction) + self._client = actionlib.SimpleActionClient( + "/arm_controller/follow_joint_trajectory", FollowJointTrajectoryAction + ) self._client.wait_for_server() print("done waiting") def _is_running(self): - return self._client.get_state() == GoalStatus.PENDING or self._client.get_state() == GoalStatus.ACTIVE + return ( + self._client.get_state() == GoalStatus.PENDING + or self._client.get_state() == GoalStatus.ACTIVE + ) - def sync_reach_to(self, joint1, join2, joint3, joint4, joint5, joint6, joint7, time_from_start=1): + def sync_reach_to( + self, joint1, join2, joint3, joint4, joint5, joint6, joint7, time_from_start=1 + ): """ Args: :param joint#: new position of joint :param time_from_start: time from start """ goal = FollowJointTrajectoryGoal() - goal.trajectory.joint_names = ['arm_1_joint', 'arm_2_joint', 'arm_3_joint', 'arm_4_joint', 'arm_5_joint', - 'arm_6_joint', 'arm_7_joint'] + goal.trajectory.joint_names = [ + "arm_1_joint", + "arm_2_joint", + "arm_3_joint", + "arm_4_joint", + "arm_5_joint", + "arm_6_joint", + "arm_7_joint", + ] points = JointTrajectoryPoint() points.positions = [joint1, join2, joint3, joint4, joint5, joint6, joint7] points.time_from_start = rospy.Duration(time_from_start) @@ -39,7 +52,9 @@ def sync_reach_to(self, joint1, join2, joint3, joint4, joint5, joint6, joint7, t return done and state def cancel_goal(self): - return cancel_goal(self, '/arm_controller/follow_joint_trajectory/cancel', self._client) + return cancel_goal( + self, "/arm_controller/follow_joint_trajectory/cancel", self._client + ) def get_client(self): return self._client @@ -49,4 +64,4 @@ def is_running(self): @staticmethod def get_joint_values(): - return get_joint_values("/arm_controller/query_state") \ No newline at end of file + return get_joint_values("/arm_controller/query_state") diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/base_controller.py b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/base_controller.py index 382d940fc..ccfdd559f 100755 --- a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/base_controller.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/base_controller.py @@ -18,6 +18,7 @@ import numpy as np import numpy as np + class BaseController: def __init__(self): self._goal_sent = False @@ -33,21 +34,32 @@ def cancel_goal(self): if state == GoalStatus.PENDING or state == GoalStatus.ACTIVE: self.__client.cancel_goal() while True: - if (self.__client.get_state() in [GoalStatus.PREEMPTED, GoalStatus.ABORTED, GoalStatus.REJECTED, - GoalStatus.RECALLED, GoalStatus.SUCCEEDED]): + if self.__client.get_state() in [ + GoalStatus.PREEMPTED, + GoalStatus.ABORTED, + GoalStatus.REJECTED, + GoalStatus.RECALLED, + GoalStatus.SUCCEEDED, + ]: break rospy.sleep(0.5) self._goal_sent = False def check_active_state(self): - return self._client.get_state() == GoalStatus.PENDING or self._client.get_state() == 
GoalStatus.ACTIVE + return ( + self._client.get_state() == GoalStatus.PENDING + or self._client.get_state() == GoalStatus.ACTIVE + ) def check_terminated_state(self): - return self._client.get_state() == GoalStatus.LOST or self._client.get_state() == GoalStatus.PREEMPTED or \ - self._client.get_state() == GoalStatus.ABORTED + return ( + self._client.get_state() == GoalStatus.LOST + or self._client.get_state() == GoalStatus.PREEMPTED + or self._client.get_state() == GoalStatus.ABORTED + ) def get_current_pose(self): - msg = rospy.wait_for_message('/amcl_pose', PoseWithCovarianceStamped) + msg = rospy.wait_for_message("/amcl_pose", PoseWithCovarianceStamped) x = round(msg.pose.pose.position.x, 2) y = round(msg.pose.pose.position.y, 2) quat = msg.pose.pose.orientation @@ -63,7 +75,10 @@ def get_status(self): return self.__client.get_state() def is_active(self): - return self.__client.get_state() == GoalStatus.PENDING or self.__client.get_state() == GoalStatus.ACTIVE + return ( + self.__client.get_state() == GoalStatus.PENDING + or self.__client.get_state() == GoalStatus.ACTIVE + ) def is_terminated(self): return is_terminated(self.__client) @@ -71,11 +86,16 @@ def is_terminated(self): def __to_pose(self, pose, done_cb=None): if pose: goal = MoveBaseGoal() - goal.target_pose.header.frame_id = 'map' + goal.target_pose.header.frame_id = "map" goal.target_pose.header.stamp = rospy.Time.now() goal.target_pose.pose = pose - rospy.loginfo("base is going to (%.2f, %.2f, %.2f) pose", pose.position.x, pose.position.y, pose.position.z) + rospy.loginfo( + "base is going to (%.2f, %.2f, %.2f) pose", + pose.position.x, + pose.position.y, + pose.position.z, + ) self._goal_sent = True self.__client.send_goal(goal, done_cb=done_cb) @@ -103,6 +123,7 @@ def compute_face_quat(self, x, y): theta_deg = np.degrees(math.atan2(dist_y, dist_x)) try: from scipy.spatial.transform import Rotation as R + (x, y, z, w) = R.from_euler("z", theta_deg, degrees=True).as_quat() quaternion = Quaternion(x, y, z, w) except ImportError: @@ -125,9 +146,11 @@ def ensure_sync_to_pose(self, pose, wait=60): state = self.sync_to_pose(pose) return state - def rotate_to_face_object(self, object_name='/door/pose'): + def rotate_to_face_object(self, object_name="/door/pose"): object_pos = get_pose_from_param(object_name) - rotation_angle = self.compute_face_quat(object_pos.position.x, object_pos.position.y) + rotation_angle = self.compute_face_quat( + object_pos.position.x, object_pos.position.y + ) state = self.sync_to_pose(rotation_angle) return state @@ -150,19 +173,29 @@ def rotate_to_face_object(self, object_name='/door/pose'): def rotate(self, radians): x, y, current_orientation = self.get_current_pose() - current_orientation = np.array([current_orientation.x, current_orientation.y, - current_orientation.z, current_orientation.w]) + current_orientation = np.array( + [ + current_orientation.x, + current_orientation.y, + current_orientation.z, + current_orientation.w, + ] + ) r = R.from_quat(current_orientation) - rotated_r = r * R.from_euler('z', radians, degrees=False) + rotated_r = r * R.from_euler("z", radians, degrees=False) - pose = Pose(position=Point(x, y, 0.0), orientation=Quaternion(*rotated_r.as_quat())) + pose = Pose( + position=Point(x, y, 0.0), orientation=Quaternion(*rotated_r.as_quat()) + ) return self.sync_to_pose(pose) class CmdVelController: def __init__(self): - self._vel_pub = rospy.Publisher('mobile_base_controller/cmd_vel', Twist, queue_size=1) + self._vel_pub = rospy.Publisher( + "mobile_base_controller/cmd_vel", 
Twist, queue_size=1 + ) # rospy.sleep(0.1) # wait for publisher to activate def rotate(self, angular_velocity, angle, clockwise): @@ -213,11 +246,10 @@ def plan_to_radius(self, x, y, radius=0.5, tol=0.1): robot_x, robot_y, quaternion = self.get_current_pose() return _plan_to_radius( - Pose( - position=Point(robot_x, robot_y, 0.0), - orientation=quaternion - ), - (x, y), radius, tol + Pose(position=Point(robot_x, robot_y, 0.0), orientation=quaternion), + (x, y), + radius, + tol, ) def sync_face_to(self, x, y): @@ -237,8 +269,7 @@ def compute_face_quat(self, x, y): return pose - -if __name__ == '__main__': +if __name__ == "__main__": rospy.init_node("base_test", anonymous=True) b = BaseController() b.sync_face_to(2.84, 6.7) diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/controllers.py b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/controllers.py index f8fdf4eb9..ecc685f56 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/controllers.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/controllers.py @@ -1,12 +1,17 @@ #!/usr/bin/env python3 -from tiago_controllers.controllers import BaseController, HeadController, TorsoController +from tiago_controllers.controllers import ( + BaseController, + HeadController, + TorsoController, +) + class Controllers: """ - Class to merge all controllers together + Class to merge all controllers together """ def __init__(self): self.base_controller = BaseController() self.head_controller = HeadController() - self.torso_controller = TorsoController() \ No newline at end of file + self.torso_controller = TorsoController() diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/gripper_controller.py b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/gripper_controller.py index bd6d66b0f..dc2ec9594 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/gripper_controller.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/gripper_controller.py @@ -10,13 +10,17 @@ class GripperController: JOINT_MAX = 0.045 def __init__(self): - self._client = actionlib.SimpleActionClient("gripper_controller/follow_joint_trajectory", - FollowJointTrajectoryAction) + self._client = actionlib.SimpleActionClient( + "gripper_controller/follow_joint_trajectory", FollowJointTrajectoryAction + ) self._client.wait_for_server() def sync_reach_to(self, joint1, joint2=None, time_from_start=1): goal = FollowJointTrajectoryGoal() - goal.trajectory.joint_names = ["gripper_left_finger_joint", "gripper_right_finger_joint"] + goal.trajectory.joint_names = [ + "gripper_left_finger_joint", + "gripper_right_finger_joint", + ] point = JointTrajectoryPoint() if not joint2: joint2 = joint1 @@ -31,10 +35,12 @@ def sync_reach_to(self, joint1, joint2=None, time_from_start=1): return state def cancel_goal(self): - return cancel_goal(self, '/torso_controller/follow_joint_trajectory/cancel', self._client) + return cancel_goal( + self, "/torso_controller/follow_joint_trajectory/cancel", self._client + ) def is_running(self): return is_running(self._client) def get_gripper_state(self): - return get_joint_values('/gripper_controller/query_state') + return get_joint_values("/gripper_controller/query_state") diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/head_controller.py 
b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/head_controller.py index 9a172bdaf..03b8983e6 100755 --- a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/head_controller.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/head_controller.py @@ -7,13 +7,17 @@ from tiago_controllers.helpers import get_joint_values, is_running, cancel_goal from pal_common_msgs.msg import DisableActionGoal, DisableAction + class HeadController: def __init__(self): - self._client = actionlib.SimpleActionClient("/head_controller/follow_joint_trajectory", - FollowJointTrajectoryAction) + self._client = actionlib.SimpleActionClient( + "/head_controller/follow_joint_trajectory", FollowJointTrajectoryAction + ) self._client.wait_for_server() - if rospy.get_published_topics(namespace='/pal_head_manager'): - self._client_disable_head = actionlib.SimpleActionClient('/pal_head_manager/disable', DisableAction) + if rospy.get_published_topics(namespace="/pal_head_manager"): + self._client_disable_head = actionlib.SimpleActionClient( + "/pal_head_manager/disable", DisableAction + ) def get_client(self): return self._client @@ -22,7 +26,9 @@ def is_running(self): return is_running(self._client) def cancel_goal(self): - return cancel_goal(self, '/head_controller/follow_joint_trajectory/cancel', self._client) + return cancel_goal( + self, "/head_controller/follow_joint_trajectory/cancel", self._client + ) def __go_to_position(self, joint1, joint2, time_from_start=1, velocities=None): """ @@ -42,11 +48,13 @@ def __go_to_position(self, joint1, joint2, time_from_start=1, velocities=None): goal.trajectory.points.append(point) self._client.send_goal(goal) - + def async_reach_to(self, joint1, joint2, time_from_start=1, velocities=None): self.__go_to_position(joint1, joint2, time_from_start, velocities) - def sync_reach_to(self, joint1, joint2, time_from_start=1, wait=10, velocities=None): + def sync_reach_to( + self, joint1, joint2, time_from_start=1, wait=10, velocities=None + ): self.__go_to_position(joint1, joint2, time_from_start, velocities) done = self._client.wait_for_result(rospy.Duration(wait)) state = self._client.get_state() @@ -54,13 +62,13 @@ def sync_reach_to(self, joint1, joint2, time_from_start=1, wait=10, velocities=N def nod_head(self): current_pos = self.current_joint_values() - self.sync_reach_to(0.5,0) + self.sync_reach_to(0.5, 0) self.sync_reach_to(current_pos[0], current_pos[1]) @staticmethod def get_joint_values(): return get_joint_values("/head_controller/query_state") - + def activate_head(self): self._client_disable_head.wait_for_server() self._client_disable_head.cancel_goal() @@ -77,12 +85,13 @@ def look_down(self): self.sync_reach_to(0.0, -1.0) def look_right(self): - self.sync_reach_to(-1,0.0) + self.sync_reach_to(-1, 0.0) def look_left(self): self.sync_reach_to(1, 0.0) -if __name__ == '__main__': + +if __name__ == "__main__": rospy.init_node("head_test", anonymous=True) _head = HeadController() _head.sync_reach_to(0, 0) diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/look_at.py b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/look_at.py index ad504dd75..46b1aa1be 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/look_at.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/look_at.py @@ -1,7 +1,10 @@ #!/usr/bin/env python3 import rospy -from tiago_controllers.controllers.base_controller import BaseController, 
CmdVelController +from tiago_controllers.controllers.base_controller import ( + BaseController, + CmdVelController, +) from tiago_controllers.controllers.head_controller import HeadController from sensor_msgs.msg import Image from lasr_object_detection_yolo.detect_objects_v8 import detect_objects @@ -18,8 +21,14 @@ class LookAt: - - def __init__(self, head_controller: HeadController, base_controller: BaseController, cmd_vel_controller, object: str, model="coco"): + def __init__( + self, + head_controller: HeadController, + base_controller: BaseController, + cmd_vel_controller, + object: str, + model="coco", + ): self._base_controller = base_controller self._head_controller = head_controller self._cmd_vel_controller = cmd_vel_controller @@ -28,22 +37,22 @@ def __init__(self, head_controller: HeadController, base_controller: BaseControl def look_at(self, xywh, head_rot): self.turn_to(xywh, head_rot) - self._head_controller.sync_reach_to(0,0) + self._head_controller.sync_reach_to(0, 0) self.aim_head() def turn_to(self, xywh, head_rot): - x = xywh[0]-(xywh[3]/2) + x = xywh[0] - (xywh[3] / 2) is_clockwise = True - if x + (-100*head_rot) < 0: + if x + (-100 * head_rot) < 0: print("Counter clockwise ------------------") is_clockwise = False - angle = atan(DISTANCE/(PIXEL_DIST_CONST_MM*abs(x))) + angle = atan(DISTANCE / (PIXEL_DIST_CONST_MM * abs(x))) print("ANGLE: ", angle) self._cmd_vel_controller.rotate(70, angle, is_clockwise) def aim_head(self): robot_pos = np.array(self._base_controller.get_current_pose()[:2]) - img_sensor_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) + img_sensor_msg = rospy.wait_for_message("/xtion/rgb/image_raw", Image) hum_centroid = [img_sensor_msg.height // 2, img_sensor_msg.width // 2] hum_centroid[0], hum_centroid[1], xywh = self.__human_detected(robot_pos) tmp_hum_x, tmp_hum_y, tmp_xywh = self.__human_detected(robot_pos) @@ -52,29 +61,38 @@ def aim_head(self): hum_centroid[0] = tmp_hum_x hum_centroid[1] = tmp_hum_y print(f"moving head and the centroids") - self.__move_robot_head(hum_centroid[0], hum_centroid[1], tmp_xywh, robot_pos) + self.__move_robot_head( + hum_centroid[0], hum_centroid[1], tmp_xywh, robot_pos + ) - def __move_robot_head(self, x, y, _xywh, robot_pos): + def __move_robot_head(self, x, y, _xywh, robot_pos): try: - hum_x, hum_y, xywh = x,y, _xywh - img_sensor_msg = rospy.wait_for_message('/xtion/rgb/image_raw', Image) + hum_x, hum_y, xywh = x, y, _xywh + img_sensor_msg = rospy.wait_for_message("/xtion/rgb/image_raw", Image) rob_h, rob_w = (img_sensor_msg.height // 2), (img_sensor_msg.width // 2) - print("-"*40) + print("-" * 40) print(f"the robots pos is h = {rob_h}, and the w = {rob_w}") - print(f"the human pos is h = {xywh[3]}, and the w = {xywh[2]}, and all of xywh = {xywh}") + print( + f"the human pos is h = {xywh[3]}, and the w = {xywh[2]}, and all of xywh = {xywh}" + ) i = 0 moving_incr_x = 0.0 moving_incr_y = 0.0 - while abs(xywh[0] + (xywh[2]//2) - rob_w) > 50 and abs(xywh[1] + xywh[3] - rob_h) > 25: - print(f"in the while and {abs(xywh[0] + (xywh[2]//2) - rob_w)} and the y axis {abs(xywh[1] + xywh[2] - rob_h)} is bigger than 25") + while ( + abs(xywh[0] + (xywh[2] // 2) - rob_w) > 50 + and abs(xywh[1] + xywh[3] - rob_h) > 25 + ): + print( + f"in the while and {abs(xywh[0] + (xywh[2]//2) - rob_w)} and the y axis {abs(xywh[1] + xywh[2] - rob_h)} is bigger than 25" + ) moving_incr_x, moving_incr_y = self.check_moving_side( - xywh[0] + (xywh[2]//2), - xywh[1] + (xywh[3]//2), + xywh[0] + (xywh[2] // 2), + xywh[1] + (xywh[3] // 2), 
rob_w,
                 rob_h,
                 moving_incr_x,
-                moving_incr_y
+                moving_incr_y,
+            )
             if moving_incr_x is None:
                 return
@@ -87,7 +105,9 @@ def __move_robot_head(self, x, y, _xywh, robot_pos):
         except:
             rospy.logwarn("I could NOT reach that head position")
 
-    def check_moving_side(self, hum_x, hum_y, rob_x, rob_y, _moving_incr_x, _moving_incr_y):
+    def check_moving_side(
+        self, hum_x, hum_y, rob_x, rob_y, _moving_incr_x, _moving_incr_y
+    ):
         try:
             if abs(_moving_incr_x) < 1.20 and 0.70 > _moving_incr_y > -0.9:
                 if rob_x > hum_x:
diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/look_at_point.py b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/look_at_point.py
index bb350d4bd..9d9862381 100644
--- a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/look_at_point.py
+++ b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/look_at_point.py
@@ -21,6 +21,7 @@
 # Intrinsic parameters of the camera
 camera_intrinsics = None
 
+
 # Callback for every new image received
 def image_callback(img_msg):
     global camera_intrinsics
@@ -32,11 +33,14 @@ def image_callback(img_msg):
     cv2.waitKey(0)
 
     camera_intrinsics = cv2.getOptimalNewCameraMatrix(
-        camera_intrinsics, camera_intrinsics, (img_msg.width, img_msg.height), 0)
+        camera_intrinsics, camera_intrinsics, (img_msg.width, img_msg.height), 0
+    )
 
     if camera_intrinsics is not None:
         u, v = cv2.selectROI(cv_img)
-        rospy.loginfo("Pixel selected ({}, {}). Making TIAGo look to that direction".format(u, v))
+        rospy.loginfo(
+            "Pixel selected ({}, {}). Making TIAGo look in that direction".format(u, v)
+        )
 
         point_stamped = PointStamped()
         point_stamped.header.frame_id = camera_frame
@@ -61,12 +65,15 @@ def image_callback(img_msg):
         point_head_client.send_goal(goal)
         rospy.sleep(0.5)
 
+
 # OpenCV callback function for mouse events on a window
 def onMouse(event, u, v, flags, param):
     if event != cv2.EVENT_LBUTTONDOWN:
         return
-    rospy.loginfo("Pixel selected ({}, {}). Making TIAGo look in that direction".format(u, v))
+    rospy.loginfo(
+        "Pixel selected ({}, {}). 
Making TIAGo look in that direction".format(u, v)
+    )
 
     point_stamped = PointStamped()
     point_stamped.header.frame_id = camera_frame
@@ -92,24 +99,33 @@ def onMouse(event, u, v, flags, param):
     point_head_client.send_goal(goal)
     rospy.sleep(0.5)
 
+
 # Set mouse handler for the window
 cv2.setMouseCallback(window_name, onMouse)
 
 if __name__ == "__main__":
-    rospy.init_node('look_to_point')
+    rospy.init_node("look_to_point")
     rospy.loginfo("Starting look_to_point application ...")
 
     # Create a ROS action client to move TIAGo's head
-    point_head_client = SimpleActionClient("/head_controller/point_head_action", PointHeadAction)
+    point_head_client = SimpleActionClient(
+        "/head_controller/point_head_action", PointHeadAction
+    )
     rospy.loginfo("Creating action client to head controller ...")
     iterations = 0
     max_iterations = 3
-    while (not point_head_client.wait_for_server(rospy.Duration(2.0))) and rospy.is_shutdown() and iterations < max_iterations:
+    while (
+        (not point_head_client.wait_for_server(rospy.Duration(2.0)))
+        and not rospy.is_shutdown()
+        and iterations < max_iterations
+    ):
         rospy.logdebug("Waiting for the point_head_action server to come up")
         iterations += 1
 
     if iterations == max_iterations:
-        raise RuntimeError("Error in createPointHeadClient: head controller action server not available")
+        raise RuntimeError(
+            "Error in createPointHeadClient: head controller action server not available"
+        )
 
     # Create the window to show TIAGo's camera images
     cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
@@ -122,7 +138,9 @@ def onMouse(event, u, v, flags, param):
     transport_hint = TransportHints("compressed")
 
     rospy.loginfo("Subscribing to {} ...".format(image_topic))
-    image_subscriber = image_transporter.subscribe(image_topic, Image, image_callback, transport_hint)
+    image_subscriber = image_transporter.subscribe(
+        image_topic, Image, image_callback, transport_hint
+    )
 
     # Enter a loop that processes ROS callbacks. 
Press CTRL+C to exit the loop rospy.spin() diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/torso_controller.py b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/torso_controller.py index 0fbfc8b78..5c86108bc 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/controllers/torso_controller.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/controllers/torso_controller.py @@ -3,18 +3,24 @@ from __future__ import print_function import rospy import actionlib -from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal, JointTrajectoryControllerState +from control_msgs.msg import ( + FollowJointTrajectoryAction, + FollowJointTrajectoryGoal, + JointTrajectoryControllerState, +) from trajectory_msgs.msg import JointTrajectoryPoint from tiago_controllers.helpers import get_joint_values, is_running, cancel_goal # TODO: refactor sync and async + class TorsoController: JOINT_MAX = 0.35 def __init__(self): - self._client = actionlib.SimpleActionClient("torso_controller/follow_joint_trajectory", - FollowJointTrajectoryAction) + self._client = actionlib.SimpleActionClient( + "torso_controller/follow_joint_trajectory", FollowJointTrajectoryAction + ) self._client.wait_for_server() def get_client(self): @@ -41,7 +47,9 @@ def async_reach_to(self, joint1): self._client.send_goal(goal) def cancel_goal(self): - return cancel_goal(self, '/torso_controller/follow_joint_trajectory/cancel', self._client) + return cancel_goal( + self, "/torso_controller/follow_joint_trajectory/cancel", self._client + ) def is_running(self): return is_running(self._client) @@ -49,4 +57,3 @@ def is_running(self): @staticmethod def get_joint_values(): return get_joint_values("/torso_controller/query_state") - diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/arm_server_helpers.py b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/arm_server_helpers.py index ad4ae26ef..8ddbb7e5d 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/arm_server_helpers.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/arm_server_helpers.py @@ -5,9 +5,9 @@ def clear_octomap(): - rospy.wait_for_service('/clear_octomap') + rospy.wait_for_service("/clear_octomap") try: - clear = rospy.ServiceProxy('/clear_octomap', Empty) + clear = rospy.ServiceProxy("/clear_octomap", Empty) clear() except rospy.ServiceException as e: print("Service call failed: %s" % e) diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/client_state.py b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/client_state.py index f64b9a821..bb701c1be 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/client_state.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/client_state.py @@ -6,7 +6,10 @@ def is_running(client): - return client.get_state() == GoalStatus.PENDING or client.get_state() == GoalStatus.ACTIVE + return ( + client.get_state() == GoalStatus.PENDING + or client.get_state() == GoalStatus.ACTIVE + ) def cancel_goal(topic, client): @@ -14,8 +17,16 @@ def cancel_goal(topic, client): pub = rospy.Publisher(topic, GoalID, queue_size=1) goal_id = GoalID() pub.publish(goal_id) - while not (client.get_state() in [GoalStatus.PREEMPTED, GoalStatus.ABORTED, GoalStatus.REJECTED, - GoalStatus.RECALLED, GoalStatus.SUCCEEDED]): + while not ( + client.get_state() + in [ + GoalStatus.PREEMPTED, + 
GoalStatus.ABORTED, + GoalStatus.REJECTED, + GoalStatus.RECALLED, + GoalStatus.SUCCEEDED, + ] + ): print("canceling the goal...") rospy.sleep(0.5) except AttributeError: @@ -23,6 +34,10 @@ def cancel_goal(topic, client): except Exception: print("you need a valid topic to cancel the goal") + def is_terminated(client): - return client.get_state() == GoalStatus.LOST or client.get_state() == GoalStatus.PREEMPTED or \ - client.get_state() == GoalStatus.ABORTED \ No newline at end of file + return ( + client.get_state() == GoalStatus.LOST + or client.get_state() == GoalStatus.PREEMPTED + or client.get_state() == GoalStatus.ABORTED + ) diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/movement_helpers.py b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/movement_helpers.py index 45ee33351..69e5416e1 100755 --- a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/movement_helpers.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/movement_helpers.py @@ -22,9 +22,11 @@ def _get_halfway() -> PoseStamped: else: return path[int(len(path) / 2)] + def _is_point_in_rect(bl, tr, p): return bl[0] < p[0] < tr[0] and bl[1] < p[1] < tr[1] + def _is_halfway(base_controller) -> bool: pose = _get_halfway() if pose == PoseStamped(): @@ -32,10 +34,13 @@ def _is_halfway(base_controller) -> bool: else: tol = 1.0 x, y = pose.pose.position.x, pose.pose.position.y - return _is_point_in_rect([x - tol, y - tol], [x + tol, y + tol], base_controller.get_pose()) + return _is_point_in_rect( + [x - tol, y - tol], [x + tol, y + tol], base_controller.get_pose() + ) + def move_to_position(is_sync, position, base_controller): - is_real_life = rospy.get_published_topics(namespace='/pal_head_manager') + is_real_life = rospy.get_published_topics(namespace="/pal_head_manager") rospy.sleep(2) if is_real_life and is_sync: print("is real life") @@ -50,6 +55,7 @@ def move_to_position(is_sync, position, base_controller): place = position + "_simulation" base_controller.async_to_pose(get_pose_from_param(place)) + def move_breath_and_search(controllers: Controllers, position: str): base_controller = controllers.base_controller move_to_position(is_sync=False, position=position, base_controller=base_controller) @@ -64,13 +70,21 @@ def move_breath_and_search(controllers: Controllers, position: str): is_halfway = True elif not is_running(head_controller.get_client()): if is_head_right: - head_controller.async_reach_to(HEAD_TWIST, -1, time_from_start=HEAD_TIME) + head_controller.async_reach_to( + HEAD_TWIST, -1, time_from_start=HEAD_TIME + ) else: - head_controller.async_reach_to(-1 * HEAD_TWIST, -1, time_from_start=HEAD_TIME) + head_controller.async_reach_to( + -1 * HEAD_TWIST, -1, time_from_start=HEAD_TIME + ) is_head_right = not is_head_right rospy.sleep(0.1) if not is_running(torso_controller.get_client()): - torso_controller.async_reach_to(TORSO_LOW) if is_torso_up else torso_controller.async_reach_to(TORSO_HIGH) + ( + torso_controller.async_reach_to(TORSO_LOW) + if is_torso_up + else torso_controller.async_reach_to(TORSO_HIGH) + ) is_torso_up = not is_torso_up rospy.sleep(0.1) torso_controller.async_reach_to(TORSO_HIGH) diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/nav_map_helpers.py b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/nav_map_helpers.py index fa8da8df9..01f404495 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/nav_map_helpers.py +++ 
b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/nav_map_helpers.py
@@ -8,6 +8,7 @@
 from markers.markers_helpers import create_point_marker
 from common_math.math_ import euclidian_distance
 
+
 def get_dist_to_door(is_robot, x=None, y=None):
     if is_robot:
         robot_pose = rospy.wait_for_message("/robot_pose", PoseWithCovarianceStamped)
@@ -23,22 +24,25 @@ def get_dist_to_door(is_robot, x=None, y=None):
     dist = euclidian_distance(r, d)
     print(f"distance to door: {dist}")
     return dist
+
+
 def clear_costmap():
     """
-        Clears costmap using clear_octomap server
+    Clears the costmap using the /move_base/clear_costmaps service
     """
-    rospy.loginfo('waiting for clear_costmap')
-    rospy.wait_for_service('/move_base/clear_costmaps', timeout=10)
+    rospy.loginfo("waiting for clear_costmap")
+    rospy.wait_for_service("/move_base/clear_costmaps", timeout=10)
     try:
-        _clear_costmap = rospy.ServiceProxy('/move_base/clear_costmaps', Empty)
+        _clear_costmap = rospy.ServiceProxy("/move_base/clear_costmaps", Empty)
         response = _clear_costmap()
-        rospy.loginfo('clearing costmap done!')
+        rospy.loginfo("clearing costmap done!")
         return response
     except rospy.ServiceException as e:
         print("Service call failed: %s" % e)
-def how_close_to_object(object_name='/door/pose'):
+
+
+def how_close_to_object(object_name="/door/pose"):
     """
     Checks how close the robot is to the object stored in param
     Parameters
     ----------
     object_name
     Returns
     -------
     """
     robot_pose = rospy.wait_for_message("/robot_pose", PoseWithCovarianceStamped)
     r = (robot_pose.pose.pose.position.x, robot_pose.pose.pose.position.y)
     door_position = get_pose_from_param(object_name)
     d = (door_position.position.x, door_position.position.y)
     dist = euclidian_distance(r, d)
     return dist
-def is_close_to_object(object_name='/door/pose', min_dist=0.5):
+
+
+def is_close_to_object(object_name="/door/pose", min_dist=0.5):
     """
     Checks if the robot is close to the object stored in param compared to the min_dist
     Parameters
     ----------
     object_name
     min_dist
     Returns
     -------
     """
     dist = how_close_to_object(object_name)
     return dist < min_dist
+
+
 def get_current_robot_pose():
-   robot_pose = rospy.wait_for_message("/robot_pose", PoseWithCovarianceStamped)
-   return robot_pose.pose.pose
+    robot_pose = rospy.wait_for_message("/robot_pose", PoseWithCovarianceStamped)
+    return robot_pose.pose.pose
+
+
 def get_random_rgb():
     import random
+
     r = random.randint(0, 255)
     g = random.randint(0, 255)
     b = random.randint(0, 255)
     print(f"Random RGB: ({r}, {g}, {b})")
     return (r, g, b), "This is {}, {}, {}".format(r, g, b)
+
+
 def rank(points_name="lift/centers"):
     centers = rospy.get_param(points_name)
     print("centers")
@@ -113,6 +126,8 @@ def rank(points_name="lift/centers"):
         return True
     else:
         return False
+
+
 def counter(topic="/counter_lift/counter", count_default=3):
     count = rospy.get_param(topic)
     rospy.loginfo("count: " + str(topic) + "---> " + str(count))
@@ -124,13 +139,14 @@ def counter(topic="/counter_lift/counter", count_default=3):
         count += 1
         rospy.set_param(topic, count)
 
-def play_motion_goal(client, motion_name='home'):
-    client.wait_for_server()
-    rospy.sleep(0.5)
-    goal = PlayMotionGoal()
-    goal.motion_name = motion_name
-    client.send_goal(goal)
-    result = client.wait_for_result()
-    state = client.get_state()
-    return result, state
\ No newline at end of file
+def play_motion_goal(client, motion_name="home"):
+    client.wait_for_server()
+    rospy.sleep(0.5)
+    goal = PlayMotionGoal()
+
+    goal.motion_name = motion_name
+    client.send_goal(goal)
+    result = client.wait_for_result()
+    state = client.get_state()
+    return result, state
diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/pose_helpers.py 
b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/pose_helpers.py index 978cf4c95..a1b1cd3dc 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/pose_helpers.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/pose_helpers.py @@ -7,19 +7,20 @@ def get_pose_from_param(name): return None pose = rospy.get_param(name) print(pose) - return Pose(Point(pose['position']['x'], - pose['position']['y'], - pose['position']['z']), - Quaternion(pose['orientation']['x'], - pose['orientation']['y'], - pose['orientation']['z'], - pose['orientation']['w'])) + return Pose( + Point(pose["position"]["x"], pose["position"]["y"], pose["position"]["z"]), + Quaternion( + pose["orientation"]["x"], + pose["orientation"]["y"], + pose["orientation"]["z"], + pose["orientation"]["w"], + ), + ) -def get_pose_from_param_new(name, col='location'): + +def get_pose_from_param_new(name, col="location"): if not rospy.has_param(name): return None pose = rospy.get_param(name) position, orientation = pose[col]["position"], pose[col]["orientation"] return Pose(position=Point(**position), orientation=Quaternion(**orientation)) - - diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/pose_recorder.py b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/pose_recorder.py index 49c2b910a..a219c94b9 100644 --- a/common/navigation/tiago_controllers/src/tiago_controllers/helpers/pose_recorder.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/helpers/pose_recorder.py @@ -5,39 +5,59 @@ from geometry_msgs.msg import PoseStamped import yaml + class PoseRecorder: def __init__(self): self.points = [] self.num_points = 0 - self.clicked_point_sub = rospy.Subscriber('/clicked_point', PoseStamped, self.clicked_point_callback) - self.amcl_pose_sub = rospy.Subscriber('/amcl_pose', PoseStamped, self.amcl_pose_callback) + self.clicked_point_sub = rospy.Subscriber( + "/clicked_point", PoseStamped, self.clicked_point_callback + ) + self.amcl_pose_sub = rospy.Subscriber( + "/amcl_pose", PoseStamped, self.amcl_pose_callback + ) def clicked_point_callback(self, msg): - self.points.append({'pose': {'position': {'x': msg.pose.position.x, 'y': msg.pose.position.y, 'z': msg.pose.position.z}, - 'orientation': {'x': msg.pose.orientation.x, 'y': msg.pose.orientation.y, - 'z': msg.pose.orientation.z, 'w': msg.pose.orientation.w}}}) + self.points.append( + { + "pose": { + "position": { + "x": msg.pose.position.x, + "y": msg.pose.position.y, + "z": msg.pose.position.z, + }, + "orientation": { + "x": msg.pose.orientation.x, + "y": msg.pose.orientation.y, + "z": msg.pose.orientation.z, + "w": msg.pose.orientation.w, + }, + } + } + ) self.num_points += 1 def amcl_pose_callback(self, msg): if self.num_points > 0: - self.points[-1]['name'] = 'point{}'.format(self.num_points) + self.points[-1]["name"] = "point{}".format(self.num_points) def save_to_yaml(self, request): if self.num_points == 0: return EmptyResponse() - data = {'points': self.points} + data = {"points": self.points} - with open('output.yaml', 'w') as yaml_file: + with open("output.yaml", "w") as yaml_file: yaml.dump(data, yaml_file, default_flow_style=False) - rospy.loginfo('Saved {} points to output.yaml'.format(self.num_points)) + rospy.loginfo("Saved {} points to output.yaml".format(self.num_points)) return EmptyResponse() -if __name__ == '__main__': - rospy.init_node('pose_recorder') + +if __name__ == "__main__": + rospy.init_node("pose_recorder") pose_recorder = PoseRecorder() - 
save_service = rospy.Service('save_pose_to_yaml', Empty, pose_recorder.save_to_yaml)
+    save_service = rospy.Service("save_pose_to_yaml", Empty, pose_recorder.save_to_yaml)
     rospy.spin()
diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/services/arm_torso_srv.py b/common/navigation/tiago_controllers/src/tiago_controllers/services/arm_torso_srv.py
index e657178bb..6568792b1 100755
--- a/common/navigation/tiago_controllers/src/tiago_controllers/services/arm_torso_srv.py
+++ b/common/navigation/tiago_controllers/src/tiago_controllers/services/arm_torso_srv.py
@@ -13,10 +13,14 @@ def __call__(self, req):
             raise rospy.ServiceException("Invalid request")
         else:
             if not req.plan:
-                res = self._arm_torso_controller.sync_reach_joint_space(req.torso_goals, req.arm_goals)
+                res = self._arm_torso_controller.sync_reach_joint_space(
+                    req.torso_goals, req.arm_goals
+                )
             else:
                 # self._arm_torso_controller.clear_octomap()
-                res = self._arm_torso_controller.execute_plan(req.torso_goals, req.arm_goals)
+                res = self._arm_torso_controller.execute_plan(
+                    req.torso_goals, req.arm_goals
+                )
             if res is None:
                 raise rospy.ServiceException("Wrong request input")
             response = ArmTorsoPosResponse()
@@ -26,11 +30,11 @@ def __call__(self, req):
 
 if __name__ == "__main__":
     rospy.init_node("arm_torso_controller_server")
-    if rospy.get_published_topics(namespace='/xtion'):
+    if rospy.get_published_topics(namespace="/xtion"):
         server = ArmTorsoControllerSrv()
-        service = rospy.Service('arm_torso_controller', ArmTorsoPos, server)
+        service = rospy.Service("arm_torso_controller", ArmTorsoPos, server)
         rospy.loginfo("Arm Torso Controller Server initialised")
         rospy.spin()
     else:
-        print('*'*50, "Arm Torso service NOT enables", '*'*50)
+        print("*" * 50, "Arm Torso service NOT enabled", "*" * 50)
         rospy.spin()
diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/services/moveit_arm_controller.py b/common/navigation/tiago_controllers/src/tiago_controllers/services/moveit_arm_controller.py
index a3fabbabf..98d47a9c2 100755
--- a/common/navigation/tiago_controllers/src/tiago_controllers/services/moveit_arm_controller.py
+++ b/common/navigation/tiago_controllers/src/tiago_controllers/services/moveit_arm_controller.py
@@ -17,10 +17,12 @@ def __init__(self):
         self._scene = moveit_commander.PlanningSceneInterface()
         _group_name = "arm_torso"
         self._group_names = self._robot.get_group_names()
-        self._move_group = moveit_commander.MoveGroupCommander(name=_group_name, wait_for_servers=0)
-        self._display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
-                                                             DisplayTrajectory,
-                                                             queue_size=20)
+        self._move_group = moveit_commander.MoveGroupCommander(
+            name=_group_name, wait_for_servers=0
+        )
+        self._display_trajectory_publisher = rospy.Publisher(
+            "/move_group/display_planned_path", DisplayTrajectory, queue_size=20
+        )
 
     def get_joint_values(self):
         return self._move_group.get_current_joint_values()
@@ -97,17 +99,17 @@ def get_base_info(self):
         print("")
 
     def clear_octomap(self):
-        rospy.wait_for_service('/clear_octomap')
+        rospy.wait_for_service("/clear_octomap")
         try:
-            clear = rospy.ServiceProxy('/clear_octomap', Empty)
+            clear = rospy.ServiceProxy("/clear_octomap", Empty)
             clear()
         except rospy.ServiceException as e:
             print("Service call failed: %s" % e)
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     rospy.init_node("move_group_python_interface_tutorial", anonymous=True)
-    if rospy.get_published_topics(namespace='/move_group/display_planned_path'):
+    if 
rospy.get_published_topics(namespace="/move_group/display_planned_path"): a = ArmTorsoController() a.get_base_info() a.sync_reach_joint_space(0.1, INITIAL_POSITION) @@ -115,4 +117,4 @@ def clear_octomap(self): a.clear_octomap() a.execute_plan(0.15, GRAB_OBJECT) else: - rospy.loginfo('*'*50, "You are not running TIAGO", '*'*50) + rospy.loginfo("*" * 50, "You are not running TIAGO", "*" * 50) diff --git a/common/navigation/tiago_controllers/src/tiago_controllers/speed_configuration.py b/common/navigation/tiago_controllers/src/tiago_controllers/speed_configuration.py index f427086c7..f05fc97fa 100755 --- a/common/navigation/tiago_controllers/src/tiago_controllers/speed_configuration.py +++ b/common/navigation/tiago_controllers/src/tiago_controllers/speed_configuration.py @@ -9,10 +9,12 @@ NORMAL = 0.0, 0.5 SLOW = 0.1, 0.3 -class SpeedConfiguration(): + +class SpeedConfiguration: def __init__(self, speed_mode): - self.client = dynamic_reconfigure.client.Client("/move_base/PalLocalPlanner", timeout=30, - config_callback=self.callback()) + self.client = dynamic_reconfigure.client.Client( + "/move_base/PalLocalPlanner", timeout=30, config_callback=self.callback() + ) if speed_mode == Speed.FAST: self.change_robot_speed("min_vel_x", FAST[0]) @@ -28,22 +30,42 @@ def callback(config): pass def is_within_tol(self, value, default=0.0): - return abs(value) <= abs(default * MAX_CHANGE) if default != 0.0 else abs(value) <= abs(default + MAX_CHANGE) + return ( + abs(value) <= abs(default * MAX_CHANGE) + if default != 0.0 + else abs(value) <= abs(default + MAX_CHANGE) + ) def change_robot_speed(self, param, value, tol=0.1): try: - rospy.logwarn(f"Config before change of {param} set to {self.client.get_configuration()[param]}") + rospy.logwarn( + f"Config before change of {param} set to {self.client.get_configuration()[param]}" + ) print(" ") - min_param = [x for x in self.client.get_parameter_descriptions() if x['name'].lower() == param.lower()][ - 0].get('min') - max_param = [x for x in self.client.get_parameter_descriptions() if x['name'].lower() == param.lower()][ - 0].get('max') - type_param = [x for x in self.client.get_parameter_descriptions() if x['name'].lower() == param.lower()][ - 0].get('type') - default = [x for x in self.client.get_parameter_descriptions() if x['name'].lower() == param.lower()][ - 0].get('default') + min_param = [ + x + for x in self.client.get_parameter_descriptions() + if x["name"].lower() == param.lower() + ][0].get("min") + max_param = [ + x + for x in self.client.get_parameter_descriptions() + if x["name"].lower() == param.lower() + ][0].get("max") + type_param = [ + x + for x in self.client.get_parameter_descriptions() + if x["name"].lower() == param.lower() + ][0].get("type") + default = [ + x + for x in self.client.get_parameter_descriptions() + if x["name"].lower() == param.lower() + ][0].get("default") # rospy.loginfo(f"check bounds of min_param= {min_param}, max_param = {max_param}, default_value= {default}, type_para, = {type_param} ...") - if not max_param > value > min_param or not self.is_within_tol(value, default):#or type(value) != type_param + if not max_param > value > min_param or not self.is_within_tol( + value, default + ): # or type(value) != type_param rospy.logwarn(f" Out of bounds value and setting it to default") self.client.update_configuration({param: default}) else: @@ -53,7 +75,6 @@ def change_robot_speed(self, param, value, tol=0.1): rospy.logwarn(f"cannot change the value of {param} with {value}") - -if __name__ == '__main__': +if __name__ == 
"__main__": rospy.init_node("speed_configuration", anonymous=True) - _speed = SpeedConfiguration(Speed.NORMAL) \ No newline at end of file + _speed = SpeedConfiguration(Speed.NORMAL) diff --git a/common/navigation/unsafe_traversal/setup.py b/common/navigation/unsafe_traversal/setup.py index 385966ebc..3658606ef 100644 --- a/common/navigation/unsafe_traversal/setup.py +++ b/common/navigation/unsafe_traversal/setup.py @@ -1,9 +1,6 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -d = generate_distutils_setup( - packages=['unsafe_traversal'], - package_dir={'': 'src'} -) +d = generate_distutils_setup(packages=["unsafe_traversal"], package_dir={"": "src"}) setup(**d) diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/__init__.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/__init__.py index bb44dd681..deb9bcace 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/__init__.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/__init__.py @@ -2,4 +2,4 @@ from .services import ViablePlanCheckerService, LaserDistCheckerService from .planning import get_planner_controller from .actions import TraversalActionServer -from .quaternion import align_poses \ No newline at end of file +from .quaternion import align_poses diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/actions/action_server.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/actions/action_server.py index 3868a25a8..b042ed04f 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/actions/action_server.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/actions/action_server.py @@ -1,9 +1,20 @@ #!/usr/bin/env python3 import rospy import actionlib -from unsafe_traversal.srv import ChangeTraversalParameters, DeterminePathViability, LaserDist +from unsafe_traversal.srv import ( + ChangeTraversalParameters, + DeterminePathViability, + LaserDist, +) from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal -from unsafe_traversal.msg import MoveToGoalAction, AlignToGoalAction, MoveToGoalGoal, AlignToGoalGoal, MoveToGoalResult, AlignToGoalResult +from unsafe_traversal.msg import ( + MoveToGoalAction, + AlignToGoalAction, + MoveToGoalGoal, + AlignToGoalGoal, + MoveToGoalResult, + AlignToGoalResult, +) from sensor_msgs.msg import LaserScan import numpy as np from common_math import euclidian @@ -13,33 +24,44 @@ class TraversalActionServer(MoveBaseClient): - ''' + """ Action server for moving to a goal with unsafe traversal enabled. 
- ''' + """ _proxy = rospy.ServiceProxy( - '/unsafe_traversal/set_unsafe_traversal', ChangeTraversalParameters) + "/unsafe_traversal/set_unsafe_traversal", ChangeTraversalParameters + ) def __init__(self): # create action servers self._align_server = actionlib.SimpleActionServer( - '/unsafe_traversal/align_to_goal', AlignToGoalAction, execute_cb=self.align_action_cb, auto_start=False) + "/unsafe_traversal/align_to_goal", + AlignToGoalAction, + execute_cb=self.align_action_cb, + auto_start=False, + ) self._move_server = actionlib.SimpleActionServer( - '/unsafe_traversal/move_to_goal', MoveToGoalAction, execute_cb=self.move_action_cb, auto_start=False) + "/unsafe_traversal/move_to_goal", + MoveToGoalAction, + execute_cb=self.move_action_cb, + auto_start=False, + ) # start the servers self._align_server.start() self._move_server.start() - self.viable_plan_srv = rospy.ServiceProxy("/unsafe_traversal/check_if_plan_is_viable", DeterminePathViability) - self.laser_dist_srv = rospy.ServiceProxy("/unsafe_traversal/laser_dist_checker", LaserDist) - - + self.viable_plan_srv = rospy.ServiceProxy( + "/unsafe_traversal/check_if_plan_is_viable", DeterminePathViability + ) + self.laser_dist_srv = rospy.ServiceProxy( + "/unsafe_traversal/laser_dist_checker", LaserDist + ) def align_action_cb(self, msg): - ''' + """ Align action server callback - ''' + """ align_poses(msg.start_pose, msg.end_pose) self.move(msg.start_pose) @@ -47,16 +69,16 @@ def align_action_cb(self, msg): self._align_server.set_succeeded(AlignToGoalResult()) def move_action_cb(self, msg): - ''' + """ Move action server callback - ''' + """ result = MoveToGoalResult() align_poses(msg.start_pose, msg.end_pose) self.move(msg.start_pose) viable = self.viable_plan_srv(msg.start_pose, msg.end_pose).viable - laser_dist = self.laser_dist_srv(60.).dist + laser_dist = self.laser_dist_srv(60.0).dist print(viable, laser_dist) if not viable and laser_dist < 1.5: result.success = False diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/actions/move_base_client.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/actions/move_base_client.py index f9189a7cf..f66266708 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/actions/move_base_client.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/actions/move_base_client.py @@ -5,12 +5,11 @@ class MoveBaseClient: - ''' + """ Configure a client for move_base - ''' + """ - _move_base_client = actionlib.SimpleActionClient( - 'move_base', MoveBaseAction) + _move_base_client = actionlib.SimpleActionClient("move_base", MoveBaseAction) def __init__(self): self._move_base_client.wait_for_server() diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/parameter_controller.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/parameter_controller.py index 4201da581..f1659d55f 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/parameter_controller.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/parameter_controller.py @@ -1,29 +1,29 @@ #!/usr/bin/env python3 import rospy + # from typing import Set, Tuple from dynamic_reconfigure.srv import Reconfigure from dynamic_reconfigure.msg import Config, DoubleParameter class ParameterOverrideController: - ''' + """ Manages overriden parameters with the ability to rollback afterwards. 
- ''' + """ # proxy: rospy.ServiceProxy # original_values: Config # overwritten: Set[Tuple[str, str]] def __init__(self, namespace): - self.proxy = rospy.ServiceProxy( - namespace + '/set_parameters', Reconfigure) + self.proxy = rospy.ServiceProxy(namespace + "/set_parameters", Reconfigure) self.original_values = None self.overwritten = set() def get_config(self): - ''' + """ Return the original set values. - ''' + """ if self.original_values is None: self.original_values = self.proxy().config @@ -31,9 +31,9 @@ def get_config(self): return self.original_values def get_double_param(self, key): # -> float: - ''' + """ Get the original double parameter value. - ''' + """ for entry in self.get_config().doubles: if entry.name == key: @@ -42,34 +42,30 @@ def get_double_param(self, key): # -> float: return None def set_double_param(self, key, value): - ''' + """ Override some double parameter. - ''' + """ self.get_double_param(key) self.overwritten.add((key, "double")) config = Config() - config.doubles.append(DoubleParameter( - name=key, - value=value - )) + config.doubles.append(DoubleParameter(name=key, value=value)) self.proxy(config) def restore_all(self): - ''' + """ Restore all overriden config options. - ''' + """ config = Config() - for (key, value_type) in self.overwritten: - if value_type == 'double': - config.doubles.append(DoubleParameter( - name=key, - value=self.get_double_param(key) - )) + for key, value_type in self.overwritten: + if value_type == "double": + config.doubles.append( + DoubleParameter(name=key, value=self.get_double_param(key)) + ) self.overwritten = set() self.proxy(config) diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/__init__.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/__init__.py index c6d2bf852..aecc780ff 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/__init__.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/__init__.py @@ -4,14 +4,14 @@ def get_planner_controller(): - ''' + """ Get the appropriate planner controller. 
- ''' + """ - if '/move_base/TebLocalPlannerROS/set_parameters' in rosservice.get_service_list(): + if "/move_base/TebLocalPlannerROS/set_parameters" in rosservice.get_service_list(): # running in TIAGo simulator return TebPlannerController() - elif '/move_base/PalLocalPlanner/set_parameters' in rosservice.get_service_list(): + elif "/move_base/PalLocalPlanner/set_parameters" in rosservice.get_service_list(): # running on TIAGo return PalPlannerController() else: diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controller.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controller.py index 068ef3b0e..8616a8032 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controller.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controller.py @@ -1,11 +1,11 @@ #!/usr/bin/env python3 -#from abc import ABC, abstractmethod +# from abc import ABC, abstractmethod class PlannerController: # (ABC): - ''' + """ Planner controller interface - ''' + """ # @abstractmethod def set_unsafe(self, state): diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controllers/PalLocalPlanner.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controllers/PalLocalPlanner.py index e90276003..80223093e 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controllers/PalLocalPlanner.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controllers/PalLocalPlanner.py @@ -6,22 +6,26 @@ class PalPlannerController(PlannerController): - ''' + """ Parameters needed for physical TIAGo robot. - ''' + """ def __init__(self): self.planner_overrides = ParameterOverrideController( - "/move_base/PalLocalPlanner") + "/move_base/PalLocalPlanner" + ) self.global_costmap_overrides = ParameterOverrideController( - "/move_base/global_costmap/inflation_layer") + "/move_base/global_costmap/inflation_layer" + ) def set_unsafe(self, unsafe): if unsafe: self.planner_overrides.set_double_param( - 'security_dist', TARGET_SECURITY_DIST) + "security_dist", TARGET_SECURITY_DIST + ) self.global_costmap_overrides.set_double_param( - 'inflation_radius', TARGET_INFLATION_RADIUS) + "inflation_radius", TARGET_INFLATION_RADIUS + ) else: self.planner_overrides.restore_all() self.global_costmap_overrides.restore_all() diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controllers/TebGlobalPlannerROS.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controllers/TebGlobalPlannerROS.py index 6e46b8e11..eaa7e5f7b 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controllers/TebGlobalPlannerROS.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/planning/controllers/TebGlobalPlannerROS.py @@ -6,18 +6,22 @@ class TebPlannerController(PlannerController): - ''' + """ Parameters needed for TIAGo simulation. 
- ''' + """ + def __init__(self): self.planner_overrides = ParameterOverrideController( - "/move_base/TebLocalPlannerROS") + "/move_base/TebLocalPlannerROS" + ) def set_unsafe(self, unsafe): if unsafe: self.planner_overrides.set_double_param( - 'inflation_dist', TARGET_INFLATION_DIST) + "inflation_dist", TARGET_INFLATION_DIST + ) self.planner_overrides.set_double_param( - 'min_obstacle_dist', TARGET_MIN_OBSTACLE_DIST) + "min_obstacle_dist", TARGET_MIN_OBSTACLE_DIST + ) else: self.planner_overrides.restore_all() diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/quaternion.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/quaternion.py index 15346e64f..a826b332e 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/quaternion.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/quaternion.py @@ -3,17 +3,17 @@ def align_poses(start_pose, end_pose): - ''' + """ Align both poses to point forwards from start to end pose This can probably be done using TF but that complicates things - ''' + """ if start_pose.header.frame_id != end_pose.header.frame_id: raise Exception("frame_id of start and end pose does not match") angle = math.atan2( end_pose.pose.position.y - start_pose.pose.position.y, - end_pose.pose.position.x - start_pose.pose.position.x + end_pose.pose.position.x - start_pose.pose.position.x, ) z = math.sin(angle / 2) diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/services/__init__.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/services/__init__.py index df8d124d3..4994cae83 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/services/__init__.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/services/__init__.py @@ -1,2 +1,2 @@ from .viable_plan_checker import ViablePlanCheckerService -from .laser_dist_checker import LaserDistCheckerService \ No newline at end of file +from .laser_dist_checker import LaserDistCheckerService diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/services/laser_dist_checker.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/services/laser_dist_checker.py index 5543576e3..07a465445 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/services/laser_dist_checker.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/services/laser_dist_checker.py @@ -5,13 +5,16 @@ from sensor_msgs.msg import LaserScan import math + class LaserDistCheckerService: """A ROS service that returns the mean distance of a laser scan within a specific field of view and with outliers removed.""" def __init__(self): """Initialize the service.""" self.laser_topic = "/scan" - self.laser_dist_srv = rospy.Service("/unsafe_traversal/laser_dist_checker", LaserDist, self.laser_dist) + self.laser_dist_srv = rospy.Service( + "/unsafe_traversal/laser_dist_checker", LaserDist, self.laser_dist + ) def remove_outliers(self, data, threshold=1.5): """ @@ -56,7 +59,7 @@ def filter_by_fov_degrees(self, scan, fov): LaserScan: The filtered laser scan message. 
""" filtered_scan = LaserScan() - + filtered_scan.header = scan.header filtered_scan.angle_min = scan.angle_min filtered_scan.angle_max = scan.angle_max @@ -70,7 +73,9 @@ def filter_by_fov_degrees(self, scan, fov): fov_end = center_idx + fov_half filtered_scan.ranges = [np.nan] * len(scan.ranges) - filtered_scan.ranges[fov_start:fov_end+1] = scan.ranges[fov_start:fov_end+1] + filtered_scan.ranges[fov_start : fov_end + 1] = scan.ranges[ + fov_start : fov_end + 1 + ] return filtered_scan @@ -87,4 +92,4 @@ def laser_dist(self, req): scan = rospy.wait_for_message(self.laser_topic, LaserScan) filtered_scan = self.filter_by_fov_degrees(scan, req.fov_degrees) mean_distance = self.nanmean_without_outliers(np.array(filtered_scan.ranges)) - return LaserDistResponse(mean_distance) \ No newline at end of file + return LaserDistResponse(mean_distance) diff --git a/common/navigation/unsafe_traversal/src/unsafe_traversal/services/viable_plan_checker.py b/common/navigation/unsafe_traversal/src/unsafe_traversal/services/viable_plan_checker.py index 8f0185697..09be7ac77 100644 --- a/common/navigation/unsafe_traversal/src/unsafe_traversal/services/viable_plan_checker.py +++ b/common/navigation/unsafe_traversal/src/unsafe_traversal/services/viable_plan_checker.py @@ -11,13 +11,14 @@ MOVE_BASE_TOLERANCE = 0.01 PLAN_TOLERANCE = 0.1 + class ViablePlanCheckerService: - ''' + """ Service for calling move_base to determine whether a plan is viable between two points. In these sense that, the plan given by move_base is similar to the distance between two points so that we are not trying to route around the problem. - ''' + """ # proxy: rospy.ServiceProxy # service: rospy.Service @@ -31,27 +32,32 @@ def __init__(self): if service.startswith("/move_base/") and service.endswith("/make_plan"): target_service = service break - + # raise an error if none found if target_service is None: - raise Exception('Could not find move_base make_plan service!') + raise Exception("Could not find move_base make_plan service!") self.proxy = rospy.ServiceProxy(target_service, GetPlan) # start the service - self.service = rospy.Service('/unsafe_traversal/check_if_plan_is_viable', - DeterminePathViability, self.check_plan) + self.service = rospy.Service( + "/unsafe_traversal/check_if_plan_is_viable", + DeterminePathViability, + self.check_plan, + ) def check_plan(self, request): - ''' + """ Callback for plan viability service - ''' + """ # remove any need for move_base to rotate align_poses(request.start_pose, request.end_pose) # create the plan - plan = self.proxy(request.start_pose, request.end_pose, MOVE_BASE_TOLERANCE).plan + plan = self.proxy( + request.start_pose, request.end_pose, MOVE_BASE_TOLERANCE + ).plan # calculate the distance of the plan dist = 0 @@ -59,7 +65,9 @@ def check_plan(self, request): dist += self.euclidian_distance(cur.pose.position, next.pose.position) # calculate our target distance - max_dist = self.euclidian_distance(request.start_pose.pose.position, request.end_pose.pose.position) + max_dist = self.euclidian_distance( + request.start_pose.pose.position, request.end_pose.pose.position + ) # check if this is within acceptable bounds diff = abs(dist - max_dist) @@ -70,7 +78,5 @@ def check_plan(self, request): def euclidian_distance(self, a, b): return math.sqrt( - math.pow(a.x - b.x, 2) + - math.pow(a.y - b.y, 2) + - math.pow(a.z - b.z, 2) + math.pow(a.x - b.x, 2) + math.pow(a.y - b.y, 2) + math.pow(a.z - b.z, 2) ) diff --git a/common/speech/lasr_rasa/setup.py b/common/speech/lasr_rasa/setup.py index 
b272d0178..bf86fd283 100644 --- a/common/speech/lasr_rasa/setup.py +++ b/common/speech/lasr_rasa/setup.py @@ -3,9 +3,6 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup( - packages=[], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=[], package_dir={"": "src"}) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/speech/lasr_speech_recognition_whisper/requirements.txt b/common/speech/lasr_speech_recognition_whisper/requirements.txt index 1cace21e8..bc986de21 100644 --- a/common/speech/lasr_speech_recognition_whisper/requirements.txt +++ b/common/speech/lasr_speech_recognition_whisper/requirements.txt @@ -1,22 +1,19 @@ ---extra-index-url https://pypi.ngc.nvidia.com ---trusted-host pypi.ngc.nvidia.com - catkin-pkg==1.0.0 # via rospkg certifi==2024.2.2 # via requests cffi==1.16.0 # via sounddevice charset-normalizer==3.3.2 # via requests distro==1.9.0 # via rospkg -docutils==0.20.1 # via catkin-pkg -filelock==3.13.1 # via torch, triton -fsspec==2024.2.0 # via torch -idna==3.6 # via requests -jinja2==3.1.3 # via torch +docutils==0.21.2 # via catkin-pkg +filelock==3.14.0 # via torch, triton +fsspec==2024.3.1 # via torch +idna==3.7 # via requests +jinja2==3.1.4 # via torch llvmlite==0.42.0 # via numba markupsafe==2.1.5 # via jinja2 more-itertools==10.2.0 # via openai-whisper mpmath==1.3.0 # via sympy -networkx==3.2.1 # via torch -numba==0.59.0 # via openai-whisper +networkx==3.3 # via torch +numba==0.59.1 # via openai-whisper numpy==1.26.4 # via numba, openai-whisper nvidia-cublas-cu12==12.1.3.1 # via nvidia-cudnn-cu12, nvidia-cusolver-cu12, torch nvidia-cuda-cupti-cu12==12.1.105 # via torch @@ -27,16 +24,16 @@ nvidia-cufft-cu12==11.0.2.54 # via torch nvidia-curand-cu12==10.3.2.106 # via torch nvidia-cusolver-cu12==11.4.5.107 # via torch nvidia-cusparse-cu12==12.1.0.106 # via nvidia-cusolver-cu12, torch -nvidia-nccl-cu12==2.19.3 # via torch -nvidia-nvjitlink-cu12==12.4.99 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12 +nvidia-nccl-cu12==2.20.5 # via torch +nvidia-nvjitlink-cu12==12.4.127 # via nvidia-cusolver-cu12, nvidia-cusparse-cu12 nvidia-nvtx-cu12==12.1.105 # via torch openai-whisper==20231117 # via -r requirements.in pyaudio==0.2.13 # via -r requirements.in -pycparser==2.21 # via cffi +pycparser==2.22 # via cffi pyparsing==3.1.2 # via catkin-pkg python-dateutil==2.9.0.post0 # via catkin-pkg pyyaml==6.0.1 # via -r requirements.in, rospkg -regex==2023.12.25 # via tiktoken +regex==2024.4.28 # via tiktoken requests==2.31.0 # via speechrecognition, tiktoken rospkg==1.5.0 # via -r requirements.in six==1.16.0 # via python-dateutil @@ -44,10 +41,10 @@ sounddevice==0.4.6 # via -r requirements.in speechrecognition==3.10.0 # via -r requirements.in sympy==1.12 # via torch tiktoken==0.6.0 # via openai-whisper -torch==2.2.1 # via openai-whisper -tqdm==4.66.2 # via openai-whisper -triton==2.2.0 # via openai-whisper, torch -typing-extensions==4.10.0 # via torch +torch==2.3.0 # via openai-whisper +tqdm==4.66.4 # via openai-whisper +triton==2.3.0 # via openai-whisper, torch +typing-extensions==4.11.0 # via torch urllib3==2.2.1 # via requests # The following packages are considered to be unsafe in a requirements file: diff --git a/common/speech/lasr_speech_recognition_whisper/scripts/list_microphones.py b/common/speech/lasr_speech_recognition_whisper/scripts/list_microphones.py index f57e4e6a1..3b97b16c3 100644 --- 
a/common/speech/lasr_speech_recognition_whisper/scripts/list_microphones.py +++ b/common/speech/lasr_speech_recognition_whisper/scripts/list_microphones.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 import speech_recognition as sr + microphones = enumerate(sr.Microphone.list_microphone_names()) -print('\nAvailable microphones:') +print("\nAvailable microphones:") for index, name in microphones: print(f"[{index}] {name}") diff --git a/common/speech/lasr_speech_recognition_whisper/setup.py b/common/speech/lasr_speech_recognition_whisper/setup.py index 5dbf58a5f..ee0404ee1 100644 --- a/common/speech/lasr_speech_recognition_whisper/setup.py +++ b/common/speech/lasr_speech_recognition_whisper/setup.py @@ -1,12 +1,10 @@ - #!/usr/bin/env python3 from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['lasr_speech_recognition_whisper'], - package_dir={'': 'src'} + packages=["lasr_speech_recognition_whisper"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/__init__.py b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/__init__.py index 6a765f0ac..85b18fb9b 100644 --- a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/__init__.py +++ b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/__init__.py @@ -1,4 +1,12 @@ # from .collector import AbstractPhraseCollector, AudioTopicPhraseCollector, MicrophonePhraseCollector, RecognizerPhraseCollector -from .collector import AbstractPhraseCollector, MicrophonePhraseCollector, RecognizerPhraseCollector -from .worker import SpeechRecognitionWorker, SpeechRecognitionToStdout, SpeechRecognitionToTopic +from .collector import ( + AbstractPhraseCollector, + MicrophonePhraseCollector, + RecognizerPhraseCollector, +) +from .worker import ( + SpeechRecognitionWorker, + SpeechRecognitionToStdout, + SpeechRecognitionToTopic, +) from .cache import load_model diff --git a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/bytesfifo.py b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/bytesfifo.py index a5e705131..1f86b7ffc 100644 --- a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/bytesfifo.py +++ b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/bytesfifo.py @@ -1,14 +1,15 @@ import io + class BytesFIFO(object): """ A FIFO that can store a fixed number of bytes. https://github.com/hbock/byte-fifo/blob/master/fifo.py """ - + def __init__(self, init_size): - """ Create a FIFO of ``init_size`` bytes. """ - self._buffer = io.BytesIO(b"\x00"*init_size) + """Create a FIFO of ``init_size`` bytes.""" + self._buffer = io.BytesIO(b"\x00" * init_size) self._size = init_size self._filled = 0 self._read_ptr = 0 @@ -17,13 +18,13 @@ def __init__(self, init_size): def read(self, size=-1): """ Read at most ``size`` bytes from the FIFO. - + If less than ``size`` bytes are available, or ``size`` is negative, return all remaining bytes. 
""" if size < 0: size = self._filled - + # Go to read pointer self._buffer.seek(self._read_ptr) @@ -32,7 +33,7 @@ def read(self, size=-1): contig = self._size - self._read_ptr contig_read = min(contig, size) - ret = self._buffer.read(contig_read) + ret = self._buffer.read(contig_read) self._read_ptr += contig_read if contig_read < size: leftover_size = size - contig_read @@ -41,13 +42,13 @@ def read(self, size=-1): self._read_ptr = leftover_size self._filled -= size - + return ret def write(self, data): """ Write as many bytes of ``data`` as are free in the FIFO. - + If less than ``len(data)`` bytes are free, write as many as can be written. Returns the number of bytes written. """ @@ -67,7 +68,7 @@ def write(self, data): if contig < write_size: self._buffer.seek(0) self._buffer.write(data[contig_write:write_size]) - #self._buffer.write(buffer(data, contig_write, write_size - contig_write)) + # self._buffer.write(buffer(data, contig_write, write_size - contig_write)) self._write_ptr = write_size - contig_write self._filled += write_size @@ -75,33 +76,33 @@ def write(self, data): return write_size def flush(self): - """ Flush all data from the FIFO. """ + """Flush all data from the FIFO.""" self._filled = 0 self._read_ptr = 0 self._write_ptr = 0 - + def empty(self): - """ Return ```True``` if FIFO is empty. """ + """Return ```True``` if FIFO is empty.""" return self._filled == 0 def full(self): - """ Return ``True`` if FIFO is full. """ + """Return ``True`` if FIFO is full.""" return self._filled == self._size def free(self): - """ Return the number of bytes that can be written to the FIFO. """ + """Return the number of bytes that can be written to the FIFO.""" return self._size - self._filled def capacity(self): - """ Return the total space allocated for this FIFO. """ + """Return the total space allocated for this FIFO.""" return self._size def __len__(self): - """ Return the amount of data filled in FIFO """ + """Return the amount of data filled in FIFO""" return self._filled def __nonzero__(self): - """ Return ```True``` if the FIFO is not empty. """ + """Return ```True``` if the FIFO is not empty.""" return self._filled > 0 def resize(self, new_size): @@ -117,8 +118,10 @@ def resize(self, new_size): raise ValueError("Cannot resize to zero or less bytes.") if new_size < self._filled: - raise ValueError("Cannot contract FIFO to less than {} bytes, " - "or data will be lost.".format(self._filled)) + raise ValueError( + "Cannot contract FIFO to less than {} bytes, " + "or data will be lost.".format(self._filled) + ) # original data is non-contiguous. 
we need to copy old data, # re-write to the beginning of the buffer, and re-sync @@ -130,5 +133,5 @@ def resize(self, new_size): self._filled = len(old_data) self._read_ptr = 0 self._write_ptr = self._filled - - self._size = new_size \ No newline at end of file + + self._size = new_size diff --git a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/collector.py b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/collector.py index b5a2e8750..ae15a5347 100644 --- a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/collector.py +++ b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/collector.py @@ -7,59 +7,63 @@ # from .source import AudioTopic + class AbstractPhraseCollector(ABC): - ''' + """ Supertype holding a queue of audio data representing a phrase - ''' + """ data: Queue[bytes] = Queue() @abstractmethod def start(self): - ''' + """ Start collecting phrases - ''' + """ pass @abstractmethod def stop(self): - ''' + """ Stop collecting phrases - ''' + """ pass @abstractmethod def sample_rate(self): - ''' + """ Sample rate of the data - ''' + """ pass @abstractmethod def sample_width(self): - ''' + """ Sample width of the data - ''' + """ pass + class RecognizerPhraseCollector(AbstractPhraseCollector): - ''' + """ Collect phrases using a SoundRecognition Recognizer This will monitor energy levels on the input and only capture when a certain threshold of activity is met. - ''' + """ _recorder: sr.Recognizer _phrase_time_limit: float def _record_callback(self, _, audio: sr.AudioData) -> None: - ''' + """ Collect raw audio data from the microphone - ''' + """ self.data.put(audio.get_raw_data()) - def __init__(self, energy_threshold: int = 500, phrase_time_limit: float = 2) -> None: + def __init__( + self, energy_threshold: int = 500, phrase_time_limit: float = 2 + ) -> None: super().__init__() self._recorder = sr.Recognizer() self._recorder.dynamic_energy_threshold = False @@ -68,14 +72,16 @@ def __init__(self, energy_threshold: int = 500, phrase_time_limit: float = 2) -> @abstractmethod def adjust_for_noise(self, source: sr.AudioSource): - rospy.loginfo('Adjusting for background noise...') + rospy.loginfo("Adjusting for background noise...") with source: self._recorder.adjust_for_ambient_noise(source) @abstractmethod - def start(self, source: sr.AudioSource): - rospy.loginfo('Started source listen thread') - self._stopper = self._recorder.listen_in_background(source, self._record_callback, phrase_time_limit=self._phrase_time_limit) + def start(self, source: sr.AudioSource): + rospy.loginfo("Started source listen thread") + self._stopper = self._recorder.listen_in_background( + source, self._record_callback, phrase_time_limit=self._phrase_time_limit + ) def stop(self): self._stopper() @@ -86,14 +92,20 @@ def sample_rate(self): def sample_width(self): return self._source.SAMPLE_WIDTH + class MicrophonePhraseCollector(RecognizerPhraseCollector): - ''' + """ Collect phrases from the default microphone - ''' + """ _source: sr.Microphone - def __init__(self, energy_threshold: int = 500, phrase_time_limit: float = 2, device_index: int = None) -> None: + def __init__( + self, + energy_threshold: int = 500, + phrase_time_limit: float = 2, + device_index: int = None, + ) -> None: self._source = sr.Microphone(device_index=device_index, sample_rate=16000) super().__init__(energy_threshold, phrase_time_limit) @@ -103,6 +115,7 @@ def adjust_for_noise(self): def start(self): 
return super().start(self._source) + # class AudioTopicPhraseCollector(RecognizerPhraseCollector): # ''' # Collect phrases from an audio topic diff --git a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/source.py b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/source.py index a1a2e3efd..dd235ceb2 100644 --- a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/source.py +++ b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/source.py @@ -6,20 +6,21 @@ from .bytesfifo import BytesFIFO + class AudioTopic(sr.AudioSource): - ''' + """ Use a ROS topic as an AudioSource - ''' + """ _topic: str _sub: rospy.Subscriber - def __init__(self, topic: str, chunk_size = 1024) -> None: + def __init__(self, topic: str, chunk_size=1024) -> None: self._topic = topic - config: AudioInfo = rospy.wait_for_message(f'{topic}/audio_info', AudioInfo) - assert config.coding_format == 'wave', "Expected Wave audio format" - assert config.sample_format == 'S16LE', "Expected sample format S16LE" + config: AudioInfo = rospy.wait_for_message(f"{topic}/audio_info", AudioInfo) + assert config.coding_format == "wave", "Expected Wave audio format" + assert config.sample_format == "S16LE", "Expected sample format S16LE" rospy.loginfo(config) self.SAMPLE_WIDTH = pyaudio.get_sample_size(pyaudio.paInt16) @@ -29,26 +30,28 @@ def __init__(self, topic: str, chunk_size = 1024) -> None: self.stream = None def __enter__(self): - ''' + """ Start stream when entering with: block - ''' + """ - assert self.stream is None, "This audio source is already inside a context manager" - self.stream = BytesFIFO(1024 * 10) # 10 kB buffer - self._sub = rospy.Subscriber(f'{self._topic}/audio', AudioData, self._read) + assert ( + self.stream is None + ), "This audio source is already inside a context manager" + self.stream = BytesFIFO(1024 * 10) # 10 kB buffer + self._sub = rospy.Subscriber(f"{self._topic}/audio", AudioData, self._read) return self def __exit__(self, exc_type, exc_value, traceback): - ''' + """ Close out stream on exit - ''' + """ self.stream = None self._sub.unregister() - + def _read(self, msg: AudioData) -> None: - ''' + """ Forward raw audio data to queue - ''' + """ self.stream.write(msg.data) diff --git a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/worker.py b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/worker.py index f5f605891..a560dc798 100644 --- a/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/worker.py +++ b/common/speech/lasr_speech_recognition_whisper/src/lasr_speech_recognition_whisper/worker.py @@ -14,21 +14,28 @@ from lasr_speech_recognition_msgs.msg import Transcription + class SpeechRecognitionWorker(ABC): - ''' + """ Collect and run inference on phrases to produce a transcription - ''' + """ _collector: AbstractPhraseCollector _tmp_file: NamedTemporaryFile _model: whisper.Whisper _current_sample: bytes - _phrase_start: datetime + _phrase_start: datetime _maximum_phrase_length: timedelta | None _infer_partial: bool _stopped = True - def __init__(self, collector: AbstractPhraseCollector, model: whisper.Whisper, maximum_phrase_length = timedelta(seconds=3), infer_partial = True) -> None: + def __init__( + self, + collector: AbstractPhraseCollector, + model: whisper.Whisper, + maximum_phrase_length=timedelta(seconds=3), + infer_partial=True, + ) -> None: self._collector = 
collector self._tmp_file = NamedTemporaryFile().name self._model = model @@ -39,15 +46,15 @@ def __init__(self, collector: AbstractPhraseCollector, model: whisper.Whisper, m @abstractmethod def on_phrase(self, phrase: str, finished: bool) -> None: - ''' + """ Handle a partial or complete transcription - ''' + """ pass def _finish_phrase(self): - ''' + """ Complete the current phrase and clear the sample - ''' + """ text = self._perform_inference() if text is not None: @@ -57,46 +64,55 @@ def _finish_phrase(self): self._phrase_start = None def _perform_inference(self): - ''' + """ Run inference on the current sample - ''' - - rospy.loginfo('Processing sample') - audio_data = sr.AudioData(self._current_sample, self._collector.sample_rate(), self._collector.sample_width()) + """ + + rospy.loginfo("Processing sample") + audio_data = sr.AudioData( + self._current_sample, + self._collector.sample_rate(), + self._collector.sample_width(), + ) wav_data = BytesIO(audio_data.get_wav_data()) - with open(self._tmp_file, 'w+b') as f: + with open(self._tmp_file, "w+b") as f: f.write(wav_data.read()) - rospy.loginfo('Running inference') + rospy.loginfo("Running inference") try: - result = self._model.transcribe(self._tmp_file, fp16=torch.cuda.is_available()) + result = self._model.transcribe( + self._tmp_file, fp16=torch.cuda.is_available() + ) except RuntimeError: return None - text = result['text'].strip() + text = result["text"].strip() # Detect and drop garbage - if len(text) == 0 or text.lower() in ['.', 'you', 'thanks for watching!']: + if len(text) == 0 or text.lower() in [".", "you", "thanks for watching!"]: self._phrase_start = None self._current_sample = bytes() - rospy.loginfo('Skipping garbage...') + rospy.loginfo("Skipping garbage...") return None - + return text def _worker(self): - ''' + """ Indefinitely perform inference on the given data - ''' + """ - rospy.loginfo('Started inference worker') + rospy.loginfo("Started inference worker") while not self._stopped: try: # Check whether the current phrase has timed out now = datetime.utcnow() - if self._phrase_start and now - self._phrase_start > self._maximum_phrase_length: - rospy.loginfo('Reached timeout for phrase, ending now.') + if ( + self._phrase_start + and now - self._phrase_start > self._maximum_phrase_length + ): + rospy.loginfo("Reached timeout for phrase, ending now.") self._finish_phrase() # Start / continue phrase if data is coming in @@ -107,7 +123,9 @@ def _worker(self): while not self._collector.data.empty(): self._current_sample += self._collector.data.get() - rospy.loginfo('Received and added more data to current audio sample.') + rospy.loginfo( + "Received and added more data to current audio sample." 
+ ) # Run inference on partial sample if enabled if self._infer_partial: @@ -121,23 +139,23 @@ def _worker(self): except KeyboardInterrupt: self._stopped = True - rospy.loginfo('Worker finished') + rospy.loginfo("Worker finished") def start(self): - ''' + """ Start performing inference on incoming data - ''' - + """ + assert self._stopped, "Already running inference" self._stopped = False self._collector.start() worker_thread = Thread(target=self._worker) worker_thread.start() - + def stop(self): - ''' + """ Stop the worker from running inference - ''' + """ assert not self._stopped, "Not currently running" self._collector.stop() @@ -148,26 +166,35 @@ def stop(self): while not self._collector.data.empty(): self._current_sample += self._collector.data.get() + class SpeechRecognitionToStdout(SpeechRecognitionWorker): - ''' + """ Recognise speech and pass it through to standard output - ''' + """ def on_phrase(self, phrase: str, finished: bool) -> None: - rospy.loginfo('[' + ('x' if finished else ' ') + '] ' + phrase) + rospy.loginfo("[" + ("x" if finished else " ") + "] " + phrase) + class SpeechRecognitionToTopic(SpeechRecognitionToStdout): - ''' + """ Recognise speech and publish it to a topic - ''' + """ _pub: rospy.Publisher - def __init__(self, collector: AbstractPhraseCollector, model: whisper.Whisper, topic: str, maximum_phrase_length=timedelta(seconds=1), infer_partial=True) -> None: + def __init__( + self, + collector: AbstractPhraseCollector, + model: whisper.Whisper, + topic: str, + maximum_phrase_length=timedelta(seconds=1), + infer_partial=True, + ) -> None: super().__init__(collector, model, maximum_phrase_length, infer_partial) - rospy.loginfo(f'Will be publishing transcription to {topic}') + rospy.loginfo(f"Will be publishing transcription to {topic}") self._pub = rospy.Publisher(topic, Transcription, queue_size=5) - + def on_phrase(self, phrase: str, finished: bool) -> None: super().on_phrase(phrase, finished) msg = Transcription() diff --git a/common/speech/lasr_voice/setup.py b/common/speech/lasr_voice/setup.py index c566f672b..5bc7876e6 100644 --- a/common/speech/lasr_voice/setup.py +++ b/common/speech/lasr_voice/setup.py @@ -3,9 +3,6 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup( - packages=['lasr_voice'], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=["lasr_voice"], package_dir={"": "src"}) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/speech/lasr_voice/src/lasr_voice/__init__.py b/common/speech/lasr_voice/src/lasr_voice/__init__.py index 0cf4112e1..cc05d889d 100644 --- a/common/speech/lasr_voice/src/lasr_voice/__init__.py +++ b/common/speech/lasr_voice/src/lasr_voice/__init__.py @@ -1 +1 @@ -from .voice import Voice \ No newline at end of file +from .voice import Voice diff --git a/common/vision/lasr_vision_bodypix/setup.py b/common/vision/lasr_vision_bodypix/setup.py index bf8f95354..765505af1 100644 --- a/common/vision/lasr_vision_bodypix/setup.py +++ b/common/vision/lasr_vision_bodypix/setup.py @@ -4,8 +4,7 @@ from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['lasr_vision_bodypix'], - package_dir={'': 'src'} + packages=["lasr_vision_bodypix"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/vision/lasr_vision_bodypix/src/lasr_vision_bodypix/bodypix.py 
b/common/vision/lasr_vision_bodypix/src/lasr_vision_bodypix/bodypix.py index b1cab9f83..dcd811b89 100644 --- a/common/vision/lasr_vision_bodypix/src/lasr_vision_bodypix/bodypix.py +++ b/common/vision/lasr_vision_bodypix/src/lasr_vision_bodypix/bodypix.py @@ -5,11 +5,13 @@ import numpy as np from PIL import Image +import re import tensorflow as tf from tf_bodypix.api import download_model, load_model, BodyPixModelPaths from sensor_msgs.msg import Image as SensorImage -from lasr_vision_msgs.msg import BodyPixMask, BodyPixPose + +from lasr_vision_msgs.msg import BodyPixMask, BodyPixPose, BodyPixKeypoint from lasr_vision_msgs.srv import BodyPixDetectionRequest, BodyPixDetectionResponse import rospkg @@ -18,89 +20,83 @@ loaded_models = {} r = rospkg.RosPack() + +def snake_to_camel(snake_str): + components = snake_str.split("_") + return components[0] + "".join(x.title() for x in components[1:]) + + +def camel_to_snake(name): + return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower() + + def load_model_cached(dataset: str) -> None: - ''' + """ Load a model into cache - ''' + """ model = None if dataset in loaded_models: model = loaded_models[dataset] else: - if dataset == 'resnet50': + if dataset == "resnet50": name = download_model(BodyPixModelPaths.RESNET50_FLOAT_STRIDE_16) model = load_model(name) - elif dataset == 'mobilenet50': + elif dataset == "mobilenet50": name = download_model(BodyPixModelPaths.MOBILENET_FLOAT_50_STRIDE_16) model = load_model(name) else: model = load_model(dataset) - rospy.loginfo(f'Loaded {dataset} model') + rospy.loginfo(f"Loaded {dataset} model") loaded_models[dataset] = model return model -def detect(request: BodyPixDetectionRequest, debug_publisher: rospy.Publisher | None) -> BodyPixDetectionResponse: - ''' + +def detect( + request: BodyPixDetectionRequest, + debug_publisher: rospy.Publisher = rospy.Publisher( + "/bodypix/debug", SensorImage, queue_size=1 + ), +) -> BodyPixDetectionResponse: + """ Run BodyPix inference on given detection request - ''' + """ # decode the image - rospy.loginfo('Decoding') + rospy.loginfo("Decoding") img = cv2_img.msg_to_cv2_img(request.image_raw) # load model - rospy.loginfo('Loading model') + rospy.loginfo("Loading model") model = load_model_cached(request.dataset) # run inference - rospy.loginfo('Running inference') + rospy.loginfo("Running inference") result = model.predict_single(img) + mask = result.get_mask(threshold=request.confidence) - rospy.loginfo('Inference complete') + rospy.loginfo("Inference complete") # construct masks response masks = [] for mask_request in request.masks: - part_mask = result.get_part_mask(mask=tf.identity(mask), part_names=mask_request.parts).squeeze() + part_mask = result.get_part_mask( + mask=tf.identity(mask), part_names=mask_request.parts + ).squeeze() bodypix_mask = BodyPixMask() - bodypix_mask.mask = part_mask.flatten().astype(bool) + + bodypix_mask.mask = part_mask.flatten().astype(bool).tolist() bodypix_mask.shape = list(part_mask.shape) masks.append(bodypix_mask) # construct poses response and neck coordinates poses = result.get_poses() - rospy.loginfo(str(poses)) - neck_coordinates = [] - for pose in poses: - left_shoulder_keypoint = pose.keypoints.get(5) # 5 is the typical index for left shoulder - right_shoulder_keypoint = pose.keypoints.get(6) # 6 is the typical index for right shoulder - - if left_shoulder_keypoint and right_shoulder_keypoint: - # If both shoulders are detected, calculate neck as midpoint - left_shoulder = left_shoulder_keypoint.position - right_shoulder = right_shoulder_keypoint.position - neck_x = (left_shoulder.x + right_shoulder.x) / 2 - 
neck_y = (left_shoulder.y + right_shoulder.y) / 2 - elif left_shoulder_keypoint: - # Only left shoulder detected, use it as neck coordinate - left_shoulder = left_shoulder_keypoint.position - neck_x = left_shoulder.x - neck_y = left_shoulder.y - elif right_shoulder_keypoint: - # Only right shoulder detected, use it as neck coordinate - right_shoulder = right_shoulder_keypoint.position - neck_x = right_shoulder.x - neck_y = right_shoulder.y - - pose = BodyPixPose() - pose.coord = np.array([neck_x, neck_y]).astype(np.int32) - neck_coordinates.append(pose) - # publish to debug topic if debug_publisher is not None: # create coloured mask with poses from tf_bodypix.draw import draw_poses + coloured_mask = result.get_colored_part_mask(mask).astype(np.uint8) poses = result.get_poses() coloured_mask = draw_poses( @@ -110,8 +106,26 @@ def detect(request: BodyPixDetectionRequest, debug_publisher: rospy.Publisher | skeleton_color=(100, 100, 255), ) debug_publisher.publish(cv2_img.cv2_img_to_msg(coloured_mask)) - + + output_poses = [] + + for pose in poses: + pose_msg = BodyPixPose() + keypoints_msg = [] + + for i, keypoint in pose.keypoints.items(): + + if camel_to_snake(keypoint.part) in request.masks[0].parts: + keypoint_msg = BodyPixKeypoint() + keypoint_msg.xy = [int(keypoint.position.x), int(keypoint.position.y)] + keypoint_msg.score = keypoint.score + keypoint_msg.part = keypoint.part + keypoints_msg.append(keypoint_msg) + + pose_msg.keypoints = keypoints_msg + output_poses.append(pose_msg) + response = BodyPixDetectionResponse() + response.poses = output_poses response.masks = masks - response.poses = neck_coordinates return response diff --git a/common/vision/lasr_vision_deepface/CMakeLists.txt b/common/vision/lasr_vision_deepface/CMakeLists.txt index b22bda630..197253447 100644 --- a/common/vision/lasr_vision_deepface/CMakeLists.txt +++ b/common/vision/lasr_vision_deepface/CMakeLists.txt @@ -19,7 +19,7 @@ find_package(catkin REQUIRED catkin_virtualenv) catkin_python_setup() catkin_generate_virtualenv( INPUT_REQUIREMENTS requirements.in - PYTHON_INTERPRETER python3.10 + PYTHON_INTERPRETER python3.8 ) ################################################ @@ -161,7 +161,8 @@ include_directories( ## in contrast to setup.py, you can choose the destination catkin_install_python(PROGRAMS scripts/create_dataset - examples/relay + examples/relay_recognise + examples/relay_detect_faces examples/greet nodes/service DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} diff --git a/common/vision/lasr_vision_deepface/examples/relay_detect_faces b/common/vision/lasr_vision_deepface/examples/relay_detect_faces new file mode 100644 index 000000000..df08ea88c --- /dev/null +++ b/common/vision/lasr_vision_deepface/examples/relay_detect_faces @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 + +import sys +import rospy +import threading + +from sensor_msgs.msg import Image + +from lasr_vision_msgs.srv import DetectFaces, DetectFacesRequest + +if len(sys.argv) < 2: + print("Usage: rosrun lasr_vision_deepface relay_detect_faces <topic>") + exit() + +listen_topic = sys.argv[1] + +processing = False + + +def detect(image): + global processing + processing = True + rospy.loginfo("Received image message") + + try: + detect_service = rospy.ServiceProxy("/detect_faces", DetectFaces) + req = DetectFacesRequest() + req.image_raw = image + resp = detect_service(req) + print(resp) + except rospy.ServiceException as e: + rospy.logerr("Service call failed: %s" % e) + finally: + processing = False + + +def image_callback(image): + global processing + if processing:
+ return + + t = threading.Thread(target=detect, args=(image,)) + t.start() + + +def listener(): + rospy.init_node("image_listener", anonymous=True) + rospy.wait_for_service("/detect_faces") + rospy.Subscriber(listen_topic, Image, image_callback) + rospy.spin() + + +if __name__ == "__main__": + listener() diff --git a/common/vision/lasr_vision_deepface/examples/relay b/common/vision/lasr_vision_deepface/examples/relay_recognise similarity index 93% rename from common/vision/lasr_vision_deepface/examples/relay rename to common/vision/lasr_vision_deepface/examples/relay_recognise index 5c35ee887..038a4fdb7 100644 --- a/common/vision/lasr_vision_deepface/examples/relay +++ b/common/vision/lasr_vision_deepface/examples/relay_recognise @@ -9,7 +9,7 @@ from sensor_msgs.msg import Image from lasr_vision_msgs.srv import Recognise, RecogniseRequest if len(sys.argv) < 3: - print("Usage: rosrun lase_recognition relay <topic> <dataset>") + print("Usage: rosrun lasr_vision_deepface relay_recognise <topic> <dataset>") exit() listen_topic = sys.argv[1] diff --git a/common/vision/lasr_vision_deepface/nodes/service b/common/vision/lasr_vision_deepface/nodes/service index d6a71384a..424bcaf1c 100644 --- a/common/vision/lasr_vision_deepface/nodes/service +++ b/common/vision/lasr_vision_deepface/nodes/service @@ -2,7 +2,7 @@ import re import rospy -import lasr_vision_deepface as face_recognition +import lasr_vision_deepface.deepface as face_recognition from sensor_msgs.msg import Image from lasr_vision_msgs.srv import ( Recognise, @@ -11,6 +11,9 @@ from lasr_vision_msgs.srv import ( LearnFace, LearnFaceRequest, LearnFaceResponse, + DetectFaces, + DetectFacesRequest, + DetectFacesResponse, ) @@ -21,6 +24,8 @@ DEBUG = rospy.get_param("~debug", False) recognise_debug_publishers = {} learn_face_debug_publishers = {} +detect_faces_debug_publisher = None + if DEBUG: recognise_debug_publisher = rospy.Publisher("/recognise/debug", Image, queue_size=1) learn_face_debug_publisher = rospy.Publisher( @@ -29,19 +34,20 @@ if DEBUG: cropped_face_publisher = rospy.Publisher( "/learn_face/debug/cropped_query_face", Image, queue_size=1 ) + detect_faces_debug_publisher = rospy.Publisher( + "/detect_faces/debug", Image, queue_size=1 + ) -def detect(request: RecogniseRequest) -> RecogniseResponse: +def recognise(request: RecogniseRequest) -> RecogniseResponse: debug_publisher = None similar_face_debug_publisher = None cropped_face_publisher = None if DEBUG: if request.dataset in recognise_debug_publishers: - debug_publisher = recognise_debug_publishers[request.dataset][0] - similar_face_debug_publisher = recognise_debug_publishers[request.dataset][ 1 - ] - cropped_face_publisher = recognise_debug_publisher[request.dataset][2] + debug_publisher, similar_face_debug_publisher, cropped_face_publisher = ( + recognise_debug_publishers[request.dataset] + ) else: topic_name = re.sub(r"[\W_]+", "", request.dataset) debug_publisher = rospy.Publisher( @@ -58,7 +64,7 @@ def detect(request: RecogniseRequest) -> RecogniseResponse: similar_face_debug_publisher, cropped_face_publisher, ) - return face_recognition.detect( + return face_recognition.recognise( request, debug_publisher, similar_face_debug_publisher, cropped_face_publisher ) @@ -83,7 +89,12 @@ def learn_face(request: LearnFaceRequest) -> LearnFaceResponse: return LearnFaceResponse() -rospy.Service("/recognise", Recognise, detect) +def detect_faces(request: DetectFacesRequest) -> DetectFacesResponse: + return face_recognition.detect_faces(request, detect_faces_debug_publisher) + + +rospy.Service("/recognise", Recognise, 
recognise) rospy.Service("/learn_face", LearnFace, learn_face) +rospy.Service("/detect_faces", DetectFaces, detect_faces) rospy.loginfo("Face Recognition service started") rospy.spin() diff --git a/common/vision/lasr_vision_deepface/requirements.in b/common/vision/lasr_vision_deepface/requirements.in index f23404f65..5070bfd5f 100644 --- a/common/vision/lasr_vision_deepface/requirements.in +++ b/common/vision/lasr_vision_deepface/requirements.in @@ -1,2 +1,3 @@ -deepface==0.0.83 +deepface==0.0.91 +tensorflow==2.9.0 numpy>=1.2.1 \ No newline at end of file diff --git a/common/vision/lasr_vision_deepface/requirements.txt b/common/vision/lasr_vision_deepface/requirements.txt index 9d2957353..10bf64cf0 100644 --- a/common/vision/lasr_vision_deepface/requirements.txt +++ b/common/vision/lasr_vision_deepface/requirements.txt @@ -1,65 +1,68 @@ absl-py==2.1.0 # via tensorboard, tensorflow astunparse==1.6.3 # via tensorflow beautifulsoup4==4.12.3 # via gdown -blinker==1.7.0 # via flask -cachetools==5.3.2 # via google-auth -certifi==2023.11.17 # via requests +blinker==1.8.1 # via flask +cachetools==5.3.3 # via google-auth +certifi==2024.2.2 # via requests charset-normalizer==3.3.2 # via requests click==8.1.7 # via flask -deepface==0.0.83 # via -r requirements.in -filelock==3.13.1 # via gdown -fire==0.5.0 # via deepface -flask==3.0.1 # via deepface -flatbuffers==23.5.26 # via tensorflow -gast==0.5.4 # via tensorflow -gdown==5.0.1 # via deepface, retina-face -google-auth==2.27.0 # via google-auth-oauthlib, tensorboard -google-auth-oauthlib==1.2.0 # via tensorboard +deepface==0.0.91 # via -r requirements.in +filelock==3.14.0 # via gdown +fire==0.6.0 # via deepface +flask==3.0.3 # via deepface +flatbuffers==1.12 # via tensorflow +gast==0.4.0 # via tensorflow +gdown==5.1.0 # via deepface, retina-face +google-auth==2.29.0 # via google-auth-oauthlib, tensorboard +google-auth-oauthlib==0.4.6 # via tensorboard google-pasta==0.2.0 # via tensorflow -grpcio==1.60.0 # via tensorboard, tensorflow -gunicorn==21.2.0 # via deepface -h5py==3.10.0 # via tensorflow -idna==3.6 # via requests -itsdangerous==2.1.2 # via flask +grpcio==1.63.0 # via tensorboard, tensorflow +gunicorn==22.0.0 # via deepface +h5py==3.11.0 # via tensorflow +idna==3.7 # via requests +importlib-metadata==7.1.0 # via flask, markdown +itsdangerous==2.2.0 # via flask jinja2==3.1.3 # via flask -keras==2.15.0 # via deepface, mtcnn, tensorflow -libclang==16.0.6 # via tensorflow -markdown==3.5.2 # via tensorboard -markupsafe==2.1.4 # via jinja2, werkzeug -ml-dtypes==0.2.0 # via tensorflow +keras==2.9.0 # via deepface, mtcnn, tensorflow +keras-preprocessing==1.1.2 # via tensorflow +libclang==18.1.1 # via tensorflow +markdown==3.6 # via tensorboard +markupsafe==2.1.5 # via jinja2, werkzeug mtcnn==0.1.1 # via deepface -numpy==1.26.3 # via -r requirements.in, deepface, h5py, ml-dtypes, opencv-python, opt-einsum, pandas, retina-face, tensorboard, tensorflow +numpy==1.24.4 # via -r requirements.in, deepface, h5py, keras-preprocessing, opencv-python, opt-einsum, pandas, retina-face, tensorboard, tensorflow oauthlib==3.2.2 # via requests-oauthlib opencv-python==4.9.0.80 # via deepface, mtcnn, retina-face opt-einsum==3.3.0 # via tensorflow -packaging==23.2 # via gunicorn, tensorflow -pandas==2.2.0 # via deepface -pillow==10.2.0 # via deepface, retina-face -protobuf==4.23.4 # via tensorboard, tensorflow -pyasn1==0.5.1 # via pyasn1-modules, rsa -pyasn1-modules==0.3.0 # via google-auth +packaging==24.0 # via gunicorn, tensorflow +pandas==2.0.3 # via deepface 
+pillow==10.3.0 # via deepface, retina-face +protobuf==3.19.6 # via tensorboard, tensorflow +pyasn1==0.6.0 # via pyasn1-modules, rsa +pyasn1-modules==0.4.0 # via google-auth pysocks==1.7.1 # via requests -python-dateutil==2.8.2 # via pandas -pytz==2023.4 # via pandas -requests[socks]==2.31.0 # via gdown, requests-oauthlib, tensorboard -requests-oauthlib==1.3.1 # via google-auth-oauthlib -retina-face==0.0.14 # via deepface +python-dateutil==2.9.0.post0 # via pandas +pytz==2024.1 # via pandas +requests[socks]==2.31.0 # via deepface, gdown, requests-oauthlib, tensorboard +requests-oauthlib==2.0.0 # via google-auth-oauthlib +retina-face==0.0.17 # via deepface rsa==4.9 # via google-auth -six==1.16.0 # via astunparse, fire, google-pasta, python-dateutil, tensorboard, tensorflow +six==1.16.0 # via astunparse, fire, google-pasta, keras-preprocessing, python-dateutil, tensorflow soupsieve==2.5 # via beautifulsoup4 -tensorboard==2.15.1 # via tensorflow -tensorboard-data-server==0.7.2 # via tensorboard -tensorflow==2.15.0.post1 # via deepface, retina-face -tensorflow-estimator==2.15.0 # via tensorflow -tensorflow-io-gcs-filesystem==0.35.0 # via tensorflow +tensorboard==2.9.1 # via tensorflow +tensorboard-data-server==0.6.1 # via tensorboard +tensorboard-plugin-wit==1.8.1 # via tensorboard +tensorflow==2.9.0 # via -r requirements.in, deepface, retina-face +tensorflow-estimator==2.9.0 # via tensorflow +tensorflow-io-gcs-filesystem==0.34.0 # via tensorflow termcolor==2.4.0 # via fire, tensorflow -tqdm==4.66.1 # via deepface, gdown -typing-extensions==4.9.0 # via tensorflow -tzdata==2023.4 # via pandas -urllib3==2.1.0 # via requests -werkzeug==3.0.1 # via flask, tensorboard -wheel==0.42.0 # via astunparse -wrapt==1.14.1 # via tensorflow +tqdm==4.66.4 # via deepface, gdown +typing-extensions==4.11.0 # via tensorflow +tzdata==2024.1 # via pandas +urllib3==2.2.1 # via requests +werkzeug==3.0.2 # via flask, tensorboard +wheel==0.43.0 # via astunparse, tensorboard +wrapt==1.16.0 # via tensorflow +zipp==3.18.1 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: # setuptools diff --git a/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py b/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py index ce9c4771b..fe822e94b 100644 --- a/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py +++ b/common/vision/lasr_vision_deepface/src/lasr_vision_deepface/deepface.py @@ -9,42 +9,26 @@ import pandas as pd from lasr_vision_msgs.msg import Detection -from lasr_vision_msgs.srv import RecogniseRequest, RecogniseResponse +from lasr_vision_msgs.srv import ( + RecogniseRequest, + RecogniseResponse, + DetectFacesRequest, + DetectFacesResponse, +) from sensor_msgs.msg import Image +from typing import Union + DATASET_ROOT = os.path.join( rospkg.RosPack().get_path("lasr_vision_deepface"), "datasets" ) -Mat = int # np.typing.NDArray[np.uint8] - - -def detect_face(cv_im: Mat) -> Mat | None: - try: - faces = DeepFace.extract_faces( - cv_im, - target_size=(224, 244), - detector_backend="mtcnn", - enforce_detection=True, - ) - except ValueError: - return None - facial_area = faces[0]["facial_area"] - x, y, w, h = facial_area["x"], facial_area["y"], facial_area["w"], facial_area["h"] - - # add padding to the face - x -= 10 - y -= 10 - w += 20 - h += 20 - - return cv_im[:][y : y + h, x : x + w] +Mat = np.ndarray def create_image_collage(images, output_size=(640, 480)): - # Calculate grid dimensions num_images = len(images) rows = 
int(np.sqrt(num_images)) @@ -75,12 +59,34 @@ def create_image_collage(images, output_size=(640, 480)): return grid_image +def _extract_face(cv_im: Mat) -> Union[Mat, None]: + try: + faces = DeepFace.extract_faces( + cv_im, + target_size=(224, 244), + detector_backend="mtcnn", + enforce_detection=True, + ) + except ValueError: + return None + facial_area = faces[0]["facial_area"] + x, y, w, h = facial_area["x"], facial_area["y"], facial_area["w"], facial_area["h"] + + # add padding to the face + x -= 10 + y -= 10 + w += 20 + h += 20 + + return cv_im[:][y : y + h, x : x + w] + + def create_dataset( topic: str, dataset: str, name: str, size: int, - debug_publisher: rospy.Publisher | None, + debug_publisher: Union[rospy.Publisher, None], ) -> None: dataset_path = os.path.join(DATASET_ROOT, dataset, name) if not os.path.exists(dataset_path): @@ -91,7 +97,7 @@ def create_dataset( for i in range(size): img_msg = rospy.wait_for_message(topic, Image) cv_im = cv2_img.msg_to_cv2_img(img_msg) - face_cropped_cv_im = detect_face(cv_im) + face_cropped_cv_im = _extract_face(cv_im) if face_cropped_cv_im is None: continue cv2.imwrite(os.path.join(dataset_path, f"{name}_{i+1}.png"), face_cropped_cv_im) # type: ignore @@ -112,11 +118,11 @@ def create_dataset( ) -def detect( +def recognise( request: RecogniseRequest, - debug_publisher: rospy.Publisher | None, - debug_inference_pub: rospy.Publisher | None, - cropped_detect_pub: rospy.Publisher | None, + debug_publisher: Union[rospy.Publisher, None], + debug_inference_pub: Union[rospy.Publisher, None], + cropped_detect_pub: Union[rospy.Publisher, None], ) -> RecogniseResponse: # Decode the image rospy.loginfo("Decoding") @@ -185,3 +191,51 @@ def detect( ) return response + + +def detect_faces( + request: DetectFacesRequest, + debug_publisher: Union[rospy.Publisher, None], +) -> DetectFacesResponse: + + cv_im = cv2_img.msg_to_cv2_img(request.image_raw) + + response = DetectFacesResponse() + + try: + faces = DeepFace.extract_faces( + cv_im, detector_backend="mtcnn", enforce_detection=True + ) + except ValueError: + return response + + for i, face in enumerate(faces): + detection = Detection() + detection.name = f"face_{i}" + x, y, w, h = ( + face["facial_area"]["x"], + face["facial_area"]["y"], + face["facial_area"]["w"], + face["facial_area"]["h"], + ) + detection.xywh = [x, y, w, h] + detection.confidence = 1.0 + response.detections.append(detection) + + # Draw bounding boxes and labels for debugging + cv2.rectangle(cv_im, (x, y), (x + w, y + h), (0, 0, 255), 2) + cv2.putText( + cv_im, + f"face_{i}", + (x, y - 5), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + (0, 255, 0), + 2, + ) + + # publish to debug topic + if debug_publisher is not None: + debug_publisher.publish(cv2_img.cv2_img_to_msg(cv_im)) + + return response diff --git a/common/vision/lasr_vision_feature_extraction/setup.py b/common/vision/lasr_vision_feature_extraction/setup.py index 9df7d2505..a9cae4108 100644 --- a/common/vision/lasr_vision_feature_extraction/setup.py +++ b/common/vision/lasr_vision_feature_extraction/setup.py @@ -4,8 +4,7 @@ from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['lasr_vision_feature_extraction'], - package_dir={'': 'src'} + packages=["lasr_vision_feature_extraction"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/__init__.py 
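With `detect_faces` in place, a minimal client for the new service could look like the sketch below. The service name `/detect_faces` and the camera topic are assumptions, since the node wiring is not part of this diff; only the request and response fields come from `DetectFaces.srv`.

```python
#!/usr/bin/env python3
import rospy
from sensor_msgs.msg import Image
from lasr_vision_msgs.srv import DetectFaces, DetectFacesRequest

rospy.init_node("detect_faces_client")
rospy.wait_for_service("/detect_faces")  # assumed service name
detect_faces = rospy.ServiceProxy("/detect_faces", DetectFaces)

img = rospy.wait_for_message("/camera/image_raw", Image)  # assumed camera topic
response = detect_faces(DetectFacesRequest(image_raw=img))

# Each detection carries a synthetic name ("face_0", ...) and an xywh bbox.
for det in response.detections:
    x, y, w, h = det.xywh
    rospy.loginfo(f"{det.name}: ({x}, {y}) {w}x{h} conf={det.confidence}")
```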
b/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/__init__.py index 19c4e1cc7..faaccda12 100644 --- a/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/__init__.py +++ b/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/__init__.py @@ -1,5 +1,11 @@ -from lasr_vision_feature_extraction.categories_and_attributes import CategoriesAndAttributes, CelebAMaskHQCategoriesAndAttributes -from lasr_vision_feature_extraction.image_with_masks_and_attributes import ImageWithMasksAndAttributes, ImageOfPerson +from lasr_vision_feature_extraction.categories_and_attributes import ( + CategoriesAndAttributes, + CelebAMaskHQCategoriesAndAttributes, +) +from lasr_vision_feature_extraction.image_with_masks_and_attributes import ( + ImageWithMasksAndAttributes, + ImageOfPerson, +) import numpy as np import cv2 @@ -19,7 +25,8 @@ def X2conv(in_channels, out_channels, inner_channels=None): nn.ReLU(inplace=True), nn.Conv2d(inner_channels, out_channels, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(out_channels), - nn.ReLU(inplace=True)) + nn.ReLU(inplace=True), + ) return down_conv @@ -32,7 +39,12 @@ def __init__(self, in_channels, skip_channels, out_channels): def forward(self, x_copy, x): x = self.up(x) if x.size(2) != x_copy.size(2) or x.size(3) != x_copy.size(3): - x = F.interpolate(x, size=(x_copy.size(2), x_copy.size(3)), mode='bilinear', align_corners=True) + x = F.interpolate( + x, + size=(x_copy.size(2), x_copy.size(3)), + mode="bilinear", + align_corners=True, + ) x = torch.cat((x_copy, x), dim=1) x = self.up_conv(x) return x @@ -42,11 +54,17 @@ class UNetWithResnetEncoder(nn.Module): def __init__(self, num_classes, in_channels=3, freeze_bn=False, sigmoid=True): super(UNetWithResnetEncoder, self).__init__() self.sigmoid = sigmoid - self.resnet = models.resnet34(pretrained=False) # Initialize with a ResNet model + self.resnet = models.resnet34( + pretrained=False + ) # Initialize with a ResNet model if in_channels != 3: - self.resnet.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False) + self.resnet.conv1 = nn.Conv2d( + in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False + ) - self.encoder1 = nn.Sequential(self.resnet.conv1, self.resnet.bn1, self.resnet.relu) + self.encoder1 = nn.Sequential( + self.resnet.conv1, self.resnet.bn1, self.resnet.relu + ) self.encoder2 = self.resnet.layer1 self.encoder3 = self.resnet.layer2 self.encoder4 = self.resnet.layer3 @@ -84,7 +102,9 @@ def forward(self, x): x = self.up2(x3, x) x = self.up3(x2, x) x = self.up4(x1, x) - x = F.interpolate(x, size=(x.size(2) * 2, x.size(3) * 2), mode='bilinear', align_corners=True) + x = F.interpolate( + x, size=(x.size(2) * 2, x.size(3) * 2), mode="bilinear", align_corners=True + ) x = self.final_conv(x) @@ -110,7 +130,9 @@ def __init__(self, num_labels, input_channels=3, sigmoid=True): self.sigmoid = sigmoid if input_channels != 3: - self.model.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False) + self.model.conv1 = nn.Conv2d( + input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False + ) num_ftrs = self.model.fc.in_features @@ -124,7 +146,9 @@ def forward(self, x): class CombinedModel(nn.Module): - def __init__(self, segment_model: nn.Module, predict_model: nn.Module, cat_layers: int=None): + def __init__( + self, segment_model: nn.Module, predict_model: nn.Module, cat_layers: int = None + ): super(CombinedModel, self).__init__() self.segment_model = 
segment_model self.predict_model = predict_model @@ -135,7 +159,7 @@ def forward(self, x: torch.Tensor): seg_masks = self.segment_model(x) seg_masks_ = seg_masks.detach() if self.cat_layers: - seg_masks_ = seg_masks_[:, 0:self.cat_layers] + seg_masks_ = seg_masks_[:, 0 : self.cat_layers] x = torch.cat((x, seg_masks_), dim=1) else: x = torch.cat((x, seg_masks_), dim=1) @@ -149,29 +173,43 @@ def unfreeze_segment_model(self): self.segment_model.train() - class Predictor: - def __init__(self, model: torch.nn.Module, device: torch.device, categories_and_attributes: CategoriesAndAttributes): + def __init__( + self, + model: torch.nn.Module, + device: torch.device, + categories_and_attributes: CategoriesAndAttributes, + ): self.model = model self.device = device self.categories_and_attributes = categories_and_attributes self._thresholds_mask: list[float] = [] self._thresholds_pred: list[float] = [] - for key in sorted(list(self.categories_and_attributes.merged_categories.keys())): - self._thresholds_mask.append(self.categories_and_attributes.thresholds_mask[key]) + for key in sorted( + list(self.categories_and_attributes.merged_categories.keys()) + ): + self._thresholds_mask.append( + self.categories_and_attributes.thresholds_mask[key] + ) for attribute in self.categories_and_attributes.attributes: if attribute not in self.categories_and_attributes.avoided_attributes: - self._thresholds_pred.append(self.categories_and_attributes.thresholds_pred[attribute]) + self._thresholds_pred.append( + self.categories_and_attributes.thresholds_pred[attribute] + ) def predict(self, rgb_image: np.ndarray) -> ImageWithMasksAndAttributes: mean_val = np.mean(rgb_image) - image_tensor = torch.from_numpy(rgb_image).permute(2, 0, 1).unsqueeze(0).float() / 255.0 + image_tensor = ( + torch.from_numpy(rgb_image).permute(2, 0, 1).unsqueeze(0).float() / 255.0 + ) pred_masks, pred_classes = self.model(image_tensor) # Apply binary erosion and dilation to the masks pred_masks = binary_erosion_dilation( - pred_masks, thresholds=self._thresholds_mask, - erosion_iterations=1, dilation_iterations=1 + pred_masks, + thresholds=self._thresholds_mask, + erosion_iterations=1, + dilation_iterations=1, ) pred_masks = pred_masks.detach().squeeze(0).numpy().astype(np.uint8) mask_list = [pred_masks[i, :, :] for i in range(pred_masks.shape[0])] @@ -190,22 +228,35 @@ def predict(self, rgb_image: np.ndarray) -> ImageWithMasksAndAttributes: attribute_dict[attribute] = class_list_iter.__next__() for attribute in self.categories_and_attributes.mask_labels: attribute_dict[attribute] = class_list_iter.__next__() - image_obj = ImageWithMasksAndAttributes(rgb_image, mask_dict, attribute_dict, self.categories_and_attributes) + image_obj = ImageWithMasksAndAttributes( + rgb_image, mask_dict, attribute_dict, self.categories_and_attributes + ) return image_obj def load_face_classifier_model(): cat_layers = CelebAMaskHQCategoriesAndAttributes.merged_categories.keys().__len__() segment_model = UNetWithResnetEncoder(num_classes=cat_layers) - predictions = len(CelebAMaskHQCategoriesAndAttributes.attributes) - len( - CelebAMaskHQCategoriesAndAttributes.avoided_attributes) + len(CelebAMaskHQCategoriesAndAttributes.mask_labels) - predict_model = MultiLabelResNet(num_labels=predictions, input_channels=cat_layers + 3) + predictions = ( + len(CelebAMaskHQCategoriesAndAttributes.attributes) + - len(CelebAMaskHQCategoriesAndAttributes.avoided_attributes) + + len(CelebAMaskHQCategoriesAndAttributes.mask_labels) + ) + predict_model = MultiLabelResNet( + 
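The sizes fed into `MultiLabelResNet` fall out of the category bookkeeping: one output per non-avoided attribute plus one per mask label, and one input channel per merged mask category on top of RGB. A sketch of the arithmetic, assuming the CelebAMask-HQ tables defined in `categories_and_attributes.py`:

```python
from lasr_vision_feature_extraction.categories_and_attributes import (
    CelebAMaskHQCategoriesAndAttributes as C,
)

# Outputs: 40 attributes - 21 avoided + 1 mask label ("hair") = 20 predictions.
num_labels = len(C.attributes) - len(C.avoided_attributes) + len(C.mask_labels)

# Inputs: one segmentation channel per merged category, concatenated with RGB
# (this is what CombinedModel.forward stacks before the prediction head).
cat_layers = len(C.merged_categories)
input_channels = cat_layers + 3
```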
num_labels=predictions, input_channels=cat_layers + 3 + ) model = CombinedModel(segment_model, predict_model, cat_layers=cat_layers) model.eval() r = rospkg.RosPack() - model, _, _, _ = load_torch_model(model, None, path=path.join(r.get_path( - "lasr_vision_feature_extraction"), "models", "model.pth"), cpu_only=True) + model, _, _, _ = load_torch_model( + model, + None, + path=path.join( + r.get_path("lasr_vision_feature_extraction"), "models", "model.pth" + ), + cpu_only=True, + ) return model @@ -218,7 +269,12 @@ def pad_image_to_even_dims(image): width_pad = 0 if width % 2 == 0 else 1 # Pad the image. Pad the bottom and right side of the image - padded_image = np.pad(image, ((0, height_pad), (0, width_pad), (0, 0)), mode='constant', constant_values=0) + padded_image = np.pad( + image, + ((0, height_pad), (0, width_pad), (0, 0)), + mode="constant", + constant_values=0, + ) return padded_image @@ -226,7 +282,7 @@ def pad_image_to_even_dims(image): def extract_mask_region(frame, mask, expand_x=0.5, expand_y=0.5): """ Extracts the face region from the image and expands the region by the specified amount. - + :param frame: The source image. :param mask: The mask with the face part. :param expand_x: The percentage to expand the width of the bounding box. @@ -250,12 +306,14 @@ def extract_mask_region(frame, mask, expand_x=0.5, expand_y=0.5): new_w = min(frame.shape[1] - x, new_w) new_h = min(frame.shape[0] - y, new_h) - face_region = frame[y:y+int(new_h), x:x+int(new_w)] + face_region = frame[y : y + int(new_h), x : x + int(new_w)] return face_region return None -def predict_frame(head_frame, torso_frame, full_frame, head_mask, torso_mask, predictor): +def predict_frame( + head_frame, torso_frame, full_frame, head_mask, torso_mask, predictor +): full_frame = cv2.cvtColor(full_frame, cv2.COLOR_BGR2RGB) head_frame = cv2.cvtColor(head_frame, cv2.COLOR_BGR2RGB) torso_frame = cv2.cvtColor(torso_frame, cv2.COLOR_BGR2RGB) @@ -270,18 +328,20 @@ def predict_frame(head_frame, torso_frame, full_frame, head_mask, torso_mask, pr def load_torch_model(model, optimizer, path="model.pth", cpu_only=False): if cpu_only: - checkpoint = torch.load(path, map_location=torch.device('cpu')) + checkpoint = torch.load(path, map_location=torch.device("cpu")) else: checkpoint = torch.load(path) - model.load_state_dict(checkpoint['model_state_dict']) + model.load_state_dict(checkpoint["model_state_dict"]) if optimizer is not None: - optimizer.load_state_dict(checkpoint['optimizer_state_dict']) - epoch = checkpoint['epoch'] - best_val_loss = checkpoint.get('best_val_loss', float('inf')) + optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + epoch = checkpoint["epoch"] + best_val_loss = checkpoint.get("best_val_loss", float("inf")) return model, optimizer, epoch, best_val_loss -def binary_erosion_dilation(tensor, thresholds, erosion_iterations=1, dilation_iterations=1): +def binary_erosion_dilation( + tensor, thresholds, erosion_iterations=1, dilation_iterations=1 +): """ Apply binary threshold, followed by erosion and dilation to a tensor. 
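`binary_erosion_dilation` implements the morphology with plain 3x3 grouped convolutions rather than an image-processing library call. A single-channel sketch of the idea (the real code runs one convolution group per mask channel):

```python
import torch
import torch.nn.functional as F

kernel = torch.ones(1, 1, 3, 3)  # 3x3 structuring element

def erode(mask: torch.Tensor) -> torch.Tensor:
    # A pixel survives only if its full 3x3 neighbourhood is set (sum == 9).
    return (F.conv2d(mask, kernel, padding=1) == 9).float()

def dilate(mask: torch.Tensor) -> torch.Tensor:
    # A pixel turns on if anything in its 3x3 neighbourhood is set.
    return (F.conv2d(mask, kernel, padding=1) > 0).float()

m = torch.zeros(1, 1, 5, 5)
m[0, 0, 1:4, 1:4] = 1.0              # 3x3 blob
assert erode(m).sum() == 1           # erosion shrinks it to the centre pixel
assert dilate(erode(m)).sum() == 9   # dilation grows it back to the 3x3 blob
```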
@@ -294,19 +354,20 @@ def binary_erosion_dilation(tensor, thresholds, erosion_iterations=1, dilation_i # Check if the length of thresholds matches the number of channels if len(thresholds) != tensor.size(1): - raise ValueError( - "Length of thresholds must match the number of channels") + raise ValueError("Length of thresholds must match the number of channels") # Binary thresholding for i, threshold in enumerate(thresholds): - tensor[:, i] = (tensor[:, i] > threshold/2).float() / 4 + tensor[:, i] = (tensor[:, i] > threshold / 2).float() / 4 tensor[:, i] += (tensor[:, i] > threshold).float() tensor[:, i] /= max(tensor[:, i].clone()) # Define the 3x3 kernel for erosion and dilation - kernel = torch.tensor([[1, 1, 1], - [1, 1, 1], - [1, 1, 1]], dtype=torch.float32).unsqueeze(0).unsqueeze(0) + kernel = ( + torch.tensor([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=torch.float32) + .unsqueeze(0) + .unsqueeze(0) + ) # Replicate the kernel for each channel kernel = kernel.repeat(tensor.size(1), 1, 1, 1).to(tensor.device) @@ -320,8 +381,7 @@ def binary_erosion_dilation(tensor, thresholds, erosion_iterations=1, dilation_i # Dilation for _ in range(dilation_iterations): # 3x3 convolution with groups - tensor_dilated = F.conv2d( - tensor, kernel, padding=1, groups=tensor.size(1)) + tensor_dilated = F.conv2d(tensor, kernel, padding=1, groups=tensor.size(1)) # Combine the original and dilated tensors tensor = torch.clamp(tensor + tensor_dilated, 0, 1) @@ -330,8 +390,9 @@ def binary_erosion_dilation(tensor, thresholds, erosion_iterations=1, dilation_i def median_color_float(rgb_image: torch.Tensor, mask: torch.Tensor) -> torch.Tensor: mask = mask.bool() - median_colors = torch.zeros((rgb_image.size(0), mask.size( - 1), rgb_image.size(1)), device=rgb_image.device) + median_colors = torch.zeros( + (rgb_image.size(0), mask.size(1), rgb_image.size(1)), device=rgb_image.device + ) for i in range(rgb_image.size(0)): for j in range(mask.size(1)): for k in range(rgb_image.size(1)): @@ -342,4 +403,3 @@ def median_color_float(rgb_image: torch.Tensor, mask: torch.Tensor) -> torch.Ten median_value = torch.tensor(0.0, device=rgb_image.device) median_colors[i, j, k] = median_value return median_colors # / 255.0 - diff --git a/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/categories_and_attributes.py b/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/categories_and_attributes.py index d03f51cf9..d05a5ccdb 100644 --- a/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/categories_and_attributes.py +++ b/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/categories_and_attributes.py @@ -11,13 +11,44 @@ class CategoriesAndAttributes: class CelebAMaskHQCategoriesAndAttributes(CategoriesAndAttributes): - mask_categories = ['cloth', 'r_ear', 'hair', 'l_brow', 'l_eye', 'l_lip', 'mouth', 'neck', 'nose', 'r_brow', - 'r_ear', 'r_eye', 'skin', 'u_lip', 'hat', 'l_ear', 'neck_l', 'eye_g', ] + mask_categories = [ + "cloth", + "r_ear", + "hair", + "l_brow", + "l_eye", + "l_lip", + "mouth", + "neck", + "nose", + "r_brow", + "r_ear", + "r_eye", + "skin", + "u_lip", + "hat", + "l_ear", + "neck_l", + "eye_g", + ] merged_categories = { - 'ear': ['l_ear', 'r_ear',], - 'brow': ['l_brow', 'r_brow',], - 'eye': ['l_eye', 'r_eye',], - 'mouth': ['l_lip', 'u_lip', 'mouth',], + "ear": [ + "l_ear", + "r_ear", + ], + "brow": [ + "l_brow", + "r_brow", + ], + "eye": [ + "l_eye", + "r_eye", + ], + "mouth": [ + "l_lip", + "u_lip", + 
"mouth", + ], } _categories_to_merge = [] for key in sorted(list(merged_categories.keys())): @@ -26,24 +57,100 @@ class CelebAMaskHQCategoriesAndAttributes(CategoriesAndAttributes): for key in mask_categories: if key not in _categories_to_merge: merged_categories[key] = [key] - mask_labels = ['hair'] + mask_labels = ["hair"] selective_attributes = { - 'facial_hair': ['5_o_Clock_Shadow', 'Goatee', 'Mustache', 'No_Beard', 'Sideburns', ], - 'hair_colour': ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair', ], - 'hair_shape': ['Straight_Hair', 'Wavy_Hair', ] + "facial_hair": [ + "5_o_Clock_Shadow", + "Goatee", + "Mustache", + "No_Beard", + "Sideburns", + ], + "hair_colour": [ + "Black_Hair", + "Blond_Hair", + "Brown_Hair", + "Gray_Hair", + ], + "hair_shape": [ + "Straight_Hair", + "Wavy_Hair", + ], } - plane_attributes = ['Bangs', 'Eyeglasses', 'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Necklace', - 'Wearing_Necktie', 'Male', ] - avoided_attributes = ['Arched_Eyebrows', 'Bags_Under_Eyes', 'Big_Lips', 'Big_Nose', 'Bushy_Eyebrows', 'Chubby', - 'Double_Chin', 'High_Cheekbones', 'Narrow_Eyes', 'Oval_Face', 'Pointy_Nose', - 'Receding_Hairline', 'Rosy_Cheeks', 'Heavy_Makeup', 'Wearing_Lipstick', 'Attractive', - 'Blurry', 'Mouth_Slightly_Open', 'Pale_Skin', 'Smiling', 'Young', ] - attributes = ["5_o_Clock_Shadow", "Arched_Eyebrows", "Attractive", "Bags_Under_Eyes", "Bald", "Bangs", "Big_Lips", - "Big_Nose", "Black_Hair", "Blond_Hair", "Blurry", "Brown_Hair", "Bushy_Eyebrows", "Chubby", - "Double_Chin", "Eyeglasses", "Goatee", "Gray_Hair", "Heavy_Makeup", "High_Cheekbones", "Male", - "Mouth_Slightly_Open", "Mustache", "Narrow_Eyes", "No_Beard", "Oval_Face", "Pale_Skin", "Pointy_Nose", - "Receding_Hairline", "Rosy_Cheeks", "Sideburns", "Smiling", "Straight_Hair", "Wavy_Hair", - "Wearing_Earrings", "Wearing_Hat", "Wearing_Lipstick", "Wearing_Necklace", "Wearing_Necktie", "Young"] + plane_attributes = [ + "Bangs", + "Eyeglasses", + "Wearing_Earrings", + "Wearing_Hat", + "Wearing_Necklace", + "Wearing_Necktie", + "Male", + ] + avoided_attributes = [ + "Arched_Eyebrows", + "Bags_Under_Eyes", + "Big_Lips", + "Big_Nose", + "Bushy_Eyebrows", + "Chubby", + "Double_Chin", + "High_Cheekbones", + "Narrow_Eyes", + "Oval_Face", + "Pointy_Nose", + "Receding_Hairline", + "Rosy_Cheeks", + "Heavy_Makeup", + "Wearing_Lipstick", + "Attractive", + "Blurry", + "Mouth_Slightly_Open", + "Pale_Skin", + "Smiling", + "Young", + ] + attributes = [ + "5_o_Clock_Shadow", + "Arched_Eyebrows", + "Attractive", + "Bags_Under_Eyes", + "Bald", + "Bangs", + "Big_Lips", + "Big_Nose", + "Black_Hair", + "Blond_Hair", + "Blurry", + "Brown_Hair", + "Bushy_Eyebrows", + "Chubby", + "Double_Chin", + "Eyeglasses", + "Goatee", + "Gray_Hair", + "Heavy_Makeup", + "High_Cheekbones", + "Male", + "Mouth_Slightly_Open", + "Mustache", + "Narrow_Eyes", + "No_Beard", + "Oval_Face", + "Pale_Skin", + "Pointy_Nose", + "Receding_Hairline", + "Rosy_Cheeks", + "Sideburns", + "Smiling", + "Straight_Hair", + "Wavy_Hair", + "Wearing_Earrings", + "Wearing_Hat", + "Wearing_Lipstick", + "Wearing_Necklace", + "Wearing_Necktie", + "Young", + ] thresholds_mask: dict[str, float] = {} thresholds_pred: dict[str, float] = {} @@ -55,8 +162,8 @@ class CelebAMaskHQCategoriesAndAttributes(CategoriesAndAttributes): thresholds_pred[key] = 0.5 # set specific thresholds: - thresholds_mask['eye_g'] = 0.25 - thresholds_pred['Eyeglasses'] = 0.25 - thresholds_pred['Wearing_Earrings'] = 0.5 - thresholds_pred['Wearing_Necklace'] = 0.5 - thresholds_pred['Wearing_Necktie'] = 0.5 + 
thresholds_mask["eye_g"] = 0.25 + thresholds_pred["Eyeglasses"] = 0.25 + thresholds_pred["Wearing_Earrings"] = 0.5 + thresholds_pred["Wearing_Necklace"] = 0.5 + thresholds_pred["Wearing_Necktie"] = 0.5 diff --git a/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/image_with_masks_and_attributes.py b/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/image_with_masks_and_attributes.py index 57858fc12..c5df408a4 100644 --- a/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/image_with_masks_and_attributes.py +++ b/common/vision/lasr_vision_feature_extraction/src/lasr_vision_feature_extraction/image_with_masks_and_attributes.py @@ -1,5 +1,7 @@ import numpy as np -from lasr_vision_feature_extraction.categories_and_attributes import CategoriesAndAttributes +from lasr_vision_feature_extraction.categories_and_attributes import ( + CategoriesAndAttributes, +) import json @@ -16,29 +18,42 @@ def _softmax(x: list[float]) -> list[float]: def _exp(x): """Compute e^x for a given x. A simple implementation of the exponential function.""" - return 2.718281828459045 ** x # Using an approximation of Euler's number e + return 2.718281828459045**x # Using an approximation of Euler's number e class ImageWithMasksAndAttributes: - def __init__(self, image: np.ndarray, masks: dict[str, np.ndarray], attributes: dict[str, float], - categories_and_attributes: CategoriesAndAttributes): + def __init__( + self, + image: np.ndarray, + masks: dict[str, np.ndarray], + attributes: dict[str, float], + categories_and_attributes: CategoriesAndAttributes, + ): self.image: np.ndarray = image self.masks: dict[str, np.ndarray] = masks self.attributes: dict[str, float] = attributes - self.categories_and_attributes: CategoriesAndAttributes = categories_and_attributes + self.categories_and_attributes: CategoriesAndAttributes = ( + categories_and_attributes + ) self.plane_attribute_dict: dict[str, float] = {} for attribute in self.categories_and_attributes.plane_attributes: self.plane_attribute_dict[attribute] = self.attributes[attribute] self.selective_attribute_dict: dict[str, dict[str, float]] = {} - for category in sorted(list(self.categories_and_attributes.selective_attributes.keys())): + for category in sorted( + list(self.categories_and_attributes.selective_attributes.keys()) + ): self.selective_attribute_dict[category] = {} temp_list: list[float] = [] - for attribute in self.categories_and_attributes.selective_attributes[category]: + for attribute in self.categories_and_attributes.selective_attributes[ + category + ]: temp_list.append(self.attributes[attribute]) softmax_list = _softmax(temp_list) - for i, attribute in enumerate(self.categories_and_attributes.selective_attributes[category]): + for i, attribute in enumerate( + self.categories_and_attributes.selective_attributes[category] + ): self.selective_attribute_dict[category][attribute] = softmax_list[i] def describe(self) -> str: @@ -52,108 +67,142 @@ def _max_value_tuple(some_dict: dict[str, float]) -> tuple[str, float]: class ImageOfPerson(ImageWithMasksAndAttributes): - def __init__(self, image: np.ndarray, masks: dict[str, np.ndarray], attributes: dict[str, float], - categories_and_attributes: CategoriesAndAttributes): + def __init__( + self, + image: np.ndarray, + masks: dict[str, np.ndarray], + attributes: dict[str, float], + categories_and_attributes: CategoriesAndAttributes, + ): super().__init__(image, masks, attributes, categories_and_attributes) @classmethod - def 
from_parent_instance(cls, parent_instance: ImageWithMasksAndAttributes) -> 'ImageOfPerson': + def from_parent_instance( + cls, parent_instance: ImageWithMasksAndAttributes + ) -> "ImageOfPerson": """ Creates an instance of ImageOfPerson using the properties of an instance of ImageWithMasksAndAttributes. """ - return cls(image=parent_instance.image, - masks=parent_instance.masks, - attributes=parent_instance.attributes, - categories_and_attributes=parent_instance.categories_and_attributes) + return cls( + image=parent_instance.image, + masks=parent_instance.masks, + attributes=parent_instance.attributes, + categories_and_attributes=parent_instance.categories_and_attributes, + ) def describe(self) -> str: - male = (self.attributes['Male'] > self.categories_and_attributes.thresholds_pred['Male'], self.attributes['Male']) + male = ( + self.attributes["Male"] + > self.categories_and_attributes.thresholds_pred["Male"], + self.attributes["Male"], + ) has_hair = ( - self.attributes['hair'] > self.categories_and_attributes.thresholds_pred['hair'], self.attributes['hair']) - hair_colour = _max_value_tuple(self.selective_attribute_dict['hair_colour']) - hair_shape = _max_value_tuple(self.selective_attribute_dict['hair_shape']) - facial_hair = _max_value_tuple(self.selective_attribute_dict['facial_hair']) + self.attributes["hair"] + > self.categories_and_attributes.thresholds_pred["hair"], + self.attributes["hair"], + ) + hair_colour = _max_value_tuple(self.selective_attribute_dict["hair_colour"]) + hair_shape = _max_value_tuple(self.selective_attribute_dict["hair_shape"]) + facial_hair = _max_value_tuple(self.selective_attribute_dict["facial_hair"]) # bangs = ( # self.attributes['Bangs'] > self.categories_and_attributes.thresholds_pred['Bangs'], # self.attributes['Bangs']) - hat = (self.attributes['Wearing_Hat'] > self.categories_and_attributes.thresholds_pred['Wearing_Hat'], - self.attributes['Wearing_Hat']) - glasses = (self.attributes['Eyeglasses'] > self.categories_and_attributes.thresholds_pred['Eyeglasses'], - self.attributes['Eyeglasses']) + hat = ( + self.attributes["Wearing_Hat"] + > self.categories_and_attributes.thresholds_pred["Wearing_Hat"], + self.attributes["Wearing_Hat"], + ) + glasses = ( + self.attributes["Eyeglasses"] + > self.categories_and_attributes.thresholds_pred["Eyeglasses"], + self.attributes["Eyeglasses"], + ) earrings = ( - self.attributes['Wearing_Earrings'] > self.categories_and_attributes.thresholds_pred['Wearing_Earrings'], - self.attributes['Wearing_Earrings']) + self.attributes["Wearing_Earrings"] + > self.categories_and_attributes.thresholds_pred["Wearing_Earrings"], + self.attributes["Wearing_Earrings"], + ) necklace = ( - self.attributes['Wearing_Necklace'] > self.categories_and_attributes.thresholds_pred['Wearing_Necklace'], - self.attributes['Wearing_Necklace']) + self.attributes["Wearing_Necklace"] + > self.categories_and_attributes.thresholds_pred["Wearing_Necklace"], + self.attributes["Wearing_Necklace"], + ) necktie = ( - self.attributes['Wearing_Necktie'] > self.categories_and_attributes.thresholds_pred['Wearing_Necktie'], - self.attributes['Wearing_Necktie']) + self.attributes["Wearing_Necktie"] + > self.categories_and_attributes.thresholds_pred["Wearing_Necktie"], + self.attributes["Wearing_Necktie"], + ) description = "This customer has " - hair_colour_str = 'None' - hair_shape_str = 'None' + hair_colour_str = "None" + hair_shape_str = "None" if has_hair[0]: - hair_shape_str = '' - if hair_shape[0] == 'Straight_Hair': - hair_shape_str = 'straight' - 
elif hair_shape[0] == 'Wavy_Hair': - hair_shape_str = 'wavy' - if hair_colour[0] == 'Black_Hair': - description += 'black %s hair, ' % hair_shape_str - hair_colour_str = 'black' - elif hair_colour[0] == 'Blond_Hair': - description += 'blond %s hair, ' % hair_shape_str - hair_colour_str = 'blond' - elif hair_colour[0] == 'Brown_Hair': - description += 'brown %s hair, ' % hair_shape_str - hair_colour_str = 'brown' - elif hair_colour[0] == 'Gray_Hair': - description += 'gray %s hair, ' % hair_shape_str - hair_colour_str = 'gray' - - if male: # here 'male' is only used to determine whether it is confident to decide whether the person has beard - if not facial_hair[0] == 'No_Beard': - description += 'and has beard. ' + hair_shape_str = "" + if hair_shape[0] == "Straight_Hair": + hair_shape_str = "straight" + elif hair_shape[0] == "Wavy_Hair": + hair_shape_str = "wavy" + if hair_colour[0] == "Black_Hair": + description += "black %s hair, " % hair_shape_str + hair_colour_str = "black" + elif hair_colour[0] == "Blond_Hair": + description += "blond %s hair, " % hair_shape_str + hair_colour_str = "blond" + elif hair_colour[0] == "Brown_Hair": + description += "brown %s hair, " % hair_shape_str + hair_colour_str = "brown" + elif hair_colour[0] == "Gray_Hair": + description += "gray %s hair, " % hair_shape_str + hair_colour_str = "gray" + + if ( + male + ): # here 'male' is only used to determine whether it is confident to decide whether the person has beard + if not facial_hair[0] == "No_Beard": + description += "and has beard. " if hat[0] or glasses[0]: - description += 'I can also see this customer wearing ' + description += "I can also see this customer wearing " if hat[0] and glasses[0]: - description += 'a hat and a pair of glasses. ' + description += "a hat and a pair of glasses. " elif hat[0]: - description += 'a hat. ' + description += "a hat. " else: - description += 'a pair of glasses. ' + description += "a pair of glasses. " if earrings[0] or necklace[0] or necktie[0]: - description += 'This customer might also wear ' + description += "This customer might also wear " wearables = [] if earrings[0]: - wearables.append('a pair of earrings') + wearables.append("a pair of earrings") if necklace[0]: - wearables.append('a necklace') + wearables.append("a necklace") if necktie[0]: - wearables.append('a necktie') - description += ", ".join(wearables[:-2] + [" and ".join(wearables[-2:])]) + '. ' + wearables.append("a necktie") + description += ( + ", ".join(wearables[:-2] + [" and ".join(wearables[-2:])]) + ". " + ) if description == "This customer has ": - description = "I didn't manage to get any attributes from this customer, I'm sorry." - + description = ( + "I didn't manage to get any attributes from this customer, I'm sorry." 
+ ) + result = { - 'attributes': { - 'has_hair': has_hair[0], - 'hair_colour': hair_colour_str, - 'hair_shape': hair_shape_str, - 'male': male[0], - 'facial_hair': facial_hair[0], - 'hat': hat[0], - 'glasses': glasses[0], - 'earrings': earrings[0], - 'necklace': necklace[0], - 'necktie': necktie[0], + "attributes": { + "has_hair": has_hair[0], + "hair_colour": hair_colour_str, + "hair_shape": hair_shape_str, + "male": male[0], + "facial_hair": facial_hair[0], + "hat": hat[0], + "glasses": glasses[0], + "earrings": earrings[0], + "necklace": necklace[0], + "necktie": necktie[0], }, - 'description': description + "description": description, } result = json.dumps(result, indent=4) diff --git a/common/vision/lasr_vision_msgs/CMakeLists.txt b/common/vision/lasr_vision_msgs/CMakeLists.txt index 03a9d1748..911af4f06 100644 --- a/common/vision/lasr_vision_msgs/CMakeLists.txt +++ b/common/vision/lasr_vision_msgs/CMakeLists.txt @@ -48,6 +48,7 @@ add_message_files( Detection.msg Detection3D.msg BodyPixPose.msg + BodyPixKeypoint.msg BodyPixMask.msg BodyPixMaskRequest.msg ) @@ -63,6 +64,7 @@ add_service_files( Vqa.srv PointingDirection.srv TorchFaceFeatureDetectionDescription.srv + DetectFaces.srv ) # Generate actions in the 'action' folder @@ -203,4 +205,4 @@ include_directories( # endif() ## Add folders to be run by python nosetests -# catkin_add_nosetests(test) +# catkin_add_nosetests(test) \ No newline at end of file diff --git a/common/vision/lasr_vision_msgs/msg/BodyPixKeypoint.msg b/common/vision/lasr_vision_msgs/msg/BodyPixKeypoint.msg new file mode 100644 index 000000000..e875679af --- /dev/null +++ b/common/vision/lasr_vision_msgs/msg/BodyPixKeypoint.msg @@ -0,0 +1,12 @@ +# Keypoint.msg + +# int number of the parts following +# https://github.com/de-code/python-tf-bodypix/blob/develop/tf_bodypix/bodypix_js_utils/part_channels.py#L5 + +string part + +# the score of the body part +float64 score + +# the x and y coordinates of the body part +int32[] xy diff --git a/common/vision/lasr_vision_msgs/msg/BodyPixPose.msg b/common/vision/lasr_vision_msgs/msg/BodyPixPose.msg index 8c03dc6df..1ba8b9f7d 100644 --- a/common/vision/lasr_vision_msgs/msg/BodyPixPose.msg +++ b/common/vision/lasr_vision_msgs/msg/BodyPixPose.msg @@ -1,2 +1,2 @@ -# x and y coordinates -float32[] coord +# keypoints +BodyPixKeypoint[] keypoints \ No newline at end of file diff --git a/common/vision/lasr_vision_msgs/srv/DetectFaces.srv b/common/vision/lasr_vision_msgs/srv/DetectFaces.srv new file mode 100644 index 000000000..7bc137dad --- /dev/null +++ b/common/vision/lasr_vision_msgs/srv/DetectFaces.srv @@ -0,0 +1,3 @@ +sensor_msgs/Image image_raw +--- +lasr_vision_msgs/Detection[] detections diff --git a/common/vision/lasr_vision_pointing/requirements.txt b/common/vision/lasr_vision_pointing/requirements.txt index f3bb12a4b..e6ef834ab 100644 --- a/common/vision/lasr_vision_pointing/requirements.txt +++ b/common/vision/lasr_vision_pointing/requirements.txt @@ -1,24 +1,24 @@ absl-py==2.1.0 # via mediapipe attrs==23.2.0 # via mediapipe cffi==1.16.0 # via sounddevice -contourpy==1.2.0 # via matplotlib +contourpy==1.2.1 # via matplotlib cycler==0.12.1 # via matplotlib -flatbuffers==24.3.7 # via mediapipe -fonttools==4.50.0 # via matplotlib -jax==0.4.25 # via mediapipe +flatbuffers==24.3.25 # via mediapipe +fonttools==4.51.0 # via matplotlib +jax==0.4.26 # via mediapipe kiwisolver==1.4.5 # via matplotlib -matplotlib==3.8.3 # via mediapipe +matplotlib==3.8.4 # via mediapipe mediapipe==0.10.10 # via -r requirements.in -ml-dtypes==0.3.2 # 
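`BodyPixPose` is now a list of named keypoints rather than a flat coordinate array. Consuming it might look like this sketch; the topic name is an assumption, as no publisher appears in this diff:

```python
import rospy
from lasr_vision_msgs.msg import BodyPixPose

def on_pose(pose: BodyPixPose) -> None:
    # Each keypoint carries its part name, a confidence score and pixel coords.
    for kp in pose.keypoints:
        x, y = kp.xy
        rospy.loginfo(f"{kp.part}: ({x}, {y}) score={kp.score:.2f}")

rospy.init_node("bodypix_pose_listener")
rospy.Subscriber("/bodypix/poses", BodyPixPose, on_pose)  # assumed topic
rospy.spin()
```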
via jax +ml-dtypes==0.4.0 # via jax numpy==1.26.4 # via contourpy, jax, matplotlib, mediapipe, ml-dtypes, opencv-contrib-python, opt-einsum, scipy opencv-contrib-python==4.9.0.80 # via mediapipe opt-einsum==3.3.0 # via jax packaging==24.0 # via matplotlib -pillow==10.2.0 # via matplotlib +pillow==10.3.0 # via matplotlib protobuf==3.20.3 # via mediapipe -pycparser==2.21 # via cffi +pycparser==2.22 # via cffi pyparsing==3.1.2 # via matplotlib python-dateutil==2.9.0.post0 # via matplotlib -scipy==1.12.0 # via jax +scipy==1.13.0 # via jax six==1.16.0 # via python-dateutil sounddevice==0.4.6 # via mediapipe diff --git a/common/vision/lasr_vision_pointing/scripts/detect_pointing.py b/common/vision/lasr_vision_pointing/scripts/detect_pointing.py index a3f1d64d4..b56e0d14e 100644 --- a/common/vision/lasr_vision_pointing/scripts/detect_pointing.py +++ b/common/vision/lasr_vision_pointing/scripts/detect_pointing.py @@ -6,12 +6,21 @@ import rospy import mediapipe as mp from cv_bridge import CvBridge -from lasr_vision_msgs.srv import YoloDetectionRequest, YoloDetection, PointingDirection, PointingDirectionResponse, PointingDirectionRequest +from lasr_vision_msgs.srv import ( + YoloDetectionRequest, + YoloDetection, + PointingDirection, + PointingDirectionResponse, + PointingDirectionRequest, +) + class PointingDetector: def __init__(self): - self.detect_service = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - self.service = rospy.Service('/pointing_detection_service', PointingDirection, self.excute) + self.detect_service = rospy.ServiceProxy("/yolov8/detect", YoloDetection) + self.service = rospy.Service( + "/pointing_detection_service", PointingDirection, self.excute + ) self.counter = 0 # Load MediaPipe Pose model @@ -25,24 +34,34 @@ def excute(self, req: PointingDirectionRequest) -> PointingDirectionResponse: if people_bounding_box: for person in people_bounding_box: - keypoints = self.detect_keypoints(img) # Detect keypoints using MediaPipe - direction = self.determine_pointing_direction(keypoints) + keypoints = self.detect_keypoints( + img + ) # Detect keypoints using MediaPipe + direction = self.determine_pointing_direction( + keypoints, buffer_width=25 + ) rospy.loginfo(f"Person detected pointing: {direction}") # Visualize pointing direction with landmarks - image_path = os.path.join(rospkg.RosPack().get_path('lasr_vision_pointing'), 'images', f'image{self.counter}.jpg') - self.visualize_pointing_direction_with_landmarks(image_path, person, direction, keypoints) + image_path = os.path.join( + rospkg.RosPack().get_path("lasr_vision_pointing"), + "images", + f"image{self.counter}.jpg", + ) + self.visualize_pointing_direction_with_landmarks( + image_path, person, direction, keypoints + ) resp.direction = direction else: - resp.direction = "Err" - + resp.direction = "NONE" + self.counter += 1 return resp def store_image(self, img): - package_path = rospkg.RosPack().get_path('lasr_vision_pointing') - os.chdir(os.path.abspath(os.path.join(package_path, 'images'))) + package_path = rospkg.RosPack().get_path("lasr_vision_pointing") + os.chdir(os.path.abspath(os.path.join(package_path, "images"))) cv2.imwrite(f"image{self.counter}.jpg", img) def detect_keypoints(self, img): @@ -62,17 +81,17 @@ def detect_keypoints(self, img): keypoints.append((x, y)) return keypoints - + def detection(self, img): request = YoloDetectionRequest() - request.image_raw = img # sensor_msgs/Image - request.dataset = "yolov8n-seg.pt" # YOLOv8 model, auto-downloads - request.confidence = 0.7 # minimum confidence to 
include in results - request.nms = 0.4 # non maximal supression + request.image_raw = img # sensor_msgs/Image + request.dataset = "yolov8n-seg.pt" # YOLOv8 model, auto-downloads + request.confidence = 0.7 # minimum confidence to include in results + request.nms = 0.4 # non maximal supression # send request response = self.detect_service(request) - + result = [] for detection in response.detected_objects: if detection.name == "person": @@ -83,65 +102,74 @@ def detection(self, img): def determine_pointing_direction(self, keypoints, buffer_width=50): # Ensure keypoints are available - if len(keypoints) >= 7: # Ensure we have at least 7 keypoints for the upper body + if ( + len(keypoints) >= 7 + ): # Ensure we have at least 7 keypoints for the upper body # Extract relevant keypoints for shoulders and wrists left_shoulder = keypoints[self.mp_pose.PoseLandmark.LEFT_SHOULDER.value] right_shoulder = keypoints[self.mp_pose.PoseLandmark.RIGHT_SHOULDER.value] left_wrist = keypoints[self.mp_pose.PoseLandmark.LEFT_WRIST.value] right_wrist = keypoints[self.mp_pose.PoseLandmark.RIGHT_WRIST.value] - + # Ensure all keypoints are detected if left_shoulder and right_shoulder and left_wrist and right_wrist: # Calculate the x-coordinate difference between shoulders and wrists left_diff = left_wrist[0] - left_shoulder[0] right_diff = right_shoulder[0] - right_wrist[0] - + # Determine pointing direction based on the difference in x-coordinates if abs(left_diff - right_diff) < buffer_width: - return "Front" + return "FORWARDS" elif abs(left_diff) > buffer_width and abs(left_diff) > abs(right_diff): - return "Left" if left_diff > 0 else "Right" - elif abs(right_diff) > buffer_width and abs(right_diff) > abs(left_diff): - return "Right" if right_diff > 0 else "Left" - + return "LEFT" if left_diff > 0 else "RIGHT" + elif abs(right_diff) > buffer_width and abs(right_diff) > abs( + left_diff + ): + return "RIGHT" if right_diff > 0 else "LEFT" + # Default: Determine direction based on the relative position to the center of the image - return "Front" - - def visualize_pointing_direction_with_landmarks(self, image_path, person_bbox, pointing_direction, keypoints): + return "NONE" + + def visualize_pointing_direction_with_landmarks( + self, image_path, person_bbox, pointing_direction, keypoints + ): # Load image img = cv2.imread(image_path) - + # Extract person bbox coordinates x, y, w, h = person_bbox # Calculate center of bbox center_x = x + w // 2 center_y = y + h // 2 - + # Calculate endpoint of arrow based on pointing direction arrow_length = min(w, h) // 2 - if pointing_direction == "Left": + if pointing_direction == "LEFT": endpoint = (center_x - arrow_length, center_y) - elif pointing_direction == "Right": + elif pointing_direction == "RIGHT": endpoint = (center_x + arrow_length, center_y) - else: + elif pointing_direction == "FORWARDS": endpoint = (center_x, center_y) - + else: + return # No pointing direction detected + # Draw arrow on image color = (0, 255, 0) # Green color thickness = 2 cv2.arrowedLine(img, (center_x, center_y), endpoint, color, thickness) - + # Draw landmarks (keypoints) on the image for keypoint in keypoints: if keypoint: cv2.circle(img, keypoint, 3, (0, 0, 255), -1) # Red color for landmarks - + # Store image with pointing direction visualization and landmarks output_image_path = f"landmark_image{self.counter}.jpg" # Change the output image path as needed cv2.imwrite(output_image_path, img) -if __name__ == '__main__': + +if __name__ == "__main__": rospy.init_node("pointing_detector") 
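A caller of the node set up here would go through the `PointingDirection` service; a minimal sketch, with the camera topic and the `image_raw` request field assumed:

```python
import rospy
from sensor_msgs.msg import Image
from lasr_vision_msgs.srv import PointingDirection, PointingDirectionRequest

rospy.init_node("pointing_client")
rospy.wait_for_service("/pointing_detection_service")
detect_pointing = rospy.ServiceProxy("/pointing_detection_service", PointingDirection)

img = rospy.wait_for_message("/camera/image_raw", Image)         # assumed topic
resp = detect_pointing(PointingDirectionRequest(image_raw=img))  # field assumed
rospy.loginfo(f"Direction: {resp.direction}")  # LEFT, RIGHT, FORWARDS or NONE
```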
pointer = PointingDetector() rospy.loginfo("Pointing Detector is running") - rospy.spin() \ No newline at end of file + rospy.spin() diff --git a/common/vision/lasr_vision_pointing/setup.py b/common/vision/lasr_vision_pointing/setup.py index 627e86f08..e094797e8 100644 --- a/common/vision/lasr_vision_pointing/setup.py +++ b/common/vision/lasr_vision_pointing/setup.py @@ -4,8 +4,7 @@ from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['lasr_vision_pointing'], - package_dir={'': 'src'} + packages=["lasr_vision_pointing"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/common/vision/lasr_vision_yolov8/setup.py b/common/vision/lasr_vision_yolov8/setup.py index 86898294f..c75738e76 100644 --- a/common/vision/lasr_vision_yolov8/setup.py +++ b/common/vision/lasr_vision_yolov8/setup.py @@ -1,12 +1,10 @@ - #!/usr/bin/env python3 from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['lasr_vision_yolov8'], - package_dir={'': 'src'} + packages=["lasr_vision_yolov8"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/documentation/scripts/generate_readme.py b/documentation/scripts/generate_readme.py index b0428443a..439046a48 100755 --- a/documentation/scripts/generate_readme.py +++ b/documentation/scripts/generate_readme.py @@ -4,7 +4,7 @@ # Determine package name if len(sys.argv) < 2: - print('Usage: rosrun documentation generate_readme.py ros_package_name') + print("Usage: rosrun documentation generate_readme.py ros_package_name") exit() else: package = sys.argv[1] diff --git a/documentation/setup.py b/documentation/setup.py index 8d037b2c7..bb5956069 100644 --- a/documentation/setup.py +++ b/documentation/setup.py @@ -1,12 +1,10 @@ - #!/usr/bin/env python3 from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['document_lasr'], - package_dir={'': 'src'} + packages=["document_lasr"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/documentation/src/document_lasr/document.py b/documentation/src/document_lasr/document.py index 49199f75d..c78ac9783 100644 --- a/documentation/src/document_lasr/document.py +++ b/documentation/src/document_lasr/document.py @@ -5,363 +5,391 @@ import xml.etree.ElementTree as ET from collections import OrderedDict + def generate_readme(pkg_dir): # Prepare the data data = { - 'package_name': None, - 'description': 'No description provided.', - 'maintainers': [], - 'authors': [], - 'dependencies': [] + "package_name": None, + "description": "No description provided.", + "maintainers": [], + "authors": [], + "dependencies": [], } # Load package.xml - tree = ET.parse(join(pkg_dir, 'package.xml')) + tree = ET.parse(join(pkg_dir, "package.xml")) for child in tree.getroot(): - if child.tag == 'name': - data['package_name'] = child.text - elif child.tag == 'description': - data['description'] = child.text - elif child.tag == 'maintainer': - data['maintainers'].append({ - 'name': child.text, - 'email': child.attrib['email'] - }) - elif child.tag == 'author': - data['authors'].append({ - 'name': child.text, - 'email': child.attrib['email'] - }) - elif child.tag in ['buildtool_depend', 'depend', 'build_depend', 'exec_depend', 'test_depend', 'doc_depend']: - 
data['dependencies'].append({ - 'package': child.text, - 'type': child.tag - }) + if child.tag == "name": + data["package_name"] = child.text + elif child.tag == "description": + data["description"] = child.text + elif child.tag == "maintainer": + data["maintainers"].append( + {"name": child.text, "email": child.attrib["email"]} + ) + elif child.tag == "author": + data["authors"].append({"name": child.text, "email": child.attrib["email"]}) + elif child.tag in [ + "buildtool_depend", + "depend", + "build_depend", + "exec_depend", + "test_depend", + "doc_depend", + ]: + data["dependencies"].append({"package": child.text, "type": child.tag}) # Generate maintainers list - MAINTAINERS = '''This package is maintained by:''' + MAINTAINERS = """This package is maintained by:""" - for maintainer in data['maintainers']: - name = maintainer['name'] - email = maintainer['email'] - MAINTAINERS += f'\n- [{name}](mailto:{email})' + for maintainer in data["maintainers"]: + name = maintainer["name"] + email = maintainer["email"] + MAINTAINERS += f"\n- [{name}](mailto:{email})" - data['maintainers'] = MAINTAINERS + data["maintainers"] = MAINTAINERS # Generate authors list - AUTHORS = '' + AUTHORS = "" + + if len(data["authors"]) > 0: + AUTHORS = """\nThe following people have contributed to this package:""" - if len(data['authors']) > 0: - AUTHORS = '''\nThe following people have contributed to this package:''' + for author in data["authors"]: + name = author["name"] + email = author["email"] + AUTHORS += f"\n- [{name}](mailto:{email})" - for author in data['authors']: - name = author['name'] - email = author['email'] - AUTHORS += f'\n- [{name}](mailto:{email})' - - AUTHORS += '\n' + AUTHORS += "\n" - data['authors'] = AUTHORS + data["authors"] = AUTHORS # Generate dependency list - DEPENDENCIES = 'This package has no additional ROS dependencies.' + DEPENDENCIES = "This package has no additional ROS dependencies." - if len(data['dependencies']) > 0: - DEPENDENCIES = '''This package depends on the following ROS packages:''' + if len(data["dependencies"]) > 0: + DEPENDENCIES = """This package depends on the following ROS packages:""" - for dependency in data['dependencies']: - name = dependency['package'] - depend_type = dependency['type'].replace('depend', '').replace('_', '') + for dependency in data["dependencies"]: + name = dependency["package"] + depend_type = dependency["type"].replace("depend", "").replace("_", "") if len(depend_type) > 0: - depend_type = f' ({depend_type})' + depend_type = f" ({depend_type})" - DEPENDENCIES += f'\n- {name}{depend_type}' + DEPENDENCIES += f"\n- {name}{depend_type}" # Try to detect required Python version if using catkin_virtualenv - with open(join(pkg_dir, 'CMakeLists.txt')) as f: + with open(join(pkg_dir, "CMakeLists.txt")) as f: cmakelists = f.read() - match = re.search(r'PYTHON_INTERPRETER ([\w\d\.]+)', cmakelists) + match = re.search(r"PYTHON_INTERPRETER ([\w\d\.]+)", cmakelists) if match is not None: python = match.group(1) - if python.startswith('python'): + if python.startswith("python"): version = python[6:] - python = f'Python {version}' + python = f"Python {version}" - DEPENDENCIES += f'\n\nThis packages requires {python} to be present.' + DEPENDENCIES += f"\n\nThis packages requires {python} to be present." 
# Generate Python dependency list - PATH = join(pkg_dir, 'requirements.txt') + PATH = join(pkg_dir, "requirements.txt") if exists(PATH): - LOCKED_PATH = join(pkg_dir, 'requirements.in') + LOCKED_PATH = join(pkg_dir, "requirements.in") LOCKED_DEPENDS = exists(LOCKED_PATH) python_requirements: list[str] all_requirements: list[str] if LOCKED_DEPENDS: - with open(LOCKED_PATH, 'r') as f: - python_requirements = f.read().strip().split('\n') - - with open(PATH, 'r') as f: - all_requirements = f.read().strip().split('\n') + with open(LOCKED_PATH, "r") as f: + python_requirements = f.read().strip().split("\n") + + with open(PATH, "r") as f: + all_requirements = f.read().strip().split("\n") else: - with open(PATH, 'r') as f: - python_requirements = f.read().strip().split('\n') + with open(PATH, "r") as f: + python_requirements = f.read().strip().split("\n") all_requirements = python_dependencies - + def split_depend(input: str) -> (str, str): # get rid of any garbage - input = input.strip().split(' ')[0] + input = input.strip().split(" ")[0] # skip comments and empty lines - if input.startswith('#') or len(input) == 0: + if input.startswith("#") or len(input) == 0: return None # determine package name and restriction - for restriction in ['==', '>', '<']: + for restriction in ["==", ">", "<"]: if restriction in input: name, ver = input.split(restriction) - return (name, f'{restriction}{ver}') + return (name, f"{restriction}{ver}") # otherwise just return everything as the package name return (input, None) python_dependencies = [ - split_depend(depend) for depend in python_requirements if split_depend(depend) is not None + split_depend(depend) + for depend in python_requirements + if split_depend(depend) is not None ] all_python_dependencies = [ - split_depend(depend) for depend in all_requirements if split_depend(depend) is not None + split_depend(depend) + for depend in all_requirements + if split_depend(depend) is not None ] - DEPENDENCIES += f'\n\nThis package has {len(all_python_dependencies)} Python dependencies:' + DEPENDENCIES += ( + f"\n\nThis package has {len(all_python_dependencies)} Python dependencies:" + ) - for (name, restriction) in python_dependencies: - DEPENDENCIES += f'\n- [{name}](https://pypi.org/project/{name}){restriction}' + for name, restriction in python_dependencies: + DEPENDENCIES += ( + f"\n- [{name}](https://pypi.org/project/{name}){restriction}" + ) hidden_dependencies = [ - name for (name, _) in all_python_dependencies if not any(name == pkg for (pkg, _) in python_dependencies) + name + for (name, _) in all_python_dependencies + if not any(name == pkg for (pkg, _) in python_dependencies) ] if len(hidden_dependencies) > 0: - DEPENDENCIES += f'\n- .. and {len(hidden_dependencies)} sub dependencies' + DEPENDENCIES += f"\n- .. and {len(hidden_dependencies)} sub dependencies" - data['dependencies'] = DEPENDENCIES + data["dependencies"] = DEPENDENCIES # Load additional markdown files - data['prerequisites'] = 'Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package!' - data['usage'] = 'Ask the package maintainer to write a `doc/USAGE.md` for their package!' - data['example'] = 'Ask the package maintainer to write a `doc/EXAMPLE.md` for their package!' - data['technical'] = 'Ask the package maintainer to write a `doc/TECHNICAL.md` for their package!' + data["prerequisites"] = ( + "Ask the package maintainer to write or create a blank `doc/PREREQUISITES.md` for their package!" 
+ ) + data["usage"] = ( + "Ask the package maintainer to write a `doc/USAGE.md` for their package!" + ) + data["example"] = ( + "Ask the package maintainer to write a `doc/EXAMPLE.md` for their package!" + ) + data["technical"] = ( + "Ask the package maintainer to write a `doc/TECHNICAL.md` for their package!" + ) for key, file in [ - ('prerequisites', 'PREREQUISITES'), - ('usage', 'USAGE'), - ('example', 'EXAMPLE'), - ('technical', 'TECHNICAL') + ("prerequisites", "PREREQUISITES"), + ("usage", "USAGE"), + ("example", "EXAMPLE"), + ("technical", "TECHNICAL"), ]: - PATH = join(pkg_dir, f'doc/{file}.md') + PATH = join(pkg_dir, f"doc/{file}.md") if exists(PATH): - with open(PATH, 'r') as f: + with open(PATH, "r") as f: data[key] = f.read().strip() # Generate documentation for ROS-specific components - data['launch_files'] = 'This package has no launch files.\n' - data['messages'] = 'This package has no messages.\n' - data['services'] = 'This package has no services.\n' - data['actions'] = 'This package has no actions.\n' + data["launch_files"] = "This package has no launch files.\n" + data["messages"] = "This package has no messages.\n" + data["services"] = "This package has no services.\n" + data["actions"] = "This package has no actions.\n" # Generate launch file table - LAUNCH_FILES = join(pkg_dir, 'launch') + LAUNCH_FILES = join(pkg_dir, "launch") if exists(LAUNCH_FILES): files = listdir(LAUNCH_FILES) - data['launch_files'] = '' + data["launch_files"] = "" for file in files: - if not file.endswith('.launch'): + if not file.endswith(".launch"): continue - + name = file[:-7] - data['launch_files'] += f'#### `{name}`\n\n' + data["launch_files"] += f"#### `{name}`\n\n" # Load launch file XML - tree = ET.parse(join(pkg_dir, 'launch', file)) + tree = ET.parse(join(pkg_dir, "launch", file)) launch_data = { - 'description': 'No description provided.', - 'usage': [], - 'arguments': [], + "description": "No description provided.", + "usage": [], + "arguments": [], } for child in tree.getroot(): - if child.tag == 'description': - launch_data['description'] = child.text - elif child.tag == 'usage': - launch_data['usage'].append({ - 'description': child.attrib['doc'], - 'arguments': child.text - }) - elif child.tag == 'arg': - if 'value' in child.attrib: + if child.tag == "description": + launch_data["description"] = child.text + elif child.tag == "usage": + launch_data["usage"].append( + {"description": child.attrib["doc"], "arguments": child.text} + ) + elif child.tag == "arg": + if "value" in child.attrib: continue - - launch_data['arguments'].append(child.attrib) + + launch_data["arguments"].append(child.attrib) # Generate list of example usages - LAUNCH_USAGE = '\n\n```bash' - - for use in launch_data['usage']: - pkg_name = data['package_name'] - description = use['description'] - arguments = use['arguments'] or '' - LAUNCH_USAGE += f'\n# {description}\nroslaunch {pkg_name} {file} {arguments}\n' + LAUNCH_USAGE = "\n\n```bash" - launch_data['usage'] = f'{LAUNCH_USAGE}```' if len(launch_data['usage']) > 0 else '' + for use in launch_data["usage"]: + pkg_name = data["package_name"] + description = use["description"] + arguments = use["arguments"] or "" + LAUNCH_USAGE += ( + f"\n# {description}\nroslaunch {pkg_name} {file} {arguments}\n" + ) + + launch_data["usage"] = ( + f"{LAUNCH_USAGE}```" if len(launch_data["usage"]) > 0 else "" + ) # Generate table of arguments - table = '\n\n| Argument | Default | Description |\n|:-:|:-:|---|\n' - for arg in launch_data['arguments']: - argument = arg['name'] - 
default = arg['default'] if 'default' in arg else '' - description = arg['doc'] if 'doc' in arg else '' - table += f'| {argument} | {default} | {description} |\n' + table = "\n\n| Argument | Default | Description |\n|:-:|:-:|---|\n" + for arg in launch_data["arguments"]: + argument = arg["name"] + default = arg["default"] if "default" in arg else "" + description = arg["doc"] if "doc" in arg else "" + table += f"| {argument} | {default} | {description} |\n" + + launch_data["arguments"] = ( + table if len(launch_data["arguments"]) > 0 else "" + ) - launch_data['arguments'] = table if len(launch_data['arguments']) > 0 else '' + data["launch_files"] += "{description}{usage}{arguments}\n\n".format( + **launch_data + ) - data['launch_files'] += '{description}{usage}{arguments}\n\n'.format(**launch_data) - # Helper functions for ROS messages def parse_rosmsg_definition(input): - ''' + """ Convert message definition into a dictionary - ''' + """ - COMMENT_BUFFER = '' + COMMENT_BUFFER = "" MESSAGE = OrderedDict() - for line in input.split('\n'): + for line in input.split("\n"): line = line.strip() # skip empty lines if len(line) == 0: continue # buffer comments - elif line.startswith('#'): - COMMENT_BUFFER += '\n' + line[1:].strip() + elif line.startswith("#"): + COMMENT_BUFFER += "\n" + line[1:].strip() # parse type / name else: # add comment to buffer if exists - if '#' in line: - definition, comment = line.split('#', 1) - COMMENT_BUFFER += '\n' + comment + if "#" in line: + definition, comment = line.split("#", 1) + COMMENT_BUFFER += "\n" + comment else: definition = line # destruct the type and name - var_type, var_name = [seg for seg in definition.strip().split(' ') if len(seg) > 0] + var_type, var_name = [ + seg for seg in definition.strip().split(" ") if len(seg) > 0 + ] MESSAGE[var_name] = { - 'type': var_type, - 'comment': COMMENT_BUFFER.strip() + "type": var_type, + "comment": COMMENT_BUFFER.strip(), } - COMMENT_BUFFER = '' + COMMENT_BUFFER = "" return MESSAGE def rosmsg_definition_to_table(fields): - ''' + """ Convert a definition to a table - ''' + """ - table = '| Field | Type | Description |\n|:-:|:-:|---|\n' + table = "| Field | Type | Description |\n|:-:|:-:|---|\n" for name, definition in fields.items(): - var_type = definition['type'] - comment = '
'.join(definition['comment'].split('\n')) - table += f'| {name} | {var_type} | {comment} |\n' - + var_type = definition["type"] + comment = "
".join(definition["comment"].split("\n")) + table += f"| {name} | {var_type} | {comment} |\n" + return table # Generate message tables - MESSAGES = join(pkg_dir, 'msg') + MESSAGES = join(pkg_dir, "msg") if exists(MESSAGES): messages = os.listdir(MESSAGES) - data['messages'] = '' + data["messages"] = "" for message in messages: - if not message.endswith('.msg'): + if not message.endswith(".msg"): continue - + name = message[:-4] - data['messages'] += f'#### `{name}`\n\n' + data["messages"] += f"#### `{name}`\n\n" - with open(join(pkg_dir, 'msg', message), 'r') as f: + with open(join(pkg_dir, "msg", message), "r") as f: fields = parse_rosmsg_definition(f.read()) - data['messages'] += rosmsg_definition_to_table(fields) + data["messages"] += rosmsg_definition_to_table(fields) - data['messages'] += '\n' + data["messages"] += "\n" # Generate service tables - SERVICES = join(pkg_dir, 'srv') + SERVICES = join(pkg_dir, "srv") if exists(SERVICES): services = os.listdir(SERVICES) - data['services'] = '' + data["services"] = "" for service in services: - if not service.endswith('.srv'): + if not service.endswith(".srv"): continue - + name = service[:-4] - data['services'] += f'#### `{name}`\n\n' + data["services"] += f"#### `{name}`\n\n" - with open(join(pkg_dir, 'srv', service), 'r') as f: - request, response = f.read().split('---') + with open(join(pkg_dir, "srv", service), "r") as f: + request, response = f.read().split("---") - data['services'] += 'Request\n\n' + data["services"] += "Request\n\n" request_fields = parse_rosmsg_definition(request) - data['services'] += rosmsg_definition_to_table(request_fields) + data["services"] += rosmsg_definition_to_table(request_fields) - data['services'] += '\nResponse\n\n' + data["services"] += "\nResponse\n\n" response_fields = parse_rosmsg_definition(response) - data['services'] += rosmsg_definition_to_table(response_fields) + data["services"] += rosmsg_definition_to_table(response_fields) - data['services'] += '\n' + data["services"] += "\n" # Generate action tables - ACTIONS = join(pkg_dir, 'action') + ACTIONS = join(pkg_dir, "action") if exists(ACTIONS): actions = os.listdir(ACTIONS) - data['actions'] = '' + data["actions"] = "" for action in actions: - if not action.endswith('.action'): + if not action.endswith(".action"): continue - + name = action[:-7] - data['actions'] += f'#### `{name}`\n\n' + data["actions"] += f"#### `{name}`\n\n" - with open(join(pkg_dir, 'action', action), 'r') as f: - goal, result, feedback = f.read().split('---') + with open(join(pkg_dir, "action", action), "r") as f: + goal, result, feedback = f.read().split("---") - data['actions'] += 'Goal\n\n' + data["actions"] += "Goal\n\n" goal_fields = parse_rosmsg_definition(goal) - data['actions'] += rosmsg_definition_to_table(goal_fields) + data["actions"] += rosmsg_definition_to_table(goal_fields) - data['actions'] += '\nResult\n\n' + data["actions"] += "\nResult\n\n" result_fields = parse_rosmsg_definition(result) - data['actions'] += rosmsg_definition_to_table(result_fields) + data["actions"] += rosmsg_definition_to_table(result_fields) - data['actions'] += '\nFeedback\n\n' + data["actions"] += "\nFeedback\n\n" feedback_fields = parse_rosmsg_definition(feedback) - data['actions'] += rosmsg_definition_to_table(feedback_fields) + data["actions"] += rosmsg_definition_to_table(feedback_fields) - data['actions'] += '\n' + data["actions"] += "\n" # Generate the README - README = '''# {package_name} + README = """# {package_name} {description} @@ -398,7 +426,9 @@ def 
rosmsg_definition_to_table(fields): {services} ### Actions -{actions}'''.format(**data) +{actions}""".format( + **data + ) - with open(join(pkg_dir, 'README.md'), 'w') as f: + with open(join(pkg_dir, "README.md"), "w") as f: f.write(README) diff --git a/documentation/src/document_lasr/iter_pkg.py b/documentation/src/document_lasr/iter_pkg.py index ae59c3f82..d6c4891c9 100644 --- a/documentation/src/document_lasr/iter_pkg.py +++ b/documentation/src/document_lasr/iter_pkg.py @@ -3,46 +3,51 @@ FWD = os.path.dirname(os.path.realpath(__file__)) + # Scan upwards to find the workspace root def is_root(path): - return os.path.exists(os.path.join(path, 'build')) \ - and os.path.exists(os.path.join(path, 'devel')) + return os.path.exists(os.path.join(path, "build")) and os.path.exists( + os.path.join(path, "devel") + ) + WORKSPACE_ROOT = FWD -while not is_root(WORKSPACE_ROOT) \ - and os.path.dirname(WORKSPACE_ROOT) != WORKSPACE_ROOT: +while not is_root(WORKSPACE_ROOT) and os.path.dirname(WORKSPACE_ROOT) != WORKSPACE_ROOT: WORKSPACE_ROOT = os.path.abspath(os.path.join(WORKSPACE_ROOT, "..")) if not is_root(WORKSPACE_ROOT): raise Exception("Could not find workspace root, have you run `catkin build` yet?") -SRC_DIR = os.path.join(WORKSPACE_ROOT, 'src') +SRC_DIR = os.path.join(WORKSPACE_ROOT, "src") + +print("Workspace root:", WORKSPACE_ROOT) -print('Workspace root:', WORKSPACE_ROOT) # List out all packages in workspace def pkg_list(): packages = [] for root, dirs, files in os.walk(SRC_DIR): for file in files: - if file == 'package.xml': + if file == "package.xml": fullname = os.path.join(root, file) packages.append(root) - + return packages + # Check if this is a LASR package def is_lasr_pkg(path): - relative_path = path[len(SRC_DIR):].lower() + relative_path = path[len(SRC_DIR) :].lower() pure_path = PurePath(relative_path) parts = list(pure_path.parts) # Remove root component if parts[0] == os.sep: parts.pop(0) - + # Guess if this is part of LASR - return parts[0] in ['base', 'lasr-base', 'lasr_base'] + return parts[0] in ["base", "lasr-base", "lasr_base"] + # List out all LASR packages def pkg_lasr_list(): diff --git a/documentation/src/document_lasr/web.py b/documentation/src/document_lasr/web.py index 14b8d76e1..f2728d24a 100644 --- a/documentation/src/document_lasr/web.py +++ b/documentation/src/document_lasr/web.py @@ -5,12 +5,13 @@ r = rospkg.RosPack() + def configure_web(): # Determine dest folder - PKG = r.get_path('documentation') - WEB_DIR = os.path.abspath(os.path.join(PKG, 'web')) - DOC_DIR = os.path.join(WEB_DIR, 'docs', 'packages') - TASK_DIR = os.path.join(WEB_DIR, 'docs', 'tasks') + PKG = r.get_path("documentation") + WEB_DIR = os.path.abspath(os.path.join(PKG, "web")) + DOC_DIR = os.path.join(WEB_DIR, "docs", "packages") + TASK_DIR = os.path.join(WEB_DIR, "docs", "tasks") for DIR in [DOC_DIR, TASK_DIR]: # Clear files @@ -22,15 +23,21 @@ def configure_web(): # Copy README files for pkg in document_lasr.pkg_lasr_list(): - README = os.path.join(pkg, 'README.md') + README = os.path.join(pkg, "README.md") if os.path.exists(README): - shutil.copyfile(README, os.path.join(TASK_DIR if '/tasks/' in pkg else DOC_DIR, os.path.basename(pkg) + '.md')) + shutil.copyfile( + README, + os.path.join( + TASK_DIR if "/tasks/" in pkg else DOC_DIR, + os.path.basename(pkg) + ".md", + ), + ) # Write category file for packages - with open(os.path.join(DOC_DIR, '_category_.json'), 'w') as f: + with open(os.path.join(DOC_DIR, "_category_.json"), "w") as f: f.write('{"label":"Packages","position":4}') - - with 
open(os.path.join(TASK_DIR, '_category_.json'), 'w') as f: + + with open(os.path.join(TASK_DIR, "_category_.json"), "w") as f: f.write('{"label":"Tasks","position":3}') # Change directory diff --git a/legacy/aruco_service/scripts/GenerateTableCuboid.py b/legacy/aruco_service/scripts/GenerateTableCuboid.py index cc02598d0..02dd1aaa0 100755 --- a/legacy/aruco_service/scripts/GenerateTableCuboid.py +++ b/legacy/aruco_service/scripts/GenerateTableCuboid.py @@ -15,15 +15,23 @@ # The marker should be placed on the bottom left corner of the table, with the x axis pointing to the right and the y axis pointing up # Use 0 to inf for tables, use -1 for counter, use -2 for waiting area + def get_transform_to_marker(from_frame, to_frame): tf_buffer = tf2_ros.Buffer() tf_listener = tf2_ros.TransformListener(tf_buffer) try: - t = tf_buffer.lookup_transform(to_frame, from_frame, rospy.Time(0), rospy.Duration(5)) + t = tf_buffer.lookup_transform( + to_frame, from_frame, rospy.Time(0), rospy.Duration(5) + ) return t - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException): + except ( + tf2_ros.LookupException, + tf2_ros.ConnectivityException, + tf2_ros.ExtrapolationException, + ): raise + def get_map_frame_pose(input_xy, transform): ps = PointStamped() ps.point.x = input_xy[0] @@ -34,6 +42,7 @@ def get_map_frame_pose(input_xy, transform): tr_point = tf2_geometry_msgs.do_transform_point(ps, transform) return (tr_point.point.x, tr_point.point.y) + def create_marker_msg(point, idx): marker_msg = Marker() marker_msg.header.frame_id = "map" @@ -52,6 +61,7 @@ def create_marker_msg(point, idx): marker_msg.color.a = 1.0 return marker_msg + def generate_cuboid(msg): table = msg.table long_side = msg.long_side @@ -60,22 +70,23 @@ def generate_cuboid(msg): is_rect = msg.is_rect radius = msg.radius seconds = rospy.get_time() - objects_marker_pub = rospy.Publisher("/table/objects_cuboid", Marker, queue_size= 4 if is_rect else 8) - persons_marker_pub = rospy.Publisher("/table/persons_cuboid", Marker, queue_size=4 if is_rect else 8) + objects_marker_pub = rospy.Publisher( + "/table/objects_cuboid", Marker, queue_size=4 if is_rect else 8 + ) + persons_marker_pub = rospy.Publisher( + "/table/persons_cuboid", Marker, queue_size=4 if is_rect else 8 + ) if latest_pose is not None and (seconds - latest_pose.header.stamp.secs < 1.0): - tr = get_transform_to_marker("aruco_marker_frame", "map") if is_rect: - # THESE POINTS ARE IN MARKER COORDINATE FRAME [LOCATION OF ARUCO MARKER IS (0,0) IN THIS FRAME] - local_corner_1 = [0, 0] + local_corner_1 = [0, 0] local_corner_2 = [long_side, 0] local_corner_3 = [long_side, short_side] local_corner_4 = [0, short_side] - # TRANSFORM TO MAP FRAME corner_1 = get_map_frame_pose(local_corner_1, tr) corner_2 = get_map_frame_pose(local_corner_2, tr) @@ -87,12 +98,23 @@ def generate_cuboid(msg): objects_marker_pub.publish(create_marker_msg(corner_3, 2)) objects_marker_pub.publish(create_marker_msg(corner_4, 3)) - if table >= 0: - - local_padded_corner_1 = [local_corner_1[0] - padding, local_corner_1[1] - padding] - local_padded_corner_2 = [local_corner_2[0] + padding, local_corner_2[1] - padding] - local_padded_corner_3 = [local_corner_3[0] + padding, local_corner_3[1] + padding] - local_padded_corner_4 = [local_corner_4[0] - padding, local_corner_4[1] + padding] + if table >= 0: + local_padded_corner_1 = [ + local_corner_1[0] - padding, + local_corner_1[1] - padding, + ] + local_padded_corner_2 = [ + local_corner_2[0] + padding, + local_corner_2[1] - 
padding, + ] + local_padded_corner_3 = [ + local_corner_3[0] + padding, + local_corner_3[1] + padding, + ] + local_padded_corner_4 = [ + local_corner_4[0] - padding, + local_corner_4[1] + padding, + ] padded_corner_1 = get_map_frame_pose(local_padded_corner_1, tr) padded_corner_2 = get_map_frame_pose(local_padded_corner_2, tr) @@ -104,20 +126,35 @@ def generate_cuboid(msg): persons_marker_pub.publish(create_marker_msg(padded_corner_3, 2)) persons_marker_pub.publish(create_marker_msg(padded_corner_4, 3)) - rospy.set_param("/tables/table" + str(table) + "/objects_cuboid", [corner_1, corner_2, corner_3, corner_4]) - rospy.set_param("/tables/table" + str(table) + "/persons_cuboid", [padded_corner_1, padded_corner_2, padded_corner_3, padded_corner_4]) + rospy.set_param( + "/tables/table" + str(table) + "/objects_cuboid", + [corner_1, corner_2, corner_3, corner_4], + ) + rospy.set_param( + "/tables/table" + str(table) + "/persons_cuboid", + [ + padded_corner_1, + padded_corner_2, + padded_corner_3, + padded_corner_4, + ], + ) now = str(datetime.datetime.now()) rospy.set_param("/tables/table" + str(table) + "/last_updated", now) rospy.loginfo("Cuboid for table %d saved to parameter server", table) elif table == -1: - rospy.set_param("/counter/cuboid", [corner_1, corner_2, corner_3, corner_4]) + rospy.set_param( + "/counter/cuboid", [corner_1, corner_2, corner_3, corner_4] + ) now = str(datetime.datetime.now()) rospy.set_param("/counter/last_updated", now) rospy.loginfo("Cuboid for the counter saved to parameter server") elif table == -2: - rospy.set_param("/wait/cuboid", [corner_1, corner_2, corner_3, corner_4]) + rospy.set_param( + "/wait/cuboid", [corner_1, corner_2, corner_3, corner_4] + ) now = str(datetime.datetime.now()) rospy.set_param("/wait/last_updated", now) rospy.loginfo("Cuboid for the waiting area saved to parameter server") @@ -128,23 +165,23 @@ def generate_cuboid(msg): # Dump rosparams to file data = { - 'tables': rosparam.get_param('/tables'), - 'counter': rosparam.get_param('/counter'), - 'wait': rosparam.get_param('/wait') + "tables": rosparam.get_param("/tables"), + "counter": rosparam.get_param("/counter"), + "wait": rosparam.get_param("/wait"), } - with open(rosparam.get_param('/config_path'), 'w') as file: + with open(rosparam.get_param("/config_path"), "w") as file: yaml.dump(data, file) else: # assume circular, and aruco marker is placed in the centre of the table local_r1 = [-radius, 0] - local_r2 = [(sqrt(2.)*-radius)/2., (sqrt(2.)*+radius)/2.] + local_r2 = [(sqrt(2.0) * -radius) / 2.0, (sqrt(2.0) * +radius) / 2.0] local_r3 = [0, +radius] - local_r4 = [(sqrt(2.)*+radius)/2., (sqrt(2.)*+radius)/2.] + local_r4 = [(sqrt(2.0) * +radius) / 2.0, (sqrt(2.0) * +radius) / 2.0] local_r5 = [+radius, 0] - local_r6 = [(sqrt(2.)*+radius)/2., (sqrt(2.)*-radius)/2.] + local_r6 = [(sqrt(2.0) * +radius) / 2.0, (sqrt(2.0) * -radius) / 2.0] local_r7 = [0, -radius] - local_r8 = [(sqrt(2.)*-radius)/2., (sqrt(2.)*-radius)/2.] 
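
For reference, the eight local_r* points listed here sit on a circle of the given radius around the marker: the four cardinal points plus four diagonals at ±sqrt(2)·radius/2; the reformatting only normalises the float literals. A minimal sketch of the same construction, assuming numpy (not part of this package):

    import numpy as np

    def circle_octagon(radius):
        # Angles matching local_r1..local_r8: 180, 135, 90, 45, 0, -45, -90, -135 deg.
        angles = np.deg2rad([180, 135, 90, 45, 0, -45, -90, -135])
        # Up to floating-point rounding, 135 deg reproduces local_r2:
        # (-radius * sqrt(2) / 2, +radius * sqrt(2) / 2).
        return [(radius * np.cos(a), radius * np.sin(a)) for a in angles]
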
+ local_r8 = [(sqrt(2.0) * -radius) / 2.0, (sqrt(2.0) * -radius) / 2.0] r1 = get_map_frame_pose(local_r1, tr) r2 = get_map_frame_pose(local_r2, tr) @@ -191,34 +228,43 @@ def generate_cuboid(msg): persons_marker_pub.publish(create_marker_msg(r7p, 6)) persons_marker_pub.publish(create_marker_msg(r8p, 7)) - - rospy.set_param("/tables/table" + str(table) + "/objects_cuboid", [r1, r2, r3, r4, r5, r6, r7, r8]) - rospy.set_param("/tables/table" + str(table) + "/persons_cuboid", [r1p, r2p, r3p, r4p, r5p, r6p, r7p, r8p]) + rospy.set_param( + "/tables/table" + str(table) + "/objects_cuboid", + [r1, r2, r3, r4, r5, r6, r7, r8], + ) + rospy.set_param( + "/tables/table" + str(table) + "/persons_cuboid", + [r1p, r2p, r3p, r4p, r5p, r6p, r7p, r8p], + ) now = str(datetime.datetime.now()) rospy.set_param("/tables/table" + str(table) + "/last_updated", now) rospy.loginfo("Cuboid for table %d saved to parameter server", table) return GenerateTableCuboidResponse(True) else: - rospy.logerr("No pose data available to generate cuboid for table %d, please check if the marker is on the table", table) + rospy.logerr( + "No pose data available to generate cuboid for table %d, please check if the marker is on the table", + table, + ) return GenerateTableCuboidResponse(False) + def get_latest_pose(msg): global latest_pose latest_pose = msg -if __name__ == "__main__": +if __name__ == "__main__": rospy.init_node("generate_table_cuboid") sub = rospy.Subscriber("/aruco_single/pose", PoseStamped, get_latest_pose) s = rospy.Service("generate_table_cuboid", GenerateTableCuboid, generate_cuboid) if rospy.has_param("/config_path"): - els = rosparam.load_file(rosparam.get_param('/config_path')) + els = rosparam.load_file(rosparam.get_param("/config_path")) for param, ns in els: rosparam.upload_params(ns, param) - + rospy.loginfo("Cuboid Generator Service Ready") - + while not rospy.is_shutdown(): - rospy.sleep(1.0) \ No newline at end of file + rospy.sleep(1.0) diff --git a/legacy/aruco_service/scripts/publish_points_of_table.py b/legacy/aruco_service/scripts/publish_points_of_table.py index 0008ad272..85964855b 100755 --- a/legacy/aruco_service/scripts/publish_points_of_table.py +++ b/legacy/aruco_service/scripts/publish_points_of_table.py @@ -4,6 +4,7 @@ from aruco_service.srv import PublishTablePoints, PublishTablePointsResponse from visualization_msgs.msg import Marker + def create_marker_msg(point, idx): marker_msg = Marker() marker_msg.header.frame_id = "map" @@ -22,28 +23,24 @@ def create_marker_msg(point, idx): marker_msg.color.a = 1.0 return marker_msg + def publish_points(number): table = number.table objects_marker_pub = rospy.Publisher("/table/objects_cuboid", Marker, queue_size=8) persons_marker_pub = rospy.Publisher("/table/persons_cuboid", Marker, queue_size=8) if table >= 0: - obj = rospy.get_param("/tables/table" + str(table) + "/objects_cuboid") - - for i, p in enumerate(obj): + for i, p in enumerate(obj): objects_marker_pub.publish(create_marker_msg(p, i)) per = rospy.get_param("/tables/table" + str(table) + "/persons_cuboid") - for i, p in enumerate(per): - persons_marker_pub.publish(create_marker_msg(p, i)) elif table == -1: - cuboid = rospy.get_param("/counter/cuboid") objects_marker_pub.publish(create_marker_msg(cuboid[0], 0)) @@ -59,17 +56,16 @@ def publish_points(number): objects_marker_pub.publish(create_marker_msg(cuboid[2], 2)) objects_marker_pub.publish(create_marker_msg(cuboid[3], 3)) - rospy.loginfo("Published points for table " + str(table)) return PublishTablePointsResponse(True) -if __name__ == 
"__main__": +if __name__ == "__main__": rospy.init_node("point_publisher") s = rospy.Service("publish_table_points", PublishTablePoints, publish_points) - + rospy.loginfo("Point Publisher Service Ready") - + while not rospy.is_shutdown(): - rospy.sleep(1.0) \ No newline at end of file + rospy.sleep(1.0) diff --git a/legacy/aruco_service/scripts/save_navigation_points.py b/legacy/aruco_service/scripts/save_navigation_points.py index b0e0cfb8e..5dbd7560a 100755 --- a/legacy/aruco_service/scripts/save_navigation_points.py +++ b/legacy/aruco_service/scripts/save_navigation_points.py @@ -6,6 +6,7 @@ from geometry_msgs.msg import PoseWithCovarianceStamped import rosparam + def save_navigation_points(number): table = number.table @@ -13,36 +14,68 @@ def save_navigation_points(number): orientation = latest_pose.pose.pose.orientation if table >= 0: - rospy.set_param("/tables/table" + str(table) + "/location/position", {"x": position.x, "y": position.y, "z": 0.}) - rospy.set_param("/tables/table" + str(table) + "/location/orientation", {"x": orientation.x, "y": orientation.y, "z": orientation.z, "w": orientation.w}) + rospy.set_param( + "/tables/table" + str(table) + "/location/position", + {"x": position.x, "y": position.y, "z": 0.0}, + ) + rospy.set_param( + "/tables/table" + str(table) + "/location/orientation", + { + "x": orientation.x, + "y": orientation.y, + "z": orientation.z, + "w": orientation.w, + }, + ) rospy.loginfo("Navigation point for table %d saved to parameter server", table) elif table == -1: - rospy.set_param("/counter/location/position", {"x": position.x, "y": position.y, "z": 0.}) - rospy.set_param("counter/location/orientation", {"x": orientation.x, "y": orientation.y, "z": orientation.z, "w": orientation.w}) + rospy.set_param( + "/counter/location/position", {"x": position.x, "y": position.y, "z": 0.0} + ) + rospy.set_param( + "counter/location/orientation", + { + "x": orientation.x, + "y": orientation.y, + "z": orientation.z, + "w": orientation.w, + }, + ) rospy.loginfo("Navigation point for the counter saved to parameter server") elif table == -2: - rospy.set_param("/wait/location/position", {"x": position.x, "y": position.y, "z": 0.}) - rospy.set_param("wait/location/orientation", {"x": orientation.x, "y": orientation.y, "z": orientation.z, "w": orientation.w}) + rospy.set_param( + "/wait/location/position", {"x": position.x, "y": position.y, "z": 0.0} + ) + rospy.set_param( + "wait/location/orientation", + { + "x": orientation.x, + "y": orientation.y, + "z": orientation.z, + "w": orientation.w, + }, + ) rospy.loginfo("Navigation point for the waiting area saved to parameter server") else: rospy.loginfo("Invalid table number.") return SaveNavigationPointResponse(False) - -# Dump rosparams to file + + # Dump rosparams to file data = { - 'tables': rosparam.get_param('/tables'), - 'counter': rosparam.get_param('/counter'), - 'wait': rosparam.get_param('/wait') + "tables": rosparam.get_param("/tables"), + "counter": rosparam.get_param("/counter"), + "wait": rosparam.get_param("/wait"), } - with open(rosparam.get_param("/config_path"), 'w') as file: + with open(rosparam.get_param("/config_path"), "w") as file: yaml.dump(data, file) return SaveNavigationPointResponse(True) + def save_pre_navigation_points(number): table = number.table @@ -50,37 +83,53 @@ def save_pre_navigation_points(number): orientation = latest_pose.pose.pose.orientation if table >= 0: - rospy.set_param("/tables/table" + str(table) + "/pre_location/position", {"x": position.x, "y": position.y, "z": 0.}) - 
rospy.set_param("/tables/table" + str(table) + "/pre_location/orientation", {"x": orientation.x, "y": orientation.y, "z": orientation.z, "w": orientation.w}) - rospy.loginfo("Pre-navigation point for table %d saved to parameter server", table) + rospy.set_param( + "/tables/table" + str(table) + "/pre_location/position", + {"x": position.x, "y": position.y, "z": 0.0}, + ) + rospy.set_param( + "/tables/table" + str(table) + "/pre_location/orientation", + { + "x": orientation.x, + "y": orientation.y, + "z": orientation.z, + "w": orientation.w, + }, + ) + rospy.loginfo( + "Pre-navigation point for table %d saved to parameter server", table + ) else: rospy.loginfo("Invalid table number.") return SaveNavigationPointResponse(False) - + # Dump rosparams to file data = { - 'tables': rosparam.get_param('/tables'), - 'counter': rosparam.get_param('/counter'), - 'wait': rosparam.get_param('/wait') + "tables": rosparam.get_param("/tables"), + "counter": rosparam.get_param("/counter"), + "wait": rosparam.get_param("/wait"), } - with open(rosparam.get_param("/config_path"), 'w') as file: + with open(rosparam.get_param("/config_path"), "w") as file: yaml.dump(data, file) return SaveNavigationPointResponse(True) + def get_latest_pose(msg): global latest_pose latest_pose = msg -if __name__ == "__main__": +if __name__ == "__main__": rospy.init_node("save_navigation_points") sub = rospy.Subscriber("/amcl_pose", PoseWithCovarianceStamped, get_latest_pose) rospy.Service("save_navigation_points", SaveNavigationPoint, save_navigation_points) - rospy.Service("save_pre_navigation_points", SaveNavigationPoint, save_pre_navigation_points) + rospy.Service( + "save_pre_navigation_points", SaveNavigationPoint, save_pre_navigation_points + ) rospy.loginfo("Navigation Points Saver Ready") - + while not rospy.is_shutdown(): - rospy.sleep(1.0) \ No newline at end of file + rospy.sleep(1.0) diff --git a/legacy/choosing_wait_position/setup.py b/legacy/choosing_wait_position/setup.py index cb116ec82..acc0ea5d2 100644 --- a/legacy/choosing_wait_position/setup.py +++ b/legacy/choosing_wait_position/setup.py @@ -2,8 +2,7 @@ from catkin_pkg.python_setup import generate_distutils_setup d = generate_distutils_setup( - packages=['choosing_wait_position'], - package_dir={'': 'src'} + packages=["choosing_wait_position"], package_dir={"": "src"} ) setup(**d) diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/common_tools.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/common_tools.py index a2f02e444..3ac6a978c 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/common_tools.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/common_tools.py @@ -11,14 +11,30 @@ def train_transform(): - return A.Compose([ - A.Sequential([ - A.RandomRotate90(p=1), # Random rotation of an image by 90 degrees zero or more times - A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, brightness_by_max=True, always_apply=False, p=1), # Random change of brightness & contrast - ], p=1) - ], - keypoint_params=A.KeypointParams(format='xy'), # More about keypoint formats used in albumentations library read at https://albumentations.ai/docs/getting_started/keypoints_augmentation/ - bbox_params=A.BboxParams(format='pascal_voc', label_fields=['bboxes_labels']) # Bboxes should have labels, read more here https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/ + return A.Compose( + [ + 
A.Sequential( + [ + A.RandomRotate90( + p=1 + ), # Random rotation of an image by 90 degrees zero or more times + A.RandomBrightnessContrast( + brightness_limit=0.3, + contrast_limit=0.3, + brightness_by_max=True, + always_apply=False, + p=1, + ), # Random change of brightness & contrast + ], + p=1, + ) + ], + keypoint_params=A.KeypointParams( + format="xy" + ), # More about keypoint formats used in albumentations library read at https://albumentations.ai/docs/getting_started/keypoints_augmentation/ + bbox_params=A.BboxParams( + format="pascal_voc", label_fields=["bboxes_labels"] + ), # Bboxes should have labels, read more here https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/ ) @@ -26,144 +42,195 @@ class ClassDataset(Dataset): def __init__(self, root, transform=None, demo=False): self.root = root self.transform = transform - self.demo = demo # Use demo=True if you need transformed and original images (for example, for visualization purposes) + self.demo = demo # Use demo=True if you need transformed and original images (for example, for visualization purposes) self.imgs_files = sorted(os.listdir(os.path.join(root, "images"))) self.annotations_files = sorted(os.listdir(os.path.join(root, "annotations"))) - def __getitem__(self,idx): + def __getitem__(self, idx): img_path = os.path.join(self.root, "images", self.imgs_files[idx]) - annotations_path = os.path.join(self.root, "annotations", self.annotations_files[idx]) + annotations_path = os.path.join( + self.root, "annotations", self.annotations_files[idx] + ) img_original = cv2.imread(img_path) img_original = cv2.cvtColor(img_original, cv2.COLOR_BGR2RGB) with open(annotations_path) as f: data = json.load(f) - bboxes_original = data['bboxes'] - keypoints_original = data['keypoints'] - + bboxes_original = data["bboxes"] + keypoints_original = data["keypoints"] + # All objects are ideal area - bboxes_labels_original = ['Ideal Pos' for _ in bboxes_original] + bboxes_labels_original = ["Ideal Pos" for _ in bboxes_original] - if self.transform: - # Converting keypoints from [x,y,visibility]-format to [x, y]-format + Flattening nested list of keypoints + if self.transform: + # Converting keypoints from [x,y,visibility]-format to [x, y]-format + Flattening nested list of keypoints # For example, if we have the following list of keypoints for three objects (each object has two keypoints): - # [[obj1_kp1, obj1_kp2], [obj2_kp1, obj2_kp2], [obj3_kp1, obj3_kp2]], where each keypoint is in [x, y]-format + # [[obj1_kp1, obj1_kp2], [obj2_kp1, obj2_kp2], [obj3_kp1, obj3_kp2]], where each keypoint is in [x, y]-format # Then we need to convert it to the following list: # [obj1_kp1, obj1_kp2, obj2_kp1, obj2_kp2, obj3_kp1, obj3_kp2] - keypoints_original_flattened = [el[0:2] for kp in keypoints_original for el in kp] - + keypoints_original_flattened = [ + el[0:2] for kp in keypoints_original for el in kp + ] + # Apply augmentations - transformed = self.transform(image=img_original, bboxes=bboxes_original, bboxes_labels=bboxes_labels_original, keypoints=keypoints_original_flattened) - img = transformed['image'] - bboxes = transformed['bboxes'] - + transformed = self.transform( + image=img_original, + bboxes=bboxes_original, + bboxes_labels=bboxes_labels_original, + keypoints=keypoints_original_flattened, + ) + img = transformed["image"] + bboxes = transformed["bboxes"] + # Unflattening list transformed['keypoints'] # For example, if we have the following list of keypoints for three objects (each object has two keypoints): # [obj1_kp1, 
obj1_kp2, obj2_kp1, obj2_kp2, obj3_kp1, obj3_kp2], where each keypoint is in [x, y]-format # Then we need to convert it to the following list: # [[obj1_kp1, obj1_kp2], [obj2_kp1, obj2_kp2], [obj3_kp1, obj3_kp2]] - keypoints_transformed_unflattened = np.reshape(np.array(transformed['keypoints']), (-1,2,2)).tolist() + keypoints_transformed_unflattened = np.reshape( + np.array(transformed["keypoints"]), (-1, 2, 2) + ).tolist() # Converting transformed keypoints from [x, y]-format to [x,y,visibility]-format by appending original visibilities to transformed coordinates of keypoints keypoints = [] - for o_idx, obj in enumerate(keypoints_transformed_unflattened): # Iterating over objects + for o_idx, obj in enumerate( + keypoints_transformed_unflattened + ): # Iterating over objects obj_keypoints = [] - for k_idx, kp in enumerate(obj): # Iterating over keypoints in each object + for k_idx, kp in enumerate( + obj + ): # Iterating over keypoints in each object # kp - coordinates of keypoint # keypoints_original[o_idx][k_idx][2] - original visibility of keypoint obj_keypoints.append(kp + [keypoints_original[o_idx][k_idx][2]]) keypoints.append(obj_keypoints) - - else: + else: img, bboxes, keypoints = img_original, bboxes_original, keypoints_original - bboxes = torch.as_tensor(bboxes, dtype=torch.float32) + bboxes = torch.as_tensor(bboxes, dtype=torch.float32) target = {} target["boxes"] = bboxes - target["labels"] = torch.as_tensor([1 for _ in bboxes], dtype=torch.int64) # all objects are glue tubes + target["labels"] = torch.as_tensor( + [1 for _ in bboxes], dtype=torch.int64 + ) # all objects are glue tubes target["image_id"] = torch.tensor([idx]) target["area"] = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0]) target["iscrowd"] = torch.zeros(len(bboxes), dtype=torch.int64) - target["keypoints"] = torch.as_tensor(keypoints, dtype=torch.float32) + target["keypoints"] = torch.as_tensor(keypoints, dtype=torch.float32) img = F.to_tensor(img) bboxes_original = torch.as_tensor(bboxes_original, dtype=torch.float32) target_original = {} target_original["boxes"] = bboxes_original - target_original["labels"] = torch.as_tensor([1 for _ in bboxes_original], dtype=torch.int64) # all objects are glue tubes + target_original["labels"] = torch.as_tensor( + [1 for _ in bboxes_original], dtype=torch.int64 + ) # all objects are glue tubes target_original["image_id"] = torch.tensor([idx]) - target_original["area"] = (bboxes_original[:, 3] - bboxes_original[:, 1]) * (bboxes_original[:, 2] - bboxes_original[:, 0]) - target_original["iscrowd"] = torch.zeros(len(bboxes_original), dtype=torch.int64) - target_original["keypoints"] = torch.as_tensor(keypoints_original, dtype=torch.float32) + target_original["area"] = (bboxes_original[:, 3] - bboxes_original[:, 1]) * ( + bboxes_original[:, 2] - bboxes_original[:, 0] + ) + target_original["iscrowd"] = torch.zeros( + len(bboxes_original), dtype=torch.int64 + ) + target_original["keypoints"] = torch.as_tensor( + keypoints_original, dtype=torch.float32 + ) img_original = F.to_tensor(img_original) if self.demo: return img, target, img_original, target_original else: return img, target - + def __len__(self): return len(self.imgs_files) - -def visualize(image, bboxes, keypoints, image_original=None, bboxes_original=None,keypoints_original=None): - fontsize = 18 +def visualize( + image, + bboxes, + keypoints, + image_original=None, + bboxes_original=None, + keypoints_original=None, +): + fontsize = 18 for bbox in bboxes: start_point = (bbox[0], bbox[1]) end_point = 
(bbox[2], bbox[3]) - image = cv2.rectangle(image.copy(), start_point, end_point, (0,255,0), 2) - + image = cv2.rectangle(image.copy(), start_point, end_point, (0, 255, 0), 2) + for kps in keypoints: for idx, kp in enumerate(kps): - image = cv2.circle(image.copy(), tuple(kp), 5, (255,0,0), 10) - image = cv2.putText(image.copy(), " " + keypoints_classes_ids2names[idx], tuple(kp), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,0,0), 3, cv2.LINE_AA) - + image = cv2.circle(image.copy(), tuple(kp), 5, (255, 0, 0), 10) + image = cv2.putText( + image.copy(), + " " + keypoints_classes_ids2names[idx], + tuple(kp), + cv2.FONT_HERSHEY_SIMPLEX, + 2, + (255, 0, 0), + 3, + cv2.LINE_AA, + ) if image_original is None and keypoints_original is None: - plt.figure(figsize=(40,40)) + plt.figure(figsize=(40, 40)) plt.imshow(image) plt.show() - else: + else: for bbox in bboxes_original: start_point = (bbox[0], bbox[1]) end_point = (bbox[2], bbox[3]) - image_original = cv2.rectangle(image_original.copy(), start_point, end_point, (0,255,0), 2) + image_original = cv2.rectangle( + image_original.copy(), start_point, end_point, (0, 255, 0), 2 + ) for kps in keypoints_original: for idx, kp in enumerate(kps): - image_original = cv2.circle(image_original, tuple(kp), 5, (255,0,0), 10) - image_original = cv2.putText(image_original, " " + keypoints_classes_ids2names[idx], tuple(kp), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,0,0), 3, cv2.LINE_AA) - + image_original = cv2.circle( + image_original, tuple(kp), 5, (255, 0, 0), 10 + ) + image_original = cv2.putText( + image_original, + " " + keypoints_classes_ids2names[idx], + tuple(kp), + cv2.FONT_HERSHEY_SIMPLEX, + 2, + (255, 0, 0), + 3, + cv2.LINE_AA, + ) f, ax = plt.subplots(1, 2, figsize=(40, 20)) ax[0].imshow(image_original) - ax[0].set_title('Original image', fontsize=fontsize) + ax[0].set_title("Original image", fontsize=fontsize) ax[1].imshow(image) - ax[1].set_title('Transformed image', fontsize=fontsize) + ax[1].set_title("Transformed image", fontsize=fontsize) plt.show() def get_model(num_keypoints, weights_path=None): - - anchor_generator = AnchorGenerator(sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.25, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0)) - model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=False, - pretrained_backbone=True, - num_keypoints=num_keypoints, - num_classes = 2, # Background is the first class, object is the second class - rpn_anchor_generator=anchor_generator) + anchor_generator = AnchorGenerator( + sizes=(32, 64, 128, 256, 512), + aspect_ratios=(0.25, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0), + ) + model = torchvision.models.detection.keypointrcnn_resnet50_fpn( + pretrained=False, + pretrained_backbone=True, + num_keypoints=num_keypoints, + num_classes=2, # Background is the first class, object is the second class + rpn_anchor_generator=anchor_generator, + ) if weights_path: state_dict = torch.load(weights_path) - model.load_state_dict(state_dict) - - return model - - + model.load_state_dict(state_dict) + return model diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/createlabels.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/createlabels.py index 6faca81cd..2a489fdef 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/createlabels.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/createlabels.py @@ -1,105 +1,118 @@ -import json -import os -import cv2 -import matplotlib.pyplot as plt +import json +import os 
+import cv2 +import matplotlib.pyplot as plt -def converter(file_labels,file_image): +def converter(file_labels, file_image): img = cv2.imread(file_image) img_w, img_h = img.shape[1], img.shape[0] - with open(file_labels_example) as f: + with open(file_labels_example) as f: lines_txt = f.readlines() - lines = [] + lines = [] for line in lines_txt: - lines.append([int(line.split()[0])] + [round(float(el), 5) for el in line.split()[1:]]) + lines.append( + [int(line.split()[0])] + + [round(float(el), 5) for el in line.split()[1:]] + ) - - bboxes = [] + bboxes = [] for line in lines: - x_c, y_c, w, h = round(line[1] * img_w), round(line[2] * img_h), round(line[3] * img_w), round(line[4] * img_h) - bboxes.append([round(x_c - w/2), round(y_c - h/2), round(x_c + w/2), round(y_c + h/2)]) - - x1, y1, x2, y2, = bboxes[-1] + x_c, y_c, w, h = ( + round(line[1] * img_w), + round(line[2] * img_h), + round(line[3] * img_w), + round(line[4] * img_h), + ) + bboxes.append( + [ + round(x_c - w / 2), + round(y_c - h / 2), + round(x_c + w / 2), + round(y_c + h / 2), + ] + ) + + ( + x1, + y1, + x2, + y2, + ) = bboxes[-1] keypoints = [] - keypoint = (round((x1 + x2)/2), round((y1+y2)/2),0) + keypoint = (round((x1 + x2) / 2), round((y1 + y2) / 2), 0) keypoints.append(keypoint) - - return bboxes,keypoints - + return bboxes, keypoints def dump2json(bboxes, keypoints, file_json): annotations = {} - annotations['bboxes'], annotations['keypoints'] = bboxes, keypoints + annotations["bboxes"], annotations["keypoints"] = bboxes, keypoints with open(file_json, "w") as f: json.dump(annotations, f) +# Example -#Example - -file_image_example = 'data/train/images/cropped1ezgif-frame-039_jpg.rf.d24e6236c1b349c58e5152fa7079f151.jpg' +file_image_example = "data/train/images/cropped1ezgif-frame-039_jpg.rf.d24e6236c1b349c58e5152fa7079f151.jpg" img = cv2.imread(file_image_example) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) -plt.figure(figsize=(15,15)) +plt.figure(figsize=(15, 15)) plt.imshow(img) -#plt.show() +# plt.show() -file_labels_example = 'data/train/labels/cropped1ezgif-frame-039_jpg.rf.d24e6236c1b349c58e5152fa7079f151.txt' +file_labels_example = "data/train/labels/cropped1ezgif-frame-039_jpg.rf.d24e6236c1b349c58e5152fa7079f151.txt" -bboxes,keypoints = converter(file_labels_example, file_image_example) +bboxes, keypoints = converter(file_labels_example, file_image_example) print("Bboxes:", bboxes) for bbox_idx, bbox in enumerate(bboxes): - top_left_corner = tuple([bbox[0],bbox[1]]) - bottom_right_corner = tuple([bbox[2],bbox[3]]) - img = cv2.rectangle(img, top_left_corner,bottom_right_corner,(0,255,0),2) - + top_left_corner = tuple([bbox[0], bbox[1]]) + bottom_right_corner = tuple([bbox[2], bbox[3]]) + img = cv2.rectangle(img, top_left_corner, bottom_right_corner, (0, 255, 0), 2) c1, c2, c3 = keypoints[bbox_idx] print("c1: ", c1, "c2: ", c2) - img = cv2.circle(img, (round(c1),round(c2)), 2, (255,0,0),4) + img = cv2.circle(img, (round(c1), round(c2)), 2, (255, 0, 0), 4) -plt.figure(figsize=(15,15)) +plt.figure(figsize=(15, 15)) plt.imshow(img) plt.show() +# Actual Labelling -#Actual Labelling - -IMAGES = 'data/test/images' -LABELS = 'data/test/labels' -ANNOTATIONS = 'data/test/annotations' +IMAGES = "data/test/images" +LABELS = "data/test/labels" +ANNOTATIONS = "data/test/annotations" -files_names = [file.split('.jpg')[0] for file in os.listdir(IMAGES)] +files_names = [file.split(".jpg")[0] for file in os.listdir(IMAGES)] for file in files_names: file_labels = os.path.join(LABELS, file + ".txt") file_image = 
os.path.join(IMAGES, file + ".jpg") bboxes, keypoints = converter(file_labels, file_image) - dump2json(bboxes,keypoints,os.path.join(ANNOTATIONS,file + '.json')) + dump2json(bboxes, keypoints, os.path.join(ANNOTATIONS, file + ".json")) -IMAGES = 'data/valid/images' -LABELS = 'data/valid/labels' -ANNOTATIONS = 'data/valid/annotations' +IMAGES = "data/valid/images" +LABELS = "data/valid/labels" +ANNOTATIONS = "data/valid/annotations" -files_names = [file.split('.jpg')[0] for file in os.listdir(IMAGES)] +files_names = [file.split(".jpg")[0] for file in os.listdir(IMAGES)] for file in files_names: file_labels = os.path.join(LABELS, file + ".txt") file_image = os.path.join(IMAGES, file + ".jpg") bboxes, keypoints = converter(file_labels, file_image) - dump2json(bboxes,keypoints,os.path.join(ANNOTATIONS,file + '.json')) - + dump2json(bboxes, keypoints, os.path.join(ANNOTATIONS, file + ".json")) diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/generate_wait_box.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/generate_wait_box.py index 8f3f33622..8599bc6af 100644 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/generate_wait_box.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/generate_wait_box.py @@ -1,43 +1,37 @@ -import numpy as np +import numpy as np WAITING_AREA_LONG_SIDE = 1.2 WAITING_AREA_SHORT_SIDE = 0.6 -def generate_lift_wait_box(corner_zero,area): - x,y = corner_zero - #This is the corner that if you were facing the lift, would be the left corner of the lift. +def generate_lift_wait_box(corner_zero, area): + x, y = corner_zero + # This is the corner that if you were facing the lift, would be the left corner of the lift. 
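
Both branches of generate_lift_wait_box below build the same hard-coded rectangle, roughly 3.5 m along the lift front and 2.5-2.7 m deep, extending from corner_zero, so the two area names currently behave identically. A minimal usage sketch, mirroring the example call at the bottom of this file:

    # Left corner of the lift in the map frame (sample value from this file).
    corner = [4.10421085358, -0.823522567749]
    wait_box, see_wait_area_pos = generate_lift_wait_box(corner, "606_lift")
    # wait_box: 4x2 array of map-frame corners of the waiting area;
    # see_wait_area_pos: a point from which the robot can observe that area.
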
- - wait_box = np.zeros((4,2)) + wait_box = np.zeros((4, 2)) print(wait_box) - see_wait_area_pos = [0,0] - - if (area=='606_lift'): - wait_box[0] = [x,y] - wait_box[1] = [x-3.5,y] - wait_box[2] = [x-3.5,y+2.7] - wait_box[3] = [x,y+2.5] + see_wait_area_pos = [0, 0] - see_wait_area_pos = [x,y+2.5] - elif(area=='6th floor lift'): - wait_box[0] = [x,y] - wait_box[1] = [x-3.5,y] - wait_box[2] = [x-3.5,y+2.7] - wait_box[3] = [x,y+2.5] + if area == "606_lift": + wait_box[0] = [x, y] + wait_box[1] = [x - 3.5, y] + wait_box[2] = [x - 3.5, y + 2.7] + wait_box[3] = [x, y + 2.5] + see_wait_area_pos = [x, y + 2.5] + elif area == "6th floor lift": + wait_box[0] = [x, y] + wait_box[1] = [x - 3.5, y] + wait_box[2] = [x - 3.5, y + 2.7] + wait_box[3] = [x, y + 2.5] - return wait_box, see_wait_area_pos +corner = [4.10421085358, -0.823522567749] - -corner = [4.10421085358,-0.823522567749] - - -wait_box,see_wait_area_pos = generate_lift_wait_box(corner,'606_lift') +wait_box, see_wait_area_pos = generate_lift_wait_box(corner, "606_lift") print(wait_box) print(see_wait_area_pos) diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/predict_pos.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/predict_pos.py index 12ba7d975..af8ae4b17 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/predict_pos.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/predict_pos.py @@ -17,12 +17,12 @@ FWD = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.abspath(os.path.join(FWD, "../src/choosing_wait_position"))) -os.chdir(os.path.abspath(os.path.join(FWD, 'models'))) +os.chdir(os.path.abspath(os.path.join(FWD, "models"))) def make_prediction(image_path): device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - model = get_model(1, weights_path='keypointsrcnn_weights.pth') + model = get_model(1, weights_path="keypointsrcnn_weights.pth") origin_list = glob.glob(image_path) print(image_path) @@ -33,7 +33,7 @@ def make_prediction(image_path): for i, img_name in enumerate(origin_list): original_img = Image.open(img_name) - plt.imshow(original_img, cmap='gray') + plt.imshow(original_img, cmap="gray") if PLOT_SHOW: plt.show() @@ -76,13 +76,14 @@ def visualise_predictions(image, bbox, keypoint): except Exception as e: print(e) - plt.imshow(image, cmap='gray', aspect='auto') - plt.title('visualisation of the prediction') + plt.imshow(image, cmap="gray", aspect="auto") + plt.title("visualisation of the prediction") if PLOT_SHOW: plt.show() if PLOT_SAVE: plt.savefig(DEBUG_PATH + "/predict_pos_viz_test_test" + str(TEST) + ".jpg") + # from geometry_msgs.msg import PoseWithCovarianceStamped, Pose, Point, Quaternion # from tiago_controllers.controllers.controllers import Controllers # diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/coco_eval.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/coco_eval.py index ec0709c5d..b94a064ef 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/coco_eval.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/coco_eval.py @@ -43,7 +43,9 @@ def update(self, predictions): def synchronize_between_processes(self): for iou_type in self.iou_types: self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) - 
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) + create_common_coco_eval( + self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type] + ) def accumulate(self): for coco_eval in self.coco_eval.values(): @@ -103,7 +105,10 @@ def prepare_for_coco_segmentation(self, predictions): labels = prediction["labels"].tolist() rles = [ - mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] for mask in masks + mask_util.encode( + np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F") + )[0] + for mask in masks ] for rle in rles: rle["counts"] = rle["counts"].decode("utf-8") @@ -188,4 +193,6 @@ def create_common_coco_eval(coco_eval, img_ids, eval_imgs): def evaluate(imgs): with redirect_stdout(io.StringIO()): imgs.evaluate() - return imgs.params.imgIds, np.asarray(imgs.evalImgs).reshape(-1, len(imgs.params.areaRng), len(imgs.params.imgIds)) + return imgs.params.imgIds, np.asarray(imgs.evalImgs).reshape( + -1, len(imgs.params.areaRng), len(imgs.params.imgIds) + ) diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/coco_utils.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/coco_utils.py index a65660286..6117c4c04 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/coco_utils.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/coco_utils.py @@ -220,8 +220,14 @@ def __getitem__(self, idx): def get_coco(root, image_set, transforms, mode="instances"): anno_file_template = "{}_{}2017.json" PATHS = { - "train": ("train2017", os.path.join("annotations", anno_file_template.format(mode, "train"))), - "val": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))), + "train": ( + "train2017", + os.path.join("annotations", anno_file_template.format(mode, "train")), + ), + "val": ( + "val2017", + os.path.join("annotations", anno_file_template.format(mode, "val")), + ), # "train": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))) } diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/engine.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/engine.py index 4219a2663..1c17c6a1a 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/engine.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/engine.py @@ -5,8 +5,12 @@ import torch import torchvision.models.detection.mask_rcnn import utils -from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.coco_eval import CocoEvaluator -from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.coco_utils import get_coco_api_from_dataset +from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.coco_eval import ( + CocoEvaluator, +) +from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.coco_utils import ( + get_coco_api_from_dataset, +) def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq): @@ -93,7 +97,10 @@ def evaluate(model, data_loader, device): outputs = [{k: 
v.to(cpu_device) for k, v in t.items()} for t in outputs] model_time = time.time() - model_time - res = {target["image_id"].item(): output for target, output in zip(targets, outputs)} + res = { + target["image_id"].item(): output + for target, output in zip(targets, outputs) + } evaluator_time = time.time() coco_evaluator.update(res) evaluator_time = time.time() - evaluator_time diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/group_by_aspect_ratio.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/group_by_aspect_ratio.py index 1323849a6..a06376b29 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/group_by_aspect_ratio.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/group_by_aspect_ratio.py @@ -36,7 +36,9 @@ class GroupedBatchSampler(BatchSampler): def __init__(self, sampler, group_ids, batch_size): if not isinstance(sampler, Sampler): - raise ValueError(f"sampler should be an instance of torch.utils.data.Sampler, but got sampler={sampler}") + raise ValueError( + f"sampler should be an instance of torch.utils.data.Sampler, but got sampler={sampler}" + ) self.sampler = sampler self.group_ids = group_ids self.batch_size = batch_size @@ -65,9 +67,13 @@ def __iter__(self): if num_remaining > 0: # for the remaining batches, take first the buffers with largest number # of elements - for group_id, _ in sorted(buffer_per_group.items(), key=lambda x: len(x[1]), reverse=True): + for group_id, _ in sorted( + buffer_per_group.items(), key=lambda x: len(x[1]), reverse=True + ): remaining = self.batch_size - len(buffer_per_group[group_id]) - samples_from_group_id = _repeat_to_at_least(samples_per_group[group_id], remaining) + samples_from_group_id = _repeat_to_at_least( + samples_per_group[group_id], remaining + ) buffer_per_group[group_id].extend(samples_from_group_id[:remaining]) assert len(buffer_per_group[group_id]) == self.batch_size yield buffer_per_group[group_id] diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/presets.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/presets.py index 4b70ce49e..0b440ec5b 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/presets.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/presets.py @@ -1,6 +1,7 @@ import torch -from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils import \ - transforms as T +from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils import ( + transforms as T, +) class DetectionPresetTrain: diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/train.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/train.py index e611514d3..58a56cd36 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/train.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/train.py @@ -17,6 +17,7 @@ Because the number of images is smaller in the person keypoint subset of COCO, the number of epochs should be adapted so that we have 
the same number of iterations. """ + import datetime import os import time @@ -26,11 +27,22 @@ import torchvision import torchvision.models.detection import torchvision.models.detection.mask_rcnn -from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils import \ - utils, presets -from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.coco_utils import get_coco, get_coco_kp -from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.engine import train_one_epoch, evaluate -from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups +from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils import ( + utils, + presets, +) +from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.coco_utils import ( + get_coco, + get_coco_kp, +) +from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.engine import ( + train_one_epoch, + evaluate, +) +from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.group_by_aspect_ratio import ( + GroupedBatchSampler, + create_aspect_ratio_groups, +) try: @@ -61,18 +73,44 @@ def get_transform(train, args): def get_args_parser(add_help=True): import argparse - parser = argparse.ArgumentParser(description="PyTorch Detection Training", add_help=add_help) + parser = argparse.ArgumentParser( + description="PyTorch Detection Training", add_help=add_help + ) - parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path") + parser.add_argument( + "--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path" + ) parser.add_argument("--dataset", default="coco", type=str, help="dataset name") - parser.add_argument("--model", default="maskrcnn_resnet50_fpn", type=str, help="model name") - parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)") parser.add_argument( - "-b", "--batch-size", default=2, type=int, help="images per gpu, the total batch size is $NGPU x batch_size" + "--model", default="maskrcnn_resnet50_fpn", type=str, help="model name" + ) + parser.add_argument( + "--device", + default="cuda", + type=str, + help="device (Use cuda or cpu Default: cuda)", + ) + parser.add_argument( + "-b", + "--batch-size", + default=2, + type=int, + help="images per gpu, the total batch size is $NGPU x batch_size", + ) + parser.add_argument( + "--epochs", + default=26, + type=int, + metavar="N", + help="number of total epochs to run", ) - parser.add_argument("--epochs", default=26, type=int, metavar="N", help="number of total epochs to run") parser.add_argument( - "-j", "--workers", default=4, type=int, metavar="N", help="number of data loading workers (default: 4)" + "-j", + "--workers", + default=4, + type=int, + metavar="N", + help="number of data loading workers (default: 4)", ) parser.add_argument( "--lr", @@ -80,7 +118,9 @@ def get_args_parser(add_help=True): type=float, help="initial learning rate, 0.02 is the default value for training on 8 gpus and 2 images_per_gpu", ) - 
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum") + parser.add_argument( + "--momentum", default=0.9, type=float, metavar="M", help="momentum" + ) parser.add_argument( "--wd", "--weight-decay", @@ -91,10 +131,16 @@ def get_args_parser(add_help=True): dest="weight_decay", ) parser.add_argument( - "--lr-scheduler", default="multisteplr", type=str, help="name of lr scheduler (default: multisteplr)" + "--lr-scheduler", + default="multisteplr", + type=str, + help="name of lr scheduler (default: multisteplr)", ) parser.add_argument( - "--lr-step-size", default=8, type=int, help="decrease lr every step-size epochs (multisteplr scheduler only)" + "--lr-step-size", + default=8, + type=int, + help="decrease lr every step-size epochs (multisteplr scheduler only)", ) parser.add_argument( "--lr-steps", @@ -104,19 +150,35 @@ def get_args_parser(add_help=True): help="decrease lr every step-size epochs (multisteplr scheduler only)", ) parser.add_argument( - "--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma (multisteplr scheduler only)" + "--lr-gamma", + default=0.1, + type=float, + help="decrease lr by a factor of lr-gamma (multisteplr scheduler only)", ) parser.add_argument("--print-freq", default=20, type=int, help="print frequency") - parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs") + parser.add_argument( + "--output-dir", default=".", type=str, help="path to save outputs" + ) parser.add_argument("--resume", default="", type=str, help="path of checkpoint") parser.add_argument("--start_epoch", default=0, type=int, help="start epoch") parser.add_argument("--aspect-ratio-group-factor", default=3, type=int) - parser.add_argument("--rpn-score-thresh", default=None, type=float, help="rpn score threshold for faster-rcnn") parser.add_argument( - "--trainable-backbone-layers", default=None, type=int, help="number of trainable layers of backbone" + "--rpn-score-thresh", + default=None, + type=float, + help="rpn score threshold for faster-rcnn", ) parser.add_argument( - "--data-augmentation", default="hflip", type=str, help="data augmentation policy (default: hflip)" + "--trainable-backbone-layers", + default=None, + type=int, + help="number of trainable layers of backbone", + ) + parser.add_argument( + "--data-augmentation", + default="hflip", + type=str, + help="data augmentation policy (default: hflip)", ) parser.add_argument( "--sync-bn", @@ -138,18 +200,29 @@ def get_args_parser(add_help=True): ) # distributed training parameters - parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes") - parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training") + parser.add_argument( + "--world-size", default=1, type=int, help="number of distributed processes" + ) + parser.add_argument( + "--dist-url", + default="env://", + type=str, + help="url used to set up distributed training", + ) # Prototype models only - parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load") + parser.add_argument( + "--weights", default=None, type=str, help="the weights enum name to load" + ) return parser def main(args): if args.weights and PM is None: - raise ImportError("The prototype module couldn't be found. Please install the latest torchvision nightly.") + raise ImportError( + "The prototype module couldn't be found. Please install the latest torchvision nightly." 
+ ) if args.output_dir: utils.mkdir(args.output_dir) @@ -161,8 +234,12 @@ def main(args): # Data loading code print("Loading data") - dataset, num_classes = get_dataset(args.dataset, "train", get_transform(True, args), args.data_path) - dataset_test, _ = get_dataset(args.dataset, "val", get_transform(False, args), args.data_path) + dataset, num_classes = get_dataset( + args.dataset, "train", get_transform(True, args), args.data_path + ) + dataset_test, _ = get_dataset( + args.dataset, "val", get_transform(False, args), args.data_path + ) print("Creating data loaders") if args.distributed: @@ -173,17 +250,30 @@ def main(args): test_sampler = torch.utils.data.SequentialSampler(dataset_test) if args.aspect_ratio_group_factor >= 0: - group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor) - train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size) + group_ids = create_aspect_ratio_groups( + dataset, k=args.aspect_ratio_group_factor + ) + train_batch_sampler = GroupedBatchSampler( + train_sampler, group_ids, args.batch_size + ) else: - train_batch_sampler = torch.utils.data.BatchSampler(train_sampler, args.batch_size, drop_last=True) + train_batch_sampler = torch.utils.data.BatchSampler( + train_sampler, args.batch_size, drop_last=True + ) data_loader = torch.utils.data.DataLoader( - dataset, batch_sampler=train_batch_sampler, num_workers=args.workers, collate_fn=utils.collate_fn + dataset, + batch_sampler=train_batch_sampler, + num_workers=args.workers, + collate_fn=utils.collate_fn, ) data_loader_test = torch.utils.data.DataLoader( - dataset_test, batch_size=1, sampler=test_sampler, num_workers=args.workers, collate_fn=utils.collate_fn + dataset_test, + batch_size=1, + sampler=test_sampler, + num_workers=args.workers, + collate_fn=utils.collate_fn, ) print("Creating model") @@ -196,7 +286,9 @@ def main(args): pretrained=args.pretrained, num_classes=num_classes, **kwargs ) else: - model = PM.detection.__dict__[args.model](weights=args.weights, num_classes=num_classes, **kwargs) + model = PM.detection.__dict__[args.model]( + weights=args.weights, num_classes=num_classes, **kwargs + ) model.to(device) if args.distributed and args.sync_bn: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) @@ -207,13 +299,19 @@ def main(args): model_without_ddp = model.module params = [p for p in model.parameters() if p.requires_grad] - optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + optimizer = torch.optim.SGD( + params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay + ) args.lr_scheduler = args.lr_scheduler.lower() if args.lr_scheduler == "multisteplr": - lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma) + lr_scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=args.lr_steps, gamma=args.lr_gamma + ) elif args.lr_scheduler == "cosineannealinglr": - lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs) + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + optimizer, T_max=args.epochs + ) else: raise RuntimeError( f"Invalid lr scheduler '{args.lr_scheduler}'. Only MultiStepLR and CosineAnnealingLR are supported." 
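# A minimal, self-contained sketch of the MultiStepLR / CosineAnnealingLR selection
# reformatted in the hunk above. lr=0.02, gamma=0.1 and 26 epochs mirror the argparse
# defaults shown in this diff; milestones=[16, 22] is an illustrative assumption,
# since the --lr-steps default is not visible here.
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(params, lr=0.02, momentum=0.9)

use_multistep = True  # stands in for args.lr_scheduler == "multisteplr"
if use_multistep:
    # lr is multiplied by gamma at each milestone: 0.02 -> 0.002 at epoch 16 -> 0.0002 at epoch 22
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[16, 22], gamma=0.1
    )
else:
    # lr decays along half a cosine period from the initial value towards 0 over T_max epochs
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=26)

for epoch in range(26):
    optimizer.step()  # one (dummy) optimisation step per epoch
    scheduler.step()  # advance the schedule
    print(epoch, scheduler.get_last_lr())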
@@ -245,8 +343,12 @@ def main(args): "args": args, "epoch": epoch, } - utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) - utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) + utils.save_on_master( + checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth") + ) + utils.save_on_master( + checkpoint, os.path.join(args.output_dir, "checkpoint.pth") + ) # evaluate after every epoch evaluate(model, data_loader_test, device=device) diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/transforms.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/transforms.py index 4ab5a6525..9aa5efc6f 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/transforms.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/transforms.py @@ -103,7 +103,9 @@ def forward( if isinstance(image, torch.Tensor): if image.ndimension() not in {2, 3}: - raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.") + raise ValueError( + f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions." + ) elif image.ndimension() == 2: image = image.unsqueeze(0) @@ -113,7 +115,9 @@ def forward( # sample an option idx = int(torch.randint(low=0, high=len(self.options), size=(1,))) min_jaccard_overlap = self.options[idx] - if min_jaccard_overlap >= 1.0: # a value larger than 1 encodes the leave as-is option + if ( + min_jaccard_overlap >= 1.0 + ): # a value larger than 1 encodes the leave as-is option return image, target for _ in range(self.trials): @@ -137,14 +141,21 @@ def forward( # check for any valid boxes with centers within the crop area cx = 0.5 * (target["boxes"][:, 0] + target["boxes"][:, 2]) cy = 0.5 * (target["boxes"][:, 1] + target["boxes"][:, 3]) - is_within_crop_area = (left < cx) & (cx < right) & (top < cy) & (cy < bottom) + is_within_crop_area = ( + (left < cx) & (cx < right) & (top < cy) & (cy < bottom) + ) if not is_within_crop_area.any(): continue # check at least 1 box with jaccard limitations boxes = target["boxes"][is_within_crop_area] ious = torchvision.ops.boxes.box_iou( - boxes, torch.tensor([[left, top, right, bottom]], dtype=boxes.dtype, device=boxes.device) + boxes, + torch.tensor( + [[left, top, right, bottom]], + dtype=boxes.dtype, + device=boxes.device, + ), ) if ious.max() < min_jaccard_overlap: continue @@ -163,7 +174,10 @@ def forward( class RandomZoomOut(nn.Module): def __init__( - self, fill: Optional[List[float]] = None, side_range: Tuple[float, float] = (1.0, 4.0), p: float = 0.5 + self, + fill: Optional[List[float]] = None, + side_range: Tuple[float, float] = (1.0, 4.0), + p: float = 0.5, ): super().__init__() if fill is None: @@ -185,7 +199,9 @@ def forward( ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: if isinstance(image, torch.Tensor): if image.ndimension() not in {2, 3}: - raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.") + raise ValueError( + f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions." 
+ ) elif image.ndimension() == 2: image = image.unsqueeze(0) @@ -194,7 +210,9 @@ def forward( orig_w, orig_h = F.get_image_size(image) - r = self.side_range[0] + torch.rand(1) * (self.side_range[1] - self.side_range[0]) + r = self.side_range[0] + torch.rand(1) * ( + self.side_range[1] - self.side_range[0] + ) canvas_width = int(orig_w * r) canvas_height = int(orig_h * r) @@ -211,10 +229,12 @@ def forward( image = F.pad(image, [left, top, right, bottom], fill=fill) if isinstance(image, torch.Tensor): - v = torch.tensor(self.fill, device=image.device, dtype=image.dtype).view(-1, 1, 1) - image[..., :top, :] = image[..., :, :left] = image[..., (top + orig_h) :, :] = image[ - ..., :, (left + orig_w) : - ] = v + v = torch.tensor(self.fill, device=image.device, dtype=image.dtype).view( + -1, 1, 1 + ) + image[..., :top, :] = image[..., :, :left] = image[ + ..., (top + orig_h) :, : + ] = image[..., :, (left + orig_w) :] = v if target is not None: target["boxes"][:, 0::2] += left @@ -244,7 +264,9 @@ def forward( ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: if isinstance(image, torch.Tensor): if image.ndimension() not in {2, 3}: - raise ValueError(f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions.") + raise ValueError( + f"image should be 2/3 dimensional. Got {image.ndimension()} dimensions." + ) elif image.ndimension() == 2: image = image.unsqueeze(0) diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/utils.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/utils.py index f73915580..21d164683 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/utils.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/pytorch_utils/utils.py @@ -63,7 +63,11 @@ def value(self): def __str__(self): return self.fmt.format( - median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value, ) @@ -127,7 +131,9 @@ def __getattr__(self, attr): return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] - raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{attr}'" + ) def __str__(self): loss_str = [] @@ -165,7 +171,14 @@ def log_every(self, iterable, print_freq, header=None): ) else: log_msg = self.delimiter.join( - [header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"] + [ + header, + "[{0" + space_fmt + "}/{1}]", + "eta: {eta}", + "{meters}", + "time: {time}", + "data: {data}", + ] ) MB = 1024.0 * 1024.0 for obj in iterable: @@ -190,14 +203,21 @@ def log_every(self, iterable, print_freq, header=None): else: print( log_msg.format( - i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time) + i, + len(iterable), + eta=eta_string, + meters=str(self), + time=str(iter_time), + data=str(data_time), ) ) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print(f"{header} Total time: {total_time_str} ({total_time / len(iterable):.4f} s / it)") + print( + f"{header} Total time: {total_time_str} ({total_time / len(iterable):.4f} s / it)" + ) def collate_fn(batch): @@ -276,7 +296,10 @@ def init_distributed_mode(args): 
args.dist_backend = "nccl" print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True) torch.distributed.init_process_group( - backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank + backend=args.dist_backend, + init_method=args.dist_url, + world_size=args.world_size, + rank=args.rank, ) torch.distributed.barrier() setup_for_distributed(args.rank == 0) diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/training_lift_models.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/training_lift_models.py index 06c72fdf3..a5b3af6da 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/training_lift_models.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/training_lift_models.py @@ -1,22 +1,25 @@ from common_tools import * +keypoints_classes_ids2names = {0: "idealpos"} -keypoints_classes_ids2names = {0: 'idealpos'} +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") +writer = SummaryWriter("runs/experiment_3") -device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') -writer = SummaryWriter('runs/experiment_3') - -KEYPOINTS_FOLDER_TRAIN = 'data/train' -KEYPOINTS_FOLDER_TEST = 'data/test' +KEYPOINTS_FOLDER_TRAIN = "data/train" +KEYPOINTS_FOLDER_TEST = "data/test" dataset_train = ClassDataset(KEYPOINTS_FOLDER_TRAIN, transform=None, demo=False) dataset_test = ClassDataset(KEYPOINTS_FOLDER_TEST, transform=None, demo=False) -data_loader_train = DataLoader(dataset_train, batch_size=3, shuffle=True, collate_fn=collate_fn) -data_loader_test = DataLoader(dataset_test, batch_size=1, shuffle=False, collate_fn=collate_fn) +data_loader_train = DataLoader( + dataset_train, batch_size=3, shuffle=True, collate_fn=collate_fn +) +data_loader_test = DataLoader( + dataset_test, batch_size=1, shuffle=False, collate_fn=collate_fn +) -model = get_model(num_keypoints = 1) +model = get_model(num_keypoints=1) model.to(device) @@ -26,14 +29,12 @@ num_epochs = 5 - - for epoch in range(num_epochs): train_one_epoch(model, optimizer, data_loader_train, device, epoch, print_freq=1000) lr_scheduler.step() - #evaluate(model, data_loader_test, device) - + # evaluate(model, data_loader_test, device) + # Save model weights after training -torch.save(model.state_dict(), 'keypointsrcnn_weights_test.pth') +torch.save(model.state_dict(), "keypointsrcnn_weights_test.pth") diff --git a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/validate.py b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/validate.py index ac0601353..fb9997333 100755 --- a/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/validate.py +++ b/legacy/choosing_wait_position/src/choosing_wait_position/final_lift_key_point/validate.py @@ -6,19 +6,37 @@ from torchvision.models.detection.rpn import AnchorGenerator from torchvision.transforms import functional as F -import albumentations as A # Library for augmentations -from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.utils import collate_fn +import albumentations as A # Library for augmentations +from base.src.Base.common.navigation.choosing_wait_position.src.choosing_wait_position.final_lift_key_point.pytorch_utils.utils import ( + collate_fn, +) def train_transform(): - return A.Compose([ - A.Sequential([ - 
A.RandomRotate90(p=1), # Random rotation of an image by 90 degrees zero or more times - A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, brightness_by_max=True, always_apply=False, p=1), # Random change of brightness & contrast - ], p=1) - ], - keypoint_params=A.KeypointParams(format='xy'), # More about keypoint formats used in albumentations library read at https://albumentations.ai/docs/getting_started/keypoints_augmentation/ - bbox_params=A.BboxParams(format='pascal_voc', label_fields=['bboxes_labels']) # Bboxes should have labels, read more here https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/ + return A.Compose( + [ + A.Sequential( + [ + A.RandomRotate90( + p=1 + ), # Random rotation of an image by 90 degrees zero or more times + A.RandomBrightnessContrast( + brightness_limit=0.3, + contrast_limit=0.3, + brightness_by_max=True, + always_apply=False, + p=1, + ), # Random change of brightness & contrast + ], + p=1, + ) + ], + keypoint_params=A.KeypointParams( + format="xy" + ), # More about keypoint formats used in albumentations library read at https://albumentations.ai/docs/getting_started/keypoints_augmentation/ + bbox_params=A.BboxParams( + format="pascal_voc", label_fields=["bboxes_labels"] + ), # Bboxes should have labels, read more here https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/ ) @@ -26,117 +44,143 @@ class ClassDataset(Dataset): def __init__(self, root, transform=None, demo=False): self.root = root self.transform = transform - self.demo = demo # Use demo=True if you need transformed and original images (for example, for visualization purposes) + self.demo = demo # Use demo=True if you need transformed and original images (for example, for visualization purposes) self.imgs_files = sorted(os.listdir(os.path.join(root, "images"))) self.annotations_files = sorted(os.listdir(os.path.join(root, "annotations"))) - def __getitem__(self,idx): + def __getitem__(self, idx): img_path = os.path.join(self.root, "images", self.imgs_files[idx]) - annotations_path = os.path.join(self.root, "annotations", self.annotations_files[idx]) + annotations_path = os.path.join( + self.root, "annotations", self.annotations_files[idx] + ) img_original = cv2.imread(img_path) img_original = cv2.cvtColor(img_original, cv2.COLOR_BGR2RGB) with open(annotations_path) as f: data = json.load(f) - bboxes_original = data['bboxes'] - keypoints_original = data['keypoints'] - + bboxes_original = data["bboxes"] + keypoints_original = data["keypoints"] + # All objects are ideal area - bboxes_labels_original = ['Ideal Pos' for _ in bboxes_original] + bboxes_labels_original = ["Ideal Pos" for _ in bboxes_original] - if self.transform: - # Converting keypoints from [x,y,visibility]-format to [x, y]-format + Flattening nested list of keypoints + if self.transform: + # Converting keypoints from [x,y,visibility]-format to [x, y]-format + Flattening nested list of keypoints # For example, if we have the following list of keypoints for three objects (each object has two keypoints): - # [[obj1_kp1, obj1_kp2], [obj2_kp1, obj2_kp2], [obj3_kp1, obj3_kp2]], where each keypoint is in [x, y]-format + # [[obj1_kp1, obj1_kp2], [obj2_kp1, obj2_kp2], [obj3_kp1, obj3_kp2]], where each keypoint is in [x, y]-format # Then we need to convert it to the following list: # [obj1_kp1, obj1_kp2, obj2_kp1, obj2_kp2, obj3_kp1, obj3_kp2] - keypoints_original_flattened = [el[0:2] for kp in keypoints_original for el in kp] - + keypoints_original_flattened = [ + el[0:2] for kp 
in keypoints_original for el in kp + ] + # Apply augmentations - transformed = self.transform(image=img_original, bboxes=bboxes_original, bboxes_labels=bboxes_labels_original, keypoints=keypoints_original_flattened) - img = transformed['image'] - bboxes = transformed['bboxes'] - + transformed = self.transform( + image=img_original, + bboxes=bboxes_original, + bboxes_labels=bboxes_labels_original, + keypoints=keypoints_original_flattened, + ) + img = transformed["image"] + bboxes = transformed["bboxes"] + # Unflattening list transformed['keypoints'] # For example, if we have the following list of keypoints for three objects (each object has two keypoints): # [obj1_kp1, obj1_kp2, obj2_kp1, obj2_kp2, obj3_kp1, obj3_kp2], where each keypoint is in [x, y]-format # Then we need to convert it to the following list: # [[obj1_kp1, obj1_kp2], [obj2_kp1, obj2_kp2], [obj3_kp1, obj3_kp2]] - keypoints_transformed_unflattened = np.reshape(np.array(transformed['keypoints']), (-1,2,2)).tolist() + keypoints_transformed_unflattened = np.reshape( + np.array(transformed["keypoints"]), (-1, 2, 2) + ).tolist() # Converting transformed keypoints from [x, y]-format to [x,y,visibility]-format by appending original visibilities to transformed coordinates of keypoints keypoints = [] - for o_idx, obj in enumerate(keypoints_transformed_unflattened): # Iterating over objects + for o_idx, obj in enumerate( + keypoints_transformed_unflattened + ): # Iterating over objects obj_keypoints = [] - for k_idx, kp in enumerate(obj): # Iterating over keypoints in each object + for k_idx, kp in enumerate( + obj + ): # Iterating over keypoints in each object # kp - coordinates of keypoint # keypoints_original[o_idx][k_idx][2] - original visibility of keypoint obj_keypoints.append(kp + [keypoints_original[o_idx][k_idx][2]]) keypoints.append(obj_keypoints) - - else: + else: img, bboxes, keypoints = img_original, bboxes_original, keypoints_original - bboxes = torch.as_tensor(bboxes, dtype=torch.float32) + bboxes = torch.as_tensor(bboxes, dtype=torch.float32) target = {} target["boxes"] = bboxes - target["labels"] = torch.as_tensor([1 for _ in bboxes], dtype=torch.int64) # all objects are glue tubes + target["labels"] = torch.as_tensor( + [1 for _ in bboxes], dtype=torch.int64 + ) # all objects are glue tubes target["image_id"] = torch.tensor([idx]) target["area"] = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0]) target["iscrowd"] = torch.zeros(len(bboxes), dtype=torch.int64) - target["keypoints"] = torch.as_tensor(keypoints, dtype=torch.float32) + target["keypoints"] = torch.as_tensor(keypoints, dtype=torch.float32) img = F.to_tensor(img) bboxes_original = torch.as_tensor(bboxes_original, dtype=torch.float32) target_original = {} target_original["boxes"] = bboxes_original - target_original["labels"] = torch.as_tensor([1 for _ in bboxes_original], dtype=torch.int64) # all objects are glue tubes + target_original["labels"] = torch.as_tensor( + [1 for _ in bboxes_original], dtype=torch.int64 + ) # all objects are glue tubes target_original["image_id"] = torch.tensor([idx]) - target_original["area"] = (bboxes_original[:, 3] - bboxes_original[:, 1]) * (bboxes_original[:, 2] - bboxes_original[:, 0]) - target_original["iscrowd"] = torch.zeros(len(bboxes_original), dtype=torch.int64) - target_original["keypoints"] = torch.as_tensor(keypoints_original, dtype=torch.float32) + target_original["area"] = (bboxes_original[:, 3] - bboxes_original[:, 1]) * ( + bboxes_original[:, 2] - bboxes_original[:, 0] + ) + target_original["iscrowd"] 
= torch.zeros( + len(bboxes_original), dtype=torch.int64 + ) + target_original["keypoints"] = torch.as_tensor( + keypoints_original, dtype=torch.float32 + ) img_original = F.to_tensor(img_original) if self.demo: return img, target, img_original, target_original else: return img, target - + def __len__(self): return len(self.imgs_files) def get_model(num_keypoints, weights_path=None): - - anchor_generator = AnchorGenerator(sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.25, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0)) - model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=False, - pretrained_backbone=True, - num_keypoints=num_keypoints, - num_classes = 2, # Background is the first class, object is the second class - rpn_anchor_generator=anchor_generator) + anchor_generator = AnchorGenerator( + sizes=(32, 64, 128, 256, 512), + aspect_ratios=(0.25, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0), + ) + model = torchvision.models.detection.keypointrcnn_resnet50_fpn( + pretrained=False, + pretrained_backbone=True, + num_keypoints=num_keypoints, + num_classes=2, # Background is the first class, object is the second class + rpn_anchor_generator=anchor_generator, + ) if weights_path: state_dict = torch.load(weights_path) - model.load_state_dict(state_dict) - - return model - + model.load_state_dict(state_dict) + return model device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - -KEYPOINTS_FOLDER_VALIDATE= 'data/valid' +KEYPOINTS_FOLDER_VALIDATE = "data/valid" dataset_valid = ClassDataset(KEYPOINTS_FOLDER_VALIDATE, transform=None, demo=False) -data_loader_valid = DataLoader(dataset_valid, batch_size=3, shuffle=True, collate_fn=collate_fn) +data_loader_valid = DataLoader( + dataset_valid, batch_size=3, shuffle=True, collate_fn=collate_fn +) -model = get_model(1, weights_path='models/keypointsrcnn_weights.pth') - +model = get_model(1, weights_path="models/keypointsrcnn_weights.pth") iterator = iter(data_loader_valid) @@ -151,24 +195,33 @@ def get_model(num_keypoints, weights_path=None): print("Predictions: \n", output) -image = (images[0].permute(1,2,0).detach().cpu().numpy() * 255).astype(np.uint8) -scores = output[0]['scores'].detach().cpu().numpy() +image = (images[0].permute(1, 2, 0).detach().cpu().numpy() * 255).astype(np.uint8) +scores = output[0]["scores"].detach().cpu().numpy() -high_scores_idxs = np.where(scores > 1.3)[0].tolist() # Indexes of boxes with scores > 0.7 -post_nms_idxs = torchvision.ops.nms(output[0]['boxes'][high_scores_idxs], output[0]['scores'][high_scores_idxs], 0.3).cpu().numpy() # Indexes of boxes left after applying NMS (iou_threshold=0.3) +high_scores_idxs = np.where(scores > 0.7)[ + 0 +].tolist() # Indexes of boxes with scores > 0.7 (detection scores lie in [0, 1], so a 1.3 cutoff would select nothing) +post_nms_idxs = ( + torchvision.ops.nms( + output[0]["boxes"][high_scores_idxs], output[0]["scores"][high_scores_idxs], 0.3 + ) + .cpu() + .numpy() +) # Indexes of boxes left after applying NMS (iou_threshold=0.3) # Below, in output[0]['keypoints'][high_scores_idxs][post_nms_idxs] and output[0]['boxes'][high_scores_idxs][post_nms_idxs] # Firstly, we choose only those objects, which have score above predefined threshold. This is done with choosing elements with [high_scores_idxs] indexes # Secondly, we choose only those objects, which are left after NMS is applied. This is done with choosing elements with [post_nms_idxs] indexes
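# A small self-contained illustration of the two-step selection described above,
# using made-up boxes and scores in place of a real model output (the values below
# are assumptions for the example only):
import torch
import torchvision

boxes = torch.tensor(
    [[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0], [50.0, 50.0, 60.0, 60.0]]
)
scores = torch.tensor([0.9, 0.8, 0.3])

high_scores_idxs = (scores > 0.7).nonzero().flatten()  # step 1: confident boxes -> [0, 1]
post_nms_idxs = torchvision.ops.nms(
    boxes[high_scores_idxs], scores[high_scores_idxs], 0.3
)  # step 2: box 1 overlaps box 0 (IoU ~0.68 > 0.3), so only index 0 survives
final_boxes = boxes[high_scores_idxs][post_nms_idxs]  # the chained indexing used above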
- keypoints = [] -for kps in output[0]['keypoints'][high_scores_idxs][post_nms_idxs].detach().cpu().numpy(): +for kps in ( + output[0]["keypoints"][high_scores_idxs][post_nms_idxs].detach().cpu().numpy() +): keypoints.append([list(map(int, kp[:2])) for kp in kps]) bboxes = [] -for bbox in output[0]['boxes'][high_scores_idxs][post_nms_idxs].detach().cpu().numpy(): +for bbox in output[0]["boxes"][high_scores_idxs][post_nms_idxs].detach().cpu().numpy(): bboxes.append(list(map(int, bbox.tolist()))) - -#visualize(image, bboxes, keypoints) + +# visualize(image, bboxes, keypoints) diff --git a/legacy/graph_room_navigation/scripts/test_graph_room_navigation.py b/legacy/graph_room_navigation/scripts/test_graph_room_navigation.py index e8a732451..7ea1f5d61 100755 --- a/legacy/graph_room_navigation/scripts/test_graph_room_navigation.py +++ b/legacy/graph_room_navigation/scripts/test_graph_room_navigation.py @@ -15,13 +15,27 @@ # setup action client move_to_goal_client = actionlib.SimpleActionClient( - '/unsafe_traversal/move_to_goal', MoveToGoalAction) + "/unsafe_traversal/move_to_goal", MoveToGoalAction +) move_to_goal_client.wait_for_server() -add_room("1", Point(-0.19334179163, -4.55392599106, 0.0), Point(4.94751501083, 7.62173271179, 0.0)) -add_room("2", Point(2.69638562202, 7.27746152878, 0.0), Point(4.91735553741, 12.5359373093, 0.0)) +add_room( + "1", + Point(-0.19334179163, -4.55392599106, 0.0), + Point(4.94751501083, 7.62173271179, 0.0), +) +add_room( + "2", + Point(2.69638562202, 7.27746152878, 0.0), + Point(4.91735553741, 12.5359373093, 0.0), +) -add_doorway("1", Point(4.51002217519, 6.80491190474, 0.0), "2", Point(4.51002217519, 8.30578852245, 0.0)) +add_doorway( + "1", + Point(4.51002217519, 6.80491190474, 0.0), + "2", + Point(4.51002217519, 8.30578852245, 0.0), +) plan = plan_to_room("2") @@ -33,23 +47,22 @@ phrases = [ "Could someone please open this door for me?", "Open sesame", - "This door is not going to open itself!"
+ "This door is not going to open itself!", ] voice = Voice() for p1, p2 in zip(plan.points[0::2], plan.points[1::2]): - result = False tries = 0 while not result: goal = MoveToGoalGoal() goal.start_pose.header.stamp = rospy.get_rostime() - goal.start_pose.header.frame_id = 'map' + goal.start_pose.header.frame_id = "map" goal.start_pose.pose.position = p1 goal.end_pose.header.stamp = rospy.get_rostime() - goal.end_pose.header.frame_id = 'map' + goal.end_pose.header.frame_id = "map" goal.end_pose.pose.position = p2 move_to_goal_client.send_goal(goal) @@ -61,7 +74,7 @@ voice.sync_tts(phrases[tries]) rospy.loginfo(phrases[tries]) asked_for_help = True - tries+=1 + tries += 1 elif asked_for_help and tries < max_tries: asked_for_help = False elif tries >= max_tries: diff --git a/legacy/graph_room_navigation/setup.py b/legacy/graph_room_navigation/setup.py index 95678f596..741a6df9a 100644 --- a/legacy/graph_room_navigation/setup.py +++ b/legacy/graph_room_navigation/setup.py @@ -4,8 +4,7 @@ from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['graph_room_navigation'], - package_dir={'': 'src'} + packages=["graph_room_navigation"], package_dir={"": "src"} ) setup(**setup_args) diff --git a/legacy/graph_room_navigation/src/graph_room_navigation/graph.py b/legacy/graph_room_navigation/src/graph_room_navigation/graph.py index 8e6fa31b5..e774feb75 100755 --- a/legacy/graph_room_navigation/src/graph_room_navigation/graph.py +++ b/legacy/graph_room_navigation/src/graph_room_navigation/graph.py @@ -1,10 +1,11 @@ #!/usr/bin/env python3 import numpy as np -class Room: + +class Room: def __init__(self, name, corners): - self.name = name + self.name = name self.corners = corners self.doorways = {} @@ -18,14 +19,13 @@ def isin(self, x, y): def __str__(self): return self.name -class Graph: +class Graph: def __init__(self): - self.size = 0 self.adjLists = {} - def getRoom(self,name): + def getRoom(self, name): for room in self.adjLists.keys(): if room.name == name: return room @@ -46,7 +46,7 @@ def hasVertex(self, vertex): def addVertex(self, vertex): if not self.hasVertex(vertex): - self.size +=1 + self.size += 1 self.adjLists[vertex] = [] return True else: @@ -77,7 +77,7 @@ def dfs(self, u, v, visited=None): if not x in visited: self.dfs(x, v, visited=visited) return visited - + def bfs(self, u, v): # maintain a queue of paths queue = [] @@ -91,7 +91,7 @@ def bfs(self, u, v): # path found if node == v: return path - # enumerate all adjacent nodes, construct a + # enumerate all adjacent nodes, construct a # new path and push it into the queue for adjacent in self.adjLists[node]: new_path = list(path) @@ -101,7 +101,7 @@ def bfs(self, u, v): def points_from_path(self, path): print([p.name for p in path]) points = [] - for u, v in zip(list(path),list(path)[1:]): + for u, v in zip(list(path), list(path)[1:]): print(u.name, v.name) points.append(u.doorways[v]) points.append(v.doorways[u]) diff --git a/legacy/graph_room_navigation/src/graph_room_navigation/graph_navigation_server.py b/legacy/graph_room_navigation/src/graph_room_navigation/graph_navigation_server.py index 6d34d762b..c13f9abf3 100755 --- a/legacy/graph_room_navigation/src/graph_room_navigation/graph_navigation_server.py +++ b/legacy/graph_room_navigation/src/graph_room_navigation/graph_navigation_server.py @@ -2,26 +2,48 @@ import rospy from graph_room_navigation import Graph, Room -from graph_room_navigation.srv import AddRoom, AddRoomResponse, AddCrossing, AddCrossingResponse, PlanToRoom, 
PlanToRoomResponse, PlanToPoint, PlanToPointResponse +from graph_room_navigation.srv import ( + AddRoom, + AddRoomResponse, + AddCrossing, + AddCrossingResponse, + PlanToRoom, + PlanToRoomResponse, + PlanToPoint, + PlanToPointResponse, +) from geometry_msgs.msg import PoseWithCovarianceStamped, Point -class GraphNavigationServer(): + +class GraphNavigationServer: def __init__(self, path=""): self.graph = Graph() - - self.add_room_srv = rospy.Service("graph_navigation_server/add_room", AddRoom, self.add_room) - self.add_doorway_srv = rospy.Service("graph_navigation_server/add_doorway", AddCrossing, self.add_crossing) - self.plan_to_room_srv = rospy.Service("graph_navigation_server/plan_to_room", PlanToRoom, self.plan_to_room) - self.plan_to_point_srv = rospy.Service("graph_navigation_server/plan_to_point", PlanToPoint, self.plan_to_point) + + self.add_room_srv = rospy.Service( + "graph_navigation_server/add_room", AddRoom, self.add_room + ) + self.add_doorway_srv = rospy.Service( + "graph_navigation_server/add_doorway", AddCrossing, self.add_crossing + ) + self.plan_to_room_srv = rospy.Service( + "graph_navigation_server/plan_to_room", PlanToRoom, self.plan_to_room + ) + self.plan_to_point_srv = rospy.Service( + "graph_navigation_server/plan_to_point", PlanToPoint, self.plan_to_point + ) def add_room(self, req): response = AddRoomResponse() - response.success = self.graph.addVertex(Room(req.name, [[req.a.x, req.a.y], [req.b.x, req.b.y]])) + response.success = self.graph.addVertex( + Room(req.name, [[req.a.x, req.a.y], [req.b.x, req.b.y]]) + ) return response def add_crossing(self, req): response = AddCrossingResponse() - response.success = self.graph.addEdge(req.room1, req.room2, [req.door1.x, req.door1.y], [req.door2.x, req.door2.y]) + response.success = self.graph.addEdge( + req.room1, req.room2, [req.door1.x, req.door1.y], [req.door2.x, req.door2.y] + ) return response def plan_to_room(self, req): @@ -29,8 +51,10 @@ def plan_to_room(self, req): Given a goal room, generate a plan to that room. """ response = PlanToRoomResponse() - #print(self.graph.dfs(self.current_room().name, req.goal_room)) - path = self.graph.points_from_path(self.graph.bfs(self.current_room(), self.graph.getRoom(req.goal_room))) + # print(self.graph.dfs(self.current_room().name, req.goal_room)) + path = self.graph.points_from_path( + self.graph.bfs(self.current_room(), self.graph.getRoom(req.goal_room)) + ) print(path) response.points = [Point(p[0], p[1], 0) for p in path] if path: @@ -42,20 +66,24 @@ def plan_to_point(self, req): Given a point, generate a plan to that point. 
""" response = PlanToPointResponse() - path = self.graph.points_from_path(self.graph.bfs(self.current_room(), self.graph.localise(req.goal.x, req.goal.y))) + path = self.graph.points_from_path( + self.graph.bfs( + self.current_room(), self.graph.localise(req.goal.x, req.goal.y) + ) + ) response.points = [Point(p[0], p[1], 0) for p in path] if path: response.success = True return response - + def current_room(self): robot_pose = rospy.wait_for_message("/robot_pose", PoseWithCovarianceStamped) x = robot_pose.pose.pose.position.x y = robot_pose.pose.pose.position.y return self.graph.localise(x, y) + if __name__ == "__main__": rospy.init_node("graph_navigation_server") server = GraphNavigationServer() rospy.spin() - diff --git a/legacy/lasr_shapely/setup.py b/legacy/lasr_shapely/setup.py index c72a9a381..956f1c2ad 100644 --- a/legacy/lasr_shapely/setup.py +++ b/legacy/lasr_shapely/setup.py @@ -1,12 +1,10 @@ - #!/usr/bin/env python3 from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( - packages=['lasr_shapely'], - package_dir={'': 'src'} + packages=["lasr_shapely"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/legacy/lasr_shapely/src/lasr_shapely/__init__.py b/legacy/lasr_shapely/src/lasr_shapely/__init__.py index d75a0c87a..c7f60e41f 100644 --- a/legacy/lasr_shapely/src/lasr_shapely/__init__.py +++ b/legacy/lasr_shapely/src/lasr_shapely/__init__.py @@ -2,29 +2,44 @@ from lasr_shapely.srv import PointInPolygon2D, PointsInPolygon2D + class LasrShapely: - ''' + """ Wrapper class for Shapely service - ''' + """ def __init__(self): - rospy.wait_for_service('/lasr_shapely/point_in_polygon_2d') + rospy.wait_for_service("/lasr_shapely/point_in_polygon_2d") + + self.point_proxy = rospy.ServiceProxy( + "/lasr_shapely/point_in_polygon_2d", PointInPolygon2D + ) + self.points_proxy = rospy.ServiceProxy( + "/lasr_shapely/points_in_polygon_2d", PointsInPolygon2D + ) - self.point_proxy = rospy.ServiceProxy('/lasr_shapely/point_in_polygon_2d', PointInPolygon2D) - self.points_proxy = rospy.ServiceProxy('/lasr_shapely/points_in_polygon_2d', PointsInPolygon2D) - def is_point_in_polygon_2d(self, polygon_2d_array, x, y): - return self.point_proxy(polygon=[item for sublist in polygon_2d_array for item in sublist], x=x, y=y) - + return self.point_proxy( + polygon=[item for sublist in polygon_2d_array for item in sublist], x=x, y=y + ) + def are_points_in_polygon_2d(self, polygon_2d_array, points_2d_array): - return self.points_proxy(polygon=[item for sublist in polygon_2d_array for item in sublist], - points=[item for sublist in points_2d_array for item in sublist]) - + return self.points_proxy( + polygon=[item for sublist in polygon_2d_array for item in sublist], + points=[item for sublist in points_2d_array for item in sublist], + ) + def are_points_in_polygon_2d_flatarr(self, polygon_2d_array, points_2d_array): - return self.points_proxy(polygon=polygon_2d_array, - points=points_2d_array) + return self.points_proxy(polygon=polygon_2d_array, points=points_2d_array) + -if __name__=='__main__': - rospy.init_node('testgkfdp', anonymous=True) - print(LasrShapely().is_point_in_polygon_2d([[0, 0], [5,0], [10, 10], [0, 5]], 6, 6)) - print(LasrShapely().are_points_in_polygon_2d([[0, 0], [5,0], [10, 10], [0, 5]], [[1,2], [11, 11]])) +if __name__ == "__main__": + rospy.init_node("testgkfdp", anonymous=True) + print( + LasrShapely().is_point_in_polygon_2d([[0, 0], [5, 0], [10, 10], 
[0, 5]], 6, 6) + ) + print( + LasrShapely().are_points_in_polygon_2d( + [[0, 0], [5, 0], [10, 10], [0, 5]], [[1, 2], [11, 11]] + ) + ) diff --git a/legacy/narrow_space_navigation/setup.py b/legacy/narrow_space_navigation/setup.py index 288d46085..5155f92fd 100644 --- a/legacy/narrow_space_navigation/setup.py +++ b/legacy/narrow_space_navigation/setup.py @@ -5,8 +5,7 @@ # fetch values from package.xml setup_args = generate_distutils_setup( - packages=['narrow_space_navigation'], - package_dir={'': 'src'} + packages=["narrow_space_navigation"], package_dir={"": "src"} ) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/legacy/narrow_space_navigation/src/narrow_space_navigation/narrow_space_nav.py b/legacy/narrow_space_navigation/src/narrow_space_navigation/narrow_space_nav.py index 33e1f4b86..9aaf2b361 100755 --- a/legacy/narrow_space_navigation/src/narrow_space_navigation/narrow_space_nav.py +++ b/legacy/narrow_space_navigation/src/narrow_space_navigation/narrow_space_nav.py @@ -4,44 +4,45 @@ from narrow_space_nav_srv import NarrowSpaceNavSrv from tiago_controllers.controllers.controllers import Controllers + def get_narrow_space(): - w = Waypoint() - warped, analytics, M = w.get_lift_information(is_lift=True, is_sim=True) - # warped, analytics, M, dilation = w.get_lift_information(is_lift=True, is_sim=True) - points = rospy.get_param('lift_position_points') - print(f"the narrow space is {w.is_narrow_space(points)}") - res = w.has_enough_free_space(warped) - # print(f"has enough free space: {res}") - - return - - s = NarrowSpaceNavSrv() - occupancy_array = warped - thresh = np.mean(occupancy_array.flatten()) - occupancy_array[occupancy_array < thresh] = 0 # Black regions - Free space - occupancy_array[occupancy_array >= thresh] = 100 # White regions - Occupied - - # get the min point to go to - p = s.choose_target_point(occupancy_array) - # get the global point - global_points = w.local_to_global_points(M=M, points=p, is_lift=True) - # get tiago there - p = Pose() - p.position.x = global_points[0][0] - p.position.y = global_points[0][1] - p.orientation.w = 1 - c = Controllers() - c.base_controller.sync_to_pose(p) + w = Waypoint() + warped, analytics, M = w.get_lift_information(is_lift=True, is_sim=True) + # warped, analytics, M, dilation = w.get_lift_information(is_lift=True, is_sim=True) + points = rospy.get_param("lift_position_points") + print(f"the narrow space is {w.is_narrow_space(points)}") + res = w.has_enough_free_space(warped) + # print(f"has enough free space: {res}") + + return + + s = NarrowSpaceNavSrv() + occupancy_array = warped + thresh = np.mean(occupancy_array.flatten()) + occupancy_array[occupancy_array < thresh] = 0 # Black regions - Free space + occupancy_array[occupancy_array >= thresh] = 100 # White regions - Occupied + + # get the min point to go to + p = s.choose_target_point(occupancy_array) + # get the global point + global_points = w.local_to_global_points(M=M, points=p, is_lift=True) + # get tiago there + p = Pose() + p.position.x = global_points[0][0] + p.position.y = global_points[0][1] + p.orientation.w = 1 + c = Controllers() + c.base_controller.sync_to_pose(p) def height_map_service(points, warped): - rospy.wait_for_service('/input_to_location') + rospy.wait_for_service("/input_to_location") try: req = HeightMapRequest() req.points = points req.warped = warped req.timestamp = rospy.Time.now() - height_map_srv = rospy.ServiceProxy('', HeightMap) + height_map_srv = rospy.ServiceProxy("", HeightMap) resp = height_map_srv(req) 
print(resp) return resp @@ -49,8 +50,8 @@ def height_map_service(points, warped): print("Service call failed: %s" % e) -if __name__ == '__main__': - rospy.init_node('narrow_space_nav') +if __name__ == "__main__": + rospy.init_node("narrow_space_nav") try: print("narrow_space_nav") get_narrow_space() diff --git a/legacy/narrow_space_navigation/src/narrow_space_navigation/narrow_space_nav_srv.py b/legacy/narrow_space_navigation/src/narrow_space_navigation/narrow_space_nav_srv.py index 3c25d5652..43d2b2226 100755 --- a/legacy/narrow_space_navigation/src/narrow_space_navigation/narrow_space_nav_srv.py +++ b/legacy/narrow_space_navigation/src/narrow_space_navigation/narrow_space_nav_srv.py @@ -31,7 +31,7 @@ def calculate_height(self, x, y, occupancy_array, max_height=1.0): return height def distance_to_nearest_obstacle(self, x, y, occupancy_array): - min_distance = float('inf') + min_distance = float("inf") for i in range(occupancy_array.shape[0]): for j in range(occupancy_array.shape[1]): if occupancy_array[i][j] == 100: @@ -67,13 +67,13 @@ def build_height_map(self, occupancy_array): IMPACT_WALLS = 4 SPREAD_WALLS = 6 height_contribution = ( - 1 / (IMPACT_WALLS * math.sqrt(2 * math.pi)) - ) * ( - math.pow(x, 2) / (2 * math.pow(SPREAD_WALLS, 2)) - + math.pow(y, 2) / (2 * math.pow(SPREAD_WALLS, 2)) - + math.pow(SIZE_X - x, 2) / (2 * math.pow(SPREAD_WALLS, 2)) - + math.pow(SIZE_Y - y, 2) / (2 * math.pow(SPREAD_WALLS, 2)) - ) + 1 / (IMPACT_WALLS * math.sqrt(2 * math.pi)) + ) * ( + math.pow(x, 2) / (2 * math.pow(SPREAD_WALLS, 2)) + + math.pow(y, 2) / (2 * math.pow(SPREAD_WALLS, 2)) + + math.pow(SIZE_X - x, 2) / (2 * math.pow(SPREAD_WALLS, 2)) + + math.pow(SIZE_Y - y, 2) / (2 * math.pow(SPREAD_WALLS, 2)) + ) heights[y][x] += height_contribution return heights @@ -94,15 +94,16 @@ def choose_target_point(self, occupancy_array): def plot_height(self, heights): rospy.loginfo("Plotting height map") fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') + ax = fig.add_subplot(111, projection="3d") x, y = np.meshgrid(np.arange(heights.shape[1]), np.arange(heights.shape[0])) - ax.plot_surface(x, y, heights, cmap='viridis') - ax.set_xlabel('X') - ax.set_ylabel('Y') - ax.set_zlabel('Height') - plt.title('3D Height Map') + ax.plot_surface(x, y, heights, cmap="viridis") + ax.set_xlabel("X") + ax.set_ylabel("Y") + ax.set_zlabel("Height") + plt.title("3D Height Map") from lift.defaults import PLOT_SAVE, PLOT_SHOW, TEST, DEBUG_PATH + if PLOT_SHOW: plt.show() if PLOT_SAVE: @@ -112,6 +113,8 @@ def plot_height(self, heights): if __name__ == "__main__": rospy.init_node("narrow_space_nav_srv", anonymous=True) server = NarrowSpaceNavSrv() - srv = rospy.Service('/narrow_space_nav_srv', HeightMap, server.process_occupancy_grid) + srv = rospy.Service( + "/narrow_space_nav_srv", HeightMap, server.process_occupancy_grid + ) rospy.loginfo("String to location server initialised") rospy.spin() diff --git a/legacy/narrow_space_navigation/src/narrow_space_navigation/test.py b/legacy/narrow_space_navigation/src/narrow_space_navigation/test.py index 02acdb39e..b8e0334e0 100644 --- a/legacy/narrow_space_navigation/src/narrow_space_navigation/test.py +++ b/legacy/narrow_space_navigation/src/narrow_space_navigation/test.py @@ -17,30 +17,36 @@ IMPACT = 2 SPREAD = 8 + def prob_density_function(x, mu, sigma): - part1 = 1 / (sigma * math.sqrt(2 * math.pi)) # scaling of the curve + part1 = 1 / (sigma * math.sqrt(2 * math.pi)) # scaling of the curve part2 = math.pow(math.e, -0.5 * math.pow((x - mu) / sigma, 2)) # sigma controls the 
spread of the curve # mu controls the center of the curve # part one controls the height return part1 * part2 + def norm_dist(): return np.random.normal(0, 1) + # def std(dist, impact, spread): # v1 = 1 / (impact * math.sqrt(2 * math.pi)) # v2 = math.pow(dist, 2) / (2 * math.pow(spread, 2)) # return v1 * math.pow(math.e, -v2) + def process_occupancy_grid(occupancy_grid_image): # Convert the Image message to a grayscale numpy array bridge = CvBridge() - occupancy_array = bridge.imgmsg_to_cv2(occupancy_grid_image, desired_encoding='mono8') + occupancy_array = bridge.imgmsg_to_cv2( + occupancy_grid_image, desired_encoding="mono8" + ) # Convert grayscale image to binary occupancy grid - occupancy_array[occupancy_array < 128] = 0 # Black regions - Free space - occupancy_array[occupancy_array >= 128] = 100 # White regions - Occupied + occupancy_array[occupancy_array < 128] = 0 # Black regions - Free space + occupancy_array[occupancy_array >= 128] = 100 # White regions - Occupied # Initialize empty heightmap heights = np.zeros([SIZE, SIZE]) @@ -63,10 +69,12 @@ def process_occupancy_grid(occupancy_grid_image): for x in range(0, SIZE): for y in range(0, SIZE): - heights[x][y] += std(x, IMPACT, SPREAD) \ - + std(y, IMPACT, SPREAD) \ - + std(SIZE - x, IMPACT, SPREAD) \ + heights[x][y] += ( + std(x, IMPACT, SPREAD) + + std(y, IMPACT, SPREAD) + + std(SIZE - x, IMPACT, SPREAD) + std(SIZE - y, IMPACT, SPREAD) + ) # Convert to points for visualization points = np.empty([SIZE * SIZE, 3]) @@ -77,11 +85,8 @@ def process_occupancy_grid(occupancy_grid_image): points[y * SIZE + x][2] = heights[x][y] # mark the points where people are standing for clarity - standing = [ - (12, 25), - (38, 17) - ] - for (x, y) in standing: + standing = [(12, 25), (38, 17)] + for x, y in standing: heights[x][y] = 1 # iterate through height map and find least busy point @@ -96,17 +101,18 @@ def process_occupancy_grid(occupancy_grid_image): # Draw 3D plot using Matplotlib fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') - ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b', marker='o') - ax.set_xlabel('X') - ax.set_ylabel('Y') - ax.set_zlabel('Height') + ax = fig.add_subplot(111, projection="3d") + ax.scatter(points[:, 0], points[:, 1], points[:, 2], c="b", marker="o") + ax.set_xlabel("X") + ax.set_ylabel("Y") + ax.set_zlabel("Height") plt.show() # Return the least busy point as a tuple (x, y) return p + def handle_get_height_map(req): # Process the occupancy grid image and get the least busy point least_busy_point = process_occupancy_grid(req) @@ -114,11 +120,13 @@ def handle_get_height_map(req): # Return the least busy point as the response return GetMapResponse(least_busy_point=least_busy_point) + def height_map_service(): - rospy.init_node('height_map_service') + rospy.init_node("height_map_service") # rospy.Service('get_height_map', GetMap, handle_get_height_map) # rospy.spin() + if __name__ == "__main__": try: height_map_service() diff --git a/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoint_helpers.py b/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoint_helpers.py index de62faa15..d5f691b12 100755 --- a/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoint_helpers.py +++ b/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoint_helpers.py @@ -4,42 +4,60 @@ import matplotlib.pyplot as plt import numpy as np import rospy + # from skspatial.objects import Line import cv2 -from geometry_msgs.msg import Pose, Point, Quaternion, PoseStamped, 
PolygonStamped, Polygon, Point32 +from geometry_msgs.msg import ( + Pose, + Point, + Quaternion, + PoseStamped, + PolygonStamped, + Polygon, + Point32, +) np.set_printoptions(threshold=np.inf) from lift.defaults import TEST + def draw_edges_on_window(window, edges): - plt.imshow(window, cmap='gray') + plt.imshow(window, cmap="gray") plt.show() fig, (ax1, ax2) = plt.subplots(1, 2) - ax1.imshow(window, cmap='gray') - ax1.set_title('Original Image') - ax2.imshow(edges, cmap='gray') - ax2.set_title('Edge Image') - plt.savefig('/home/nicole/robocup/rexy/waypoints/waypoints' + str(TEST) + '-4.jpg', dpi=300, - bbox_inches='tight') + ax1.imshow(window, cmap="gray") + ax1.set_title("Original Image") + ax2.imshow(edges, cmap="gray") + ax2.set_title("Edge Image") + plt.savefig( + "/home/nicole/robocup/rexy/waypoints/waypoints" + str(TEST) + "-4.jpg", + dpi=300, + bbox_inches="tight", + ) plt.show() def draw_edges(edges): - plt.imshow(edges, cmap='gray') + plt.imshow(edges, cmap="gray") plt.show() def draw_contours(contours): for c in contours: - plt.plot(c[:, 0, 0], c[:, 0, 1], 'r') - plt.title('Contours ' + str(TEST)) - plt.savefig('/home/nicole/robocup/rexy/waypoints/waypoints-contours' + str(TEST) + '-5.jpg', dpi=300, - bbox_inches='tight') + plt.plot(c[:, 0, 0], c[:, 0, 1], "r") + plt.title("Contours " + str(TEST)) + plt.savefig( + "/home/nicole/robocup/rexy/waypoints/waypoints-contours" + str(TEST) + "-5.jpg", + dpi=300, + bbox_inches="tight", + ) plt.show() + import random + # def draw_cnp(grid, cnp): # plt.imshow(grid, cmap='gray') # plt.title('CNP' + str(TEST)) @@ -47,7 +65,7 @@ def draw_contours(contours): # plt.show() -def draw_edge(edge, color='r'): +def draw_edge(edge, color="r"): count = 0 for e in edge: plt.plot(e[:, 0], e[:, 1], color=color) @@ -57,51 +75,51 @@ def draw_edge(edge, color='r'): def plot_centers(image, centers): # Plot original image - plt.imshow(image, cmap='gray') - plt.axis('off') + plt.imshow(image, cmap="gray") + plt.axis("off") # Iterate over cluster centers and plot a red circle at each center for center in centers: x, y = center.astype(int) - plt.plot(x, y, 'ro', markersize=5) + plt.plot(x, y, "ro", markersize=5) # Show image with centers plt.show() def visualize_edges3(window, edges): - colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k'] # add more colors if needed + colors = ["r", "g", "b", "c", "m", "y", "k"] # add more colors if needed fig, ax = plt.subplots(figsize=(10, 10)) - ax.imshow(window, cmap='gray') - ax.set_title('Edges') + ax.imshow(window, cmap="gray") + ax.set_title("Edges") for i, edge in enumerate(edges): color = colors[i % len(colors)] - ax.plot(edge[:, 0], edge[:, 1], '-', linewidth=2, color=color) + ax.plot(edge[:, 0], edge[:, 1], "-", linewidth=2, color=color) plt.show() def visualize_edges_test(window, edges): fig, ax = plt.subplots(figsize=(10, 10)) - ax.imshow(window, cmap='gray') - ax.set_title('Edges') + ax.imshow(window, cmap="gray") + ax.set_title("Edges") for edge in edges: - ax.plot(edge[:, 0], edge[:, 1], '-', linewidth=2) + ax.plot(edge[:, 0], edge[:, 1], "-", linewidth=2) plt.show() def plot(contours, window): contours = [np.array(c) for c in contours] - colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k'] # add more colors if needed + colors = ["r", "g", "b", "c", "m", "y", "k"] # add more colors if needed fig, ax = plt.subplots(figsize=(10, 10)) - ax.imshow(window, cmap='gray') + ax.imshow(window, cmap="gray") print(len(contours)) for i, contour in enumerate(contours): color = colors[i % len(colors)] try: - ax.plot(contour[:, 0], contour[:, 
1], '+', linewidth=2, color=color) + ax.plot(contour[:, 0], contour[:, 1], "+", linewidth=2, color=color) except IndexError as e: rospy.logerr(str(e) + str("changing the contour shape")) # contour = contour.reshape(-1, 2) # ax.plot(contour[:, 0], contour[:, 1], '+', linewidth=2, color=color) - plt.title('Contours') + plt.title("Contours") plt.show() @@ -112,14 +130,17 @@ def get_pose(_x, _y, msg, isRobot=True): # x, y = robot_pose.position.x, robot_pose.position.y else: x, y = _x, _y - x, y = (x - msg.info.origin.position.x) / msg.info.resolution, \ - (y - msg.info.origin.position.y) / msg.info.resolution + x, y = (x - msg.info.origin.position.x) / msg.info.resolution, ( + y - msg.info.origin.position.y + ) / msg.info.resolution return x, y def get_footprint(msg): - footprint = rospy.wait_for_message('/move_base/global_costmap/footprint', PolygonStamped) + footprint = rospy.wait_for_message( + "/move_base/global_costmap/footprint", PolygonStamped + ) footprint = footprint.polygon.points ft = [] for p in footprint: @@ -132,9 +153,12 @@ def show_robot_footprint(window, footprint): new_points = np.array(footprint, dtype=np.int32) window = np.array(window, dtype=np.int32) mask = cv2.fillPoly(window, [new_points], 200) - plt.imshow(mask, cmap='gray') - plt.savefig('/home/nicole/robocup/rexy/footprint' + str(TEST) + '.jpg', dpi=300, - bbox_inches='tight') + plt.imshow(mask, cmap="gray") + plt.savefig( + "/home/nicole/robocup/rexy/footprint" + str(TEST) + ".jpg", + dpi=300, + bbox_inches="tight", + ) plt.show() @@ -146,9 +170,9 @@ def get_robot_pixels_in_window(window, footprint): def is_robot_fit_in_elevator(window, footprint, pixels): - rospy.loginfo('showing fit in elevator for robot') + rospy.loginfo("showing fit in elevator for robot") robot_pixels = get_robot_pixels_in_window(window, footprint) - return int(robot_pixels*2) < pixels + return int(robot_pixels * 2) < pixels def area_with_shoelace(msg, elevator_points): @@ -157,8 +181,10 @@ def area_with_shoelace(msg, elevator_points): area = 0.0 for i in range(num_footprint_vertices): j = (i + 1) % num_footprint_vertices # The next vertex index (wrapping around) - area += footprint[i].flatten()[0] * footprint[j].flatten()[1] - footprint[j].flatten()[0] * \ - footprint[i].flatten()[1] + area += ( + footprint[i].flatten()[0] * footprint[j].flatten()[1] + - footprint[j].flatten()[0] * footprint[i].flatten()[1] + ) # The cross product of two adjacent vectors area /= 2.0 # Divide by two to get the signed area @@ -168,7 +194,10 @@ def area_with_shoelace(msg, elevator_points): area_pixel_count_robot = polygon_area / (msg.info.resolution * msg.info.resolution) return area_pixel_count_robot * 3 + from lift.defaults import PLOT_SAVE, PLOT_SHOW, TEST, DEBUG_PATH + + def plot_clusters(points=None, labels=None, dilation=None, centers=None, msg=None): """ This function plots clusters. @@ -179,15 +208,15 @@ def plot_clusters(points=None, labels=None, dilation=None, centers=None, msg=Non :param centers: The centers of the clusters. 
""" # Set up color map - cmap = plt.get_cmap('jet') + cmap = plt.get_cmap("jet") # Show dilation image - plt.imshow(dilation, cmap='gray') + plt.imshow(dilation, cmap="gray") # Set title and axis off - plt.title('midpoints of clusters_' + str(TEST)) + plt.title("midpoints of clusters_" + str(TEST)) - plt.axis('off') + plt.axis("off") # Plot points with color according to their cluster label for label in set(labels): @@ -195,10 +224,16 @@ def plot_clusters(points=None, labels=None, dilation=None, centers=None, msg=Non mask = labels == label cluster_points = points[mask] color = cmap(label / len(set(labels))) - plt.plot(cluster_points[:, 0], cluster_points[:, 1], '.', markersize=2, color=color) + plt.plot( + cluster_points[:, 0], + cluster_points[:, 1], + ".", + markersize=2, + color=color, + ) # Show image with clusters centers - plt.scatter(centers[:, 0], centers[:, 1], c='r', s=10) + plt.scatter(centers[:, 0], centers[:, 1], c="r", s=10) combinations = list(itertools.combinations(centers, 2)) combinations = [list(elem) for elem in combinations] @@ -213,7 +248,7 @@ def plot_clusters(points=None, labels=None, dilation=None, centers=None, msg=Non # line_intersect_poly(centers, dilation) - plt.imshow(dilation, cmap='gray') + plt.imshow(dilation, cmap="gray") # inters = find_intersections(combinations, msg) # @@ -225,13 +260,15 @@ def plot_clusters(points=None, labels=None, dilation=None, centers=None, msg=Non if PLOT_SAVE: plt.savefig(DEBUG_PATH + "/dilation_in_plot_" + str(TEST) + ".jpg") + # from shapely.geometry import LineString, Polygon + def line_intersect_poly(centers, dilation): poly = Polygon(centers) lines = list(itertools.combinations(centers, 2)) lines = [list(elem) for elem in lines] - plt.imshow(dilation, cmap='gray') + plt.imshow(dilation, cmap="gray") # for line in lines: # plt.plot([line[0][0], line[1][0]], [line[0][1], line[1][1]], 'k-', color='r') intersections = [] @@ -252,20 +289,25 @@ def line_intersect_poly(centers, dilation): x2, y2 = line[1] # Check if the slope is different from any other line - if all((x1 - x2) / (y1 - y2) != (x3 - x4) / (y3 - y4) for x3, y3 in centers for x4, y4 in centers if - ((x3, y3) != (line[0][0], line[0][1]) and (x4, y4) != (line[1][0], line[1][0]))): + if all( + (x1 - x2) / (y1 - y2) != (x3 - x4) / (y3 - y4) + for x3, y3 in centers + for x4, y4 in centers + if ( + (x3, y3) != (line[0][0], line[0][1]) + and (x4, y4) != (line[1][0], line[1][0]) + ) + ): filtered_lines.append(line) - - print(filtered_lines) - plt.imshow(dilation, cmap='gray') + plt.imshow(dilation, cmap="gray") for inter in filtered_lines: - l , i = inter + l, i = inter - plt.plot([l[0][0], l[1][0]], [l[0][1], l[1][1]], 'k-', color='r') + plt.plot([l[0][0], l[1][0]], [l[0][1], l[1][1]], "k-", color="r") # plt.plot([inter[0][0], inter[1][0]], [inter[0][1], inter[1][1]], 'k-', color='r') - plt.title('not working_' + str(TEST)) + plt.title("not working_" + str(TEST)) plt.show() @@ -273,6 +315,8 @@ def is_outside_window(width, height, x, y): if x < 0 or y < 0 or x >= width or y >= height: return True return False + + def handle_outside_window(width, height, x, y): new_x = 0 new_y = 0 @@ -284,38 +328,44 @@ def handle_outside_window(width, height, x, y): new_x = width if y >= height: new_y = height - print('new_ points', new_x, new_y) + print("new_ points", new_x, new_y) return new_x, new_y + + def find_intersections(combinations, msg): intersections_lines = [] width, height = msg.info.width, msg.info.height - tl = [0,0] - tr = [width - 1,0] - bl = [0,height - 1] - br = [width - 
1,height - 1] - print('tl', tl, 'tr', tr, 'bl', bl, 'br', br) - print(combinations, 'combinations') + tl = [0, 0] + tr = [width - 1, 0] + bl = [0, height - 1] + br = [width - 1, height - 1] + print("tl", tl, "tr", tr, "bl", bl, "br", br) + print(combinations, "combinations") ab = Line.from_points(tl, tr) bc = Line.from_points(tr, br) cd = Line.from_points(br, bl) da = Line.from_points(bl, tl) sides = [ab, bc, cd, da] - for combination in combinations: line1 = Line.from_points(combination[0], combination[1]) intersections = [] for side in sides: intersection_point = line1.intersect_line(side) - if is_outside_window(width, height, intersection_point[0], intersection_point[1]): - print('outside window', intersection_point) - intersection_point = handle_outside_window(width, height, intersection_point[0], intersection_point[1]) + if is_outside_window( + width, height, intersection_point[0], intersection_point[1] + ): + print("outside window", intersection_point) + intersection_point = handle_outside_window( + width, height, intersection_point[0], intersection_point[1] + ) intersections.append(intersection_point) intersections_lines.append(intersections) return intersections_lines -if __name__ == '__main__': - rospy.init_node('waypoint_node_helper') \ No newline at end of file + +if __name__ == "__main__": + rospy.init_node("waypoint_node_helper") diff --git a/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoints.py b/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoints.py index 350fc7c9b..cd230b5ca 100755 --- a/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoints.py +++ b/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoints.py @@ -25,14 +25,17 @@ # axs[0].imshow(template, cmap='gray') # axs[1].imshow(roi, cmap='gray') + def draw_cnp(grid, cnp): - plt.imshow(grid, cmap='gray') - plt.title('CNP' + str(TEST)) - plt.axis('off') - plt.plot(cnp[0], cnp[1], 'ro') + plt.imshow(grid, cmap="gray") + plt.title("CNP" + str(TEST)) + plt.axis("off") + plt.plot(cnp[0], cnp[1], "ro") # plt.savefig('/home/nicole/robocup/rexy/waypoints/waypoints' + str(TEST) + '-3.jpg', dpi=300, # bbox_inches='tight') plt.show() + + def plot_clusters_1(points=None, labels=None, dilation=None, centers=None, msg=None): """ This function plots clusters. @@ -43,16 +46,16 @@ def plot_clusters_1(points=None, labels=None, dilation=None, centers=None, msg=N :param centers: The centers of the clusters. 
""" # Set up color map - cmap = plt.get_cmap('jet') + cmap = plt.get_cmap("jet") # Show dilation image # plt.imshow(dilation, cmap='gray') # AXS[0].imshow(dilation, cmap='gray') # Set title and axis off - plt.title('midpoints of clusters_' + str(TEST)) + plt.title("midpoints of clusters_" + str(TEST)) - plt.axis('off') + plt.axis("off") # Plot points with color according to their cluster label for label in set(labels): @@ -60,10 +63,16 @@ def plot_clusters_1(points=None, labels=None, dilation=None, centers=None, msg=N mask = labels == label cluster_points = points[mask] color = cmap(label / len(set(labels))) - plt.plot(cluster_points[:, 0], cluster_points[:, 1], '.', markersize=2, color=color) + plt.plot( + cluster_points[:, 0], + cluster_points[:, 1], + ".", + markersize=2, + color=color, + ) # Show image with clusters centers - plt.scatter(centers[:, 0], centers[:, 1], c='r', s=10) + plt.scatter(centers[:, 0], centers[:, 1], c="r", s=10) combinations = list(itertools.combinations(centers, 2)) combinations = [list(elem) for elem in combinations] @@ -86,6 +95,8 @@ def plot_clusters_1(points=None, labels=None, dilation=None, centers=None, msg=N # plt.savefig('/home/nicole/robocup/rexy/intersects' + str(TEST) + '.png', dpi=300, # bbox_inches='tight') # plt.show() + + class Waypoint: def __init__(self): self._msg = None @@ -107,18 +118,22 @@ def __init__(self): def global_costmap_cb(self): """ - This function waits for a message on '/move_base/global_costmap/costmap' topic - and returns it. + This function waits for a message on '/move_base/global_costmap/costmap' topic + and returns it. """ - self._msg = rospy.wait_for_message('/move_base/global_costmap/costmap', OccupancyGrid) + self._msg = rospy.wait_for_message( + "/move_base/global_costmap/costmap", OccupancyGrid + ) return self._msg def local_costmap_cb(self): """ - This function waits for a message on '/move_base/global_costmap/costmap' topic - and returns it. + This function waits for a message on '/move_base/global_costmap/costmap' topic + and returns it. 
""" - self._msg = rospy.wait_for_message('/move_base/local_costmap/costmap', OccupancyGrid) + self._msg = rospy.wait_for_message( + "/move_base/local_costmap/costmap", OccupancyGrid + ) return self._msg def get_pose(self, _x: float, _y: float, msg: OccupancyGrid, isRobot: bool): @@ -127,8 +142,9 @@ def get_pose(self, _x: float, _y: float, msg: OccupancyGrid, isRobot: bool): x, y = robot_pose.position.x, robot_pose.position.y else: x, y = _x, _y - x, y = (x - msg.info.origin.position.x) / msg.info.resolution, \ - (y - msg.info.origin.position.y) / msg.info.resolution + x, y = (x - msg.info.origin.position.x) / msg.info.resolution, ( + y - msg.info.origin.position.y + ) / msg.info.resolution return x, y @@ -137,25 +153,33 @@ def np_grid(self, msg: OccupancyGrid): @staticmethod def clear_costmap(): - rospy.wait_for_service('/move_base/clear_costmaps') + rospy.wait_for_service("/move_base/clear_costmaps") try: - clear_costmaps = rospy.ServiceProxy('/move_base/clear_costmaps', Empty) + clear_costmaps = rospy.ServiceProxy("/move_base/clear_costmaps", Empty) except rospy.ServiceException as e: print("Service call failed: %s" % e) def get_global_costmap_window(self, isRobot=True, _x=None, _y=None): - return self.create_window(msg=self.global_costmap_cb(), isRobot=isRobot, _x=_x, _y=_y) + return self.create_window( + msg=self.global_costmap_cb(), isRobot=isRobot, _x=_x, _y=_y + ) # def get_local_costmap_window(self, isRobot=True, _x=None, _y=None): # return self.local_costmap_cb(msg=self.local_costmap_cb(), isRobot=isRobot, _x=_x, _y=_y) @staticmethod def current_pose(): - msg = rospy.wait_for_message('/amcl_pose', PoseWithCovarianceStamped) + msg = rospy.wait_for_message("/amcl_pose", PoseWithCovarianceStamped) pose = msg.pose.pose return pose - def create_window(self, msg: OccupancyGrid = None, isRobot: bool = True, _x: float = None, _y: float = None): + def create_window( + self, + msg: OccupancyGrid = None, + isRobot: bool = True, + _x: float = None, + _y: float = None, + ): x, y = self.get_pose(_x, _y, msg, isRobot=isRobot) ind = y * msg.info.width + x x, y, ind = int(x), int(y), int(ind) @@ -163,24 +187,30 @@ def create_window(self, msg: OccupancyGrid = None, isRobot: bool = True, _x: flo w_x_max = min(msg.info.width, int(x + self.window_size)) w_y_min = max(0, int(y - self.window_size)) w_y_max = min(msg.info.height, int(y + self.window_size)) - window = np.array(msg.data).reshape(msg.info.height, msg.info.width)[w_y_min:w_y_max, w_x_min:w_x_max] + window = np.array(msg.data).reshape(msg.info.height, msg.info.width)[ + w_y_min:w_y_max, w_x_min:w_x_max + ] # plt.imshow(window, cmap='gray') # plt.show() # plt.savefig('/home/nicole/robocup/rexy/window' + str(TEST) + '.png', dpi=300, bbox_inches='tight') return window @staticmethod - def mask_polygon(window: np = None, poly_points: List[List[float]] = None, threshold: int = 60, - is_white_mask: bool = True): + def mask_polygon( + window: np = None, + poly_points: List[List[float]] = None, + threshold: int = 60, + is_white_mask: bool = True, + ): """ - Masks a polygon in an image using bitwise or and white mask or black mask with bitwise and. + Masks a polygon in an image using bitwise or and white mask or black mask with bitwise and. - :param window: The image to mask. - :param poly_points: The points of the polygon to mask. - :param threshold: The threshold value for masking. - :param is_white_mask: If true, the mask will be white, otherwise it will be black. + :param window: The image to mask. 
+ :param poly_points: The points of the polygon to mask. + :param threshold: The threshold value for masking. + :param is_white_mask: If true, the mask will be white, otherwise it will be black. - :return: The masked image. + :return: The masked image. """ try: temp_window = np.array(window).astype(np.int32) @@ -217,9 +247,9 @@ def get_black_pxl(pixels, threshold=60): return np.where(pixels <= threshold) def has_enough_free_space(self, window): - - - num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(window, connectivity=4, ltype=cv2.CV_32S) + num_labels, labels, stats, _ = cv2.connectedComponentsWithStats( + window, connectivity=4, ltype=cv2.CV_32S + ) print(f"num_labels: {num_labels}, labels: {labels}, stats: {stats}") mask = np.zeros(window.shape, dtype=np.uint8) @@ -229,13 +259,13 @@ def has_enough_free_space(self, window): mask = cv2.bitwise_or(mask, componentMask) plt.figure(figsize=(8, 8)) plt.subplot(121) - plt.imshow(window, cmap='gray') + plt.imshow(window, cmap="gray") plt.title("Image") plt.axis("off") # Display the mask image plt.subplot(122) - plt.imshow(mask, cmap='gray') + plt.imshow(mask, cmap="gray") plt.title("componentMask") plt.axis("off") @@ -252,9 +282,9 @@ def has_enough_free_space(self, window): # area = stats[label, cv2.CC_STAT_AREA] # if area >= min_required_area: # Check if the polygon can fit within the connected component - # if self.can_fit_polygon_in_connected_component(labels == label): - # print("Found a connected component that can fit the polygon") - # return True + # if self.can_fit_polygon_in_connected_component(labels == label): + # print("Found a connected component that can fit the polygon") + # return True return False @@ -262,6 +292,7 @@ def can_fit_polygon_in_connected_component(self, connected_component_mask): footprint = get_footprint(self._msg) polygon_points = np.array(footprint, dtype=np.int32) from shapely.geometry import Point, Polygon + polygon = Polygon(polygon_points) for y in range(connected_component_mask.shape[0]): @@ -275,8 +306,6 @@ def can_fit_polygon_in_connected_component(self, connected_component_mask): return True - - def is_narrow_space(self, points): """ Checks if the robot can fit in a narrow space of elevator @@ -288,17 +317,25 @@ def is_narrow_space(self, points): """ # get the window self.global_costmap_cb() - window = np.array(self._msg.data).astype(np.int32).reshape(self._msg.info.height, self._msg.info.width) + window = ( + np.array(self._msg.data) + .astype(np.int32) + .reshape(self._msg.info.height, self._msg.info.width) + ) elevator_sides = self.transform_points(points, self._msg) # mask a polygon area from the window - black_pixels = self.mask_polygon(window=window, poly_points=elevator_sides, threshold=60) + black_pixels = self.mask_polygon( + window=window, poly_points=elevator_sides, threshold=60 + ) # get the number of black pixels pixels_count = self.get_black_pxl_count(black_pixels) # get robot footprint footprint = get_footprint(self._msg) # show_robot_footprint(footprint=footprint, window=window) # calculate if the robot fits - return not is_robot_fit_in_elevator(window=window, footprint=footprint, pixels=pixels_count) + return not is_robot_fit_in_elevator( + window=window, footprint=footprint, pixels=pixels_count + ) @staticmethod def transform_points(points, msg): @@ -313,8 +350,9 @@ def transform_points(points, msg): pxl_pts = [] for pt in points: - x, y = (pt[0] - msg.info.origin.position.x) / msg.info.resolution, \ - (pt[1] - msg.info.origin.position.y) / msg.info.resolution + x, y = 
(pt[0] - msg.info.origin.position.x) / msg.info.resolution, ( + pt[1] - msg.info.origin.position.y + ) / msg.info.resolution x, y = int(x), int(y) pxl_pts.append([x, y]) return pxl_pts @@ -326,40 +364,59 @@ def extract_given_elevator_warping(self, points, msg: OccupancyGrid): pxl_pts = self.transform_points(points, msg) pxl_pts = np.array(pxl_pts, dtype=np.float32) - h_ab = np.sqrt(((pxl_pts[0][0] - pxl_pts[1][0]) ** 2) + ((pxl_pts[0][1] - pxl_pts[1][1]) ** 2)) - h_cd = np.sqrt(((pxl_pts[2][0] - pxl_pts[3][0]) ** 2) + ((pxl_pts[2][1] - pxl_pts[3][1]) ** 2)) - w_ad = np.sqrt(((pxl_pts[0][0] - pxl_pts[3][0]) ** 2) + ((pxl_pts[0][1] - pxl_pts[3][1]) ** 2)) - w_bc = np.sqrt(((pxl_pts[2][0] - pxl_pts[1][0]) ** 2) + ((pxl_pts[2][1] - pxl_pts[1][1]) ** 2)) + h_ab = np.sqrt( + ((pxl_pts[0][0] - pxl_pts[1][0]) ** 2) + + ((pxl_pts[0][1] - pxl_pts[1][1]) ** 2) + ) + h_cd = np.sqrt( + ((pxl_pts[2][0] - pxl_pts[3][0]) ** 2) + + ((pxl_pts[2][1] - pxl_pts[3][1]) ** 2) + ) + w_ad = np.sqrt( + ((pxl_pts[0][0] - pxl_pts[3][0]) ** 2) + + ((pxl_pts[0][1] - pxl_pts[3][1]) ** 2) + ) + w_bc = np.sqrt( + ((pxl_pts[2][0] - pxl_pts[1][0]) ** 2) + + ((pxl_pts[2][1] - pxl_pts[1][1]) ** 2) + ) max_height = max(h_ab, h_cd).astype(int) max_width = max(w_bc, w_ad).astype(int) - dest_pts = np.array([[0, 0], - [0, max_height - 1], - [max_width - 1, max_height - 1], - [max_width - 1, 0]], dtype=np.float32) + dest_pts = np.array( + [ + [0, 0], + [0, max_height - 1], + [max_width - 1, max_height - 1], + [max_width - 1, 0], + ], + dtype=np.float32, + ) # do the warp perspective M = cv2.getPerspectiveTransform(pxl_pts, dest_pts) grid = np.array(msg.data).reshape(msg.info.height, msg.info.width) - warped = cv2.warpPerspective(grid, M, (max_width, max_height), flags=cv2.INTER_NEAREST) - + warped = cv2.warpPerspective( + grid, M, (max_width, max_height), flags=cv2.INTER_NEAREST + ) # apply erosion warped = cv2.convertScaleAbs(warped) - plt.imshow(warped, cmap='gray', aspect='auto') - plt.axis('off') + plt.imshow(warped, cmap="gray", aspect="auto") + plt.axis("off") if PLOT_SHOW: plt.show() if PLOT_SAVE: plt.savefig(DEBUG_PATH + "/warped_in_extract" + str(TEST) + ".jpg") - self.window = warped return warped, M - def find_free_location_in_elevator(self, M=None, warped=None, elevator_number=None, is_cnp=False): + def find_free_location_in_elevator( + self, M=None, warped=None, elevator_number=None, is_cnp=False + ): # self.find_number_clusters(warped) centers, num_clusters, midpoints, dilation = self.find_clusters(warped) if num_clusters == 0: @@ -422,7 +479,7 @@ def local_to_global_points(self, M=None, points=None, is_lift=False): # x, y = points.flatten()[0], points.flatten()[1] # Loop through all the points and add each point to the new_points list - print(f'points: {points}') + print(f"points: {points}") if is_lift: try: new_points.append([points[0], points[1]]) @@ -458,8 +515,11 @@ def local_to_global_points(self, M=None, points=None, is_lift=False): _x, _y = global_points[i][0], global_points[i][1] # Calculate the x and y coordinates of each point - x, y = (_x * self._msg.info.resolution) + self._msg.info.origin.position.x, \ - (_y * self._msg.info.resolution) + self._msg.info.origin.position.y + x, y = ( + _x * self._msg.info.resolution + ) + self._msg.info.origin.position.x, ( + _y * self._msg.info.resolution + ) + self._msg.info.origin.position.y robot_points.append([x, y]) @@ -491,15 +551,14 @@ def find_clusters(self, warped): dilation = cv2.dilate(erosion, kernel, iterations=self.dilation_iterations) # Display the dilation 
- plt.imshow(dilation, cmap='gray', aspect='auto') - plt.title('dilation') + plt.imshow(dilation, cmap="gray", aspect="auto") + plt.title("dilation") if PLOT_SHOW: plt.show() if PLOT_SAVE: plt.savefig(DEBUG_PATH + "/dilation" + str(TEST) + ".jpg") - # Get non-black pixels non_black_pixels = self.get_white_pxl(pixels=dilation, threshold=55) @@ -510,10 +569,9 @@ def find_clusters(self, warped): try: num_clusters, cluster_labels, db = self.dbscan(points=points) except ValueError: - rospy.logerr('No clusters found') + rospy.logerr("No clusters found") return 0, 0, 0, dilation - # Find centers centers = self.find_center_clusters(db=db, points=points) @@ -540,7 +598,7 @@ def dbscan(self, points=None): db = DBSCAN(eps=self.eps, min_samples=self.min_samples).fit(points) labels = db.labels_ num_clusters = len(set(labels)) - (1 if -1 in labels else 0) - rospy.loginfo('Estimated number of clusters== people: %d' % num_clusters) + rospy.loginfo("Estimated number of clusters== people: %d" % num_clusters) return num_clusters, labels, db def find_center_clusters(self, db=None, points=None): @@ -551,7 +609,7 @@ def find_center_clusters(self, db=None, points=None): :param points: The points to find centers for. :return: The centers of the clusters. - """ + """ centers = [] # Find center of each cluster @@ -566,23 +624,25 @@ def find_center_clusters(self, db=None, points=None): return np.array(centers) # assume it is called only around elevator - def local_costmap_window(self, msg: OccupancyGrid, _x: float = None, _y: float = None): + def local_costmap_window( + self, msg: OccupancyGrid, _x: float = None, _y: float = None + ): w, h = msg.info.width, msg.info.height grid = np.array(msg.data).reshape(msg.info.height, msg.info.width) - window = grid[0:(3 * h // 4), w // 2:w] + window = grid[0 : (3 * h // 4), w // 2 : w] fig, ax = plt.subplots(1, 2) ax[0].imshow(grid) # plt.show() - ax[1].imshow(window, cmap='gray') - plt.title('local_costmap and window' + str(TEST)) + ax[1].imshow(window, cmap="gray") + plt.title("local_costmap and window" + str(TEST)) # plt.savefig('/home/nicole/robocup/rexy/waypoints/local_costmap' + str(TEST) + '-1.png', dpi=300, # bbox_inches='tight') # plt.show() edges = self.find_edges(window) - print('Edges: {}'.format(len(edges))) + print("Edges: {}".format(len(edges))) # draw_edges(edges) contours = self.find_contours(edges) - print('SHAPEEE: {}'.format(window.shape)) + print("SHAPEEE: {}".format(window.shape)) # Loop through each contour and check if it's above/below the threshold filtered_contours = [] @@ -593,15 +653,17 @@ def local_costmap_window(self, msg: OccupancyGrid, _x: float = None, _y: float = center_x, center_y = x + w // 2, y + h // 2 # check if the contour is above or below the threshold point - if threshold_point[1] - 35 < center_y < threshold_point[1] + 35: # contour is above the threshold point + if ( + threshold_point[1] - 35 < center_y < threshold_point[1] + 35 + ): # contour is above the threshold point if threshold_point[0] - 25 < center_x < threshold_point[0] + 25: filtered_contours.append(contour) - print('Filtered Contours LEN: {}'.format(len(filtered_contours) // 2)) + print("Filtered Contours LEN: {}".format(len(filtered_contours) // 2)) draw_contours(filtered_contours) - print('Contours: {}'.format(len(contours))) + print("Contours: {}".format(len(contours))) filtered_contours = self.filter_contours(filtered_contours, window) - print('Filtered Contours LEN: {}'.format(len(filtered_contours) // 2)) + print("Filtered Contours LEN: 
{}".format(len(filtered_contours) // 2)) return window @staticmethod @@ -624,7 +686,9 @@ def find_edges(window): @staticmethod def find_contours(edges): - contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) + contours, _ = cv2.findContours( + edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE + ) return contours @staticmethod @@ -684,7 +748,7 @@ def find_closest_points(self, edges): if distance < min_distance: min_distance = distance closest_points = self.find_extreme_points(ei, ej) - print(f'Closest points: {closest_points}') + print(f"Closest points: {closest_points}") return closest_points @staticmethod @@ -703,13 +767,16 @@ def filter_contours(contours, window): for c in contours: contour = [] for i in c: - if 2 < i[0][0] < window.shape[0] - 2 and 2 < i[0][1] < window.shape[1] - 2: + if ( + 2 < i[0][0] < window.shape[0] - 2 + and 2 < i[0][1] < window.shape[1] - 2 + ): contour.append(i[0]) if len(contour) > min_contour_length: filtered_contours.append(contour) - print('Filtered contours: {}'.format(len(filtered_contours))) - with open('/home/nicole/robocup/rexy/waypoint-filtered.txt', 'a') as f: - f.write(str(len(filtered_contours) // 2) + str(TEST) + '\n') + print("Filtered contours: {}".format(len(filtered_contours))) + with open("/home/nicole/robocup/rexy/waypoint-filtered.txt", "a") as f: + f.write(str(len(filtered_contours) // 2) + str(TEST) + "\n") # plot(filtered_contours, window) return filtered_contours @@ -728,13 +795,16 @@ def filter_contours_type(self, contours=None, window=None): for c in contours: contour = [] for i in c: - if 2 < i[0][0] < window.shape[0] - 2 and 2 < i[0][1] < window.shape[1] - 2: + if ( + 2 < i[0][0] < window.shape[0] - 2 + and 2 < i[0][1] < window.shape[1] - 2 + ): contour.append(i[0]) filtered_contours.append(contour) - print('Filtered contours: {}'.format(len(filtered_contours))) - with open('/home/nicole/robocup/rexy/waypoint-filtered.txt', 'a') as f: - f.write(str(len(filtered_contours) // 2) + str(TEST) + '\n') + print("Filtered contours: {}".format(len(filtered_contours))) + with open("/home/nicole/robocup/rexy/waypoint-filtered.txt", "a") as f: + f.write(str(len(filtered_contours) // 2) + str(TEST) + "\n") # plot(filtered_contours, self.window) return filtered_contours @@ -783,7 +853,9 @@ def min_dist_contours(self, filtered_contours, window=None): elif len(selected_contours) == 1: # add contour and get min_dist selected_contours.append(c) - min_d, Pcenter = self.min_dist_two_contours(selected_contours[0], selected_contours[1]) + min_d, Pcenter = self.min_dist_two_contours( + selected_contours[0], selected_contours[1] + ) else: # keep only the two contours with min distance between them min_d0, Pcenter0 = self.min_dist_two_contours(selected_contours[0], c) @@ -805,7 +877,6 @@ def min_dist_contours(self, filtered_contours, window=None): return Pcenter def find_multiple_cnp(self, window, threshold=60): - window = np.where(window <= threshold, 0, 255).astype(np.uint8) # plt.imshow(window, cmap='gray') # plt.title('dilation in find multiple cnp') @@ -822,9 +893,9 @@ def find_multiple_cnp(self, window, threshold=60): # filter the contours in the correct type contours = self.filter_contours_type(contours=contours, window=self.window) - print('Number of contours: {}'.format(len(contours))) - with open('/home/nicole/robocup/rexy/waypoint-original.txt', 'a') as f: - f.write(str("len contours ") + str(len(contours)) + str(TEST) + '\n') + print("Number of contours: {}".format(len(contours))) + with 
open("/home/nicole/robocup/rexy/waypoint-original.txt", "a") as f: + f.write(str("len contours ") + str(len(contours)) + str(TEST) + "\n") # create combinations of contours 1:1 combinations = list(itertools.combinations(contours, 2)) @@ -836,7 +907,9 @@ def find_multiple_cnp(self, window, threshold=60): for filtered_contours in combinations: pcenter = self.min_dist_contours(filtered_contours=filtered_contours) Dx = self._msg.info.origin.position.x - Dy = self._msg.info.height * self._msg.info.resolution - abs(self._msg.info.origin.position.y) + Dy = self._msg.info.height * self._msg.info.resolution - abs( + self._msg.info.origin.position.y + ) T = np.array([(1, 0, 0, Dx), (0, 1, 0, Dy), (0, 0, 1, 0), (0, 0, 0, 1)]) cnps.append(pcenter) @@ -857,12 +930,14 @@ def find_cnp(self): # find contours within window contours = self.find_contours(edges=edges) draw_contours(contours) - print('Number of contours: {}'.format(len(contours))) + print("Number of contours: {}".format(len(contours))) # print('Number of contours: {}'.format(len(contours))) filtered_contours = self.filter_contours(contours=contours, window=window) pcenter = self.min_dist_contours(filtered_contours=filtered_contours) Dx = self._msg.info.origin.position.x - Dy = self._msg.info.height * self._msg.info.resolution - abs(self._msg.info.origin.position.y) + Dy = self._msg.info.height * self._msg.info.resolution - abs( + self._msg.info.origin.position.y + ) T = np.array([(1, 0, 0, Dx), (0, -1, 0, Dy), (0, 0, -1, 0), (0, 0, 0, 1)]) # TODO: local to global coordinates @@ -891,20 +966,26 @@ def get_elevator_window(self, elevator_number=None): # [5.05235815048, -2.76944160461], # [3.45198941231, -3.251049757]] elevator_center = 5.353797912597656, -4.50060510635376 - points = [[4.282492637634277, -3.5537350177764893], - [4.353395938873291, -5.352860927581787], - [6.462785720825195, -5.264233589172363], - [6.569140911102295, -3.4651081562042236]] + points = [ + [4.282492637634277, -3.5537350177764893], + [4.353395938873291, -5.352860927581787], + [6.462785720825195, -5.264233589172363], + [6.569140911102295, -3.4651081562042236], + ] # uncomment to test with local costmap # self.local_costmap_cb() # self.local_costmap_cp_window(self._msg,_x=elevator_center[0],_y=elevator_center[1]) # self.global_costmap_cb() - self.get_global_costmap_window(isRobot=False,_x=elevator_center[0],_y=elevator_center[1]) + self.get_global_costmap_window( + isRobot=False, _x=elevator_center[0], _y=elevator_center[1] + ) warped, M = self.extract_given_elevator_warping(points=points, msg=self._msg) print(warped) print(type(warped)) - l = self.find_free_location_in_elevator(M=M, warped=warped, elevator_number=elevator_number) + l = self.find_free_location_in_elevator( + M=M, warped=warped, elevator_number=elevator_number + ) # p = Pose() # p.position.x = l[0][0] # p.position.y = l[0][1] @@ -915,24 +996,25 @@ def get_elevator_window(self, elevator_number=None): # self.get_global_costmap_window(isRobot=False,_x=elevator_center[0],_y=elevator_center[1]) # warped, M = self.extract_given_elevator_warping(points=points, msg=self._msg) - def get_lift_information(self, is_lift, is_sim=True): # sim elevator if is_lift: - c = rospy.get_param('lift_position_center_point') + c = rospy.get_param("lift_position_center_point") elevator_center = c[0], c[1] - points = rospy.get_param('lift_position_points') + points = rospy.get_param("lift_position_points") else: - c = rospy.get_param('wait_position_center_point') + c = rospy.get_param("wait_position_center_point") elevator_center = 
c[0], c[1] - points = rospy.get_param('wait_position_points') + points = rospy.get_param("wait_position_points") if DEBUG > 3: print(f"center 1: {c}") print(f"center x: {c[0]} and the type {type(c[0])}") print(f"center y : {c[1]}") - self.get_global_costmap_window(isRobot=False,_x=elevator_center[0],_y=elevator_center[1]) + self.get_global_costmap_window( + isRobot=False, _x=elevator_center[0], _y=elevator_center[1] + ) warped, M = self.extract_given_elevator_warping(points=points, msg=self._msg) centers, num_clusters, midpoints, dilation = self.find_clusters(warped) analytics = [centers, num_clusters, midpoints, elevator_center] @@ -940,9 +1022,8 @@ def get_lift_information(self, is_lift, is_sim=True): return warped, analytics, M - -if __name__ == '__main__': - rospy.init_node('waypoint_node') +if __name__ == "__main__": + rospy.init_node("waypoint_node") wp = Waypoint() # is narrow space working # print(wp.is_narrow_space()) diff --git a/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoints_viz.py b/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoints_viz.py index 1d5b9beb1..89192f092 100755 --- a/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoints_viz.py +++ b/legacy/narrow_space_navigation/src/narrow_space_navigation/waypoints_viz.py @@ -12,10 +12,8 @@ heights = np.zeros([SIZE, SIZE]) # define points where we think people are standing -standing = [ - (12, 25), - (38, 17) -] +standing = [(12, 25), (38, 17)] + # standard deviation def std(dist, impact, spread): @@ -23,11 +21,12 @@ def std(dist, impact, spread): v2 = math.pow(dist, 2) / (2 * math.pow(spread, 2)) return v1 * math.pow(math.e, -v2) + # apply a crude normal distribution over the entire heightmap from points IMPACT = 2 SPREAD = 8 -for (x, y) in standing: +for x, y in standing: for targetX in range(0, SIZE): if targetX >= 0 and targetX < SIZE: for targetY in range(0, SIZE): @@ -41,10 +40,12 @@ def std(dist, impact, spread): for x in range(0, SIZE): for y in range(0, SIZE): - heights[x][y] += std(x, IMPACT, SPREAD) \ - + std(y, IMPACT, SPREAD) \ - + std(SIZE - x, IMPACT, SPREAD) \ + heights[x][y] += ( + std(x, IMPACT, SPREAD) + + std(y, IMPACT, SPREAD) + + std(SIZE - x, IMPACT, SPREAD) + std(SIZE - y, IMPACT, SPREAD) + ) # convert to points points = np.empty([SIZE * SIZE, 3]) @@ -55,7 +56,7 @@ def std(dist, impact, spread): points[y * SIZE + x][2] = heights[x][y] # mark the points where people are standing for clarity -for (x, y) in standing: +for x, y in standing: heights[x][y] = 1 # iterate through height map and find least busy point @@ -68,14 +69,14 @@ def std(dist, impact, spread): h = c p = (x, y) -print('Best point is', p) +print("Best point is", p) # Create a 3D scatter plot fig = plt.figure(figsize=(8, 6)) -ax = fig.add_subplot(111, projection='3d') -ax.scatter(points[:, 0], points[:, 1], points[:, 2], s=3, alpha=0.8, c='b', marker='o') -ax.set_xlabel('X') -ax.set_ylabel('Y') -ax.set_zlabel('Z') -plt.show() \ No newline at end of file +ax = fig.add_subplot(111, projection="3d") +ax.scatter(points[:, 0], points[:, 1], points[:, 2], s=3, alpha=0.8, c="b", marker="o") +ax.set_xlabel("X") +ax.set_ylabel("Y") +ax.set_zlabel("Z") +plt.show() diff --git a/legacy/pcl_segmentation/nodes/test.py b/legacy/pcl_segmentation/nodes/test.py index 24e3c859d..e7c147fb8 100755 --- a/legacy/pcl_segmentation/nodes/test.py +++ b/legacy/pcl_segmentation/nodes/test.py @@ -5,11 +5,14 @@ from sensor_msgs.msg import PointCloud2 from cv_bridge3 import CvBridge, cv2 from common_math import 
pcl_msg_to_cv2 + if __name__ == "__main__": rospy.init_node("test_segment") low = Point(1.34, -1.59, 0.0) high = Point(2.88, -0.61, 8.0) - segment = rospy.ServiceProxy("/pcl_segmentation_server/segment_cuboid", SegmentCuboid) + segment = rospy.ServiceProxy( + "/pcl_segmentation_server/segment_cuboid", SegmentCuboid + ) bridge = CvBridge() while True: @@ -17,8 +20,8 @@ im_before = pcl_msg_to_cv2(pcl) result = segment(pcl, low, high).mask - mask = bridge.imgmsg_to_cv2_np(result) + mask = bridge.imgmsg_to_cv2_np(result) cv2.imshow("mask", mask) cv2.waitKey(1) - cv2.imshow("applied mask", cv2.bitwise_and(im_before,im_before,mask = mask)) + cv2.imshow("applied mask", cv2.bitwise_and(im_before, im_before, mask=mask)) cv2.waitKey(1) diff --git a/legacy/read_pcl_info/setup.py b/legacy/read_pcl_info/setup.py index a50229204..6eac5b2cb 100644 --- a/legacy/read_pcl_info/setup.py +++ b/legacy/read_pcl_info/setup.py @@ -5,8 +5,7 @@ # fetch values from package.xml setup_args = generate_distutils_setup( - packages=['read_pcl_info'], - package_dir={'': 'src'} + packages=["read_pcl_info"], package_dir={"": "src"} ) setup(**setup_args) diff --git a/legacy/read_pcl_info/src/read_pcl_info/clear_continuosly.py b/legacy/read_pcl_info/src/read_pcl_info/clear_continuosly.py index bf08efff1..355d9ac7f 100755 --- a/legacy/read_pcl_info/src/read_pcl_info/clear_continuosly.py +++ b/legacy/read_pcl_info/src/read_pcl_info/clear_continuosly.py @@ -3,11 +3,16 @@ import rospy from std_srvs.srv import Empty + class ClearCostmapNode: def __init__(self): - rospy.init_node('clear_costmap_node') - self.clear_costmap_service = rospy.ServiceProxy('/move_base/clear_costmaps', Empty) - self.clear_interval = rospy.Duration(30.0) # Adjust the clear interval as needed + rospy.init_node("clear_costmap_node") + self.clear_costmap_service = rospy.ServiceProxy( + "/move_base/clear_costmaps", Empty + ) + self.clear_interval = rospy.Duration( + 30.0 + ) # Adjust the clear interval as needed self.last_clear_time = rospy.Time.now() def run(self): @@ -26,7 +31,8 @@ def clear_costmap(self): except rospy.ServiceException as e: rospy.logerr("Failed to clear costmap: {}".format(e)) -if __name__ == '__main__': + +if __name__ == "__main__": try: clear_costmap_node = ClearCostmapNode() clear_costmap_node.run() diff --git a/legacy/read_pcl_info/src/read_pcl_info/pcl_helpers.py b/legacy/read_pcl_info/src/read_pcl_info/pcl_helpers.py index 4d6cc98ff..24fb77ffe 100755 --- a/legacy/read_pcl_info/src/read_pcl_info/pcl_helpers.py +++ b/legacy/read_pcl_info/src/read_pcl_info/pcl_helpers.py @@ -9,30 +9,41 @@ from math import sqrt, cos, sin import laser_geometry.laser_geometry as lg import sensor_msgs.point_cloud2 as pc2 -from tf_module.srv import BaseTransformRequest, ApplyTransformRequest, LatestTransformRequest, BaseTransform, \ - LatestTransform, ApplyTransform +from tf_module.srv import ( + BaseTransformRequest, + ApplyTransformRequest, + LatestTransformRequest, + BaseTransform, + LatestTransform, + ApplyTransform, +) from lift.defaults import PUBLISH_MARKERS from geometry_msgs.msg import Pose from markers.markers_helpers import create_point_marker from image_geometry import PinholeCameraModel + # from tiago_controllers.helpers.pose_helpers import get_pose_from_param from geometry_msgs.msg import Pose, Point, Quaternion MEAN_DISTANCE_THRESHOLD = 0.5 DIST_THRESHOLD = 0.1 + def get_pose_from_param(name): if not rospy.has_param(name): return None pose = rospy.get_param(name) print(pose) - return Pose(Point(pose['position']['x'], - pose['position']['y'], - 
pose['position']['z']), - Quaternion(pose['orientation']['x'], - pose['orientation']['y'], - pose['orientation']['z'], - pose['orientation']['w'])) + return Pose( + Point(pose["position"]["x"], pose["position"]["y"], pose["position"]["z"]), + Quaternion( + pose["orientation"]["x"], + pose["orientation"]["y"], + pose["orientation"]["z"], + pose["orientation"]["w"], + ), + ) + object_pos = get_pose_from_param("/door/pose") door = (object_pos.position.x, object_pos.position.y) @@ -44,25 +55,30 @@ def get_pose_from_param(name): door_detected = False prev_door_detected = False + # get pointcloud def get_pointcloud(): - pointcloud = rospy.wait_for_message('/xtion/depth_registered/points', PointCloud2) + pointcloud = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) return pointcloud # get the laser scan def get_laser_scan(): - laser_scan = rospy.wait_for_message('/scan', LaserScan) + laser_scan = rospy.wait_for_message("/scan", LaserScan) return laser_scan def filter_laser_scan(laser_scan): """ - Filter the laser scan data based on angle and range + Filter the laser scan data based on angle and range """ - middle_part = laser_scan.ranges[len(laser_scan.ranges) // 3: 2 * len(laser_scan.ranges) // 3] + middle_part = laser_scan.ranges[ + len(laser_scan.ranges) // 3 : 2 * len(laser_scan.ranges) // 3 + ] filtered_ranges = [np.nan] * len(laser_scan.ranges) - filtered_ranges[len(laser_scan.ranges) // 3: 2 * len(laser_scan.ranges) // 3] = middle_part + filtered_ranges[len(laser_scan.ranges) // 3 : 2 * len(laser_scan.ranges) // 3] = ( + middle_part + ) mean_distance = np.nanmean(filtered_ranges) return mean_distance, filtered_ranges @@ -70,7 +86,7 @@ def filter_laser_scan(laser_scan): def limit_laser_scan(laser_scan, is_pub_markers=True, is_publish=False, is_pcl=False): """ - Filter the laser scan data based on angle and range and publish it + Filter the laser scan data based on angle and range and publish it """ # def limit_laser_scan(laser_scan, tf_base,tf_latest, tf_apply, tf, is_pub_markers=True, is_publish=False, is_pcl=False): print("Door detected!{}".format(door_detected)) @@ -96,7 +112,6 @@ def limit_laser_scan(laser_scan, is_pub_markers=True, is_publish=False, is_pcl=F pub_pcl.publish(padded_pcl) - def laser_callback(laser_data): global door_detected, object_location # Filter the laser scan data based on angle and range @@ -117,8 +132,8 @@ def laser_callback(laser_data): print("i: {}, range_val: {}, testing".format(i, range_val)) req = BaseTransformRequest() req.point = Point(x=range_val[0], y=range_val[1], z=0.0) - req.frame = String('base_laser_link') - req.target_frame = String('map') + req.frame = String("base_laser_link") + req.target_frame = String("map") x, y, z = tf_base(req) door_pose_laser.publish(create_point_marker(x, y, 0.0, i)) @@ -150,8 +165,12 @@ def laser_callback(laser_data): def point_cloud_callback(pcl_data): # Filter the point cloud data based on range and height filtered_points = [] - for point in point_cloud2.read_points(pcl_data, field_names=("x", "y", "z"), skip_nans=True): - dist = sqrt((point[0] - door_location.x) ** 2 + (point[1] - door_location.y) ** 2) + for point in point_cloud2.read_points( + pcl_data, field_names=("x", "y", "z"), skip_nans=True + ): + dist = sqrt( + (point[0] - door_location.x) ** 2 + (point[1] - door_location.y) ** 2 + ) if dist < 0.1 and abs(point[2] - 1.0) < 0.05: # Adjust the thresholds as needed filtered_points.append(point) @@ -163,8 +182,8 @@ def point_cloud_callback(pcl_data): def laser_scan_to_points(laser_data): # 
def laser_scan_to_points(laser_data, tf_base): req = BaseTransformRequest() - req.frame = String('base_laser_link') - req.target_frame = String('map') + req.frame = String("base_laser_link") + req.target_frame = String("map") points = [] for i, range_val in enumerate(laser_data.ranges): @@ -187,10 +206,17 @@ def laser_scan_to_points(laser_data): def pub_markers(points, is_point=True): if is_point: for i in range(len(points)): - door_pose_laser.publish(create_point_marker(points[i].x, points[i].y, points[i].z, i)) + door_pose_laser.publish( + create_point_marker(points[i].x, points[i].y, points[i].z, i) + ) else: for i, pose in enumerate(points): - pcl_to_point.publish(create_point_marker(pose.position.x, pose.position.y, pose.position.z, i)) + pcl_to_point.publish( + create_point_marker( + pose.position.x, pose.position.y, pose.position.z, i + ) + ) + def is_door_detected(laser_data): global door_detected, door_location, prev_door_detected @@ -218,12 +244,15 @@ def is_door_detected(laser_data): door_detected = False if door_detected != prev_door_detected: - rospy.loginfo("Door state changed: {} -> {}".format(prev_door_detected, door_detected)) + rospy.loginfo( + "Door state changed: {} -> {}".format(prev_door_detected, door_detected) + ) prev_door_detected = door_detected + def get_pcl_from_laser(msg): - """ Converts a LaserScan message to a collection of points in camera frame, with their corresponding pixel values in a flat array. The method pads out the points to add vertical "pillars" to the point cloud. + """Converts a LaserScan message to a collection of points in camera frame, with their corresponding pixel values in a flat array. The method pads out the points to add vertical "pillars" to the point cloud. Args: msg (LaserScan): ROS Laser Scan message from /scan topic. 
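The docstring above describes the core step of get_pcl_from_laser: every 2D laser return is duplicated at several heights, so the flat scan gains vertical "pillars" once projected into the point cloud. A minimal standalone sketch of that padding step, assuming projected (x, y) laser points are already available; pad_to_pillars is a hypothetical helper name, and the height samples mirror the np.linspace(0.0, 1.0, 5) call in the hunk that follows:

    import numpy as np
    from geometry_msgs.msg import Point

    def pad_to_pillars(xy_points, heights=np.linspace(0.0, 1.0, 5)):
        # Duplicate each planar laser return at every sampled height,
        # giving the thin 2D scan vertical extent in the 3D cloud.
        padded = []
        for x, y in xy_points:
            for z in heights:
                padded.append(Point(x=x, y=y, z=float(z)))
        return padded

The pillars presumably exist so that obstacles seen only by the planar scanner still occupy a vertical band after the transform into the camera frame, as the docstring suggests.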
@@ -233,7 +262,9 @@ def get_pcl_from_laser(msg): """ # First get the laser scan points, and then convert to camera frame pcl_msg = lg.LaserProjection().projectLaser(msg) - pcl_points = [p for p in pc2.read_points(pcl_msg, field_names=("x, y, z"), skip_nans=True)] + pcl_points = [ + p for p in pc2.read_points(pcl_msg, field_names=("x, y, z"), skip_nans=True) + ] tf_req = LatestTransformRequest() tf_req.target_frame = "xtion_rgb_optical_frame" @@ -243,7 +274,7 @@ def get_pcl_from_laser(msg): padded_points = [] for point in pcl_points: # Pad out the points to add vertical "pillars" to the point cloud - for z in np.linspace(0., 1., 5): + for z in np.linspace(0.0, 1.0, 5): padded_points.append(Point(x=point[0], y=point[1], z=z)) apply_req = ApplyTransformRequest() @@ -260,17 +291,16 @@ def get_pcl_from_laser(msg): return padded_pcl - - - def transform_pointcloud_to_map(pcl, source_frame="xtion_rgb_optical_frame"): pss = [] - for point in point_cloud2.read_points(pcl, field_names=("x", "y", "z"), skip_nans=True): + for point in point_cloud2.read_points( + pcl, field_names=("x", "y", "z"), skip_nans=True + ): pss.append(Point(x=point[0], y=point[1], z=point[2])) req = BaseTransformRequest() req.frame = String(source_frame) - req.target_frame = String('map') + req.target_frame = String("map") req.points = pss points_map = tf_base(req) @@ -296,25 +326,25 @@ def door_detected_callback(msg): camera = PinholeCameraModel() # tf - tf_base = rospy.ServiceProxy('base_transform', BaseTransform) - tf_latest = rospy.ServiceProxy('latest_transform', LatestTransform) - tf_apply = rospy.ServiceProxy('apply_transform', ApplyTransform) + tf_base = rospy.ServiceProxy("base_transform", BaseTransform) + tf_latest = rospy.ServiceProxy("latest_transform", LatestTransform) + tf_apply = rospy.ServiceProxy("apply_transform", ApplyTransform) # door detected door_pose = rospy.Publisher("/door_detected", Bool, queue_size=100) door_pose_laser = rospy.Publisher("/door_pose_laser", Marker, queue_size=100) - sub_door_detected = rospy.Subscriber('/door_detected', Bool, door_detected_callback) + sub_door_detected = rospy.Subscriber("/door_detected", Bool, door_detected_callback) pcl_to_point = rospy.Publisher("/pcl_to_point", Marker, queue_size=100) pub_pcl = rospy.Publisher("/pcl_from_laser_debug", PointCloud2, queue_size=10) # laser - pub_filtered = rospy.Publisher('/filtered_laser_scan', LaserScan, queue_size=10) - laser = rospy.wait_for_message('/scan', LaserScan) + pub_filtered = rospy.Publisher("/filtered_laser_scan", LaserScan, queue_size=10) + laser = rospy.wait_for_message("/scan", LaserScan) limit_laser_scan(laser) print(door_detected, " door_detected++") while True: - laser = rospy.wait_for_message('/scan', LaserScan) + laser = rospy.wait_for_message("/scan", LaserScan) limit_laser_scan(laser) print(door_detected, " door_detected----") rospy.sleep(5) diff --git a/skills/config/motions.yaml b/skills/config/motions.yaml index 8d28c5b52..302eb6083 100644 --- a/skills/config/motions.yaml +++ b/skills/config/motions.yaml @@ -1,6 +1,11 @@ play_motion: motions: - reach_arm: + reach_arm_vertical_gripper: + joints: [arm_1_joint, arm_2_joint, arm_3_joint, arm_4_joint, arm_5_joint, arm_6_joint, arm_7_joint] + points: + - positions: [1.61, -0.93, -3.14, 1.83, -1.58, -0.53, 0.00] + time_from_start: 0.0 + reach_arm_horizontal_gripper: joints: [arm_1_joint, arm_2_joint, arm_3_joint, arm_4_joint, arm_5_joint, arm_6_joint, arm_7_joint] points: - positions: [1.61, -0.93, -3.14, 1.83, -1.58, -0.53, 0.00] @@ -19,4 +24,19 @@ play_motion: 
joints: [torso_lift_joint, head_1_joint, head_2_joint] points: - positions: [0.25, 0.0, 0.0] + time_from_start: 0.0 + look_left: + joints: [head_1_joint, head_2_joint] + points: + - positions: [0.5, 0.0] + time_from_start: 0.0 + look_right: + joints: [head_1_joint, head_2_joint] + points: + - positions: [-0.5, 0.0] + time_from_start: 0.0 + look_centre: + joints: [head_1_joint, head_2_joint] + points: + - positions: [0.0, 0.0] time_from_start: 0.0 \ No newline at end of file diff --git a/skills/scripts/unit_test_describe_people.py b/skills/scripts/unit_test_describe_people.py index 364099a28..f85220d52 100644 --- a/skills/scripts/unit_test_describe_people.py +++ b/skills/scripts/unit_test_describe_people.py @@ -8,14 +8,17 @@ if __name__ == "__main__": rospy.init_node("test_describe") - sm = smach.StateMachine(outcomes=['end'], output_keys=['people']) + sm = smach.StateMachine(outcomes=["end"], output_keys=["people"]) with sm: - sm.add('DESCRIBE', DescribePeople(), transitions={ - 'succeeded': 'end', 'failed': 'end'}) + sm.add( + "DESCRIBE", + DescribePeople(), + transitions={"succeeded": "end", "failed": "end"}, + ) sm.execute() - print('\n\nDetected people:', sm.userdata['people'][0]['features']) + print("\n\nDetected people:", sm.userdata["people"][0]["features"]) rospy.signal_shutdown("down") diff --git a/skills/setup.py b/skills/setup.py index 2443ee264..a1cf81c17 100644 --- a/skills/setup.py +++ b/skills/setup.py @@ -1,12 +1,8 @@ - #!/usr/bin/env python3 from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup( - packages=['lasr_skills'], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=["lasr_skills"], package_dir={"": "src"}) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/skills/src/lasr_skills/__init__.py b/skills/src/lasr_skills/__init__.py index a1931c23d..736ee4823 100755 --- a/skills/src/lasr_skills/__init__.py +++ b/skills/src/lasr_skills/__init__.py @@ -17,3 +17,7 @@ from .handover_object import HandoverObject from .ask_and_listen import AskAndListen from .clip_vqa import QueryImage +from .detect_faces import DetectFaces +from .recognise import Recognise +from .detect_pointing import DetectPointingDirection +from .detect_gesture import DetectGesture diff --git a/skills/src/lasr_skills/ask_and_listen.py b/skills/src/lasr_skills/ask_and_listen.py index cd133437e..49221283f 100644 --- a/skills/src/lasr_skills/ask_and_listen.py +++ b/skills/src/lasr_skills/ask_and_listen.py @@ -11,7 +11,6 @@ def __init__( tts_phrase: Union[str, None] = None, tts_phrase_format_str: Union[str, None] = None, ): - if tts_phrase is not None: smach.StateMachine.__init__( self, diff --git a/skills/src/lasr_skills/clip_vqa.py b/skills/src/lasr_skills/clip_vqa.py index 7126d10c0..ad95ad302 100755 --- a/skills/src/lasr_skills/clip_vqa.py +++ b/skills/src/lasr_skills/clip_vqa.py @@ -5,9 +5,7 @@ class QueryImage(smach_ros.ServiceState): - def __init__(self, possible_answers: Union[None, List[str]] = None): - if possible_answers is not None: super(QueryImage, self).__init__( "/clip_vqa/query_service", diff --git a/skills/src/lasr_skills/describe_people.py b/skills/src/lasr_skills/describe_people.py index 1530311fb..cc2333c8d 100755 --- a/skills/src/lasr_skills/describe_people.py +++ b/skills/src/lasr_skills/describe_people.py @@ -6,75 +6,106 @@ import cv2_img import numpy as np from lasr_vision_msgs.msg import BodyPixMaskRequest -from lasr_vision_msgs.srv import 
YoloDetection, BodyPixDetection, TorchFaceFeatureDetectionDescription +from lasr_vision_msgs.srv import ( + YoloDetection, + BodyPixDetection, + TorchFaceFeatureDetectionDescription, +) from numpy2message import numpy2message from .vision import GetImage, ImageMsgToCv2 import numpy as np class DescribePeople(smach.StateMachine): - def __init__(self): smach.StateMachine.__init__( - self, outcomes=['succeeded', 'failed'], input_keys=[], output_keys=['people']) + self, + outcomes=["succeeded", "failed"], + input_keys=[], + output_keys=["people"], + ) with self: - smach.StateMachine.add('GET_IMAGE', GetImage(), transitions={ - 'succeeded': 'CONVERT_IMAGE'}) - smach.StateMachine.add('CONVERT_IMAGE', ImageMsgToCv2(), transitions={ - 'succeeded': 'SEGMENT'}) - - sm_con = smach.Concurrence(outcomes=['succeeded', 'failed'], - default_outcome='failed', - outcome_map={'succeeded': { - 'SEGMENT_YOLO': 'succeeded', 'SEGMENT_BODYPIX': 'succeeded'}}, - input_keys=['img', 'img_msg',], - output_keys=['people_detections', 'bodypix_masks']) + smach.StateMachine.add( + "GET_IMAGE", GetImage(), transitions={"succeeded": "CONVERT_IMAGE"} + ) + smach.StateMachine.add( + "CONVERT_IMAGE", ImageMsgToCv2(), transitions={"succeeded": "SEGMENT"} + ) + + sm_con = smach.Concurrence( + outcomes=["succeeded", "failed"], + default_outcome="failed", + outcome_map={ + "succeeded": { + "SEGMENT_YOLO": "succeeded", + "SEGMENT_BODYPIX": "succeeded", + } + }, + input_keys=[ + "img", + "img_msg", + ], + output_keys=["people_detections", "bodypix_masks"], + ) with sm_con: - smach.Concurrence.add('SEGMENT_YOLO', self.SegmentYolo()) - smach.Concurrence.add('SEGMENT_BODYPIX', self.SegmentBodypix()) - - smach.StateMachine.add('SEGMENT', sm_con, transitions={ - 'succeeded': 'FEATURE_EXTRACTION'}) - smach.StateMachine.add('FEATURE_EXTRACTION', self.FeatureExtraction(), transitions={ - 'succeeded': 'succeeded'}) - + smach.Concurrence.add("SEGMENT_YOLO", self.SegmentYolo()) + smach.Concurrence.add("SEGMENT_BODYPIX", self.SegmentBodypix()) + + smach.StateMachine.add( + "SEGMENT", sm_con, transitions={"succeeded": "FEATURE_EXTRACTION"} + ) + smach.StateMachine.add( + "FEATURE_EXTRACTION", + self.FeatureExtraction(), + transitions={"succeeded": "succeeded"}, + ) + class SegmentYolo(smach.State): - ''' + """ Segment using YOLO This should be turned into / merged with generic states - ''' + """ def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=[ - 'img_msg'], output_keys=['people_detections']) - self.yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["img_msg"], + output_keys=["people_detections"], + ) + self.yolo = rospy.ServiceProxy("/yolov8/detect", YoloDetection) def execute(self, userdata): try: - result = self.yolo( - userdata.img_msg, "yolov8n-seg.pt", 0.5, 0.3) + result = self.yolo(userdata.img_msg, "yolov8n-seg.pt", 0.5, 0.3) userdata.people_detections = [ - det for det in result.detected_objects if det.name == "person"] - return 'succeeded' + det for det in result.detected_objects if det.name == "person" + ] + return "succeeded" except rospy.ServiceException as e: rospy.logwarn(f"Unable to perform inference. 
({str(e)})") - return 'failed' + return "failed" class SegmentBodypix(smach.State): - ''' + """ Segment using Bodypix This should be turned into / merged with generic states - ''' + """ def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=[ - 'img_msg',], output_keys=['bodypix_masks']) - self.bodypix = rospy.ServiceProxy( - '/bodypix/detect', BodyPixDetection) + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=[ + "img_msg", + ], + output_keys=["bodypix_masks"], + ) + self.bodypix = rospy.ServiceProxy("/bodypix/detect", BodyPixDetection) def execute(self, userdata): try: @@ -86,90 +117,104 @@ def execute(self, userdata): result = self.bodypix(userdata.img_msg, "resnet50", 0.7, masks) userdata.bodypix_masks = result.masks rospy.logdebug("Found poses: %s" % str(len(result.poses))) - neck_coord = (int(result.poses[0].coord[0]), int(result.poses[0].coord[1])) + neck_coord = ( + int(result.poses[0].coord[0]), + int(result.poses[0].coord[1]), + ) rospy.logdebug("Coordinate of the neck is: %s" % str(neck_coord)) - return 'succeeded' + return "succeeded" except rospy.ServiceException as e: rospy.logerr(f"Unable to perform inference. ({str(e)})") - return 'failed' + return "failed" class FeatureExtraction(smach.State): - ''' + """ Perform feature extraction This could be split into more states in theory, but that might just be unnecessary work - ''' + """ def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], input_keys=[ - 'img', 'people_detections', 'bodypix_masks'], output_keys=['people']) + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["img", "people_detections", "bodypix_masks"], + output_keys=["people"], + ) self.torch_face_features = rospy.ServiceProxy( - '/torch/detect/face_features', TorchFaceFeatureDetectionDescription) + "/torch/detect/face_features", TorchFaceFeatureDetectionDescription + ) def execute(self, userdata): if len(userdata.people_detections) == 0: rospy.logerr("Couldn't find anyone!") - return 'failed' + return "failed" elif len(userdata.people_detections) == 1: rospy.logdebug("There is one person.") else: - rospy.logdebug( - f"There are {len(userdata.people_detections)} people.") + rospy.logdebug(f"There are {len(userdata.people_detections)} people.") img = userdata.img height, width, _ = img.shape people = [] for person in userdata.people_detections: - rospy.logdebug( - f"\n\nFound person with confidence {person.confidence}!") + rospy.logdebug(f"\n\nFound person with confidence {person.confidence}!") # mask for this person mask_image = np.zeros((height, width), np.uint8) contours = np.array(person.xyseg).reshape(-1, 2) - cv2.fillPoly(mask_image, pts=np.int32( - [contours]), color=(255, 255, 255)) + cv2.fillPoly( + mask_image, pts=np.int32([contours]), color=(255, 255, 255) + ) mask_bin = mask_image > 128 - + # process part masks - for (bodypix_mask, part) in zip(userdata.bodypix_masks, ['torso', 'head']): + for bodypix_mask, part in zip( + userdata.bodypix_masks, ["torso", "head"] + ): part_mask = np.array(bodypix_mask.mask).reshape( - bodypix_mask.shape[0], bodypix_mask.shape[1]) - + bodypix_mask.shape[0], bodypix_mask.shape[1] + ) + # filter out part for current person segmentation try: part_mask[mask_bin == 0] = 0 except Exception: - rospy.logdebug('|> Failed to check {part} is visible') + rospy.logdebug("|> Failed to check {part} is visible") continue if part_mask.any(): - rospy.logdebug(f'|> Person has {part} visible') + 
rospy.logdebug(f"|> Person has {part} visible") else: - rospy.logdebug( - f'|> Person does not have {part} visible') + rospy.logdebug(f"|> Person does not have {part} visible") continue - if part == 'torso': + if part == "torso": torso_mask = part_mask - elif part == 'head': + elif part == "head": head_mask = part_mask - torso_mask_data, torso_mask_shape, torso_mask_dtype = numpy2message(torso_mask) - head_mask_data, head_mask_shape, head_mask_dtype = numpy2message(head_mask) + torso_mask_data, torso_mask_shape, torso_mask_dtype = numpy2message( + torso_mask + ) + head_mask_data, head_mask_shape, head_mask_dtype = numpy2message( + head_mask + ) full_frame = cv2_img.cv2_img_to_msg(img) rst = self.torch_face_features( - full_frame, - head_mask_data, head_mask_shape, head_mask_dtype, - torso_mask_data, torso_mask_shape, torso_mask_dtype, + full_frame, + head_mask_data, + head_mask_shape, + head_mask_dtype, + torso_mask_data, + torso_mask_shape, + torso_mask_dtype, ).description - people.append({ - 'detection': person, - 'features': rst - }) + people.append({"detection": person, "features": rst}) # Userdata: # - people @@ -177,5 +222,5 @@ def execute(self, userdata): # - parts # - - part # - mask - userdata['people'] = people - return 'succeeded' + userdata["people"] = people + return "succeeded" diff --git a/skills/src/lasr_skills/detect.py b/skills/src/lasr_skills/detect.py index a647bd108..ee172c429 100644 --- a/skills/src/lasr_skills/detect.py +++ b/skills/src/lasr_skills/detect.py @@ -10,7 +10,6 @@ class Detect(smach.State): - def __init__( self, image_topic: str = "/xtion/rgb/image_raw", diff --git a/skills/src/lasr_skills/detect_3d.py b/skills/src/lasr_skills/detect_3d.py index 0e8eba925..3a3984aa5 100644 --- a/skills/src/lasr_skills/detect_3d.py +++ b/skills/src/lasr_skills/detect_3d.py @@ -8,7 +8,6 @@ class Detect3D(smach.State): - def __init__( self, depth_topic: str = "/xtion/depth_registered/points", diff --git a/skills/src/lasr_skills/detect_3d_in_area.py b/skills/src/lasr_skills/detect_3d_in_area.py index 8f3d32a37..4273dee48 100644 --- a/skills/src/lasr_skills/detect_3d_in_area.py +++ b/skills/src/lasr_skills/detect_3d_in_area.py @@ -9,9 +9,7 @@ class Detect3DInArea(smach.StateMachine): - class FilterDetections(smach.State): - def __init__(self, area_polygon: Polygon): smach.State.__init__( self, @@ -53,7 +51,6 @@ def __init__( ) with self: - smach.StateMachine.add( "DETECT_OBJECTS_3D", Detect3D( diff --git a/skills/src/lasr_skills/detect_faces.py b/skills/src/lasr_skills/detect_faces.py new file mode 100644 index 000000000..c0605b3f8 --- /dev/null +++ b/skills/src/lasr_skills/detect_faces.py @@ -0,0 +1,25 @@ +import rospy +import smach + +from lasr_vision_msgs.srv import DetectFaces as DetectFacesSrv +from sensor_msgs.msg import Image + + +class DetectFaces(smach.State): + + def __init__(self, image_topic: str = "/xtion/rgb/image_raw"): + smach.State.__init__( + self, outcomes=["succeeded", "failed"], output_keys=["detections"] + ) + self._image_topic = image_topic + self._detect_faces = rospy.ServiceProxy("/detect_faces", DetectFacesSrv) + + def execute(self, userdata): + img_msg = rospy.wait_for_message(self._image_topic, Image) + try: + result = self._detect_faces(img_msg) + userdata.detections = result + return "succeeded" + except rospy.ServiceException as e: + rospy.logwarn(f"Unable to perform inference. 
({str(e)})") + return "failed" diff --git a/skills/src/lasr_skills/detect_gesture.py b/skills/src/lasr_skills/detect_gesture.py new file mode 100755 index 000000000..5ac565366 --- /dev/null +++ b/skills/src/lasr_skills/detect_gesture.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +import smach +import rospy +from lasr_skills.vision import GetImage +from lasr_vision_msgs.srv import BodyPixDetection, BodyPixDetectionRequest +from lasr_vision_msgs.msg import BodyPixMaskRequest + + +class DetectGesture(smach.State): + """ + State for detecting gestures. + """ + + def __init__(self, gesture_to_detect: str = "raising_left_hand"): + self.gesture_to_detect = gesture_to_detect + smach.State.__init__( + self, + outcomes=["succeeded", "failed"], + input_keys=["img_msg"], + output_keys=["gesture_detected"], + ) + + self.body_pix_client = rospy.ServiceProxy("/bodypix/detect", BodyPixDetection) + + def execute(self, userdata): + + body_pix_masks = BodyPixMaskRequest() + body_pix_masks.parts = [ + "left_shoulder", + "right_shoulder", + "left_wrist", + "right_wrist", + ] + masks = [body_pix_masks] + + req = BodyPixDetectionRequest() + req.image_raw = userdata.img_msg + req.masks = masks + req.dataset = "resnet50" + req.confidence = 0.7 + + try: + res = self.body_pix_client(req) + except Exception as e: + print(e) + return "failed" + + part_info = {} + poses = res.poses + for pose in poses: + for keypoint in pose.keypoints: + part_info[keypoint.part] = { + "x": keypoint.xy[0], + "y": keypoint.xy[1], + "score": keypoint.score, + } + + if self.gesture_to_detect == "raising_left_hand": + if part_info["leftWrist"]["y"] < part_info["leftShoulder"]["y"]: + userdata.gesture_detected = True + else: + userdata.gesture_detected = False + elif self.gesture_to_detect == "raising_right_hand": + if part_info["rightWrist"]["y"] < part_info["rightShoulder"]["y"]: + userdata.gesture_detected = True + else: + userdata.gesture_detected = False + else: + raise ValueError("Invalid gesture to detect") + + return "succeeded" + + +class GestureDetectionSM(smach.StateMachine): + """ + State machine for detecting gestures. 
+ """ + + def __init__(self, gesture_to_detect: str = "raising_left_hand"): + smach.StateMachine.__init__(self, outcomes=["succeeded", "failed"]) + self.gesture_to_detect = gesture_to_detect + with self: + smach.StateMachine.add( + "GET_IMAGE", + GetImage(), + transitions={"succeeded": "BODY_PIX_DETECTION", "failed": "failed"}, + ) + + smach.StateMachine.add( + "BODY_PIX_DETECTION", + DetectGesture(gesture_to_detect=self.gesture_to_detect), + transitions={"succeeded": "succeeded", "failed": "failed"}, + ) + + +if __name__ == "__main__": + rospy.init_node("gesture_detection_sm") + while not rospy.is_shutdown(): + sm = GestureDetectionSM() + sm.execute() + print("Raising Left Hand Detected:", sm.userdata.gesture_detected) + + rospy.spin() diff --git a/skills/src/lasr_skills/detect_pointing.py b/skills/src/lasr_skills/detect_pointing.py index 95f2ebe6c..21895dfc1 100644 --- a/skills/src/lasr_skills/detect_pointing.py +++ b/skills/src/lasr_skills/detect_pointing.py @@ -4,14 +4,23 @@ from lasr_vision_msgs.srv import PointingDirection -class PointingDetector(smach.State): - def __init__(self): - smach.State.__init__(self, outcomes=['succeeded', 'failed'], output_keys=['direction']) - self.service = rospy.ServiceProxy('/pointing_detection_service', PointingDirection) - self.image_raw = rospy.wait_for_message('/xtion/rgb/image_raw', Image) + +class DetectPointingDirection(smach.State): + def __init__(self, image_topic: str = "/xtion/rgb/image_raw"): + smach.State.__init__( + self, outcomes=["succeeded", "failed"], output_keys=["pointing_direction"] + ) + self._image_topic = image_topic + self._pointing_service = rospy.ServiceProxy( + "/pointing_detection_service", PointingDirection + ) + self._pointing_service.wait_for_service() def execute(self, userdata): - resp = self.service(self.image_raw) - userdata.direction = resp.direction - return 'succeeded' if resp.direction != 'Err' else 'failed' \ No newline at end of file + img_msg = rospy.wait_for_message(self._image_topic, Image) + resp = self._pointing_service(img_msg) + if resp.direction == "NONE" or resp.direction == "FORWARDS": + return "failed" + userdata.pointing_direction = resp.direction + return "succeeded" diff --git a/skills/src/lasr_skills/find_named_person.py b/skills/src/lasr_skills/find_named_person.py index f1055f79a..e4a2773fb 100755 --- a/skills/src/lasr_skills/find_named_person.py +++ b/skills/src/lasr_skills/find_named_person.py @@ -11,7 +11,6 @@ class FindNamedPerson(smach.StateMachine): - class GetLocation(smach.State): def __init__(self): smach.State.__init__( @@ -115,7 +114,6 @@ def __init__( waypoints_to_iterate: List[Pose] = waypoints with self: - smach.StateMachine.add( "GET_POSE", self.GetPose(), @@ -138,7 +136,6 @@ def __init__( ) with waypoint_iterator: - container_sm = smach.StateMachine( outcomes=["succeeded", "failed", "continue"], input_keys=["location_index", "waypoints"], @@ -146,7 +143,6 @@ def __init__( ) with container_sm: - smach.StateMachine.add( "GET_LOCATION", self.GetLocation(), diff --git a/skills/src/lasr_skills/find_person.py b/skills/src/lasr_skills/find_person.py index 94dadccc1..e975f25d5 100755 --- a/skills/src/lasr_skills/find_person.py +++ b/skills/src/lasr_skills/find_person.py @@ -10,7 +10,6 @@ class FindPerson(smach.StateMachine): - class GetLocation(smach.State): def __init__(self): smach.State.__init__( @@ -76,7 +75,6 @@ def __init__(self, waypoints: List[Pose]): ) with self: - smach.StateMachine.add( "GET_POSE", self.GetPose(), @@ -99,7 +97,6 @@ def __init__(self, waypoints: List[Pose]): ) 
with waypoint_iterator: - container_sm = smach.StateMachine( outcomes=["succeeded", "failed", "continue"], input_keys=["location_index", "waypoints"], @@ -107,7 +104,6 @@ def __init__(self, waypoints: List[Pose]): ) with container_sm: - smach.StateMachine.add( "GET_LOCATION", self.GetLocation(), diff --git a/skills/src/lasr_skills/go_to_location.py b/skills/src/lasr_skills/go_to_location.py index 01553e30d..3d30129e8 100755 --- a/skills/src/lasr_skills/go_to_location.py +++ b/skills/src/lasr_skills/go_to_location.py @@ -24,13 +24,11 @@ class GoToLocation(smach.StateMachine): - def __init__( self, location: Union[Pose, None] = None, location_param: Union[str, None] = None, ): - if location is not None or location_param is not None: super(GoToLocation, self).__init__(outcomes=["succeeded", "failed"]) else: @@ -56,7 +54,6 @@ def __init__( ) if not IS_SIMULATION: - if PUBLIC_CONTAINER: rospy.logwarn( "You are using a public container. The head manager will not be stopped during navigation." @@ -155,7 +152,6 @@ def __init__( ) if not IS_SIMULATION: - if PUBLIC_CONTAINER: rospy.logwarn( "You are using a public container. The head manager will not be start following navigation." diff --git a/skills/src/lasr_skills/handover_object.py b/skills/src/lasr_skills/handover_object.py index 9104e2e8f..f8742834f 100755 --- a/skills/src/lasr_skills/handover_object.py +++ b/skills/src/lasr_skills/handover_object.py @@ -12,7 +12,6 @@ class HandoverObject(smach.StateMachine): - def __init__(self): smach.StateMachine.__init__( self, outcomes=["succeeded", "failed"], input_keys=["object_name"] diff --git a/skills/src/lasr_skills/listen_for.py b/skills/src/lasr_skills/listen_for.py index 52df8b5ef..6d1446a23 100644 --- a/skills/src/lasr_skills/listen_for.py +++ b/skills/src/lasr_skills/listen_for.py @@ -9,13 +9,12 @@ class ListenFor(smach_ros.SimpleActionState): def __init__(self, wake_word: str): - def speech_result_cb(userdata, status, result): if status == GoalStatus.SUCCEEDED: if wake_word in result.sequence.lower(): return "succeeded" return "not_done" - return "aborted" + return "not_done" smach_ros.SimpleActionState.__init__( self, diff --git a/skills/src/lasr_skills/play_motion.py b/skills/src/lasr_skills/play_motion.py index a24e480d2..909c9e1f8 100644 --- a/skills/src/lasr_skills/play_motion.py +++ b/skills/src/lasr_skills/play_motion.py @@ -7,7 +7,6 @@ class PlayMotion(smach_ros.SimpleActionState): - def _needs_planning(self, motion_name: str) -> bool: joints: List[str] = rospy.get_param( f"/play_motion/motions/{motion_name}/joints" diff --git a/skills/src/lasr_skills/receive_object.py b/skills/src/lasr_skills/receive_object.py index 45864e61b..68fbbc925 100755 --- a/skills/src/lasr_skills/receive_object.py +++ b/skills/src/lasr_skills/receive_object.py @@ -10,13 +10,18 @@ import rosparam import os +from typing import Union + class ReceiveObject(smach.StateMachine): + def __init__(self, object_name: Union[str, None] = None, vertical: bool = True): - def __init__(self): - smach.StateMachine.__init__( - self, outcomes=["succeeded", "failed"], input_keys=["object_name"] - ) + if object_name is not None: + super(ReceiveObject, self).__init__(outcomes=["succeeded", "failed"]) + else: + smach.StateMachine.__init__( + self, outcomes=["succeeded", "failed"], input_keys=["object_name"] + ) r = rospkg.RosPack() els = rosparam.load_file( @@ -30,41 +35,98 @@ def __init__(self): "CLEAR_OCTOMAP", smach_ros.ServiceState("clear_octomap", Empty), transitions={ - "succeeded": "REACH_ARM", + "succeeded": "LOOK_LEFT", 
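+                    # The receive sequence now sweeps the head left, right, then centre before reaching the arm out.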
"aborted": "failed", "preempted": "failed", }, ) + smach.StateMachine.add( - "REACH_ARM", - PlayMotion(motion_name="reach_arm"), + "LOOK_LEFT", + PlayMotion(motion_name="look_left"), transitions={ - "succeeded": "OPEN_GRIPPER", + "succeeded": "LOOK_RIGHT", "aborted": "failed", "preempted": "failed", }, ) + smach.StateMachine.add( - "OPEN_GRIPPER", - PlayMotion(motion_name="open_gripper"), + "LOOK_RIGHT", + PlayMotion(motion_name="look_right"), transitions={ - "succeeded": "SAY_PLACE", + "succeeded": "LOOK_CENTRE", "aborted": "failed", "preempted": "failed", }, ) + smach.StateMachine.add( - "SAY_PLACE", - Say( - format_str="Please place the {} in my end-effector, and say `I am done`.", - ), + "LOOK_CENTRE", + PlayMotion(motion_name="look_centre"), transitions={ - "succeeded": "LISTEN_DONE", + "succeeded": "REACH_ARM", "aborted": "failed", "preempted": "failed", }, - remapping={"placeholders": "object_name"}, ) + + if vertical: + smach.StateMachine.add( + "REACH_ARM", + PlayMotion(motion_name="reach_arm_vertical_gripper"), + transitions={ + "succeeded": "OPEN_GRIPPER", + "aborted": "failed", + "preempted": "failed", + }, + ) + else: + smach.StateMachine.add( + "REACH_ARM", + PlayMotion(motion_name="reach_arm_horizontal_gripper"), + transitions={ + "succeeded": "OPEN_GRIPPER", + "aborted": "failed", + "preempted": "failed", + }, + ) + + smach.StateMachine.add( + "OPEN_GRIPPER", + PlayMotion(motion_name="open_gripper"), + transitions={ + "succeeded": "SAY_PLACE", + "aborted": "failed", + "preempted": "failed", + }, + ) + + if object_name is not None: + smach.StateMachine.add( + "SAY_PLACE", + Say( + text=f"Please place the {object_name} in my end-effector, and say `I am done`.", + ), + transitions={ + "succeeded": "LISTEN_DONE", + "aborted": "failed", + "preempted": "failed", + }, + ) + else: + smach.StateMachine.add( + "SAY_PLACE", + Say( + format_str="Please place the {} in my end-effector, and say `I am done`.", + ), + transitions={ + "succeeded": "LISTEN_DONE", + "aborted": "failed", + "preempted": "failed", + }, + remapping={"placeholders": "object_name"}, + ) smach.StateMachine.add( "LISTEN_DONE", ListenFor("done"), diff --git a/skills/src/lasr_skills/recognise.py b/skills/src/lasr_skills/recognise.py new file mode 100644 index 000000000..e3770a897 --- /dev/null +++ b/skills/src/lasr_skills/recognise.py @@ -0,0 +1,32 @@ +import rospy +import smach + +from lasr_vision_msgs.srv import Recognise as RecogniseSrv +from sensor_msgs.msg import Image + + +class Recognise(smach.State): + + def __init__( + self, + dataset: str, + confidence: float = 0.5, + image_topic: str = "/xtion/rgb/image_raw", + ): + smach.State.__init__( + self, outcomes=["succeeded", "failed"], output_keys=["detections"] + ) + self._dataset = dataset + self._confidence = confidence + self._image_topic = image_topic + self._recognise = rospy.ServiceProxy("/recognise", RecogniseSrv) + + def execute(self, userdata): + img_msg = rospy.wait_for_message(self._image_topic, Image) + try: + result = self._recognise(img_msg, self._dataset, self._confidence) + userdata.detections = result + return "succeeded" + except rospy.ServiceException as e: + rospy.logwarn(f"Unable to perform inference. 
({str(e)})") + return "failed" diff --git a/skills/src/lasr_skills/vision/get_image.py b/skills/src/lasr_skills/vision/get_image.py index 6d67ecbc5..d0deeecdd 100644 --- a/skills/src/lasr_skills/vision/get_image.py +++ b/skills/src/lasr_skills/vision/get_image.py @@ -3,6 +3,7 @@ import rospy from sensor_msgs.msg import Image, PointCloud2 + class GetImage(smach.State): """ State for reading an sensor_msgs Image message @@ -10,23 +11,27 @@ class GetImage(smach.State): def __init__(self, topic: str = None): smach.State.__init__( - self, outcomes=['succeeded', 'failed'], output_keys=['img_msg']) + self, outcomes=["succeeded", "failed"], output_keys=["img_msg"] + ) if topic is None: - self.topic = '/xtion/rgb/image_raw' if 'tiago' in os.environ['ROS_MASTER_URI'] else '/camera/image_raw' + self.topic = ( + "/xtion/rgb/image_raw" + if "tiago" in os.environ["ROS_MASTER_URI"] + else "/usb_cam/image_raw" + ) else: self.topic = topic def execute(self, userdata): try: - userdata.img_msg = rospy.wait_for_message( - self.topic, Image) + userdata.img_msg = rospy.wait_for_message(self.topic, Image) except Exception as e: print(e) - return 'failed' + return "failed" + + return "succeeded" - return 'succeeded' - class GetPointCloud(smach.State): """ @@ -35,22 +40,22 @@ class GetPointCloud(smach.State): def __init__(self, topic: str = None): smach.State.__init__( - self, outcomes=['succeeded', 'failed'], output_keys=['pcl_msg']) + self, outcomes=["succeeded", "failed"], output_keys=["pcl_msg"] + ) if topic is None: - self.topic = '/xtion/depth_registered/points' + self.topic = "/xtion/depth_registered/points" else: self.topic = topic def execute(self, userdata): try: - userdata.pcl_msg = rospy.wait_for_message( - self.topic, PointCloud2) + userdata.pcl_msg = rospy.wait_for_message(self.topic, PointCloud2) except Exception as e: print(e) - return 'failed' + return "failed" - return 'succeeded' + return "succeeded" class GetImageAndPointCloud(smach.State): @@ -60,20 +65,18 @@ class GetImageAndPointCloud(smach.State): def __init__(self, topic: str = None): smach.State.__init__( - self, outcomes=['succeeded', 'failed'], output_keys=['img_msg', 'pcl_msg']) + self, outcomes=["succeeded", "failed"], output_keys=["img_msg", "pcl_msg"] + ) - self.topic1 = '/xtion/rgb/image_raw' - self.topic2 = '/xtion/depth_registered/points' + self.topic1 = "/xtion/rgb/image_raw" + self.topic2 = "/xtion/depth_registered/points" def execute(self, userdata): try: - userdata.img_msg = rospy.wait_for_message( - self.topic1, Image) - userdata.pcl_msg = rospy.wait_for_message( - self.topic2, PointCloud2) + userdata.img_msg = rospy.wait_for_message(self.topic1, Image) + userdata.pcl_msg = rospy.wait_for_message(self.topic2, PointCloud2) except Exception as e: print(e) - return 'failed' - - return 'succeeded' + return "failed" + return "succeeded" diff --git a/skills/src/lasr_skills/vision/image_cv2_to_msg.py b/skills/src/lasr_skills/vision/image_cv2_to_msg.py index 1e1402b82..6e19fbfde 100644 --- a/skills/src/lasr_skills/vision/image_cv2_to_msg.py +++ b/skills/src/lasr_skills/vision/image_cv2_to_msg.py @@ -10,8 +10,9 @@ class ImageCv2ToMsg(smach.State): def __init__(self): smach.State.__init__( - self, outcomes=['succeeded'], input_keys=['img'], output_keys=['img_msg']) + self, outcomes=["succeeded"], input_keys=["img"], output_keys=["img_msg"] + ) def execute(self, userdata): userdata.img_msg = cv2_img.cv2_img_to_msg(userdata.img) - return 'succeeded' + return "succeeded" diff --git a/skills/src/lasr_skills/vision/image_msg_to_cv2.py 
b/skills/src/lasr_skills/vision/image_msg_to_cv2.py index 3021aebac..23c013daf 100644 --- a/skills/src/lasr_skills/vision/image_msg_to_cv2.py +++ b/skills/src/lasr_skills/vision/image_msg_to_cv2.py @@ -11,12 +11,13 @@ class ImageMsgToCv2(smach.State): def __init__(self): smach.State.__init__( - self, outcomes=['succeeded'], input_keys=['img_msg'], output_keys=['img']) + self, outcomes=["succeeded"], input_keys=["img_msg"], output_keys=["img"] + ) def execute(self, userdata): userdata.img = cv2_img.msg_to_cv2_img(userdata.img_msg) - return 'succeeded' - + return "succeeded" + class PclMsgToCv2(smach.State): """ @@ -25,9 +26,13 @@ class PclMsgToCv2(smach.State): def __init__(self): smach.State.__init__( - self, outcomes=['succeeded'], input_keys=['img_msg_3d'], output_keys=['img', 'xyz']) + self, + outcomes=["succeeded"], + input_keys=["img_msg_3d"], + output_keys=["img", "xyz"], + ) def execute(self, userdata): userdata.img = cv2_img.pcl_msg_to_cv2(userdata.img_msg_3d) userdata.xyz = cv2_img.pcl_msg_to_xyz(userdata.img_msg_3d) - return 'succeeded' + return "succeeded" diff --git a/skills/src/lasr_skills/wait_for_person.py b/skills/src/lasr_skills/wait_for_person.py index ada04b8a2..de10e34ff 100755 --- a/skills/src/lasr_skills/wait_for_person.py +++ b/skills/src/lasr_skills/wait_for_person.py @@ -4,9 +4,7 @@ class WaitForPerson(smach.StateMachine): - class CheckForPerson(smach.State): - def __init__(self): smach.State.__init__( self, outcomes=["done", "not_done"], input_keys=["detections"] @@ -18,8 +16,10 @@ def execute(self, userdata): else: return "not_done" - def __init__(self): - + def __init__( + self, + image_topic: str = "/xtion/rgb/image_raw", + ): smach.StateMachine.__init__( self, outcomes=["succeeded", "failed"], @@ -29,7 +29,7 @@ def __init__(self): with self: smach.StateMachine.add( "DETECT_PEOPLE", - Detect(filter=["person"]), + Detect(image_topic=image_topic, filter=["person"]), transitions={"succeeded": "CHECK_FOR_PERSON", "failed": "failed"}, ) smach.StateMachine.add( diff --git a/skills/src/lasr_skills/wait_for_person_in_area.py b/skills/src/lasr_skills/wait_for_person_in_area.py index a203e98b0..79f34ad5c 100755 --- a/skills/src/lasr_skills/wait_for_person_in_area.py +++ b/skills/src/lasr_skills/wait_for_person_in_area.py @@ -6,9 +6,7 @@ class WaitForPersonInArea(smach.StateMachine): - class CheckForPerson(smach.State): - def __init__(self): smach.State.__init__( self, outcomes=["done", "not_done"], input_keys=["detections_3d"] diff --git a/skills/src/lasr_skills/xml_question_answer.py b/skills/src/lasr_skills/xml_question_answer.py index abede91b5..0ab64b8b8 100644 --- a/skills/src/lasr_skills/xml_question_answer.py +++ b/skills/src/lasr_skills/xml_question_answer.py @@ -35,7 +35,6 @@ def parse_question_xml(xml_file_path: str) -> dict: class XmlQuestionAnswer(smach.State): - def __init__(self, index_path: str, txt_path: str, xml_path: str, k: int = 1): smach.State.__init__( self, diff --git a/tasks/carry_my_luggage/launch/setup.launch b/tasks/carry_my_luggage/launch/setup.launch index b5b32e1c4..bb8820885 100644 --- a/tasks/carry_my_luggage/launch/setup.launch +++ b/tasks/carry_my_luggage/launch/setup.launch @@ -3,6 +3,18 @@ + + + + + + + + + + + + diff --git a/tasks/carry_my_luggage/scripts/main.py b/tasks/carry_my_luggage/scripts/main.py old mode 100644 new mode 100755 diff --git a/tasks/carry_my_luggage/src/carry_my_luggage/state_machine.py b/tasks/carry_my_luggage/src/carry_my_luggage/state_machine.py index ae8765b5f..39cd67ce5 100644 --- 
a/tasks/carry_my_luggage/src/carry_my_luggage/state_machine.py
+++ b/tasks/carry_my_luggage/src/carry_my_luggage/state_machine.py
@@ -1,8 +1,69 @@
 import smach
+from lasr_skills import (
+    WaitForPerson,
+    DetectPointingDirection,
+    Say,
+    PlayMotion,
+    ReceiveObject,
+)
+
+import rospy
+
 class CarryMyLuggage(smach.StateMachine):
     def __init__(self):
         smach.StateMachine.__init__(self, outcomes=["succeeded", "failed"])
+
+        with self:
+            smach.StateMachine.add(
+                "WAIT_FOR_PERSON",
+                WaitForPerson(),
+                transitions={
+                    "succeeded": "DETECT_POINTING_DIRECTION",
+                    "failed": "failed",
+                },
+            )
+
+            smach.StateMachine.add(
+                "DETECT_POINTING_DIRECTION",
+                DetectPointingDirection(),
+                transitions={
+                    "succeeded": "SAY_BAG",
+                    "failed": "SAY_FAILED_POINTING",
+                },
+            )
+
+            smach.StateMachine.add(
+                "SAY_FAILED_POINTING",
+                Say(
+                    text="I could not detect the direction that you are pointing. I'll try again."
+                ),
+                transitions={
+                    "succeeded": "DETECT_POINTING_DIRECTION",
+                    "aborted": "failed",
+                    "preempted": "failed",
+                },
+            )
+
+            smach.StateMachine.add(
+                "SAY_BAG",
+                Say(format_str="I need you to give me the bag on your {}."),
+                transitions={
+                    "succeeded": "RECEIVE_BAG",
+                    "aborted": "failed",
+                    "preempted": "failed",
+                },
+                remapping={"placeholders": "pointing_direction"},
+            )
+
+            smach.StateMachine.add(
+                "RECEIVE_BAG",
+                ReceiveObject(object_name="bag", vertical=False),
+                transitions={
+                    "succeeded": "succeeded",
+                    "failed": "failed",
+                },
+            )
diff --git a/tasks/coffee_shop/nodes/tf_transform.py b/tasks/coffee_shop/nodes/tf_transform.py
index 616f7409b..67bd684f6 100755
--- a/tasks/coffee_shop/nodes/tf_transform.py
+++ b/tasks/coffee_shop/nodes/tf_transform.py
@@ -5,7 +5,14 @@
 import tf2_sensor_msgs
 from geometry_msgs.msg import PoseStamped, PoseArray
-from coffee_shop.srv import TfTransform, TfTransformResponse, LatestTransform, LatestTransformResponse, ApplyTransform, ApplyTransformResponse
+from coffee_shop.srv import (
+    TfTransform,
+    TfTransformResponse,
+    LatestTransform,
+    LatestTransformResponse,
+    ApplyTransform,
+    ApplyTransformResponse,
+)
 from geometry_msgs.msg import PoseStamped, Vector3Stamped, PointStamped, WrenchStamped
@@ -17,81 +24,124 @@ def tf_transform(msg):
     tf_response = TfTransformResponse()
-    if msg.pose_array.header.frame_id != '':
-        transformation = get_transform(source_frame=msg.pose_array.header.frame_id, target_frame=msg.target_frame.data, stamp = msg.pose_array.header.stamp)
+    if msg.pose_array.header.frame_id != "":
+        transformation = get_transform(
+            source_frame=msg.pose_array.header.frame_id,
+            target_frame=msg.target_frame.data,
+            stamp=msg.pose_array.header.stamp,
+        )
         if transformation:
             pose_array = PoseArray()
             pose_array.header.frame_id = msg.target_frame.data
             pose_array.header.stamp = rospy.get_rostime()
             for pose in msg.pose_array.poses:
-                new_pose = tf2_geometry_msgs.do_transform_pose(PoseStamped(pose=pose), transformation).pose
+                new_pose = tf2_geometry_msgs.do_transform_pose(
+                    PoseStamped(pose=pose), transformation
+                ).pose
                 pose_array.poses.append(new_pose)
             tf_response.target_pose_array = pose_array
         else:
-            print('Error: No transformation')
-    if msg.pointcloud.header.frame_id != '':
-        transformation = get_transform(source_frame=msg.pointcloud.header.frame_id, target_frame=msg.target_frame.data, stamp = msg.pointcloud.header.stamp)
+            print("Error: No transformation")
+    if msg.pointcloud.header.frame_id != "":
+        transformation = get_transform(
+            source_frame=msg.pointcloud.header.frame_id,
+            target_frame=msg.target_frame.data,
+
stamp=msg.pointcloud.header.stamp, + ) if transformation: - new_pointcloud = tf2_sensor_msgs.do_transform_cloud(msg.pointcloud, transformation) + new_pointcloud = tf2_sensor_msgs.do_transform_cloud( + msg.pointcloud, transformation + ) tf_response.target_pointcloud = new_pointcloud else: - print('Error: No transformation') - if msg.point.header.frame_id != '': - transformation = get_transform(source_frame=msg.point.header.frame_id, target_frame=msg.target_frame.data,stamp = msg.point.header.stamp) + print("Error: No transformation") + if msg.point.header.frame_id != "": + transformation = get_transform( + source_frame=msg.point.header.frame_id, + target_frame=msg.target_frame.data, + stamp=msg.point.header.stamp, + ) if transformation: new_point = do_transform_point(msg.point, transformation) tf_response.target_point = new_point else: - print('Error: No transformation') + print("Error: No transformation") - return tf_response + def get_transform(source_frame, target_frame, stamp): """ - Converts to target frame - Returns the pose in the target frame + Converts to target frame + Returns the pose in the target frame """ - assert(source_frame and target_frame) + assert source_frame and target_frame # print('source_frame', source_frame) # print('target_frame', target_frame) try: - transformation = tfBuffer.lookup_transform(target_frame, source_frame, stamp, rospy.Duration(0.1)) + transformation = tfBuffer.lookup_transform( + target_frame, source_frame, stamp, rospy.Duration(0.1) + ) return transformation - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e: + except ( + tf2_ros.LookupException, + tf2_ros.ConnectivityException, + tf2_ros.ExtrapolationException, + ) as e: print(e) + def do_transform_point(point, transform): - p = transform_to_kdl(transform) * PyKDL.Vector(point.point.x, point.point.y, point.point.z) + p = transform_to_kdl(transform) * PyKDL.Vector( + point.point.x, point.point.y, point.point.z + ) res = PointStamped() res.point.x = p[0] res.point.y = p[1] res.point.z = p[2] res.header = transform.header return res + + tf2_ros.TransformRegistration().add(PointStamped, do_transform_point) def transform_to_kdl(t): - return PyKDL.Frame(PyKDL.Rotation.Quaternion(t.transform.rotation.x, t.transform.rotation.y, - t.transform.rotation.z, t.transform.rotation.w), - PyKDL.Vector(t.transform.translation.x, - t.transform.translation.y, - t.transform.translation.z)) + return PyKDL.Frame( + PyKDL.Rotation.Quaternion( + t.transform.rotation.x, + t.transform.rotation.y, + t.transform.rotation.z, + t.transform.rotation.w, + ), + PyKDL.Vector( + t.transform.translation.x, + t.transform.translation.y, + t.transform.translation.z, + ), + ) + def get_latest_transform(msg): to_frame = msg.target_frame from_frame = msg.from_frame try: - t = tfBuffer.lookup_transform(to_frame, from_frame, rospy.Time(0), rospy.Duration(5)) + t = tfBuffer.lookup_transform( + to_frame, from_frame, rospy.Time(0), rospy.Duration(5) + ) return t - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException): + except ( + tf2_ros.LookupException, + tf2_ros.ConnectivityException, + tf2_ros.ExtrapolationException, + ): raise + def apply_transform(msg): - rospy.logwarn('Applying transform') + rospy.logwarn("Applying transform") new_p = [] for p in msg.points: ps = PointStamped() @@ -100,14 +150,14 @@ def apply_transform(msg): new_p.append(p_tf.point) return ApplyTransformResponse(new_p) -if __name__ == '__main__': - - 
rospy.init_node('tf_transform_node') - s = rospy.Service('tf_transform', TfTransform, tf_transform) - s2 = rospy.Service('get_latest_transform', LatestTransform, get_latest_transform) - s3 = rospy.Service('apply_transform', ApplyTransform, apply_transform) + +if __name__ == "__main__": + rospy.init_node("tf_transform_node") + s = rospy.Service("tf_transform", TfTransform, tf_transform) + s2 = rospy.Service("get_latest_transform", LatestTransform, get_latest_transform) + s3 = rospy.Service("apply_transform", ApplyTransform, apply_transform) tfBuffer = tf2_ros.Buffer() listener = tf2_ros.TransformListener(tfBuffer) - print('Ready to transform!') + print("Ready to transform!") rospy.spin() diff --git a/tasks/coffee_shop/scripts/datahub.py b/tasks/coffee_shop/scripts/datahub.py index 246945aec..607ebb6cf 100755 --- a/tasks/coffee_shop/scripts/datahub.py +++ b/tasks/coffee_shop/scripts/datahub.py @@ -8,20 +8,62 @@ DATAHUB_API = "https://ecs-mnemosyne.azurewebsites.net/api/Hub" ROBOT_ID = "019b1442-728d-48c4-1de7-08dbb68bfcf1" + def start_episode(_): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "STARTEPISODE", "Episode" : 1}) + requests.post( + DATAHUB_API, + json={ + "RobotId": ROBOT_ID, + "Competition": "ERL", + "Action": "STARTEPISODE", + "Episode": 1, + }, + ) + def stop_episode(_): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "STOPEPISODE", "Episode" : 1}) + requests.post( + DATAHUB_API, + json={ + "RobotId": ROBOT_ID, + "Competition": "ERL", + "Action": "STOPEPISODE", + "Episode": 1, + }, + ) + def start_phase(msg): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "STARTPHASE", "Episode" : 1, "Phase" : msg.data}) + requests.post( + DATAHUB_API, + json={ + "RobotId": ROBOT_ID, + "Competition": "ERL", + "Action": "STARTPHASE", + "Episode": 1, + "Phase": msg.data, + }, + ) + def stop_phase(msg): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "STOPPHASE", "Episode" : 1, "Phase" : msg.data}) + requests.post( + DATAHUB_API, + json={ + "RobotId": ROBOT_ID, + "Competition": "ERL", + "Action": "STOPPHASE", + "Episode": 1, + "Phase": msg.data, + }, + ) + def ping(_): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "PING"}) + requests.post( + DATAHUB_API, json={"RobotId": ROBOT_ID, "Competition": "ERL", "Action": "PING"} + ) + rospy.Subscriber("/datahub/start_episode", Empty, start_episode) rospy.Subscriber("/datahub/stop_episode", Empty, stop_episode) @@ -29,4 +71,4 @@ def ping(_): rospy.Subscriber("/datahub/stop_phase", Int16, stop_phase) rospy.Subscriber("/datahub/ping", Int16, ping) -rospy.spin() \ No newline at end of file +rospy.spin() diff --git a/tasks/coffee_shop/scripts/prep.py b/tasks/coffee_shop/scripts/prep.py index 45b06dde0..a9680b551 100755 --- a/tasks/coffee_shop/scripts/prep.py +++ b/tasks/coffee_shop/scripts/prep.py @@ -12,7 +12,9 @@ rospy.init_node("coffee_shop_prep") -save_navigation_point = rospy.ServiceProxy("/save_navigation_points", SaveNavigationPoint) +save_navigation_point = rospy.ServiceProxy( + "/save_navigation_points", SaveNavigationPoint +) save_table_cuboid = rospy.ServiceProxy("/generate_table_cuboid", GenerateTableCuboid) @@ -29,20 +31,22 @@ rospy.loginfo(f"There are {n_tables} tables.") for i in range(n_tables): - while True: long_side_length = short_side_length = radius = 0.0 - input(f"Place an Aruco marker in the corner of table {i}, adjust me 
so that I can see it, then press Enter.") + input( + f"Place an Aruco marker in the corner of table {i}, adjust me so that I can see it, then press Enter." + ) is_rect = bool(int(input("Is rect (0/1): "))) if is_rect: - long_side_length = float(input(f"Long side length: ")) short_side_length = float(input(f"Short side length: ")) else: radius = float(input("Radius: ")) padding_sz = float((input("Padding: "))) try: - save_table_cuboid(i, long_side_length, short_side_length, padding_sz, is_rect, radius) + save_table_cuboid( + i, long_side_length, short_side_length, padding_sz, is_rect, radius + ) break except rospy.service.ServiceException: rospy.logwarn("Something went wrong - is the Aruco marker in view?") @@ -52,7 +56,9 @@ while True: padding_sz = 0.0 - input(f"Place an Aruco marker in the corner of counter, adjust me so that I can see it, then press Enter.") + input( + f"Place an Aruco marker in the corner of counter, adjust me so that I can see it, then press Enter." + ) is_rect = bool(int(input("Is rect (0/1): "))) if is_rect: long_side_length = float(input(f"Long side length: ")) @@ -60,7 +66,9 @@ else: radius = float(input("Radius: ")) try: - save_table_cuboid(-1, long_side_length, short_side_length, padding_sz, is_rect, radius) + save_table_cuboid( + -1, long_side_length, short_side_length, padding_sz, is_rect, radius + ) break except rospy.service.ServiceException: rospy.logwarn("Something went wrong - is the Aruco marker in view?") @@ -69,11 +77,13 @@ save_navigation_point(-1) while True: - input(f"Place an Aruco marker to indicate the waiting area, adjust me so that I can see it, then press Enter.") + input( + f"Place an Aruco marker to indicate the waiting area, adjust me so that I can see it, then press Enter." + ) try: long_side_length = float(input(f"Long side length: ")) short_side_length = float(input(f"Short side length: ")) - save_table_cuboid(-2, long_side_length, short_side_length, 0.0 , 1, 0.0) + save_table_cuboid(-2, long_side_length, short_side_length, 0.0, 1, 0.0) break except rospy.service.ServiceException: rospy.logwarn("Something went wrong - is the Aruco marker in view?") @@ -84,4 +94,4 @@ rospy.loginfo("ALL OK.") els = rosparam.load_file(OUTPUT_PATH) for param, ns in els: - rosparam.upload_params(ns, param) \ No newline at end of file + rosparam.upload_params(ns, param) diff --git a/tasks/coffee_shop/scripts/test_check_table.py b/tasks/coffee_shop/scripts/test_check_table.py index 4253fe403..0f410ef41 100755 --- a/tasks/coffee_shop/scripts/test_check_table.py +++ b/tasks/coffee_shop/scripts/test_check_table.py @@ -7,11 +7,11 @@ if __name__ == "__main__": rospy.init_node("test_check_table") - sm = smach.StateMachine(outcomes=['end', 'not_finished']) + sm = smach.StateMachine(outcomes=["end", "not_finished"]) context = Context(sys.argv[1], sys.argv[2]) context.current_table = "table0" with sm: - sm.add('CHECK_TABLE', CheckTable(context), transitions={'finished' : 'end'}) + sm.add("CHECK_TABLE", CheckTable(context), transitions={"finished": "end"}) sm.execute() - rospy.signal_shutdown("down") \ No newline at end of file + rospy.signal_shutdown("down") diff --git a/tasks/coffee_shop/scripts/test_laser_to_pointcloud.py b/tasks/coffee_shop/scripts/test_laser_to_pointcloud.py index 1092116b1..2f55a7288 100755 --- a/tasks/coffee_shop/scripts/test_laser_to_pointcloud.py +++ b/tasks/coffee_shop/scripts/test_laser_to_pointcloud.py @@ -13,11 +13,18 @@ def get_transform(from_frame, to_frame): try: - t = tf_buffer.lookup_transform(to_frame, from_frame, rospy.Time(0), 
rospy.Duration(0.5)) + t = tf_buffer.lookup_transform( + to_frame, from_frame, rospy.Time(0), rospy.Duration(0.5) + ) return t - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException): + except ( + tf2_ros.LookupException, + tf2_ros.ConnectivityException, + tf2_ros.ExtrapolationException, + ): raise + def apply_transform(input_xyz, transform, target="xtion_rgb_optical_frame"): ps = PointStamped() ps.point.x = input_xyz[0] @@ -29,8 +36,9 @@ def apply_transform(input_xyz, transform, target="xtion_rgb_optical_frame"): tr_point = tf2_geometry_msgs.do_transform_point(ps, transform) return (tr_point.point.x, tr_point.point.y, tr_point.point.z) + def get_pcl_from_laser(msg): - """ Converts a LaserScan message to a padded point cloud in camera frame. + """Converts a LaserScan message to a padded point cloud in camera frame. Args: msg (LaserScan): ROS Laser Scan message from /scan topic. @@ -38,12 +46,14 @@ def get_pcl_from_laser(msg): PointCloud2: ROS PointCloud2 message with padded points in camera frame. """ pcl_msg = lg.LaserProjection().projectLaser(msg) - pcl_points = [p for p in pc2.read_points(pcl_msg, field_names=("x, y, z"), skip_nans=True)] + pcl_points = [ + p for p in pc2.read_points(pcl_msg, field_names=("x, y, z"), skip_nans=True) + ] tr = get_transform("base_laser_link", "xtion_rgb_optical_frame") - + padded_converted_points = [] for point in pcl_points: - for z in np.linspace(0., 2., 10): + for z in np.linspace(0.0, 2.0, 10): p = apply_transform((point[0], point[1], z), tr) padded_converted_points.append(p) @@ -51,13 +61,15 @@ def get_pcl_from_laser(msg): h.stamp = rospy.Time.now() h.frame_id = "xtion_rgb_optical_frame" padded_pcl = pc2.create_cloud_xyz32(h, padded_converted_points) - + return padded_pcl + def get_laser_and_publish_pcl(msg): padded_pcl = get_pcl_from_laser(msg) pub.publish(padded_pcl) + if __name__ == "__main__": rospy.init_node("laser_to_pointcloud_node") tf_buffer = tf2_ros.Buffer() @@ -65,4 +77,4 @@ def get_laser_and_publish_pcl(msg): pub = rospy.Publisher("/pcl_from_laser_debug", PointCloud2, queue_size=10) sub = rospy.Subscriber("/scan", LaserScan, get_laser_and_publish_pcl) while not rospy.is_shutdown(): - rospy.spin() \ No newline at end of file + rospy.spin() diff --git a/tasks/coffee_shop/scripts/test_make_order.py b/tasks/coffee_shop/scripts/test_make_order.py index 531e18350..7d9493b29 100755 --- a/tasks/coffee_shop/scripts/test_make_order.py +++ b/tasks/coffee_shop/scripts/test_make_order.py @@ -1,24 +1,38 @@ #!/usr/bin/env python3 import smach import rospy -from coffee_shop.phases.phase_2.states import MakeOrder, CheckOrder, LoadOrder, WaitForOrder, GoToCounter +from coffee_shop.phases.phase_2.states import ( + MakeOrder, + CheckOrder, + LoadOrder, + WaitForOrder, + GoToCounter, +) from coffee_shop.context import Context import sys if __name__ == "__main__": rospy.init_node("test_make_order") - sm = smach.StateMachine(outcomes=['end']) + sm = smach.StateMachine(outcomes=["end"]) context = Context(sys.argv[1], sys.argv[2]) context.current_table = "table0" context.tables[context.current_table]["status"] = "currently serving" context.tables[context.current_table]["order"] = ["coffee", "coffee"] with sm: - sm.add('GO_TO_COUNTER', GoToCounter(context), transitions={'done' : 'MAKE_ORDER'}) - sm.add('MAKE_ORDER', MakeOrder(context), transitions={'done' : 'WAIT_FOR_ORDER'}) - sm.add('WAIT_FOR_ORDER', WaitForOrder(context), transitions={'done': 'CHECK_ORDER'}) - sm.add('CHECK_ORDER', CheckOrder(context), 
transitions={'correct': 'LOAD_ORDER', 'incorrect': 'WAIT_FOR_ORDER'}) - sm.add('LOAD_ORDER', LoadOrder(context), transitions={'done':'end'}) + sm.add( + "GO_TO_COUNTER", GoToCounter(context), transitions={"done": "MAKE_ORDER"} + ) + sm.add("MAKE_ORDER", MakeOrder(context), transitions={"done": "WAIT_FOR_ORDER"}) + sm.add( + "WAIT_FOR_ORDER", WaitForOrder(context), transitions={"done": "CHECK_ORDER"} + ) + sm.add( + "CHECK_ORDER", + CheckOrder(context), + transitions={"correct": "LOAD_ORDER", "incorrect": "WAIT_FOR_ORDER"}, + ) + sm.add("LOAD_ORDER", LoadOrder(context), transitions={"done": "end"}) sm.execute() rospy.signal_shutdown("down") diff --git a/tasks/coffee_shop/scripts/test_people_pose.py b/tasks/coffee_shop/scripts/test_people_pose.py index 717fa3ce4..4e3601704 100755 --- a/tasks/coffee_shop/scripts/test_people_pose.py +++ b/tasks/coffee_shop/scripts/test_people_pose.py @@ -10,6 +10,7 @@ from cv_bridge3 import CvBridge import numpy as np + def create_point_marker(x, y, z, idx): marker_msg = Marker() marker_msg.header.frame_id = "map" @@ -30,12 +31,13 @@ def create_point_marker(x, y, z, idx): marker_msg.color.b = 0.0 return marker_msg + rospy.init_node("test_people_pose") people_pose_pub = rospy.Publisher("/people_poses", Marker, queue_size=100) rospy.wait_for_service("/yolov8/detect", rospy.Duration(15.0)) -yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) +yolo = rospy.ServiceProxy("/yolov8/detect", YoloDetection) tf = rospy.ServiceProxy("/tf_transform", TfTransform) bridge = CvBridge() @@ -56,8 +58,10 @@ def create_point_marker(x, y, z, idx): tf_req.target_frame = String("map") tf_req.point = centroid centroid_tf = tf(tf_req).target_point.point - people_pose_pub.publish(create_point_marker(centroid_tf.x, centroid_tf.y, centroid_tf.z, idx)) + people_pose_pub.publish( + create_point_marker(centroid_tf.x, centroid_tf.y, centroid_tf.z, idx) + ) idx += 1 prev_xyz = centroid_xyz - rospy.sleep(rospy.Duration(1.0)) \ No newline at end of file + rospy.sleep(rospy.Duration(1.0)) diff --git a/tasks/coffee_shop/scripts/test_people_pose_laser.py b/tasks/coffee_shop/scripts/test_people_pose_laser.py index 8031f3b96..d112e1652 100755 --- a/tasks/coffee_shop/scripts/test_people_pose_laser.py +++ b/tasks/coffee_shop/scripts/test_people_pose_laser.py @@ -12,6 +12,7 @@ import tf2_ros import tf2_geometry_msgs + def create_point_marker(x, y, z, idx): marker_msg = Marker() marker_msg.header.frame_id = "xtion_rgb_optical_frame" @@ -32,15 +33,23 @@ def create_point_marker(x, y, z, idx): marker_msg.color.b = 0.0 return marker_msg + def get_transform(from_frame, to_frame): tf_buffer = tf2_ros.Buffer() tf_listener = tf2_ros.TransformListener(tf_buffer) try: - t = tf_buffer.lookup_transform(to_frame, from_frame, rospy.Time(0), rospy.Duration(5)) + t = tf_buffer.lookup_transform( + to_frame, from_frame, rospy.Time(0), rospy.Duration(5) + ) return t - except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException): + except ( + tf2_ros.LookupException, + tf2_ros.ConnectivityException, + tf2_ros.ExtrapolationException, + ): raise + def apply_transform(input_xyz, transform, target="xtion_rgb_optical_frame"): ps = PointStamped() ps.point.x = input_xyz[0] @@ -52,8 +61,9 @@ def apply_transform(input_xyz, transform, target="xtion_rgb_optical_frame"): tr_point = tf2_geometry_msgs.do_transform_point(ps, transform) return (tr_point.point.x, tr_point.point.y, tr_point.point.z) + def get_points_and_pixels_from_laser(msg): - """ Converts a LaserScan message to a collection of 
points in camera frame, with their corresponding pixel values in a flat array. The method pads out the points to add vertical "pillars" to the point cloud. + """Converts a LaserScan message to a collection of points in camera frame, with their corresponding pixel values in a flat array. The method pads out the points to add vertical "pillars" to the point cloud. Args: msg (LaserScan): ROS Laser Scan message from /scan topic. @@ -62,15 +72,17 @@ def get_points_and_pixels_from_laser(msg): List: A list of pixel values corresponding to the points in the first list. """ pcl_msg = lg.LaserProjection().projectLaser(msg) - pcl_points = [p for p in pc2.read_points(pcl_msg, field_names=("x, y, z"), skip_nans=True)] + pcl_points = [ + p for p in pc2.read_points(pcl_msg, field_names=("x, y, z"), skip_nans=True) + ] tr = get_transform("base_laser_link", "xtion_rgb_optical_frame") - + padded_converted_points = [] pixels = [] for point in pcl_points: - for z in np.linspace(0., 2., 5): + for z in np.linspace(0.0, 2.0, 5): p = apply_transform((point[0], point[1], z), tr) - u,v = camera.project3dToPixel(p) + u, v = camera.project3dToPixel(p) if u >= 0 and u < 640 and v >= 0 and v < 480: padded_converted_points.append(p) pixels.append(u) @@ -78,18 +90,20 @@ def get_points_and_pixels_from_laser(msg): return padded_converted_points, pixels -if __name__ == "__main__": +if __name__ == "__main__": rospy.init_node("test_people_pose_laser") people_pose_pub = rospy.Publisher("/people_poses_laser", Marker, queue_size=100) rospy.wait_for_service("/yolov8/detect", rospy.Duration(15.0)) - yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) - shapely = rospy.ServiceProxy('/lasr_shapely/points_in_polygon_2d', PointsInPolygon2D) + yolo = rospy.ServiceProxy("/yolov8/detect", YoloDetection) + shapely = rospy.ServiceProxy( + "/lasr_shapely/points_in_polygon_2d", PointsInPolygon2D + ) camera = PinholeCameraModel() camera.fromCameraInfo(rospy.wait_for_message("/xtion/rgb/camera_info", CameraInfo)) - + try: corners = rospy.get_param("/wait/cuboid") except KeyError: @@ -105,20 +119,26 @@ def get_points_and_pixels_from_laser(msg): for detection in detections.detected_objects: if detection.name == "person": decision = shapely(detection.xyseg, pixels) - #save index for every true decision + # save index for every true decision idx = [idx for idx, el in enumerate(decision.inside) if el] filtered_points = [points[i] for i in idx] if corners is not None: - waiting_area = shapely(corners, filtered_points) if corners else None + waiting_area = ( + shapely(corners, filtered_points) if corners else None + ) idx = [idx for idx, el in enumerate(waiting_area.inside) if el] points_inside_area = [filtered_points[i] for i in idx] if len(points_inside_area): point = points_inside_area[0] - marker_msg = create_point_marker(point[0], point[1], point[2], i) + marker_msg = create_point_marker( + point[0], point[1], point[2], i + ) people_pose_pub.publish(marker_msg) else: for point in filtered_points: - marker_msg = create_point_marker(point[0], point[1], point[2], i) + marker_msg = create_point_marker( + point[0], point[1], point[2], i + ) people_pose_pub.publish(marker_msg) - - rospy.sleep(rospy.Duration(.1)) \ No newline at end of file + + rospy.sleep(rospy.Duration(0.1)) diff --git a/tasks/coffee_shop/scripts/test_phase3.py b/tasks/coffee_shop/scripts/test_phase3.py index d6be598bd..4d8bfaa92 100755 --- a/tasks/coffee_shop/scripts/test_phase3.py +++ b/tasks/coffee_shop/scripts/test_phase3.py @@ -7,11 +7,11 @@ if __name__ == "__main__": 
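+    # Exercises phase 3 end-to-end against table0, which is pre-marked as "ready".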
rospy.init_node("test_wait_for_person") - sm = smach.StateMachine(outcomes=['end']) - context = Context(sys.argv[1],True) + sm = smach.StateMachine(outcomes=["end"]) + context = Context(sys.argv[1], True) context.current_table = "table0" context.tables[context.current_table]["status"] = "ready" with sm: - sm.add('PHASE_3', Phase3(context), transitions={'done' : 'end'}) + sm.add("PHASE_3", Phase3(context), transitions={"done": "end"}) sm.execute() diff --git a/tasks/coffee_shop/scripts/test_take_order.py b/tasks/coffee_shop/scripts/test_take_order.py index 3a7932455..83c97eaa9 100755 --- a/tasks/coffee_shop/scripts/test_take_order.py +++ b/tasks/coffee_shop/scripts/test_take_order.py @@ -7,11 +7,18 @@ if __name__ == "__main__": rospy.init_node("test_take_order") - sm = smach.StateMachine(outcomes=['end']) + sm = smach.StateMachine(outcomes=["end"]) context = Context(sys.argv[1], sys.argv[2]) - context.target_object_remappings = {"coffee" : "coffee", "smoothie" : "smoothie", "biscuits" : "biscuits", "orange_juice" : "orange juice", "granola" : "granola", "sandwich": "sandwich"} + context.target_object_remappings = { + "coffee": "coffee", + "smoothie": "smoothie", + "biscuits": "biscuits", + "orange_juice": "orange juice", + "granola": "granola", + "sandwich": "sandwich", + } context.current_table = "table0" context.tables[context.current_table]["people"] = [[0.114, 2.16, 1.8]] with sm: - sm.add('TAKE_ORDER', TakeOrder(context), transitions={'done' : 'end'}) + sm.add("TAKE_ORDER", TakeOrder(context), transitions={"done": "end"}) sm.execute() diff --git a/tasks/coffee_shop/scripts/test_wait_for_person.py b/tasks/coffee_shop/scripts/test_wait_for_person.py index 963ccb8f8..d06d55909 100644 --- a/tasks/coffee_shop/scripts/test_wait_for_person.py +++ b/tasks/coffee_shop/scripts/test_wait_for_person.py @@ -6,11 +6,17 @@ if __name__ == "__main__": rospy.init_node("test_wait_for_person") - sm = smach.StateMachine(outcomes=['end']) + sm = smach.StateMachine(outcomes=["end"]) context = Context() with sm: - sm.add('LOOK_FOR_PERSON', LookForPerson(context), transitions={'found' : 'GO_TO_PERSON', 'not found' : 'LOOK_FOR_PERSON'}) - sm.add('GO_TO_PERSON', GoToPerson(context), transitions={'done' : 'GREET_PERSON'}) - sm.add('GREET_PERSON', GreetPerson(context), transitions={'done' : 'end'}) + sm.add( + "LOOK_FOR_PERSON", + LookForPerson(context), + transitions={"found": "GO_TO_PERSON", "not found": "LOOK_FOR_PERSON"}, + ) + sm.add( + "GO_TO_PERSON", GoToPerson(context), transitions={"done": "GREET_PERSON"} + ) + sm.add("GREET_PERSON", GreetPerson(context), transitions={"done": "end"}) sm.execute() diff --git a/tasks/coffee_shop/setup.py b/tasks/coffee_shop/setup.py index b5dc2d9f9..64b20351a 100644 --- a/tasks/coffee_shop/setup.py +++ b/tasks/coffee_shop/setup.py @@ -3,10 +3,6 @@ from distutils.core import setup from catkin_pkg.python_setup import generate_distutils_setup -setup_args = generate_distutils_setup( - packages=['coffee_shop'], - package_dir={'': 'src'} -) +setup_args = generate_distutils_setup(packages=["coffee_shop"], package_dir={"": "src"}) setup(**setup_args) - diff --git a/tasks/coffee_shop/src/coffee_shop/context.py b/tasks/coffee_shop/src/coffee_shop/context.py index 042b52e99..d4a0ca3a0 100755 --- a/tasks/coffee_shop/src/coffee_shop/context.py +++ b/tasks/coffee_shop/src/coffee_shop/context.py @@ -17,27 +17,29 @@ import random from std_msgs.msg import Empty, Int16 -class Context: +class Context: def __init__(self, config_path=None, tablet=False): - - self.tablet = tablet - + 
self.base_controller = BaseController() rospy.loginfo("Got base controller") self.head_controller = HeadController() rospy.loginfo("Got head controller") self.voice_controller = Voice() rospy.loginfo("Got voice controller") - self.play_motion_client = actionlib.SimpleActionClient('/play_motion', PlayMotionAction) + self.play_motion_client = actionlib.SimpleActionClient( + "/play_motion", PlayMotionAction + ) self.play_motion_client.wait_for_server() rospy.loginfo("Got PM") - self.point_head_client = actionlib.SimpleActionClient('/head_controller/point_head_action', PointHeadAction) + self.point_head_client = actionlib.SimpleActionClient( + "/head_controller/point_head_action", PointHeadAction + ) self.point_head_client.wait_for_server() rospy.loginfo("Got PH") - rospy.wait_for_service('/yolov8/detect') - self.yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) + rospy.wait_for_service("/yolov8/detect") + self.yolo = rospy.ServiceProxy("/yolov8/detect", YoloDetection) rospy.loginfo("Got YOLO") rospy.wait_for_service("/tf_transform") rospy.wait_for_service("/get_latest_transform") @@ -51,44 +53,57 @@ def __init__(self, config_path=None, tablet=False): self.bridge = CvBridge() rospy.loginfo("CV Bridge") - self.datahub_ping = rospy.Publisher("/datahub/ping", Empty, queue_size=10) - self.datahub_start_episode = rospy.Publisher("/datahub/start_episode", Empty, queue_size=10) - self.datahub_stop_epsiode = rospy.Publisher("/datahub/stop_episode", Empty, queue_size=10) - self.datahub_start_phase = rospy.Publisher("/datahub/start_phase", Int16, queue_size=10) - self.datahub_stop_phase = rospy.Publisher("/datahub/stop_phase", Int16, queue_size=10) - + self.datahub_start_episode = rospy.Publisher( + "/datahub/start_episode", Empty, queue_size=10 + ) + self.datahub_stop_epsiode = rospy.Publisher( + "/datahub/stop_episode", Empty, queue_size=10 + ) + self.datahub_start_phase = rospy.Publisher( + "/datahub/start_phase", Int16, queue_size=10 + ) + self.datahub_stop_phase = rospy.Publisher( + "/datahub/stop_phase", Int16, queue_size=10 + ) if not tablet: rospy.wait_for_service("/lasr_speech/transcribe_and_parse") - self.speech = rospy.ServiceProxy("/lasr_speech/transcribe_and_parse", Speech) + self.speech = rospy.ServiceProxy( + "/lasr_speech/transcribe_and_parse", Speech + ) else: self.speech = None rospy.loginfo("Speech") - if '/pal_startup_control/start' in rosservice.get_service_list(): + if "/pal_startup_control/start" in rosservice.get_service_list(): # Assume that if the topics are available, then the services are running. 
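+            # These proxies let task code pause and resume the head manager while it drives the head itself.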
try: from pal_startup_msgs.srv import StartupStart, StartupStop - self.start_head_manager = rospy.ServiceProxy("/pal_startup_control/start", StartupStart) - self.stop_head_manager = rospy.ServiceProxy("/pal_startup_control/stop", StartupStop) + + self.start_head_manager = rospy.ServiceProxy( + "/pal_startup_control/start", StartupStart + ) + self.stop_head_manager = rospy.ServiceProxy( + "/pal_startup_control/stop", StartupStop + ) except ModuleNotFoundError: - self.start_head_manager = lambda a, b : None - self.stop_head_manager = lambda a : None + self.start_head_manager = lambda a, b: None + self.stop_head_manager = lambda a: None else: - self.start_head_manager = lambda a, b : None - self.stop_head_manager = lambda a : None + self.start_head_manager = lambda a, b: None + self.stop_head_manager = lambda a: None self.tables = dict() self.target_object_remappings = dict() - if config_path is not None: with open(config_path, "r") as fp: data = yaml.safe_load(fp) self.tables = { - table: {"status" : "unvisited", "people": list(), "order": list()} for table in data.get("tables", dict()).keys() + table: {"status": "unvisited", "people": list(), "order": list()} + for table in data.get("tables", dict()).keys() } self.target_object_remappings = data.get("objects", dict()) @@ -97,22 +112,28 @@ def __init__(self, config_path=None, tablet=False): self.YOLO_objects_model = data.get("yolo_objects_model", "yolov8n-seg.pt") self.YOLO_counter_model = data.get("yolo_counter_model", "MK_COUNTER.pt") - if rosparam.list_params("/mmap"): rosparam.delete_param("mmap") - mmap_dict = {"vo": {"submap_0": dict()}, "numberOfSubMaps" : 1} - rospy.loginfo(f"There are {len(data['tables'].keys())}, should be {len(data['tables'].keys()) + 1} VOs") + mmap_dict = {"vo": {"submap_0": dict()}, "numberOfSubMaps": 1} + rospy.loginfo( + f"There are {len(data['tables'].keys())}, should be {len(data['tables'].keys()) + 1} VOs" + ) count = 0 for i, table in enumerate(data["tables"].keys()): for j, corner in enumerate(data["tables"][table]["objects_cuboid"]): vo = f"vo_00{count}" - mmap_dict["vo"]["submap_0"][vo] = ["submap_0", f"table{i}", *corner, 0.0] - count +=1 + mmap_dict["vo"]["submap_0"][vo] = [ + "submap_0", + f"table{i}", + *corner, + 0.0, + ] + count += 1 for j, corner in enumerate(data["counter"]["cuboid"]): vo = f"vo_00{count}" mmap_dict["vo"]["submap_0"][vo] = ["submap_0", f"counter", *corner, 0.0] - count +=1 + count += 1 rosparam.upload_params("mmap", mmap_dict) else: @@ -132,7 +153,7 @@ def __init__(self, config_path=None, tablet=False): "Can you say that again, please?", "I think my ears need cleaning, could you say that again?", "Please could you repeat that", - "Can you repeat yourself, and possibly speak louder, please?" 
+ "Can you repeat yourself, and possibly speak louder, please?", ] @staticmethod @@ -157,11 +178,19 @@ def _create_point_marker(idx, x, y, z, frame_id, r, g, b): return marker_msg def publish_person_pose(self, x, y, z, frame_id): - self._people_pose_pub.publish(self._create_point_marker(self._people_idx, x, y, z, frame_id, 1.0, 0.0, 0.0)) + self._people_pose_pub.publish( + self._create_point_marker( + self._people_idx, x, y, z, frame_id, 1.0, 0.0, 0.0 + ) + ) self._people_idx += 1 def publish_object_pose(self, x, y, z, frame_id): - self._object_pose_pub.publish(self._create_point_marker(self._objects_idx, x, y, z, frame_id, 0.0, 1.0, 0.0)) + self._object_pose_pub.publish( + self._create_point_marker( + self._objects_idx, x, y, z, frame_id, 0.0, 1.0, 0.0 + ) + ) self._objects_idx += 1 def __str__(self): @@ -170,7 +199,9 @@ def __str__(self): status = table_info["status"] people = table_info["people"] order = table_info["order"] - table_summary.append(f"Table {table}: Status={status}, People={people}, Order={order}") + table_summary.append( + f"Table {table}: Status={status}, People={people}, Order={order}" + ) return "\n".join(table_summary) diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/phase_1.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/phase_1.py index cb5d5c037..e57019c1d 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/phase_1.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/phase_1.py @@ -2,13 +2,22 @@ from .states import Start, GoToTable, CheckTable, GoToPreTable, PreCheckTable + class Phase1(smach.StateMachine): def __init__(self, context): - smach.StateMachine.__init__(self, outcomes=['done']) + smach.StateMachine.__init__(self, outcomes=["done"]) with self: - smach.StateMachine.add('START_PHASE_1', Start(context), transitions={'done' : 'GO_TO_TABLE'}) - # smach.StateMachine.add('GO_TO_TABLE', GoToPreTable(context), transitions={'done' : 'GO_TO_TABLE'}) - #smach.StateMachine.add('PRE_CHECK_TABLE', PreCheckTable(context), transitions={'done' : 'GO_TO_TABLE'}) - smach.StateMachine.add('GO_TO_TABLE', GoToTable(context), transitions={'done' : 'CHECK_TABLE'}) - smach.StateMachine.add('CHECK_TABLE', CheckTable(context), transitions={'not_finished' : 'GO_TO_TABLE', 'finished' : 'done'}) + smach.StateMachine.add( + "START_PHASE_1", Start(context), transitions={"done": "GO_TO_TABLE"} + ) + # smach.StateMachine.add('GO_TO_TABLE', GoToPreTable(context), transitions={'done' : 'GO_TO_TABLE'}) + # smach.StateMachine.add('PRE_CHECK_TABLE', PreCheckTable(context), transitions={'done' : 'GO_TO_TABLE'}) + smach.StateMachine.add( + "GO_TO_TABLE", GoToTable(context), transitions={"done": "CHECK_TABLE"} + ) + smach.StateMachine.add( + "CHECK_TABLE", + CheckTable(context), + transitions={"not_finished": "GO_TO_TABLE", "finished": "done"}, + ) diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/__init__.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/__init__.py index 581bbebcb..2c362473b 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/__init__.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/__init__.py @@ -2,4 +2,4 @@ from .check_table import CheckTable from .go_to_table import GoToTable from .go_to_pre_table import GoToPreTable -from .pre_check_table import PreCheckTable \ No newline at end of file +from .pre_check_table import PreCheckTable diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/check_table.py 
b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/check_table.py index e3087b74b..6f3a4daeb 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/check_table.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/check_table.py @@ -10,9 +10,10 @@ from coffee_shop.srv import TfTransform, TfTransformRequest import numpy as np + class CheckTable(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['not_finished', 'finished']) + smach.State.__init__(self, outcomes=["not_finished", "finished"]) self.context = context self.detections_objects = [] self.detections_people = [] @@ -27,7 +28,13 @@ def estimate_pose(self, pcl_msg, detection): tf_req.target_frame = String("map") tf_req.point = centroid response = self.context.tf(tf_req) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) + return np.array( + [ + response.target_point.point.x, + response.target_point.point.y, + response.target_point.point.z, + ] + ) def publish_object_points(self): for _, point in self.detections_objects: @@ -41,7 +48,9 @@ def filter_detections_by_pose(self, detections, threshold=0.2): filtered = [] for i, (detection, point) in enumerate(detections): - distances = np.array([np.sqrt(np.sum((point - ref_point) ** 2)) for _, ref_point in filtered]) + distances = np.array( + [np.sqrt(np.sum((point - ref_point) ** 2)) for _, ref_point in filtered] + ) if not np.any(distances < threshold): filtered.append((detection, point)) @@ -51,13 +60,21 @@ def perform_detection(self, pcl_msg, polygon, filter, model): cv_im = pcl_msg_to_cv2(pcl_msg) img_msg = self.context.bridge.cv2_to_imgmsg(cv_im) detections = self.context.yolo(img_msg, model, 0.6, 0.3) - detections = [(det, self.estimate_pose(pcl_msg, det)) for det in detections.detected_objects if det.name in filter] + detections = [ + (det, self.estimate_pose(pcl_msg, det)) + for det in detections.detected_objects + if det.name in filter + ] rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}") rospy.loginfo(f"Boundary: {polygon}") - #satisfied_points = self.context.shapely.are_points_in_polygon_2d(self.arena_polygon, [[pose[0], pose[1]] for (_, pose) in detections]).inside - #detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] - satisfied_points = self.context.shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in detections]).inside - detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] + # satisfied_points = self.context.shapely.are_points_in_polygon_2d(self.arena_polygon, [[pose[0], pose[1]] for (_, pose) in detections]).inside + # detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] + satisfied_points = self.context.shapely.are_points_in_polygon_2d( + polygon, [[pose[0], pose[1]] for (_, pose) in detections] + ).inside + detections = [ + detections[i] for i in range(0, len(detections)) if satisfied_points[i] + ] rospy.loginfo(f"Filtered: {[(det.name, pose) for det, pose in detections]}") return detections @@ -69,11 +86,18 @@ def check_table(self, pcl_msg): target_objects = list(self.context.target_object_remappings.keys()) if not self.biscuits and "biscuits" in target_objects: target_objects.remove("biscuits") - detections_objects_ = self.perform_detection(pcl_msg, self.object_polygon, target_objects, self.context.YOLO_objects_model) + detections_objects_ = self.perform_detection( + pcl_msg, + self.object_polygon, + 
target_objects, + self.context.YOLO_objects_model, + ) self.detections_objects.extend(detections_objects_) def check_people(self, pcl_msg): - detections_people_ = self.perform_detection(pcl_msg, self.person_polygon, ["person"], self.context.YOLO_person_model) + detections_people_ = self.perform_detection( + pcl_msg, self.person_polygon, ["person"], self.context.YOLO_person_model + ) self.detections_people.extend(detections_people_) def execute(self, userdata): @@ -84,34 +108,51 @@ def execute(self, userdata): self.people_debug_images = [] rospy.loginfo(self.context.current_table) - self.object_polygon = rospy.get_param(f"/tables/{self.context.current_table}/objects_cuboid") - self.person_polygon = rospy.get_param(f"/tables/{self.context.current_table}/persons_cuboid") - #self.arena_polygon = rospy.get_param(f"/arena/cuboid") + self.object_polygon = rospy.get_param( + f"/tables/{self.context.current_table}/objects_cuboid" + ) + self.person_polygon = rospy.get_param( + f"/tables/{self.context.current_table}/persons_cuboid" + ) + # self.arena_polygon = rospy.get_param(f"/arena/cuboid") self.detections_objects = [] self.detections_people = [] + motions = [ + "back_to_default", + "check_table", + "check_table_low", + "look_left", + "look_right", + "back_to_default", + ] + # self.detection_sub = rospy.Subscriber("/xtion/depth_registered/points", PointCloud2, self.check) - motions = ["back_to_default", "check_table", "check_table_low", "look_left", "look_right", "back_to_default"] - #self.detection_sub = rospy.Subscriber("/xtion/depth_registered/points", PointCloud2, self.check) - if self.context.current_table == "table2": pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - pm_goal = PlayMotionGoal(motion_name="check_annoying_table_low", skip_planning=True) + pm_goal = PlayMotionGoal( + motion_name="check_annoying_table_low", skip_planning=True + ) self.context.play_motion_client.send_goal_and_wait(pm_goal) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) + pcl_msg = rospy.wait_for_message( + "/xtion/depth_registered/points", PointCloud2 + ) self.check_table(pcl_msg) - pm_goal = PlayMotionGoal(motion_name="look_left", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) + pcl_msg = rospy.wait_for_message( + "/xtion/depth_registered/points", PointCloud2 + ) self.check_people(pcl_msg) pm_goal = PlayMotionGoal(motion_name="look_right", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) + pcl_msg = rospy.wait_for_message( + "/xtion/depth_registered/points", PointCloud2 + ) self.check_people(pcl_msg) pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) @@ -121,7 +162,9 @@ def execute(self, userdata): for motion in motions: pm_goal = PlayMotionGoal(motion_name=motion, skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) + pcl_msg = rospy.wait_for_message( + "/xtion/depth_registered/points", PointCloud2 + ) self.check(pcl_msg) status = "unknown" @@ -134,16 +177,20 @@ def execute(self, userdata): elif len(self.detections_objects) == 0 and len(self.detections_people) == 0: status = "ready" - #self.detection_sub.unregister() - - 
#self.detections_objects = self.filter_detections_by_pose(self.detections_objects, threshold=0.1) - self.detections_people = self.filter_detections_by_pose(self.detections_people, threshold=0.6) + # self.detection_sub.unregister() + + # self.detections_objects = self.filter_detections_by_pose(self.detections_objects, threshold=0.1) + self.detections_people = self.filter_detections_by_pose( + self.detections_people, threshold=0.6 + ) self.publish_object_points() self.publish_people_points() self.context.tables[self.context.current_table]["status"] = status - self.context.tables[self.context.current_table]["people"] = [pose for _, pose in self.detections_people] + self.context.tables[self.context.current_table]["people"] = [ + pose for _, pose in self.detections_people + ] people_count = len(self.detections_people) people_text = "person" if people_count == 1 else "people" @@ -151,5 +198,15 @@ def execute(self, userdata): count_text = f"There {'is' if people_count == 1 else 'are'} {people_count} {people_text}." self.context.voice_controller.sync_tts(f"{status_text} {count_text}") - self.context.start_head_manager("head_manager", '') - return 'not_finished' if len([(label, table) for label, table in self.context.tables.items() if table["status"] == "unvisited"]) else 'finished' + self.context.start_head_manager("head_manager", "") + return ( + "not_finished" + if len( + [ + (label, table) + for label, table in self.context.tables.items() + if table["status"] == "unvisited" + ] + ) + else "finished" + ) diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/go_to_pre_table.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/go_to_pre_table.py index 426fbe22b..d556d4269 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/go_to_pre_table.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/go_to_pre_table.py @@ -4,19 +4,37 @@ from geometry_msgs.msg import Pose, Point, Quaternion import numpy as np + class GoToPreTable(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): robot_x, robot_y = self.context.base_controller.get_pose() - unvisited = [(label, rospy.get_param(f"/tables/{label}")) for label, table in self.context.tables.items() if table["status"] == "unvisited"] - closest_table = min(unvisited, key=lambda table: np.linalg.norm([table[1]["pre_location"]["position"]["x"] - robot_x, table[1]["pre_location"]["position"]["y"] - robot_y])) + unvisited = [ + (label, rospy.get_param(f"/tables/{label}")) + for label, table in self.context.tables.items() + if table["status"] == "unvisited" + ] + closest_table = min( + unvisited, + key=lambda table: np.linalg.norm( + [ + table[1]["pre_location"]["position"]["x"] - robot_x, + table[1]["pre_location"]["position"]["y"] - robot_y, + ] + ), + ) label, next_table = closest_table self.context.voice_controller.async_tts(f"I am going to {label}") - position, orientation = next_table["pre_location"]["position"], next_table["pre_location"]["orientation"] - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) + position, orientation = ( + next_table["pre_location"]["position"], + next_table["pre_location"]["orientation"], + ) + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) self.context.tables[label]["status"] = "currently visiting" 
self.context.current_table = label - return 'done' + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/go_to_table.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/go_to_table.py index c06784ca9..ac070130b 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/go_to_table.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/go_to_table.py @@ -4,19 +4,37 @@ from geometry_msgs.msg import Pose, Point, Quaternion import numpy as np + class GoToTable(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): robot_x, robot_y = self.context.base_controller.get_pose() - unvisited = [(label, rospy.get_param(f"/tables/{label}")) for label, table in self.context.tables.items() if table["status"] == "unvisited"] - closest_table = min(unvisited, key=lambda table: np.linalg.norm([table[1]["location"]["position"]["x"] - robot_x, table[1]["location"]["position"]["y"] - robot_y])) + unvisited = [ + (label, rospy.get_param(f"/tables/{label}")) + for label, table in self.context.tables.items() + if table["status"] == "unvisited" + ] + closest_table = min( + unvisited, + key=lambda table: np.linalg.norm( + [ + table[1]["location"]["position"]["x"] - robot_x, + table[1]["location"]["position"]["y"] - robot_y, + ] + ), + ) label, next_table = closest_table self.context.voice_controller.async_tts(f"I am going to {label}") - position, orientation = next_table["location"]["position"], next_table["location"]["orientation"] - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) + position, orientation = ( + next_table["location"]["position"], + next_table["location"]["orientation"], + ) + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) self.context.tables[label]["status"] = "currently visiting" self.context.current_table = label - return 'done' \ No newline at end of file + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/pre_check_table.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/pre_check_table.py index b59b6bca1..e2ff505fa 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/pre_check_table.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/pre_check_table.py @@ -10,9 +10,10 @@ from coffee_shop.srv import TfTransform, TfTransformRequest import numpy as np + class PreCheckTable(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context self.detections_people = [] @@ -25,7 +26,13 @@ def estimate_pose(self, pcl_msg, detection): tf_req.target_frame = String("map") tf_req.point = centroid response = self.context.tf(tf_req) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) + return np.array( + [ + response.target_point.point.x, + response.target_point.point.y, + response.target_point.point.z, + ] + ) def publish_object_points(self): for _, point in self.detections_objects: @@ -39,7 +46,9 @@ def filter_detections_by_pose(self, detections, threshold=0.2): filtered = [] for i, (detection, point) in enumerate(detections): - distances = np.array([np.sqrt(np.sum((point - ref_point) ** 2)) for _, ref_point in filtered]) + distances = np.array( + 
[np.sqrt(np.sum((point - ref_point) ** 2)) for _, ref_point in filtered] + ) if not np.any(distances < threshold): filtered.append((detection, point)) @@ -49,11 +58,19 @@ def perform_detection(self, pcl_msg, polygon, filter, model): cv_im = pcl_msg_to_cv2(pcl_msg) img_msg = self.context.bridge.cv2_to_imgmsg(cv_im) detections = self.context.yolo(img_msg, model, 0.5, 0.3) - detections = [(det, self.estimate_pose(pcl_msg, det)) for det in detections.detected_objects if det.name in filter] + detections = [ + (det, self.estimate_pose(pcl_msg, det)) + for det in detections.detected_objects + if det.name in filter + ] rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}") rospy.loginfo(f"Boundary: {polygon}") - satisfied_points = self.context.shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in detections]).inside - detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] + satisfied_points = self.context.shapely.are_points_in_polygon_2d( + polygon, [[pose[0], pose[1]] for (_, pose) in detections] + ).inside + detections = [ + detections[i] for i in range(0, len(detections)) if satisfied_points[i] + ] rospy.loginfo(f"Filtered: {[(det.name, pose) for det, pose in detections]}") return detections @@ -61,7 +78,9 @@ def check(self, pcl_msg): self.check_people(pcl_msg) def check_people(self, pcl_msg): - detections_people_ = self.perform_detection(pcl_msg, self.person_polygon, ["person"], self.context.YOLO_person_model) + detections_people_ = self.perform_detection( + pcl_msg, self.person_polygon, ["person"], self.context.YOLO_person_model + ) self.detections_people.extend(detections_people_) def execute(self, userdata): @@ -70,25 +89,34 @@ def execute(self, userdata): self.context.voice_controller.async_tts("I'm taking a quick look at the table") rospy.loginfo(self.context.current_table) - self.person_polygon = rospy.get_param(f"/tables/{self.context.current_table}/persons_cuboid") + self.person_polygon = rospy.get_param( + f"/tables/{self.context.current_table}/persons_cuboid" + ) self.detections_people = [] motions = ["back_to_default", "look_left", "look_right", "back_to_default"] - #self.detection_sub = rospy.Subscriber("/xtion/depth_registered/points", PointCloud2, self.check) + # self.detection_sub = rospy.Subscriber("/xtion/depth_registered/points", PointCloud2, self.check) for motion in motions: pm_goal = PlayMotionGoal(motion_name=motion, skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) + pcl_msg = rospy.wait_for_message( + "/xtion/depth_registered/points", PointCloud2 + ) self.check(pcl_msg) - self.detections_people = self.filter_detections_by_pose(self.detections_people, threshold=0.50) - + self.detections_people = self.filter_detections_by_pose( + self.detections_people, threshold=0.50 + ) people_count = len(self.detections_people) people_text = "person" if people_count == 1 else "people" - self.context.voice_controller.async_tts(f"I saw {people_count} {people_text} so far") + self.context.voice_controller.async_tts( + f"I saw {people_count} {people_text} so far" + ) - self.context.tables[self.context.current_table]["pre_people"] = self.detections_people - self.context.start_head_manager("head_manager", '') - return 'done' \ No newline at end of file + self.context.tables[self.context.current_table][ + "pre_people" + ] = self.detections_people + self.context.start_head_manager("head_manager", "") + return 
"done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/start.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/start.py index a39f5e048..41178e7a2 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/start.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_1/states/start.py @@ -2,12 +2,14 @@ import rospy from std_msgs.msg import Int16 + class Start(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context + def execute(self, userdata): self.context.voice_controller.sync_tts("Starting Phase 1.") self.context.datahub_start_phase.publish(Int16(1)) rospy.loginfo(f"Context: {str(self.context)}") - return 'done' + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/phase_2.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/phase_2.py index dcfcfcbfd..0832c0b24 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/phase_2.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/phase_2.py @@ -1,17 +1,54 @@ import smach -from .states import TakeOrder, MakeOrder, CheckOrder, GoToTable, DeliverOrder, GoToCounter, Start, LoadOrder, WaitForOrder +from .states import ( + TakeOrder, + MakeOrder, + CheckOrder, + GoToTable, + DeliverOrder, + GoToCounter, + Start, + LoadOrder, + WaitForOrder, +) + class Phase2(smach.StateMachine): def __init__(self, context): - smach.StateMachine.__init__(self, outcomes=['done']) - + smach.StateMachine.__init__(self, outcomes=["done"]) + with self: - smach.StateMachine.add('START_PHASE_2', Start(context), transitions={'done' : 'GO_TO_TABLE'}) - smach.StateMachine.add('GO_TO_TABLE', GoToTable(context), transitions={'done' : 'TAKE_ORDER', 'skip' : 'done'}) - smach.StateMachine.add('TAKE_ORDER', TakeOrder(context), transitions={'done' : 'GO_TO_COUNTER'}) - smach.StateMachine.add('GO_TO_COUNTER', GoToCounter(context), transitions={'done' : 'MAKE_ORDER'}) - smach.StateMachine.add('MAKE_ORDER', MakeOrder(context), transitions={'done' : 'WAIT_FOR_ORDER'}) - smach.StateMachine.add('WAIT_FOR_ORDER', WaitForOrder(context), transitions={'done' : 'CHECK_ORDER'}) - smach.StateMachine.add('CHECK_ORDER', CheckOrder(context), transitions={'correct' : 'LOAD_ORDER', 'incorrect' : 'WAIT_FOR_ORDER'}) - smach.StateMachine.add('LOAD_ORDER', LoadOrder(context), transitions={'done' : 'DELIVER_ORDER'}) - smach.StateMachine.add('DELIVER_ORDER', DeliverOrder(context), transitions={'done' : 'done'}) + smach.StateMachine.add( + "START_PHASE_2", Start(context), transitions={"done": "GO_TO_TABLE"} + ) + smach.StateMachine.add( + "GO_TO_TABLE", + GoToTable(context), + transitions={"done": "TAKE_ORDER", "skip": "done"}, + ) + smach.StateMachine.add( + "TAKE_ORDER", TakeOrder(context), transitions={"done": "GO_TO_COUNTER"} + ) + smach.StateMachine.add( + "GO_TO_COUNTER", + GoToCounter(context), + transitions={"done": "MAKE_ORDER"}, + ) + smach.StateMachine.add( + "MAKE_ORDER", MakeOrder(context), transitions={"done": "WAIT_FOR_ORDER"} + ) + smach.StateMachine.add( + "WAIT_FOR_ORDER", + WaitForOrder(context), + transitions={"done": "CHECK_ORDER"}, + ) + smach.StateMachine.add( + "CHECK_ORDER", + CheckOrder(context), + transitions={"correct": "LOAD_ORDER", "incorrect": "WAIT_FOR_ORDER"}, + ) + smach.StateMachine.add( + "LOAD_ORDER", LoadOrder(context), transitions={"done": "DELIVER_ORDER"} + ) + smach.StateMachine.add( + "DELIVER_ORDER", DeliverOrder(context), transitions={"done": "done"} + ) diff --git 
a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/check_order.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/check_order.py index 45e99f893..425ef3166 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/check_order.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/check_order.py @@ -15,7 +15,7 @@ class CheckOrder(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['correct', 'incorrect']) + smach.State.__init__(self, outcomes=["correct", "incorrect"]) self.context = context self.n_checks = 0 @@ -28,13 +28,20 @@ def estimate_pose(self, pcl_msg, detection): tf_req.target_frame = String("map") tf_req.point = centroid response = self.context.tf(tf_req) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) + return np.array( + [ + response.target_point.point.x, + response.target_point.point.y, + response.target_point.point.z, + ] + ) def execute(self, userdata): - if self.n_checks == 3: - self.context.voice_controller.sync_tts("I think I have something in my eyes, I'm struggling to check the order. I trust you that the order is correct!") - return 'correct' + self.context.voice_controller.sync_tts( + "I think I have something in my eyes, I'm struggling to check the order. I trust you that the order is correct!" + ) + return "correct" self.n_checks += 1 @@ -59,10 +66,20 @@ def execute(self, userdata): pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) cv_im = pcl_msg_to_cv2(pcl_msg) img_msg = self.context.bridge.cv2_to_imgmsg(cv_im) - detections = self.context.yolo(img_msg, self.context.YOLO_counter_model, 0.6, 0.3) - detections = [(det, self.estimate_pose(pcl_msg, det)) for det in detections.detected_objects if det.name in self.context.target_object_remappings.keys()] - satisfied_points = self.context.shapely.are_points_in_polygon_2d(counter_corners, [[pose[0], pose[1]] for (_, pose) in detections]).inside - given_order = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] + detections = self.context.yolo( + img_msg, self.context.YOLO_counter_model, 0.6, 0.3 + ) + detections = [ + (det, self.estimate_pose(pcl_msg, det)) + for det in detections.detected_objects + if det.name in self.context.target_object_remappings.keys() + ] + satisfied_points = self.context.shapely.are_points_in_polygon_2d( + counter_corners, [[pose[0], pose[1]] for (_, pose) in detections] + ).inside + given_order = [ + detections[i] for i in range(0, len(detections)) if satisfied_points[i] + ] rospy.loginfo(detections) for _, pose in detections: self.context.publish_object_pose(*pose, "map") @@ -71,18 +88,34 @@ def execute(self, userdata): given_order[:] = [x if x != "biscuits" else "granola" for x in given_order] if sorted(order) == sorted(given_order): - return 'correct' + return "correct" missing_items = list((Counter(order) - Counter(given_order)).elements()) - missing_items_string = ', '.join([f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" for item, count in Counter(missing_items).items()]).replace(', ', ', and ', len(missing_items) - 2) + missing_items_string = ", ".join( + [ + f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" + for item, count in Counter(missing_items).items() + ] + ).replace(", ", ", and ", len(missing_items) - 2) invalid_items = list((Counter(given_order) - 
Counter(order)).elements()) - invalid_items_string = ', '.join([f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" for item, count in Counter(invalid_items).items()]).replace(', ', ', and ', len(invalid_items) - 2) + invalid_items_string = ", ".join( + [ + f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" + for item, count in Counter(invalid_items).items() + ] + ).replace(", ", ", and ", len(invalid_items) - 2) rospy.loginfo(f"Order: {order}, Given order: {given_order}") if not len(invalid_items): - self.context.voice_controller.sync_tts(f"You didn't give me {missing_items_string} which I asked for. Please correct the order.") + self.context.voice_controller.sync_tts( + f"You didn't give me {missing_items_string} which I asked for. Please correct the order." + ) elif not len(missing_items): - self.context.voice_controller.sync_tts(f"You have given me {invalid_items_string} which I didn't ask for. Please correct the order.") + self.context.voice_controller.sync_tts( + f"You have given me {invalid_items_string} which I didn't ask for. Please correct the order." + ) else: - self.context.voice_controller.sync_tts(f"You have given me {invalid_items_string} which I didn't ask for, and didn't give me {missing_items_string} which I asked for. Please correct the order.") - return 'incorrect' + self.context.voice_controller.sync_tts( + f"You have given me {invalid_items_string} which I didn't ask for, and didn't give me {missing_items_string} which I asked for. Please correct the order." + ) + return "incorrect" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/deliver_order.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/deliver_order.py index 8e0bda918..3e9695af6 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/deliver_order.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/deliver_order.py @@ -6,9 +6,10 @@ from play_motion_msgs.msg import PlayMotionGoal import json + class DeliverOrder(smach.State): def __init__(self, context): - smach.StateMachine.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): @@ -16,14 +17,18 @@ def execute(self, userdata): location = rospy.get_param(f"/tables/{self.context.current_table}/location") position = location["position"] orientation = location["orientation"] - #self.context.base_controller.sync_to_radius(self.context.tables[self.context.current_table]["people"][0][0], self.context.tables[self.context.current_table]["people"][0][1], 0.5) - #self.context.base_controller.sync_face_to(self.context.tables[self.context.current_table]["people"][0][0], self.context.tables[self.context.current_table]["people"][0][1]) - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) + # self.context.base_controller.sync_to_radius(self.context.tables[self.context.current_table]["people"][0][0], self.context.tables[self.context.current_table]["people"][0][1], 0.5) + # self.context.base_controller.sync_face_to(self.context.tables[self.context.current_table]["people"][0][0], self.context.tables[self.context.current_table]["people"][0][1]) + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) self.context.base_controller.rotate(np.pi) pm_goal =
PlayMotionGoal(motion_name="load_unload", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - self.context.voice_controller.async_tts("I'll give you some time to unload the order...") + self.context.voice_controller.async_tts( + "I'll give you some time to unload the order..." + ) rospy.sleep(rospy.Duration(10.0)) pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - return 'done' + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/go_to_counter.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/go_to_counter.py index 89b816058..b51534942 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/go_to_counter.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/go_to_counter.py @@ -3,15 +3,19 @@ import rospy from geometry_msgs.msg import Pose, Point, Quaternion + class GoToCounter(smach.State): - def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): - self.context.voice_controller.async_tts("I am going to the counter to retrieve the order") + self.context.voice_controller.async_tts( + "I am going to the counter to retrieve the order" + ) position = rospy.get_param("counter/location/position") orientation = rospy.get_param("counter/location/orientation") - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) - return 'done' \ No newline at end of file + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/go_to_table.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/go_to_table.py index 4db3ff4cf..339d72be0 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/go_to_table.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/go_to_table.py @@ -4,25 +4,43 @@ from geometry_msgs.msg import Pose, Point, Quaternion import numpy as np + class GoToTable(smach.State): - def __init__(self, context): - smach.State.__init__(self, outcomes=['done', 'skip']) + smach.State.__init__(self, outcomes=["done", "skip"]) self.context = context def execute(self, userdata): robot_x, robot_y = self.context.base_controller.get_pose() - tables_need_serving = [(label, rospy.get_param(f"/tables/{label}")) for label, table in self.context.tables.items() if table["status"] == "needs serving"] + tables_need_serving = [ + (label, rospy.get_param(f"/tables/{label}")) + for label, table in self.context.tables.items() + if table["status"] == "needs serving" + ] if not tables_need_serving: - return 'skip' - closest_table = min(tables_need_serving, key=lambda table: np.linalg.norm([table[1]["location"]["position"]["x"] - robot_x, table[1]["location"]["position"]["y"] - robot_y])) + return "skip" + closest_table = min( + tables_need_serving, + key=lambda table: np.linalg.norm( + [ + table[1]["location"]["position"]["x"] - robot_x, + table[1]["location"]["position"]["y"] - robot_y, + ] + ), + ) label, table = closest_table if label == self.context.current_table: - self.context.voice_controller.sync_tts("Lucky me, I am already at the table which needs serving!") + self.context.voice_controller.sync_tts( + "Lucky me, I am already at the table which needs serving!" 
+ ) else: - self.context.voice_controller.sync_tts(f"I am going to {label}, which needs serving") + self.context.voice_controller.sync_tts( + f"I am going to {label}, which needs serving" + ) position = table["location"]["position"] orientation = table["location"]["orientation"] - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) self.context.current_table = label - return 'done' + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/load_order.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/load_order.py index 2bbaf0e91..13ac0961d 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/load_order.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/load_order.py @@ -5,18 +5,22 @@ import json import rospy -class LoadOrder(smach.State): + +class LoadOrder(smach.State): def __init__(self, context): - smach.StateMachine.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): - self.context.voice_controller.sync_tts("That's the right order, I'll turn around so that you can load it") + self.context.voice_controller.sync_tts( + "That's the right order, I'll turn around so that you can load it" + ) self.context.base_controller.rotate(np.pi) pm_goal = PlayMotionGoal(motion_name="load_unload", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - self.context.voice_controller.sync_tts("I'll give you some time to load the order...") + self.context.voice_controller.sync_tts( + "I'll give you some time to load the order..." + ) rospy.sleep(rospy.Duration(10.0)) - self.context.start_head_manager("head_manager", '') - return 'done' + self.context.start_head_manager("head_manager", "") + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/make_order.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/make_order.py index 75a1c736c..d0661e7d4 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/make_order.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/make_order.py @@ -5,16 +5,23 @@ from play_motion_msgs.msg import PlayMotionGoal from std_msgs.msg import String -class MakeOrder(smach.State): + +class MakeOrder(smach.State): def __init__(self, context): - smach.StateMachine.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context - self.tablet_pub = rospy.Publisher("/tablet/screen", String, queue_size=10) + self.tablet_pub = rospy.Publisher("/tablet/screen", String, queue_size=10) def execute(self, userdata): self.context.stop_head_manager("head_manager") order = self.context.tables[self.context.current_table]["order"] - order_string = ', '.join([f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" for item, count in Counter(order).items()]).replace(', ', ', and ', len(order)-2) - self.context.voice_controller.sync_tts(f"Please get me {order_string}.
Make sure you place the items in clear view of the robot.") - return 'done' + order_string = ", ".join( + [ + f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" + for item, count in Counter(order).items() + ] + ).replace(", ", ", and ", len(order) - 2) + self.context.voice_controller.sync_tts( + f"Please get me {order_string}. Make sure you place the items in clear view of the robot." + ) + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/start.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/start.py index 90988f7f4..2152cb6a6 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/start.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/start.py @@ -2,13 +2,15 @@ import rospy from std_msgs.msg import Int16 + class Start(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context + def execute(self, userdata): self.context.datahub_stop_phase.publish(Int16(1)) self.context.voice_controller.sync_tts("Starting Phase 2.") self.context.datahub_start_phase.publish(Int16(2)) rospy.loginfo(f"Context: {str(self.context)}") - return 'done' + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/take_order.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/take_order.py index 04b3aacc2..9fcac428a 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/take_order.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/take_order.py @@ -10,17 +10,19 @@ from coffee_shop_ui.msg import Order from std_msgs.msg import String + class TakeOrder(smach.State): - def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context - self.tablet_pub = rospy.Publisher("/tablet/screen", String, queue_size=10) + self.tablet_pub = rospy.Publisher("/tablet/screen", String, queue_size=10) def listen(self): resp = self.context.speech(True) if not resp.success: - self.context.voice_controller.sync_tts(self.context.get_random_retry_utterance()) + self.context.voice_controller.sync_tts( + self.context.get_random_retry_utterance() + ) return self.listen() resp = json.loads(resp.json_response) rospy.loginfo(resp) @@ -30,12 +32,16 @@ def get_order(self): resp = self.listen() if resp["intent"]["name"] != "make_order": rospy.logwarn("The intent was wrong") - self.context.voice_controller.sync_tts(self.context.get_random_retry_utterance()) + self.context.voice_controller.sync_tts( + self.context.get_random_retry_utterance() + ) return self.get_order() items = resp["entities"].get("item", []) if not items: rospy.logwarn("There were no items") - self.context.voice_controller.sync_tts(self.context.get_random_retry_utterance()) + self.context.voice_controller.sync_tts( + self.context.get_random_retry_utterance() + ) return self.get_order() quantities = resp["entities"].get("CARDINAL", []) quantified_items = [] @@ -54,21 +60,27 @@ def get_order(self): items = [] for quantity, item in quantified_items: if item not in self.context.target_object_remappings.keys(): - matches = difflib.get_close_matches(item.lower(), self.context.target_object_remappings.keys()) + matches = difflib.get_close_matches( + item.lower(), self.context.target_object_remappings.keys() + ) if matches: item = matches[0] else: continue - items.extend([item.lower()]*quantity) + 
items.extend([item.lower()] * quantity) if not items: - self.context.voice_controller.sync_tts(self.context.get_random_retry_utterance()) + self.context.voice_controller.sync_tts( + self.context.get_random_retry_utterance() + ) return self.get_order() return items def affirm(self): resp = self.listen() if resp["intent"]["name"] not in ["affirm", "deny"]: - self.context.voice_controller.sync_tts(self.context.get_random_retry_utterance()) + self.context.voice_controller.sync_tts( + self.context.get_random_retry_utterance() + ) return self.affirm() return resp["intent"]["name"] == "affirm" @@ -76,53 +88,85 @@ def execute(self, userdata): self.context.stop_head_manager("head_manager") if self.context.tablet: - self.context.voice_controller.sync_tts("Hello, I'm TIAGo, I'll be serving you today. Please use the tablet to make your order.") + self.context.voice_controller.sync_tts( + "Hello, I'm TIAGo, I'll be serving you today. Please use the tablet to make your order." + ) pm_goal = PlayMotionGoal(motion_name="tablet", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) self.tablet_pub.publish(String("order")) - order = rospy.wait_for_message("/tablet/order", Order).products + order = rospy.wait_for_message("/tablet/order", Order).products else: ph_goal = PointHeadGoal() ph_goal.max_velocity = 1.0 - ph_goal.pointing_frame = 'head_2_link' + ph_goal.pointing_frame = "head_2_link" ph_goal.pointing_axis = Point(1.0, 0.0, 0.0) - ph_goal.target.header.frame_id = 'map' + ph_goal.target.header.frame_id = "map" ph_goal.target.point = Point(*self.context.get_interaction_person()) pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) if len(self.context.tables[self.context.current_table]["people"]) == 1: self.context.point_head_client.send_goal_and_wait(ph_goal) - self.context.voice_controller.sync_tts("Hello, I'm TIAGo, I'll be serving you today.") - self.context.voice_controller.sync_tts("Please state your order after the beep - this indicates that I am listening.") + self.context.voice_controller.sync_tts( + "Hello, I'm TIAGo, I'll be serving you today." + ) + self.context.voice_controller.sync_tts( + "Please state your order after the beep - this indicates that I am listening." + ) elif len(self.context.tables[self.context.current_table]["people"]) == 2: - self.context.voice_controller.sync_tts("Greetings to both of you, I'm TIAGo, I'll be serving you today.") + self.context.voice_controller.sync_tts( + "Greetings to both of you, I'm TIAGo, I'll be serving you today." + ) self.context.point_head_client.send_goal_and_wait(ph_goal) - self.context.voice_controller.sync_tts("I choose you to be the one in charge.") - self.context.voice_controller.sync_tts("Please state the order for the two of you after the beep - this indicates that I am listening.") + self.context.voice_controller.sync_tts( + "I choose you to be the one in charge." + ) + self.context.voice_controller.sync_tts( + "Please state the order for the two of you after the beep - this indicates that I am listening." + ) else: - self.context.voice_controller.sync_tts("Salutations to all of you, I'm TIAGo, I'll be serving you today.") + self.context.voice_controller.sync_tts( + "Salutations to all of you, I'm TIAGo, I'll be serving you today." 
+ ) self.context.point_head_client.send_goal_and_wait(ph_goal) - self.context.voice_controller.sync_tts("I choose you to be the one in charge.") - self.context.voice_controller.sync_tts("Please state the order for the group after the beep - this indicates that I am listening.") + self.context.voice_controller.sync_tts( + "I choose you to be the one in charge." + ) + self.context.voice_controller.sync_tts( + "Please state the order for the group after the beep - this indicates that I am listening." + ) order = [] while True: order.extend(self.get_order()) - items_string = ', '.join([f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" for item, count in Counter(order).items()]).replace(', ', ', and ', len(order)-2) + items_string = ", ".join( + [ + f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" + for item, count in Counter(order).items() + ] + ).replace(", ", ", and ", len(order) - 2) - self.context.voice_controller.sync_tts(f"You asked for {items_string} so far, can I get you anything else? Please answer yes or no after the beep.") + self.context.voice_controller.sync_tts( + f"You asked for {items_string} so far, can I get you anything else? Please answer yes or no after the beep." + ) if self.affirm(): - self.context.voice_controller.sync_tts("Okay, please state the additional items after the beep.") + self.context.voice_controller.sync_tts( + "Okay, please state the additional items after the beep." + ) else: break - order_string = ', '.join([f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" for item, count in Counter(order).items()]).replace(', ', ', and ', len(order)-2) + order_string = ", ".join( + [ + f"{count} {self.context.target_object_remappings[item] if count == 1 else self.context.target_object_remappings[item]+'s'}" + for item, count in Counter(order).items() + ] + ).replace(", ", ", and ", len(order) - 2) self.context.voice_controller.sync_tts(f"Your order is {order_string}") self.context.tables[self.context.current_table]["order"] = order pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - self.context.start_head_manager("head_manager", '') - return 'done' + self.context.start_head_manager("head_manager", "") + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/wait_for_order.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/wait_for_order.py index dc2359404..b040641e1 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/wait_for_order.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_2/states/wait_for_order.py @@ -6,17 +6,19 @@ import difflib from std_msgs.msg import Empty, String -class WaitForOrder(smach.State): + +class WaitForOrder(smach.State): def __init__(self, context): - smach.StateMachine.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context - self.tablet_pub = rospy.Publisher("/tablet/screen", String, queue_size=10) + self.tablet_pub = rospy.Publisher("/tablet/screen", String, queue_size=10) def listen(self): resp = self.context.speech(True) if not resp.success: - self.context.voice_controller.sync_tts(self.context.get_random_retry_utterance()) + self.context.voice_controller.sync_tts( + self.context.get_random_retry_utterance() + ) return
self.listen() resp = json.loads(resp.json_response) rospy.loginfo(resp) @@ -25,23 +27,29 @@ def listen(self): def affirm(self): resp = self.listen() if resp["intent"]["name"] not in ["affirm", "deny"]: - self.context.voice_controller.sync_tts(self.context.get_random_retry_utterance()) + self.context.voice_controller.sync_tts( + self.context.get_random_retry_utterance() + ) return self.affirm() return resp["intent"]["name"] == "affirm" def execute(self, userdata): if self.context.tablet: - self.context.voice_controller.sync_tts("Please press 'done' when you are ready for me to check the order.") + self.context.voice_controller.sync_tts( + "Please press 'done' when you are ready for me to check the order." + ) pm_goal = PlayMotionGoal(motion_name="tablet", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) self.tablet_pub.publish(String("done")) rospy.wait_for_message("/tablet/done", Empty) - return 'done' + return "done" else: pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) while True: rospy.sleep(rospy.Duration(5.0)) - self.context.voice_controller.sync_tts("Is the order ready to be checked? Please answer with yes or no after the beep.") + self.context.voice_controller.sync_tts( + "Is the order ready to be checked? Please answer with yes or no after the beep." + ) if self.affirm(): - return 'done' + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/phase_3.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/phase_3.py index d4d0e9e8f..e2804b401 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/phase_3.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/phase_3.py @@ -1,16 +1,61 @@ import smach -from .states import GoToWaitLocation, LookForPerson, GoToPerson, GreetPerson, GuidePerson, Start, LookForPersonLaser, GoCloserToPerson +from .states import ( + GoToWaitLocation, + LookForPerson, + GoToPerson, + GreetPerson, + GuidePerson, + Start, + LookForPersonLaser, + GoCloserToPerson, +) + class Phase3(smach.StateMachine): def __init__(self, context): - smach.StateMachine.__init__(self, outcomes=['done']) + smach.StateMachine.__init__(self, outcomes=["done"]) with self: - smach.StateMachine.add('START_PHASE_3', Start(context), transitions={'done':'GO_TO_WAIT_LOCATION'}) - smach.StateMachine.add('GO_TO_WAIT_LOCATION', GoToWaitLocation(context), transitions={'done' : 'done', 'not done' : 'LOOK_FOR_PERSON_LASER'}) - smach.StateMachine.add('LOOK_FOR_PERSON_LASER', LookForPersonLaser(context), transitions={'found' : 'GO_CLOSER_TO_PERSON', 'not found' : 'LOOK_FOR_PERSON_LASER'}) - smach.StateMachine.add('GO_CLOSER_TO_PERSON', GoCloserToPerson(context), transitions={'done' : 'LOOK_FOR_PERSON'}) - smach.StateMachine.add('LOOK_FOR_PERSON', LookForPerson(context), transitions={'found' : 'GO_TO_PERSON', 'not found' : 'LOOK_FOR_PERSON'}) - smach.StateMachine.add('GO_TO_PERSON', GoToPerson(context), transitions={'done' : 'GREET_PERSON'}) - smach.StateMachine.add('GREET_PERSON', GreetPerson(context), transitions={'done' : 'GUIDE_PERSON'}) - smach.StateMachine.add('GUIDE_PERSON', GuidePerson(context), transitions={'done' : 'GO_TO_WAIT_LOCATION'}) + smach.StateMachine.add( + "START_PHASE_3", + Start(context), + transitions={"done": "GO_TO_WAIT_LOCATION"}, + ) + smach.StateMachine.add( + "GO_TO_WAIT_LOCATION", + GoToWaitLocation(context), + transitions={"done": "done", "not done": "LOOK_FOR_PERSON_LASER"}, + ) + smach.StateMachine.add( + 
"LOOK_FOR_PERSON_LASER", + LookForPersonLaser(context), + transitions={ + "found": "GO_CLOSER_TO_PERSON", + "not found": "LOOK_FOR_PERSON_LASER", + }, + ) + smach.StateMachine.add( + "GO_CLOSER_TO_PERSON", + GoCloserToPerson(context), + transitions={"done": "LOOK_FOR_PERSON"}, + ) + smach.StateMachine.add( + "LOOK_FOR_PERSON", + LookForPerson(context), + transitions={"found": "GO_TO_PERSON", "not found": "LOOK_FOR_PERSON"}, + ) + smach.StateMachine.add( + "GO_TO_PERSON", + GoToPerson(context), + transitions={"done": "GREET_PERSON"}, + ) + smach.StateMachine.add( + "GREET_PERSON", + GreetPerson(context), + transitions={"done": "GUIDE_PERSON"}, + ) + smach.StateMachine.add( + "GUIDE_PERSON", + GuidePerson(context), + transitions={"done": "GO_TO_WAIT_LOCATION"}, + ) diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/__init__.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/__init__.py index 7b783d85c..ca59274bd 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/__init__.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/__init__.py @@ -5,4 +5,4 @@ from .go_to_wait_location import GoToWaitLocation from .start import Start from .go_closer_to_person import GoCloserToPerson -from .look_for_person_laser import LookForPersonLaser \ No newline at end of file +from .look_for_person_laser import LookForPersonLaser diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_closer_to_person.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_closer_to_person.py index 7548e0fab..5e8eeb707 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_closer_to_person.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_closer_to_person.py @@ -3,14 +3,19 @@ from geometry_msgs.msg import Pose, Point, Quaternion import rospy + class GoCloserToPerson(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): - self.context.voice_controller.async_tts("I think there is a customer waiting. I will go and investigate.") + self.context.voice_controller.async_tts( + "I think there is a customer waiting. I will go and investigate." 
+ ) location = rospy.get_param("/wait/approach1") position, orientation = location["position"], location["orientation"] - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) - return 'done' \ No newline at end of file + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_to_person.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_to_person.py index a925b0b15..15f16947c 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_to_person.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_to_person.py @@ -3,15 +3,20 @@ from geometry_msgs.msg import Pose, Point, Quaternion import rospy + class GoToPerson(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): location = rospy.get_param("/wait/approach2") position, orientation = location["position"], location["orientation"] - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) if self.context.new_customer_pose: - self.context.base_controller.sync_face_to(self.context.new_customer_pose[0], self.context.new_customer_pose[1]) - return 'done' \ No newline at end of file + self.context.base_controller.sync_face_to( + self.context.new_customer_pose[0], self.context.new_customer_pose[1] + ) + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_to_wait_location.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_to_wait_location.py index b7515e0be..8ba02a796 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_to_wait_location.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/go_to_wait_location.py @@ -3,18 +3,24 @@ import rospy from geometry_msgs.msg import Pose, Point, Quaternion + class GoToWaitLocation(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done', 'not done']) + smach.State.__init__(self, outcomes=["done", "not done"]) self.context = context self.done = False def execute(self, userdata): wait_location = rospy.get_param("/wait") - position, orientation = wait_location["location"]["position"], wait_location["location"]["orientation"] - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) + position, orientation = ( + wait_location["location"]["position"], + wait_location["location"]["orientation"], + ) + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) if not self.done: self.done = True - return 'not done' + return "not done" else: - return 'done' \ No newline at end of file + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/greet_person.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/greet_person.py index cd9517a1c..e4ae41446 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/greet_person.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/greet_person.py @@ -7,7 +7,7 @@ class GreetPerson(smach.State): def __init__(self, context): - 
smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): @@ -21,8 +21,10 @@ def execute(self, userdata): ph_goal.target.point = Point(*self.context.new_customer_pose) self.context.point_head_client.send_goal_and_wait(ph_goal) """ - self.context.voice_controller.sync_tts("Hi there! My name is TIAGO. Please follow me, I'll guide you to a table.") + self.context.voice_controller.sync_tts( + "Hi there! My name is TIAGO. Please follow me, I'll guide you to a table." + ) pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - self.context.start_head_manager("head_manager", '') - return 'done' \ No newline at end of file + self.context.start_head_manager("head_manager", "") + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/guide_person.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/guide_person.py index 414804b7c..7028bb6f6 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/guide_person.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/guide_person.py @@ -15,9 +15,10 @@ from coffee_shop.srv import TfTransform, TfTransformRequest import numpy as np + class GuidePerson(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def estimate_pose(self, pcl_msg, detection): @@ -29,18 +30,31 @@ def estimate_pose(self, pcl_msg, detection): tf_req.target_frame = String("map") tf_req.point = centroid response = self.context.tf(tf_req) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) - + return np.array( + [ + response.target_point.point.x, + response.target_point.point.y, + response.target_point.point.z, + ] + ) def perform_detection(self, pcl_msg, polygon, filter, model): cv_im = pcl_msg_to_cv2(pcl_msg) img_msg = self.context.bridge.cv2_to_imgmsg(cv_im) detections = self.context.yolo(img_msg, model, 0.5, 0.3) - detections = [(det, self.estimate_pose(pcl_msg, det)) for det in detections.detected_objects if det.name in filter] + detections = [ + (det, self.estimate_pose(pcl_msg, det)) + for det in detections.detected_objects + if det.name in filter + ] rospy.loginfo(f"All: {[(det.name, pose) for det, pose in detections]}") rospy.loginfo(f"Boundary: {polygon}") - satisfied_points = self.context.shapely.are_points_in_polygon_2d(polygon, [[pose[0], pose[1]] for (_, pose) in detections]).inside - detections = [detections[i] for i in range(0, len(detections)) if satisfied_points[i]] + satisfied_points = self.context.shapely.are_points_in_polygon_2d( + polygon, [[pose[0], pose[1]] for (_, pose) in detections] + ).inside + detections = [ + detections[i] for i in range(0, len(detections)) if satisfied_points[i] + ] rospy.loginfo(f"Filtered: {[(det.name, pose) for det, pose in detections]}") return detections @@ -48,32 +62,69 @@ def execute(self, userdata): self.context.stop_head_manager("head_manager") robot_x, robot_y = self.context.base_controller.get_pose() utter_clean_phrase = False - empty_tables = [(label, rospy.get_param(f"/tables/{label}")) for label, table in self.context.tables.items() if table["status"] == "ready"] + empty_tables = [ + (label, rospy.get_param(f"/tables/{label}")) + for label, table in self.context.tables.items() + if table["status"] == "ready" + ] if not empty_tables: - 
needs_cleaning_tables = [(label, rospy.get_param(f"/tables/{label}")) for label, table in self.context.tables.items() if table["status"] == "needs cleaning"] + needs_cleaning_tables = [ + (label, rospy.get_param(f"/tables/{label}")) + for label, table in self.context.tables.items() + if table["status"] == "needs cleaning" + ] if needs_cleaning_tables: - closest_table = min(needs_cleaning_tables, key=lambda table: np.linalg.norm([table[1]["location"]["position"]["x"] - robot_x, table[1]["location"]["position"]["y"] - robot_y])) + closest_table = min( + needs_cleaning_tables, + key=lambda table: np.linalg.norm( + [ + table[1]["location"]["position"]["x"] - robot_x, + table[1]["location"]["position"]["y"] - robot_y, + ] + ), + ) utter_clean_phrase = True else: - self.context.voice_controller.sync_tts("Sorry, we don't have any free tables at the moment. Please come back later.") - return 'done' + self.context.voice_controller.sync_tts( + "Sorry, we don't have any free tables at the moment. Please come back later." + ) + return "done" else: - closest_table = min(empty_tables, key=lambda table: np.linalg.norm([table[1]["location"]["position"]["x"] - robot_x, table[1]["location"]["position"]["y"] - robot_y])) + closest_table = min( + empty_tables, + key=lambda table: np.linalg.norm( + [ + table[1]["location"]["position"]["x"] - robot_x, + table[1]["location"]["position"]["y"] - robot_y, + ] + ), + ) label, table = closest_table self.context.current_table = label - position, orientation = table["location"]["position"], table["location"]["orientation"] - self.context.base_controller.sync_to_pose(Pose(position=Point(**position), orientation=Quaternion(**orientation))) + position, orientation = ( + table["location"]["position"], + table["location"]["orientation"], + ) + self.context.base_controller.sync_to_pose( + Pose(position=Point(**position), orientation=Quaternion(**orientation)) + ) pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) if utter_clean_phrase: - self.context.voice_controller.sync_tts("Please be seated, I will wait for you to sit down! Someone will come to clean this table for you") + self.context.voice_controller.sync_tts( + "Please be seated, I will wait for you to sit down! Someone will come to clean this table for you" + ) else: - self.context.voice_controller.sync_tts("Please be seated, I will wait for you to sit down!") - - self.person_polygon = rospy.get_param(f"/tables/{self.context.current_table}/persons_cuboid") + self.context.voice_controller.sync_tts( + "Please be seated, I will wait for you to sit down!" 
+ ) + + self.person_polygon = rospy.get_param( + f"/tables/{self.context.current_table}/persons_cuboid" + ) motions = ["look_left", "look_right"] customer_seated = False @@ -81,8 +132,20 @@ def execute(self, userdata): for motion in motions: pm_goal = PlayMotionGoal(motion_name=motion, skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - customer_seated = len(self.perform_detection(pcl_msg, self.person_polygon, ["person"], self.context.YOLO_person_model)) > 0 + pcl_msg = rospy.wait_for_message( + "/xtion/depth_registered/points", PointCloud2 + ) + customer_seated = ( + len( + self.perform_detection( + pcl_msg, + self.person_polygon, + ["person"], + self.context.YOLO_person_model, + ) + ) + > 0 + ) if customer_seated: break if customer_seated: @@ -92,5 +155,5 @@ def execute(self, userdata): self.context.play_motion_client.send_goal_and_wait(pm_goal) self.context.tables[label]["status"] = "needs serving" - self.context.start_head_manager("head_manager", '') - return 'done' + self.context.start_head_manager("head_manager", "") + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/look_for_person.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/look_for_person.py index 8760c23a4..96563debd 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/look_for_person.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/look_for_person.py @@ -11,9 +11,10 @@ from common_math import pcl_msg_to_cv2 from play_motion_msgs.msg import PlayMotionGoal + class LookForPerson(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['found', 'not found']) + smach.State.__init__(self, outcomes=["found", "not found"]) self.context = context def estimate_pose(self, pcl_msg, cv_im, detection): @@ -32,13 +33,19 @@ def estimate_pose(self, pcl_msg, cv_im, detection): x, y, z = np.nanmean(xyz_points, axis=0) centroid = PointStamped() - centroid.point = Point(x,y,z) + centroid.point = Point(x, y, z) centroid.header = pcl_msg.header tf_req = TfTransformRequest() tf_req.target_frame = String("map") tf_req.point = centroid response = self.context.tf(tf_req) - return np.array([response.target_point.point.x, response.target_point.point.y, response.target_point.point.z]) + return np.array( + [ + response.target_point.point.x, + response.target_point.point.y, + response.target_point.point.z, + ] + ) def execute(self, userdata): self.context.stop_head_manager("head_manager") @@ -50,18 +57,26 @@ def execute(self, userdata): pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) cv_im = pcl_msg_to_cv2(pcl_msg) img_msg = self.context.bridge.cv2_to_imgmsg(cv_im) - detections = self.context.yolo(img_msg, self.context.YOLO_person_model, 0.3, 0.3) - detections = [(det, self.estimate_pose(pcl_msg, cv_im, det)) for det in detections.detected_objects if det.name == "person"] - satisfied_points = self.context.shapely.are_points_in_polygon_2d(corners, [[pose[0], pose[1]] for (_, pose) in detections]).inside + detections = self.context.yolo( + img_msg, self.context.YOLO_person_model, 0.3, 0.3 + ) + detections = [ + (det, self.estimate_pose(pcl_msg, cv_im, det)) + for det in detections.detected_objects + if det.name == "person" + ] + satisfied_points = self.context.shapely.are_points_in_polygon_2d( + corners, [[pose[0], pose[1]] for (_, pose) in detections] + ).inside if len(detections): for i in range(0, 
len(detections)): pose = detections[i][1] self.context.publish_person_pose(*pose, "map") if satisfied_points[i]: self.context.new_customer_pose = pose.tolist() - return 'found' + return "found" rospy.sleep(rospy.Duration(1.0)) - self.context.start_head_manager("head_manager", '') + self.context.start_head_manager("head_manager", "") - return 'not found' \ No newline at end of file + return "not found" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/look_for_person_laser.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/look_for_person_laser.py index 871be5731..de436ff35 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/look_for_person_laser.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/look_for_person_laser.py @@ -12,28 +12,33 @@ from coffee_shop.srv import LatestTransformRequest, ApplyTransformRequest import time + def timeit_rospy(method): - """Decorator for timing ROS methods""" - def timed(*args, **kw): - ts = time.time() - result = method(*args, **kw) - te = time.time() - rospy.loginfo('%r %2.2f s' % (method.__name__, (te - ts))) - return result - return timed + """Decorator for timing ROS methods""" -class LookForPersonLaser(smach.State): + def timed(*args, **kw): + ts = time.time() + result = method(*args, **kw) + te = time.time() + rospy.loginfo("%r %2.2f s" % (method.__name__, (te - ts))) + return result + + return timed + +class LookForPersonLaser(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['found', 'not found']) + smach.State.__init__(self, outcomes=["found", "not found"]) self.context = context self.camera = PinholeCameraModel() - self.camera.fromCameraInfo(rospy.wait_for_message("/xtion/rgb/camera_info", CameraInfo)) + self.camera.fromCameraInfo( + rospy.wait_for_message("/xtion/rgb/camera_info", CameraInfo) + ) self.corners = rospy.get_param("/wait/cuboid") @timeit_rospy def get_points_and_pixels_from_laser(self, msg): - """ Converts a LaserScan message to a collection of points in camera frame, with their corresponding pixel values in a flat array. The method pads out the points to add vertical "pillars" to the point cloud. + """Converts a LaserScan message to a collection of points in camera frame, with their corresponding pixel values in a flat array. The method pads out the points to add vertical "pillars" to the point cloud. Args: msg (LaserScan): ROS Laser Scan message from /scan topic. 
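Aside: a minimal, self-contained usage sketch of the `timeit_rospy` decorator defined in the hunk above. The decorator body is copied from this diff; the `project_scan` function, the `timeit_demo` node name, and the half-second sleep are invented purely for illustration.

import time

import rospy


def timeit_rospy(method):
    """Decorator for timing ROS methods"""

    def timed(*args, **kw):
        ts = time.time()
        result = method(*args, **kw)
        te = time.time()
        rospy.loginfo("%r %2.2f s" % (method.__name__, (te - ts)))
        return result

    return timed


@timeit_rospy
def project_scan():
    # Stand-in for real work, e.g. projecting a LaserScan to a point cloud.
    rospy.sleep(0.5)


if __name__ == "__main__":
    rospy.init_node("timeit_demo")
    project_scan()  # logs something like: 'project_scan' 0.50 s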
@@ -43,20 +48,22 @@ def get_points_and_pixels_from_laser(self, msg): """ # First get the laser scan points, and then convert to camera frame pcl_msg = lg.LaserProjection().projectLaser(msg) - pcl_points = [p for p in pc2.read_points(pcl_msg, field_names=("x, y, z"), skip_nans=True)] - + pcl_points = [ + p for p in pc2.read_points(pcl_msg, field_names=("x, y, z"), skip_nans=True) + ] + tf_req = LatestTransformRequest() tf_req.target_frame = "xtion_rgb_optical_frame" - tf_req.from_frame= "base_laser_link" + tf_req.from_frame = "base_laser_link" t = self.context.tf_latest(tf_req) padded_points = [] pixels = [] for point in pcl_points: # Pad out the points to add vertical "pillars" to the point cloud - for z in np.linspace(0., 1., 5): + for z in np.linspace(0.0, 1.0, 5): padded_points.append(Point(x=point[0], y=point[1], z=z)) - + apply_req = ApplyTransformRequest() apply_req.points = padded_points apply_req.transform = t.transform @@ -65,7 +72,7 @@ def get_points_and_pixels_from_laser(self, msg): padded_converted_points = [] for p in res.new_points: pt = (p.x, p.y, p.z) - u,v = self.camera.project3dToPixel(pt) + u, v = self.camera.project3dToPixel(pt) # Filter out points that are outside the camera frame if u >= 0 and u < 640 and v >= 0 and v < 480: pixels.append(u) @@ -73,10 +80,10 @@ def get_points_and_pixels_from_laser(self, msg): padded_converted_points.append(pt) return padded_converted_points, pixels - + @timeit_rospy def convert_points_to_map_frame(self, points, from_frame="xtion_rgb_optical_frame"): - """ Converts a list of points in camera frame to a list of points in map frame. + """Converts a list of points in camera frame to a list of points in map frame. Args: points (List): A list of tuples containing points in camera frame. @@ -85,11 +92,13 @@ def convert_points_to_map_frame(self, points, from_frame="xtion_rgb_optical_fram """ tf_req = LatestTransformRequest() tf_req.target_frame = "map" - tf_req.from_frame= from_frame + tf_req.from_frame = from_frame t = self.context.tf_latest(tf_req) apply_req = ApplyTransformRequest() - apply_req.points = [Point(x=point[0], y=point[1], z=point[2]) for point in points] + apply_req.points = [ + Point(x=point[0], y=point[1], z=point[2]) for point in points + ] apply_req.transform = t.transform res = self.context.tf_apply(apply_req) converted_points = [(point.x, point.y, point.z) for point in res.new_points] @@ -100,20 +109,28 @@ def execute(self, userdata): self.context.stop_head_manager("head_manager") pm_goal = PlayMotionGoal(motion_name="back_to_default", skip_planning=True) self.context.play_motion_client.send_goal_and_wait(pm_goal) - + lsr_scan = rospy.wait_for_message("/scan", LaserScan) img_msg = rospy.wait_for_message("/xtion/rgb/image_raw", Image) - + points, pixels = self.get_points_and_pixels_from_laser(lsr_scan) - detections = self.context.yolo(img_msg,self.context.YOLO_person_model, 0.3, 0.3) - + detections = self.context.yolo( + img_msg, self.context.YOLO_person_model, 0.3, 0.3 + ) + for detection in detections.detected_objects: if detection.name == "person": - decision = self.context.shapely.are_points_in_polygon_2d_flatarr(detection.xyseg, pixels) + decision = self.context.shapely.are_points_in_polygon_2d_flatarr( + detection.xyseg, pixels + ) idx = [idx for idx, el in enumerate(decision.inside) if el] - filtered_points = self.convert_points_to_map_frame([points[i] for i in idx]) + filtered_points = self.convert_points_to_map_frame( + [points[i] for i in idx] + ) if self.corners is not None: - waiting_area = 
self.context.shapely.are_points_in_polygon_2d(self.corners, [[p[0], p[1]] for p in filtered_points]) + waiting_area = self.context.shapely.are_points_in_polygon_2d( + self.corners, [[p[0], p[1]] for p in filtered_points] + ) idx = [idx for idx, el in enumerate(waiting_area.inside) if el] points_inside_area = [filtered_points[i] for i in idx] if len(points_inside_area): @@ -121,15 +138,15 @@ def execute(self, userdata): self.context.publish_person_pose(*point, "map") self.context.new_customer_pose = point.tolist() - return 'found' + return "found" else: - #mean of filtered points + # mean of filtered points point = np.mean(filtered_points, axis=0) self.context.publish_person_pose(*point, "map") self.context.new_customer_pose = point.tolist() - return 'found' + return "found" - self.context.start_head_manager("head_manager", '') + self.context.start_head_manager("head_manager", "") - return 'not found' \ No newline at end of file + return "not found" diff --git a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/start.py b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/start.py index 328408907..0a79d7492 100644 --- a/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/start.py +++ b/tasks/coffee_shop/src/coffee_shop/phases/phase_3/states/start.py @@ -2,9 +2,10 @@ import rospy from std_msgs.msg import Int16 + class Start(smach.State): def __init__(self, context): - smach.State.__init__(self, outcomes=['done']) + smach.State.__init__(self, outcomes=["done"]) self.context = context def execute(self, userdata): @@ -13,4 +14,4 @@ def execute(self, userdata): self.context.datahub_start_phase.publish(Int16(3)) rospy.loginfo(f"Context: {str(self.context)}") self.context.voice_controller.async_tts("I am going to wait for a new customer") - return 'done' + return "done" diff --git a/tasks/coffee_shop/src/coffee_shop/state_machine.py b/tasks/coffee_shop/src/coffee_shop/state_machine.py index 5e46ddcab..befa53358 100644 --- a/tasks/coffee_shop/src/coffee_shop/state_machine.py +++ b/tasks/coffee_shop/src/coffee_shop/state_machine.py @@ -2,10 +2,17 @@ import smach from coffee_shop.phases import Phase1, Phase2, Phase3 + class CoffeeShop(smach.StateMachine): def __init__(self, context): - smach.StateMachine.__init__(self, outcomes=['end']) + smach.StateMachine.__init__(self, outcomes=["end"]) with self: - smach.StateMachine.add('PHASE_1', Phase1(context), transitions={'done': 'PHASE_2'}) - smach.StateMachine.add('PHASE_2', Phase2(context), transitions={'done': 'PHASE_3'}) - smach.StateMachine.add('PHASE_3', Phase3(context), transitions={'done': 'end'}) + smach.StateMachine.add( + "PHASE_1", Phase1(context), transitions={"done": "PHASE_2"} + ) + smach.StateMachine.add( + "PHASE_2", Phase2(context), transitions={"done": "PHASE_3"} + ) + smach.StateMachine.add( + "PHASE_3", Phase3(context), transitions={"done": "end"} + ) diff --git a/tasks/gpsr/scripts/main.py b/tasks/gpsr/scripts/main.py index 4c0e15f39..780b91a4e 100644 --- a/tasks/gpsr/scripts/main.py +++ b/tasks/gpsr/scripts/main.py @@ -4,7 +4,7 @@ import sys from typing import Dict from gpsr.load_known_data import GPSRDataLoader -from gpsr.state_machine_factory import build_state_machine +from gpsr.state_machine_factory import GPSRStateMachineFactory from gpsr.regex_command_parser import Configuration from gpsr.states import CommandParserStateMachine @@ -34,7 +34,7 @@ def main(): command_parser_sm.execute() parsed_command: Dict = command_parser_sm.userdata.parsed_command rospy.loginfo(f"Parsed command: {parsed_command}") - sm = 
build_state_machine(parsed_command) + sm = GPSRStateMachineFactory(parsed_command).build_state_machine() sm.execute() diff --git a/tasks/gpsr/src/gpsr/regex_command_parser.py b/tasks/gpsr/src/gpsr/regex_command_parser.py index ea091f052..88777e44d 100644 --- a/tasks/gpsr/src/gpsr/regex_command_parser.py +++ b/tasks/gpsr/src/gpsr/regex_command_parser.py @@ -36,11 +36,11 @@ def list_to_regex(list: List[str], key: Union[str, None] = None): "greet": ["greet", "salute", "say hello to", "introduce yourself to"], # "remember": ["meet", "contact", "get to know", "get acquainted with"], <--- LOOKS UNUSED "count": ["tell me how many"], - "describe": ["tell me how", "describe"], - "offer": ["offer"], + # "describe": ["tell me how", "describe"], <---- LOOKS UNUSED + # "offer": ["offer"], <---- LOOKS UNUSED "follow": ["follow"], "guide": ["guide", "escort", "take", "lead"], - "accompany": ["accompany"], + # "accompany": ["accompany"], <---- LOOKS UNUSED } diff --git a/tasks/gpsr/src/gpsr/state_machine_factory.py b/tasks/gpsr/src/gpsr/state_machine_factory.py index d7bc33e4d..0208686dd 100644 --- a/tasks/gpsr/src/gpsr/state_machine_factory.py +++ b/tasks/gpsr/src/gpsr/state_machine_factory.py @@ -1,78 +1,151 @@ #!/usr/bin/env python3 import rospy import smach -from smach_ros import ServiceState from typing import Dict, List -from lasr_skills import GoToLocation, FindNamedPerson +from lasr_skills import GoToLocation, FindNamedPerson # type: ignore from gpsr.states import Talk -STATE_COUNT = 0 - - -def increment_state_count() -> int: - global STATE_COUNT - STATE_COUNT += 1 - return STATE_COUNT - - -def build_state_machine(parsed_command: Dict) -> smach.StateMachine: - """Constructs the parameterized state machine for the GPSR task, - given the parsed command. - - Args: - parsed_command (Dict): parsed command. - - Returns: - smach.StateMachine: paramaterized state machine ready to be executed. - """ - command_verbs: List[str] = parsed_command["commands"] - command_params: List[Dict] = parsed_command["command_params"] - sm = smach.StateMachine(outcomes=["succeeded", "failed"]) - with sm: - for command_verb, command_param in zip(command_verbs, command_params): - if command_verb == "greet": - if "name" in command_param: - location_param_room = ( - f"/gpsr/arena/rooms/{command_param['location']}" - ) - location_param_pose = f"{location_param_room}/pose" - sm.add( - f"STATE_{increment_state_count()}", - GoToLocation(location_param=location_param_pose), - transitions={ - "succeeded": f"STATE_{STATE_COUNT + 1}", - "failed": "failed", - }, - ) - sm.add( - f"STATE_{increment_state_count()}", - FindNamedPerson( - name=command_param["name"], - location_param=location_param_room, - ), - transitions={ - "succeeded": f"STATE_{STATE_COUNT + 1}", - "failed": "failed", - }, - ) - elif "clothes" in command_param: - pass - else: - raise ValueError( - "Greet command received with no name or clothes in command parameters" - ) - elif command_verb == "talk": - if "gesture" in command_param: - pass - elif "talk" in command_param: - sm.add( - f"STATE_{increment_state_count()}", - Talk(command_param["talk"]), - transitions={"succeeded": "succeeded", "failed": "failed"}, - ) + +class GPSRStateMachineFactory: + def __init__(self, parsed_command: Dict): + """Stores the parsed command, initializes the state count, + and initialises the state machine. + + Args: + parsed_command (Dict): parsed command output by the + command parser.
+ """ + self.state_count: int = 0 + self.parsed_command: dict = parsed_command + self.sm = smach.StateMachine(outcomes=["succeeded", "failed"]) + + def _increment_state_count(self): + self.state_count += 1 + return self.state_count + + def _handle_take_command(self, command_param: Dict): + raise NotImplementedError("Take command not implemented") + + def _handle_place_command(self, command_param: Dict): + raise NotImplementedError("Place command not implemented") + + def _handle_deliver_command(self, command_param: Dict): + raise NotImplementedError("Deliver command not implemented") + + def _handle_bring_command(self, command_param: Dict): + raise NotImplementedError("Bring command not implemented") + + def _handle_go_command(self, command_param: Dict): + raise NotImplementedError("Go command not implemented") + + def _handle_find_command(self, command_param: Dict): + raise NotImplementedError("Find command not implemented") + + def _handle_talk_command(self, command_param: Dict): + if "gesture" in command_param: + raise NotImplementedError("Talk command with gesture not implemented") + elif "text" in command_param: + self.sm.add( + f"STATE_{self._increment_state_count()}", + Talk(text=command_param["text"]), + transitions={ + "succeeded": f"STATE_{self.state_count + 1}", + "failed": "failed", + }, + ) + else: + raise ValueError( + "Talk command received with no text or gesture in command parameters" + ) + + def _handle_answer_command(self, command_param: Dict): + raise NotImplementedError("Answer command not implemented") + + def _handle_meet_command(self, command_param: Dict): + raise NotImplementedError("Meet command not implemented") + + def _handle_tell_command(self, command_param: Dict): + raise NotImplementedError("Tell command not implemented") + + def _handle_greet_command(self, command_param: Dict): + if "name" in command_param: + location_param_room = f"/gpsr/arena/rooms/{command_param['location']}" + location_param_pose = f"{location_param_room}/pose" + self.sm.add( + f"STATE_{self._increment_state_count()}", + GoToLocation(location_param=location_param_pose), + transitions={ + "succeeded": f"STATE_{self.state_count + 1}", + "failed": "failed", + }, + ) + self.sm.add( + f"STATE_{self._increment_state_count()}", + FindNamedPerson( + name=command_param["name"], + location_param=location_param_room, + ), + transitions={ + "succeeded": f"STATE_{self.state_count + 1}", + "failed": "failed", + }, + ) + elif "clothes" in command_param: + raise NotImplementedError("Greet command with clothes not implemented") + else: + raise ValueError( + "Greet command received with no name or clothes in command parameters" + ) + + def _handle_count_command(self, command_params: Dict): + raise NotImplementedError("Count command not implemented") + + def _handle_follow_command(self, command_params: Dict): + raise NotImplementedError("Follow command not implemented") + + def _handle_guide_command(self, command_params: Dict): + raise NotImplementedError("Guide command not implemented") + + def build_state_machine(self) -> smach.StateMachine: + with self.sm: + command_verbs: List[str] = self.parsed_command["commands"] + command_params: List[Dict] = self.parsed_command["command_params"] + for command_verb, command_param in zip(command_verbs, command_params): + if command_verb == "take": + self._handle_take_command(command_param) + elif command_verb == "place": + self._handle_place_command(command_param) + elif command_verb == "deliver": + self._handle_deliver_command(command_param) + elif command_verb == 
"bring": + self._handle_bring_command(command_param) + elif command_verb == "go": + self._handle_go_command(command_param) + elif command_verb == "find": + self._handle_find_command(command_param) + elif command_verb == "talk": + self._handle_talk_command(command_param) + elif command_verb == "answer": + self._handle_answer_command(command_param) + elif command_verb == "meet": + self._handle_meet_command(command_param) + elif command_verb == "tell": + self._handle_tell_command(command_param) + elif command_verb == "greet": + self._handle_greet_command(command_param) + elif command_verb == "count": + self._handle_count_command(command_param) + elif command_verb == "follow": + self._handle_follow_command(command_param) + elif command_verb == "guide": + self._handle_guide_command(command_param) else: - raise ValueError( - "Talk command received with no gesture or talk in command parameters" - ) + raise ValueError(f"Invalid command verb: {command_verb}") + + self.sm.add( + f"STATE_{self.state_count}", + Talk(text="I have completed the task."), + transitions={"succeeded": "succeeded", "failed": "failed"}, + ) - return sm + return self.sm diff --git a/tasks/lift/scripts/datahub.py b/tasks/lift/scripts/datahub.py index 35c303ba1..607ebb6cf 100755 --- a/tasks/lift/scripts/datahub.py +++ b/tasks/lift/scripts/datahub.py @@ -8,21 +8,62 @@ DATAHUB_API = "https://ecs-mnemosyne.azurewebsites.net/api/Hub" ROBOT_ID = "019b1442-728d-48c4-1de7-08dbb68bfcf1" + def start_episode(_): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "STARTEPISODE", "Episode" : 1}) + requests.post( + DATAHUB_API, + json={ + "RobotId": ROBOT_ID, + "Competition": "ERL", + "Action": "STARTEPISODE", + "Episode": 1, + }, + ) + def stop_episode(_): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "STOPEPISODE", "Episode" : 1}) + requests.post( + DATAHUB_API, + json={ + "RobotId": ROBOT_ID, + "Competition": "ERL", + "Action": "STOPEPISODE", + "Episode": 1, + }, + ) + def start_phase(msg): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "STARTPHASE", "Episode" : 1, "Phase" : msg.data}) + requests.post( + DATAHUB_API, + json={ + "RobotId": ROBOT_ID, + "Competition": "ERL", + "Action": "STARTPHASE", + "Episode": 1, + "Phase": msg.data, + }, + ) + def stop_phase(msg): - requests.post(DATAHUB_API, json={"RobotId": ROBOT_ID, "Competition": "ERL", "Action": "STOPPHASE", "Episode": 1, - "Phase": msg.data}) + requests.post( + DATAHUB_API, + json={ + "RobotId": ROBOT_ID, + "Competition": "ERL", + "Action": "STOPPHASE", + "Episode": 1, + "Phase": msg.data, + }, + ) + def ping(_): - requests.post(DATAHUB_API, json={"RobotId" : ROBOT_ID, "Competition" : "ERL", "Action" : "PING"}) + requests.post( + DATAHUB_API, json={"RobotId": ROBOT_ID, "Competition": "ERL", "Action": "PING"} + ) + rospy.Subscriber("/datahub/start_episode", Empty, start_episode) rospy.Subscriber("/datahub/stop_episode", Empty, stop_episode) @@ -30,4 +71,4 @@ def ping(_): rospy.Subscriber("/datahub/stop_phase", Int16, stop_phase) rospy.Subscriber("/datahub/ping", Int16, ping) -rospy.spin() \ No newline at end of file +rospy.spin() diff --git a/tasks/lift/setup.py b/tasks/lift/setup.py index 906c7099a..fa3dc06cf 100644 --- a/tasks/lift/setup.py +++ b/tasks/lift/setup.py @@ -4,9 +4,6 @@ from catkin_pkg.python_setup import generate_distutils_setup # fetch values from package.xml -setup_args = generate_distutils_setup( - packages=['lift'], - package_dir={'': 
'src'} -) +setup_args = generate_distutils_setup(packages=["lift"], package_dir={"": "src"}) -setup(**setup_args) \ No newline at end of file +setup(**setup_args) diff --git a/tasks/lift/src/lift/default.py b/tasks/lift/src/lift/default.py index 0e70c83fc..a49866d58 100644 --- a/tasks/lift/src/lift/default.py +++ b/tasks/lift/src/lift/default.py @@ -7,8 +7,16 @@ from interaction_module.srv import AudioAndTextInteraction from play_motion_msgs.msg import PlayMotionGoal, PlayMotionAction from lasr_speech.srv import Speech -from tf_module.srv import BaseTransformRequest, ApplyTransformRequest, LatestTransformRequest, BaseTransform, \ - LatestTransform, ApplyTransform, TfTransform, TfTransformRequest +from tf_module.srv import ( + BaseTransformRequest, + ApplyTransformRequest, + LatestTransformRequest, + BaseTransform, + LatestTransform, + ApplyTransform, + TfTransform, + TfTransformRequest, +) from cv_bridge3 import CvBridge from lasr_shapely import LasrShapely @@ -17,12 +25,13 @@ rasa = True + class Default: def __init__(self): rospy.loginfo("YOLO is here") - self.yolo = rospy.ServiceProxy('/yolov8/detect', YoloDetection) + self.yolo = rospy.ServiceProxy("/yolov8/detect", YoloDetection) rospy.loginfo("PM is here") - self.pm = actionlib.SimpleActionClient('/play_motion', PlayMotionAction) + self.pm = actionlib.SimpleActionClient("/play_motion", PlayMotionAction) rospy.loginfo("Controllers is here") self.voice = Voice() self.bridge = CvBridge() @@ -34,24 +43,27 @@ def __init__(self): self.cmd = CmdVelController() rospy.loginfo("Voice is here") - self.tf = rospy.ServiceProxy('tf_transform', TfTransform) + self.tf = rospy.ServiceProxy("tf_transform", TfTransform) print("TF is here") - self.tf_base = rospy.ServiceProxy('base_transform', BaseTransform) - self.tf_latest = rospy.ServiceProxy('latest_transform', LatestTransform) - self.tf_apply = rospy.ServiceProxy('apply_transform', ApplyTransform) + self.tf_base = rospy.ServiceProxy("base_transform", BaseTransform) + self.tf_latest = rospy.ServiceProxy("latest_transform", LatestTransform) + self.tf_apply = rospy.ServiceProxy("apply_transform", ApplyTransform) if rasa: rospy.wait_for_service("/lasr_speech/transcribe_and_parse") rospy.loginfo("SPEECH RASA is here") - self.speech = rospy.ServiceProxy("/lasr_speech/transcribe_and_parse", Speech) + self.speech = rospy.ServiceProxy( + "/lasr_speech/transcribe_and_parse", Speech + ) else: pass rospy.loginfo("SPEECH Dialogflow is here") - self.speech = rospy.ServiceProxy("/interaction_module", AudioAndTextInteraction) + self.speech = rospy.ServiceProxy( + "/interaction_module", AudioAndTextInteraction + ) - if not rospy.get_published_topics(namespace='/pal_head_manager'): + if not rospy.get_published_topics(namespace="/pal_head_manager"): rospy.loginfo("Is SIM ---> True") rospy.set_param("/is_simulation", True) else: rospy.loginfo("Is SIM ---> FALSE") rospy.set_param("/is_simulation", False) - diff --git a/tasks/lift/src/lift/defaults.py b/tasks/lift/src/lift/defaults.py index 5c516ecca..a76a89da4 100755 --- a/tasks/lift/src/lift/defaults.py +++ b/tasks/lift/src/lift/defaults.py @@ -2,29 +2,38 @@ import os, rospkg, shutil, rospy, sys rospy.loginfo("setting debug") -DEBUG = rospy.get_param('debug') # 3 +DEBUG = rospy.get_param("debug") # 3 rospy.loginfo("setting Debug with images ") -DEBUG_WITH_IMAGES = rospy.get_param('debug_with_images') +DEBUG_WITH_IMAGES = rospy.get_param("debug_with_images") # for matplotlib rospy.loginfo("setting Plot SHOW ") -PLOT_SHOW = rospy.get_param('plot_show') +PLOT_SHOW = 
rospy.get_param("plot_show") rospy.loginfo("setting Plot Save ") -PLOT_SAVE = rospy.get_param('plot_save') +PLOT_SAVE = rospy.get_param("plot_save") rospy.loginfo("setting Publish Markers ") -PUBLISH_MARKERS = rospy.get_param('publish_markers') +PUBLISH_MARKERS = rospy.get_param("publish_markers") rospy.loginfo("setting Debug Path ") DEBUG_PATH = os.getcwd() rospy.loginfo("setting Rasa ") -RASA = rospy.get_param('rasa') +RASA = rospy.get_param("rasa") -rospy.logwarn("DEBUG: {DEBUG}, DEBUG_WITH_IMAGES: {DEBUG_WITH_IMAGES}, PLOT_SHOW: {PLOT_SHOW}, PLOT_SAVE: {PLOT_SAVE}".format(**locals())) +rospy.logwarn( + "DEBUG: {DEBUG}, DEBUG_WITH_IMAGES: {DEBUG_WITH_IMAGES}, PLOT_SHOW: {PLOT_SHOW}, PLOT_SAVE: {PLOT_SAVE}".format( + **locals() + ) +) if DEBUG_WITH_IMAGES: - if not os.path.exists(os.path.join(rospkg.RosPack().get_path("lift"), "debug_lift")): + if not os.path.exists( + os.path.join(rospkg.RosPack().get_path("lift"), "debug_lift") + ): os.mkdir(os.path.join(rospkg.RosPack().get_path("lift"), "debug_lift")) - DEBUG_PATH = os.path.join(rospkg.RosPack().get_path("lift"), "debug_lift",) + DEBUG_PATH = os.path.join( + rospkg.RosPack().get_path("lift"), + "debug_lift", + ) if os.path.exists(DEBUG_PATH): shutil.rmtree(DEBUG_PATH) os.mkdir(DEBUG_PATH) @@ -32,4 +41,5 @@ # for plots in waypoitns import random -TEST = random.randint(0, 1000) \ No newline at end of file + +TEST = random.randint(0, 1000) diff --git a/tasks/lift/src/lift/main.py b/tasks/lift/src/lift/main.py index 9503ead63..596eb4374 100755 --- a/tasks/lift/src/lift/main.py +++ b/tasks/lift/src/lift/main.py @@ -7,4 +7,4 @@ lift = Lift() outcome = lift.execute() print(outcome) - rospy.spin() \ No newline at end of file + rospy.spin() diff --git a/tasks/lift/src/lift/phases/__init__.py b/tasks/lift/src/lift/phases/__init__.py index c0482038d..b664996d7 100644 --- a/tasks/lift/src/lift/phases/__init__.py +++ b/tasks/lift/src/lift/phases/__init__.py @@ -1,3 +1,3 @@ from .phase1 import Phase1 from .phase2 import Phase2 -from .phase3 import Phase3 \ No newline at end of file +from .phase3 import Phase3 diff --git a/tasks/lift/src/lift/phases/phase1/__init__.py b/tasks/lift/src/lift/phases/phase1/__init__.py index e9fb4560a..bd192c627 100644 --- a/tasks/lift/src/lift/phases/phase1/__init__.py +++ b/tasks/lift/src/lift/phases/phase1/__init__.py @@ -1 +1 @@ -from .phase1 import Phase1 \ No newline at end of file +from .phase1 import Phase1 diff --git a/tasks/lift/src/lift/phases/phase1/phase1.py b/tasks/lift/src/lift/phases/phase1/phase1.py index 0888b303a..f9621d73b 100755 --- a/tasks/lift/src/lift/phases/phase1/phase1.py +++ b/tasks/lift/src/lift/phases/phase1/phase1.py @@ -1,10 +1,15 @@ import smach -from .states import StartPhase1 +from .states import StartPhase1 + class Phase1(smach.StateMachine): def __init__(self, default): - smach.StateMachine.__init__(self, outcomes=['success']) + smach.StateMachine.__init__(self, outcomes=["success"]) with self: pass - smach.StateMachine.add('START_PHASE_1', StartPhase1(default), transitions={'success': 'success'}) \ No newline at end of file + smach.StateMachine.add( + "START_PHASE_1", + StartPhase1(default), + transitions={"success": "success"}, + ) diff --git a/tasks/lift/src/lift/phases/phase1/states/start_phase_1.py b/tasks/lift/src/lift/phases/phase1/states/start_phase_1.py index 16279fe95..529662f65 100644 --- a/tasks/lift/src/lift/phases/phase1/states/start_phase_1.py +++ b/tasks/lift/src/lift/phases/phase1/states/start_phase_1.py @@ -5,13 +5,15 @@ class StartPhase1(smach.State): def 
__init__(self, default): - smach.State.__init__(self, outcomes=['success']) + smach.State.__init__(self, outcomes=["success"]) self.default = default def execute(self, userdata): self.default.voice.speak("Starting Phase 1.") self.default.controllers.torso_controller.sync_reach_to(0.25) - res = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/starting/pose')) + res = self.default.controllers.base_controller.ensure_sync_to_pose( + get_pose_from_param("/starting/pose") + ) rospy.logerr(res) self.default.voice.speak("Hi, my name is Tiago.") @@ -21,6 +23,4 @@ def execute(self, userdata): self.default.voice.speak("Phase 1 is really short so let's continue!") self.default.controllers.torso_controller.sync_reach_to(0.2) - - - return 'success' + return "success" diff --git a/tasks/lift/src/lift/phases/phase2/phase2.py b/tasks/lift/src/lift/phases/phase2/phase2.py index 1acec597d..6a78f9a5e 100644 --- a/tasks/lift/src/lift/phases/phase2/phase2.py +++ b/tasks/lift/src/lift/phases/phase2/phase2.py @@ -1,18 +1,69 @@ import smach -from .states import GoToLift, CheckOpenDoor, NavigateInLift, AnnounceArrival, WaitForPeople, FacePerson, DeclareFloor, StartPhase2, CheckAvailableExit, Negotiate, InsideLiftSM +from .states import ( + GoToLift, + CheckOpenDoor, + NavigateInLift, + AnnounceArrival, + WaitForPeople, + FacePerson, + DeclareFloor, + StartPhase2, + CheckAvailableExit, + Negotiate, + InsideLiftSM, +) + + class Phase2(smach.StateMachine): def __init__(self, default): - smach.StateMachine.__init__(self, outcomes=['success']) + smach.StateMachine.__init__(self, outcomes=["success"]) with self: - smach.StateMachine.add('START_PHASE_2', StartPhase2(default), transitions={'success': 'GO_TO_LIFT'}) - smach.StateMachine.add('GO_TO_LIFT', GoToLift(default), transitions={'success': 'ANNOUNCE_ARRIVAL'}) - smach.StateMachine.add('ANNOUNCE_ARRIVAL', AnnounceArrival(default), transitions={'success': 'CHECK_OPEN_DOOR'}) - smach.StateMachine.add('CHECK_OPEN_DOOR', CheckOpenDoor(default), transitions={'success': 'WAIT_FOR_PEOPLE', 'failed': 'CHECK_OPEN_DOOR'}) - smach.StateMachine.add('WAIT_FOR_PEOPLE', WaitForPeople(default), transitions={'success': 'NAVIGATE_IN_LIFT', 'failed': 'WAIT_FOR_PEOPLE'}) - smach.StateMachine.add('NAVIGATE_IN_LIFT', NavigateInLift(default), transitions={'success': 'FACE_PERSON'}) - smach.StateMachine.add('FACE_PERSON', FacePerson(default), transitions={'success': 'DECLARE_FLOOR', 'failed': 'FACE_PERSON'}) - smach.StateMachine.add('DECLARE_FLOOR', DeclareFloor(default), transitions={'success': 'INSIDE_LIFT_SM', 'failed': 'DECLARE_FLOOR'}) - smach.StateMachine.add('INSIDE_LIFT_SM', InsideLiftSM(default), transitions={'success': 'success'}) - - + smach.StateMachine.add( + "START_PHASE_2", + StartPhase2(default), + transitions={"success": "GO_TO_LIFT"}, + ) + smach.StateMachine.add( + "GO_TO_LIFT", + GoToLift(default), + transitions={"success": "ANNOUNCE_ARRIVAL"}, + ) + smach.StateMachine.add( + "ANNOUNCE_ARRIVAL", + AnnounceArrival(default), + transitions={"success": "CHECK_OPEN_DOOR"}, + ) + smach.StateMachine.add( + "CHECK_OPEN_DOOR", + CheckOpenDoor(default), + transitions={"success": "WAIT_FOR_PEOPLE", "failed": "CHECK_OPEN_DOOR"}, + ) + smach.StateMachine.add( + "WAIT_FOR_PEOPLE", + WaitForPeople(default), + transitions={ + "success": "NAVIGATE_IN_LIFT", + "failed": "WAIT_FOR_PEOPLE", + }, + ) + smach.StateMachine.add( + "NAVIGATE_IN_LIFT", + NavigateInLift(default), + transitions={"success": "FACE_PERSON"}, + ) + smach.StateMachine.add( + 
"FACE_PERSON", + FacePerson(default), + transitions={"success": "DECLARE_FLOOR", "failed": "FACE_PERSON"}, + ) + smach.StateMachine.add( + "DECLARE_FLOOR", + DeclareFloor(default), + transitions={"success": "INSIDE_LIFT_SM", "failed": "DECLARE_FLOOR"}, + ) + smach.StateMachine.add( + "INSIDE_LIFT_SM", + InsideLiftSM(default), + transitions={"success": "success"}, + ) diff --git a/tasks/lift/src/lift/phases/phase2/states/announce_arrival.py b/tasks/lift/src/lift/phases/phase2/states/announce_arrival.py index 3efcf3781..d06788822 100644 --- a/tasks/lift/src/lift/phases/phase2/states/announce_arrival.py +++ b/tasks/lift/src/lift/phases/phase2/states/announce_arrival.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 import smach, rospy + class AnnounceArrival(smach.State): def __init__(self, default): - smach.State.__init__(self, outcomes=['success']) + smach.State.__init__(self, outcomes=["success"]) self.default = default def execute(self, userdata): self.default.voice.speak("I arrived at the lift.") - return 'success' \ No newline at end of file + return "success" diff --git a/tasks/lift/src/lift/phases/phase2/states/check_available_exit.py b/tasks/lift/src/lift/phases/phase2/states/check_available_exit.py index d5455f4af..94b2ab957 100644 --- a/tasks/lift/src/lift/phases/phase2/states/check_available_exit.py +++ b/tasks/lift/src/lift/phases/phase2/states/check_available_exit.py @@ -8,7 +8,7 @@ class CheckAvailableExit(smach.State): def __init__(self, default): - smach.State.__init__(self, outcomes=['success', 'failed', 'wait']) + smach.State.__init__(self, outcomes=["success", "failed", "wait"]) self.default = default def execute(self, userdata): @@ -18,7 +18,9 @@ def execute(self, userdata): self.default.controllers.head_controller.look_straight() - self.default.voice.speak("I really really want to go to the floor {}.".format(floor)) + self.default.voice.speak( + "I really really want to go to the floor {}.".format(floor) + ) answer = "no" if RASA: self.default.voice.speak("What floor is this?") @@ -27,14 +29,16 @@ def execute(self, userdata): except: floor_number = 3 - if (floor_number == floor): + if floor_number == floor: answer = "yes" new_answer = "no" if answer == "yes": - self.default.voice.speak("Great! Let's finally exit to floor {}.".format(floor)) + self.default.voice.speak( + "Great! Let's finally exit to floor {}.".format(floor) + ) self.default.controllers.torso_controller.sync_reach_to(0.2) - return 'success' + return "success" else: self.default.voice.speak("Does anyone want to exit on this floor?") if RASA: @@ -46,8 +50,8 @@ def execute(self, userdata): if new_answer == "yes": self.default.voice.speak("Great! 
Let's see how this will happen.") self.default.controllers.torso_controller.sync_reach_to(0.2) - return 'wait' + return "wait" self.default.voice.speak("We can continue this lift journey") self.default.controllers.torso_controller.sync_reach_to(0.2) - return 'failed' + return "failed" diff --git a/tasks/lift/src/lift/phases/phase2/states/check_open_door.py b/tasks/lift/src/lift/phases/phase2/states/check_open_door.py index b8aebe8dd..0b639c433 100644 --- a/tasks/lift/src/lift/phases/phase2/states/check_open_door.py +++ b/tasks/lift/src/lift/phases/phase2/states/check_open_door.py @@ -10,6 +10,7 @@ from narrow_space_navigation.waypoints import * from read_pcl_info.pcl_helpers import limit_laser_scan import rospy + # from tf_module.tf_transforms import tranform_pose door_open = False @@ -56,11 +57,10 @@ class CheckOpenDoor(smach.State): def __init__(self, default): # smach.State.__init__(self, outcomes=['success']) - smach.State.__init__(self, outcomes=['success', 'failed']) + smach.State.__init__(self, outcomes=["success", "failed"]) self.default = default - def get_joke(self): if not used_jokes: used_jokes.extend(RANDOM_LIFT_JOKES) @@ -76,17 +76,25 @@ def execute(self, userdata): # rotate to face the door self.default.voice.speak("Rotating to face the door") - self.default.controllers.base_controller.rotate_to_face_object(object_name='/door/pose') + self.default.controllers.base_controller.rotate_to_face_object( + object_name="/door/pose" + ) topic = "/counter_lift/counter" if in_lift else "/counter_door/counter" - message = "I am in the lift. Waiting for the doors to open" if in_lift else "I arrived at the door. Waiting for the doors to open" + message = ( + "I am in the lift. Waiting for the doors to open" + if in_lift + else "I arrived at the door. Waiting for the doors to open" + ) res = counter(topic=topic) self.default.voice.speak(message) if res == "counter": - return 'success' + return "success" - self.default.controllers.base_controller.rotate_to_face_object(object_name='/door/pose') + self.default.controllers.base_controller.rotate_to_face_object( + object_name="/door/pose" + ) # tell a joke self.default.voice.speak("I will tell you a joke in the meantime.") @@ -102,21 +110,21 @@ def execute(self, userdata): if not door_detected: message = "The door is open." if in_lift else "The door is open." self.default.voice.speak(message) - return 'success' + return "success" # ensure you get out of the loop and go to the next state - counter_door_open = counter(topic="/counter_door_open/counter", count_default=10) + counter_door_open = counter( + topic="/counter_door_open/counter", count_default=10 + ) if counter_door_open == "counter": # TODO: maybe change to success :) - return 'failed' + return "failed" if not door_detected: self.default.voice.speak("Great stuff! 
The door is open.") - return 'success' + return "success" self.default.voice.speak("I am still waiting for the door to open") rospy.sleep(1) - return 'success' - - + return "success" diff --git a/tasks/lift/src/lift/phases/phase2/states/declare_floor.py b/tasks/lift/src/lift/phases/phase2/states/declare_floor.py index f924d3c4d..f91d8fb65 100644 --- a/tasks/lift/src/lift/phases/phase2/states/declare_floor.py +++ b/tasks/lift/src/lift/phases/phase2/states/declare_floor.py @@ -1,15 +1,20 @@ #!/usr/bin/env python3 import smach import rospy -from interaction_module.srv import AudioAndTextInteraction, AudioAndTextInteractionRequest, \ - AudioAndTextInteractionResponse +from interaction_module.srv import ( + AudioAndTextInteraction, + AudioAndTextInteractionRequest, + AudioAndTextInteractionResponse, +) import json from lift.defaults import TEST, PLOT_SHOW, PLOT_SAVE, DEBUG_PATH, DEBUG, RASA -#from speech_helper import listen, affirm + +# from speech_helper import listen, affirm + class DeclareFloor(smach.State): def __init__(self, default): - smach.State.__init__(self, outcomes=['success', 'failed']) + smach.State.__init__(self, outcomes=["success", "failed"]) self.default = default def execute(self, userdata): @@ -20,9 +25,8 @@ def execute(self, userdata): # maybe add the counter here as well self.default.voice.speak("I would love to go to the floor {}.".format(floor)) - self.default.voice.speak("I don't see a button around, so I will assume the non-existent button is pressed.") - - return 'success' - - + self.default.voice.speak( + "I don't see a button around, so I will assume the non-existent button is pressed." + ) + return "success" diff --git a/tasks/lift/src/lift/phases/phase2/states/face_person.py b/tasks/lift/src/lift/phases/phase2/states/face_person.py index ee5325b86..c7fd50612 100644 --- a/tasks/lift/src/lift/phases/phase2/states/face_person.py +++ b/tasks/lift/src/lift/phases/phase2/states/face_person.py @@ -1,28 +1,34 @@ #!/usr/bin/env python3 import smach, rospy from tiago_controllers.controllers.look_at import LookAt -from lasr_object_detection_yolo.detect_objects_v8 import detect_objects, perform_detection, debug +from lasr_object_detection_yolo.detect_objects_v8 import ( + detect_objects, + perform_detection, + debug, +) from sensor_msgs.msg import PointCloud2 HORIZONTAL = 0.8 VERTICAL = 0.3 + class FacePerson(smach.State): def __init__(self, default): - smach.State.__init__(self, outcomes=['success', 'failed']) + smach.State.__init__(self, outcomes=["success", "failed"]) self.default = default - self.search_points = [(-1 * HORIZONTAL, VERTICAL), - (0, VERTICAL), - (HORIZONTAL, VERTICAL), - (HORIZONTAL, 0), - (0, 0), - (-1 * HORIZONTAL, 0), - (-1 * HORIZONTAL, -1 * VERTICAL), - (0, -1 * VERTICAL), - (HORIZONTAL, -1 * VERTICAL), - (0, 0)] - + self.search_points = [ + (-1 * HORIZONTAL, VERTICAL), + (0, VERTICAL), + (HORIZONTAL, VERTICAL), + (HORIZONTAL, 0), + (0, 0), + (-1 * HORIZONTAL, 0), + (-1 * HORIZONTAL, -1 * VERTICAL), + (0, -1 * VERTICAL), + (HORIZONTAL, -1 * VERTICAL), + (0, 0), + ] def search_for_person(self): for point in self.search_points: @@ -32,7 +38,6 @@ def search_for_person(self): return people[0], point[0] return None - def execute(self, userdata): turns = 4 for i in range(turns): @@ -40,11 +45,16 @@ def execute(self, userdata): closest_person, head_rot = self.search_for_person() self.default.controllers.torso_controller.sync_reach_to(0.25) if closest_person: - look_at = LookAt(self.default.controllers.head_controller, 
self.default.controllers.base_controller, self.default.cmd, "person") + look_at = LookAt( + self.default.controllers.head_controller, + self.default.controllers.base_controller, + self.default.cmd, + "person", + ) look_at.look_at(closest_person.xywh, head_rot) - return 'success' + return "success" except TypeError: - self.default.cmd.rotate(60, 360/turns, True) + self.default.cmd.rotate(60, 360 / turns, True) self.default.voice.speak("I can't see anyone!") - return 'failed' \ No newline at end of file + return "failed" diff --git a/tasks/lift/src/lift/phases/phase2/states/go_to_lift.py b/tasks/lift/src/lift/phases/phase2/states/go_to_lift.py index 9900ce02d..2010f0998 100755 --- a/tasks/lift/src/lift/phases/phase2/states/go_to_lift.py +++ b/tasks/lift/src/lift/phases/phase2/states/go_to_lift.py @@ -4,7 +4,10 @@ import rospy from geometry_msgs.msg import Pose, Point, Quaternion from tiago_controllers.helpers.pose_helpers import get_pose_from_param -from choosing_wait_position.final_lift_key_point.predict_pos import make_prediction, visualise_predictions +from choosing_wait_position.final_lift_key_point.predict_pos import ( + make_prediction, + visualise_predictions, +) from narrow_space_navigation.waypoints import * from tiago_controllers.controllers.controllers import Controllers from PIL import Image @@ -15,14 +18,13 @@ class GoToLift(smach.State): def __init__(self, default): - smach.State.__init__(self, outcomes=['success']) + smach.State.__init__(self, outcomes=["success"]) self.default = default # self.controllers = controllers # self.voice = voice def rotate_keypoint(self, keypoint, degrees, image_o, image_r): - width_o, height_o = image_o.size image_origin = [width_o / 2, height_o / 2] width_r, height_r = image_r.size @@ -34,8 +36,12 @@ def rotate_keypoint(self, keypoint, degrees, image_o, image_r): tx = x - ox ty = y - oy - x_rotate = tx * math.cos(math.radians(degrees)) - ty * math.sin(math.radians(degrees)) - y_rotate = ty * math.cos(math.radians(degrees)) + tx * math.sin(math.radians(degrees)) + x_rotate = tx * math.cos(math.radians(degrees)) - ty * math.sin( + math.radians(degrees) + ) + y_rotate = ty * math.cos(math.radians(degrees)) + tx * math.sin( + math.radians(degrees) + ) # x = x_rotate + 480 # y = y_rotate + 640 @@ -65,7 +71,9 @@ def execute(self, userdata): w = Waypoint() # get the lift info - warped, analysis, M = w.get_lift_information(is_lift=False, is_sim=rospy.get_param("/is_simulation")) + warped, analysis, M = w.get_lift_information( + is_lift=False, is_sim=rospy.get_param("/is_simulation") + ) # save to the NN format image = Image.fromarray(warped) @@ -81,7 +89,6 @@ def execute(self, userdata): image.save(DEBUG_PATH + "/zoe_predict_pos_test" + str(TEST) + ".jpg") # image.save(DEBUG_PATH + "/zoe_predict_pos_test_before_rotate" + str(TEST) + ".jpg") - bbox, keypoint = make_prediction(image_path) if keypoint is None and bbox is None: keypoint = analysis[3] @@ -92,10 +99,8 @@ def execute(self, userdata): print(f"keypoint: {keypoint}") print(type(keypoint)) - # self.default.voice.speak("Let me talk to the audience.") - # transform to the global frame global_points = w.local_to_global_points(M=M, points=keypoint, is_lift=True) p = Pose() @@ -108,13 +113,16 @@ def execute(self, userdata): state = self.default.controllers.base_controller.sync_to_pose(p) - if DEBUG > 3: print(f" The sync to pose res in go to lift: {state}") # ensure sync to pose if not state: - state = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/wait_centre/pose')) 
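Aside: the arithmetic in `rotate_keypoint` above is, in essence, the standard 2D rotation about a pivot — translate so the pivot sits at the origin, rotate, translate back. A standalone sketch under that reading; the `rotate_about` helper and the sample values are illustrative, not part of the diff.

import math


def rotate_about(point, origin, degrees):
    """Rotate a 2D point by `degrees` around `origin` (counter-clockwise in
    x-right/y-up axes; visually clockwise when y points down, as in images)."""
    ox, oy = origin
    tx, ty = point[0] - ox, point[1] - oy  # translate pivot to (0, 0)
    c, s = math.cos(math.radians(degrees)), math.sin(math.radians(degrees))
    # Same form as rotate_keypoint: x' = tx*c - ty*s, y' = ty*c + tx*s
    return (tx * c - ty * s + ox, ty * c + tx * s + oy)  # translate back


print(rotate_about((1.0, 0.0), (0.0, 0.0), 90))  # approximately (0.0, 1.0)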
- rospy.loginfo("The sync to pose res in go to lift wait centre is {}".format(state)) - - return 'success' + state = self.default.controllers.base_controller.ensure_sync_to_pose( + get_pose_from_param("/wait_centre/pose") + ) + rospy.loginfo( + "The sync to pose res in go to lift wait centre is {}".format(state) + ) + + return "success" diff --git a/tasks/lift/src/lift/phases/phase2/states/inside_lift_sm.py b/tasks/lift/src/lift/phases/phase2/states/inside_lift_sm.py index 49c1657f2..50c8e27eb 100644 --- a/tasks/lift/src/lift/phases/phase2/states/inside_lift_sm.py +++ b/tasks/lift/src/lift/phases/phase2/states/inside_lift_sm.py @@ -11,17 +11,51 @@ class InsideLiftSM(smach.StateMachine): def __init__(self, default): - smach.StateMachine.__init__(self, outcomes=['success']) + smach.StateMachine.__init__(self, outcomes=["success"]) with self: - smach.StateMachine.add('CHECK_OPEN_DOOR', CheckOpenDoor(default), transitions={'success': 'FACE_PERSON', 'failed': 'CHECK_OPEN_DOOR'}) - smach.StateMachine.add('FACE_PERSON', FacePerson(default), transitions={'success': 'CHECK_AVAILABLE_EXIT', 'failed': 'FACE_PERSON'}) - smach.StateMachine.add('CHECK_AVAILABLE_EXIT', CheckAvailableExit(default), transitions={'success':'NEGOTIATE', 'failed': 'CHECK_OPEN_DOOR', 'wait': 'SCHEDULE_GOING_OUT'}) + smach.StateMachine.add( + "CHECK_OPEN_DOOR", + CheckOpenDoor(default), + transitions={"success": "FACE_PERSON", "failed": "CHECK_OPEN_DOOR"}, + ) + smach.StateMachine.add( + "FACE_PERSON", + FacePerson(default), + transitions={ + "success": "CHECK_AVAILABLE_EXIT", + "failed": "FACE_PERSON", + }, + ) + smach.StateMachine.add( + "CHECK_AVAILABLE_EXIT", + CheckAvailableExit(default), + transitions={ + "success": "NEGOTIATE", + "failed": "CHECK_OPEN_DOOR", + "wait": "SCHEDULE_GOING_OUT", + }, + ) - smach.StateMachine.add('SCHEDULE_GOING_OUT', ScheduleGoingOut(default), transitions={'success':'NAVIGATE_IN_LIFT', 'failed': 'CHECK_OPEN_DOOR'}) - smach.StateMachine.add('NAVIGATE_IN_LIFT', NavigateInLift(default), transitions={'success': 'CHECK_OPEN_DOOR'}) + smach.StateMachine.add( + "SCHEDULE_GOING_OUT", + ScheduleGoingOut(default), + transitions={ + "success": "NAVIGATE_IN_LIFT", + "failed": "CHECK_OPEN_DOOR", + }, + ) + smach.StateMachine.add( + "NAVIGATE_IN_LIFT", + NavigateInLift(default), + transitions={"success": "CHECK_OPEN_DOOR"}, + ) - smach.StateMachine.add('NEGOTIATE', Negotiate(default), transitions={'success':'success', 'failed': 'NEGOTIATE'}) + smach.StateMachine.add( + "NEGOTIATE", + Negotiate(default), + transitions={"success": "success", "failed": "NEGOTIATE"}, + ) if __name__ == "__main__": diff --git a/tasks/lift/src/lift/phases/phase2/states/navigate_in_lift.py b/tasks/lift/src/lift/phases/phase2/states/navigate_in_lift.py index d302dff37..f61d26ffa 100644 --- a/tasks/lift/src/lift/phases/phase2/states/navigate_in_lift.py +++ b/tasks/lift/src/lift/phases/phase2/states/navigate_in_lift.py @@ -10,9 +10,10 @@ import rospy from lift.defaults import DEBUG + class NavigateInLift(smach.State): def __init__(self, default): - smach.State.__init__(self, outcomes=['success']) + smach.State.__init__(self, outcomes=["success"]) self.default = default @@ -32,7 +33,6 @@ def safe_clusters_info(self, analytics, w, M): print("centers in safe") print(rospy.get_param("/lift/centers")) - def execute(self, userdata): w = Waypoint() @@ -40,7 +40,9 @@ def execute(self, userdata): is_from_schedule = rospy.get_param("/from_schedule") if not is_from_schedule: - status = 
self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/wait_in_front_lift_centre/pose')) + status = self.default.controllers.base_controller.ensure_sync_to_pose( + get_pose_from_param("/wait_in_front_lift_centre/pose") + ) # get the lift information warped, analytics, M = w.get_lift_information(is_lift=True, is_sim=True) @@ -50,8 +52,10 @@ def execute(self, userdata): if analytics[1] == 0: # if the lift is empty # go to predetermined place - state = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/lift_centre/pose')) - return 'success' + state = self.default.controllers.base_controller.ensure_sync_to_pose( + get_pose_from_param("/lift_centre/pose") + ) + return "success" # get the narrow space navigation service s = NarrowSpaceNavSrv() @@ -76,4 +80,4 @@ def execute(self, userdata): self.default.voice.speak("I have arrived at the lift.") - return 'success' + return "success" diff --git a/tasks/lift/src/lift/phases/phase2/states/negotiate.py b/tasks/lift/src/lift/phases/phase2/states/negotiate.py index ccf0a9f2b..037a2d7ad 100644 --- a/tasks/lift/src/lift/phases/phase2/states/negotiate.py +++ b/tasks/lift/src/lift/phases/phase2/states/negotiate.py @@ -7,24 +7,31 @@ from geometry_msgs.msg import PoseWithCovarianceStamped from std_msgs.msg import Empty from tiago_controllers.helpers.nav_map_helpers import clear_costmap -from interaction_module.srv import AudioAndTextInteraction, AudioAndTextInteractionRequest, \ - AudioAndTextInteractionResponse +from interaction_module.srv import ( + AudioAndTextInteraction, + AudioAndTextInteractionRequest, + AudioAndTextInteractionResponse, +) from tiago_controllers.helpers.nav_map_helpers import is_close_to_object, rank from lift.defaults import TEST, PLOT_SHOW, PLOT_SAVE, DEBUG_PATH, DEBUG, RASA -from lasr_object_detection_yolo.detect_objects_v8 import detect_objects, perform_detection, debug +from lasr_object_detection_yolo.detect_objects_v8 import ( + detect_objects, + perform_detection, + debug, +) from sensor_msgs.msg import PointCloud2 from lift.speech_helper import listen, affirm, hear_wait class Negotiate(smach.State): def __init__(self, default): - smach.State.__init__(self, outcomes=['success', 'failed']) + smach.State.__init__(self, outcomes=["success", "failed"]) self.default = default self.head_motions = { - 'look_straight': self.default.controllers.head_controller.look_straight, - 'look_right': self.default.controllers.head_controller.look_right, - 'look_left': self.default.controllers.head_controller.look_left + "look_straight": self.default.controllers.head_controller.look_straight, + "look_right": self.default.controllers.head_controller.look_right, + "look_left": self.default.controllers.head_controller.look_left, } def is_anyone_in_front_of_me(self): @@ -32,9 +39,13 @@ def is_anyone_in_front_of_me(self): for motion in self.head_motions: self.head_motions[motion]() rospy.sleep(1) - pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2) - polygon = rospy.get_param('/corners_lift') - detections, im = perform_detection(self.default, pcl_msg, polygon, ['person']) + pcl_msg = rospy.wait_for_message( + "/xtion/depth_registered/points", PointCloud2 + ) + polygon = rospy.get_param("/corners_lift") + detections, im = perform_detection( + self.default, pcl_msg, polygon, ["person"] + ) if len(detections) > 0: self.default.controllers.head_controller.look_straight() return True @@ -45,23 +56,26 @@ def execute(self, userdata): is_closer_to_door = not 
self.is_anyone_in_front_of_me()

         if is_closer_to_door:
-            self.default.voice.speak("I am the closest to the door so I have to exit first")
+            self.default.voice.speak(
+                "I am the closest to the door so I have to exit first"
+            )
             # clear costmap
             clear_costmap()
             # go to centre waiting area
             self.default.voice.speak("I will wait by the lift for you.")
             # TODO: here maybe switch to the wait_centre
-            res = self.default.controllers.base_controller.sync_to_pose(get_pose_from_param('/wait/pose'))
+            res = self.default.controllers.base_controller.sync_to_pose(
+                get_pose_from_param("/wait/pose")
+            )
         else:
             self.default.voice.speak("I am not the closest to the door.")
             self.default.voice.speak("I will wait for you to exit first")
             rospy.sleep(10)
-
         if is_closer_to_door:
             # clear costmap
             clear_costmap()
             # maybe take the lift info again
             self.default.voice.speak("Exiting the lift")

-        return 'success'
+        return "success"
diff --git a/tasks/lift/src/lift/phases/phase2/states/schedule_going_out.py b/tasks/lift/src/lift/phases/phase2/states/schedule_going_out.py
index b53934a5f..f76bf900c 100644
--- a/tasks/lift/src/lift/phases/phase2/states/schedule_going_out.py
+++ b/tasks/lift/src/lift/phases/phase2/states/schedule_going_out.py
@@ -5,19 +5,23 @@
 from tiago_controllers.helpers.pose_helpers import get_pose_from_param
 from tiago_controllers.helpers.nav_map_helpers import clear_costmap
 from lift.defaults import TEST, PLOT_SHOW, PLOT_SAVE, DEBUG_PATH, DEBUG, RASA
-from lasr_object_detection_yolo.detect_objects_v8 import detect_objects, perform_detection, debug
+from lasr_object_detection_yolo.detect_objects_v8 import (
+    detect_objects,
+    perform_detection,
+    debug,
+)
 from sensor_msgs.msg import PointCloud2
 from lift.speech_helper import listen, affirm, hear_wait


 class ScheduleGoingOut(smach.State):
     def __init__(self, default):
-        smach.State.__init__(self, outcomes=['success', 'failed'])
+        smach.State.__init__(self, outcomes=["success", "failed"])
         self.default = default
         self.head_motions = {
-            'look_straight': self.default.controllers.head_controller.look_straight,
-            'look_right': self.default.controllers.head_controller.look_right,
-            'look_left': self.default.controllers.head_controller.look_left
+            "look_straight": self.default.controllers.head_controller.look_straight,
+            "look_right": self.default.controllers.head_controller.look_right,
+            "look_left": self.default.controllers.head_controller.look_left,
         }

     def is_anyone_in_front_of_me(self):
@@ -25,39 +29,48 @@ def is_anyone_in_front_of_me(self):
         for motion in self.head_motions:
             self.head_motions[motion]()
             rospy.sleep(1)
-            pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
-            polygon = rospy.get_param('/corners_lift')
-            detections, im = perform_detection(self.default, pcl_msg, polygon, ['person'])
+            pcl_msg = rospy.wait_for_message(
+                "/xtion/depth_registered/points", PointCloud2
+            )
+            polygon = rospy.get_param("/corners_lift")
+            detections, im = perform_detection(
+                self.default, pcl_msg, polygon, ["person"]
+            )
             if len(detections) > 0:
                 self.default.controllers.head_controller.look_straight()
                 return True
         return False

-
     def execute(self, userdata):
         is_closer_to_door = not self.is_anyone_in_front_of_me()
         if is_closer_to_door:
-            self.default.voice.speak("I am the closest to the door so I have to exit to let you out")
+            self.default.voice.speak(
+                "I am the closest to the door so I have to exit to let you out"
+            )
             # clear costmap
             clear_costmap()
             # go to centre waiting area
-            state = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/wait_in_front_lift_centre/pose'))
+            state = self.default.controllers.base_controller.ensure_sync_to_pose(
+                get_pose_from_param("/wait_in_front_lift_centre/pose")
+            )
             if not state:
-                return 'failed'
+                return "failed"
             # turn around
             self.default.voice.speak("Just minding my own business!")
-            self.default.controllers.base_controller.rotate_to_face_object(object_name='/door/pose')
+            self.default.controllers.base_controller.rotate_to_face_object(
+                object_name="/door/pose"
+            )
             # wait for the person to exit
             rospy.sleep(2)
             self.default.voice.speak("I will wait a bit for you")
             rospy.sleep(5)
-
-
-            return 'success'
+            return "success"
         else:
             self.default.voice.speak("I am not the closest to the door.")
-            self.default.voice.speak("I will wait inside the lift because this is not my floor")
+            self.default.voice.speak(
+                "I will wait inside the lift because this is not my floor"
+            )
             rospy.set_param("/from_schedule", True)
-            return 'success'
+            return "success"
diff --git a/tasks/lift/src/lift/phases/phase2/states/start_phase_2.py b/tasks/lift/src/lift/phases/phase2/states/start_phase_2.py
index e096f3e7a..f15ea0480 100644
--- a/tasks/lift/src/lift/phases/phase2/states/start_phase_2.py
+++ b/tasks/lift/src/lift/phases/phase2/states/start_phase_2.py
@@ -1,15 +1,21 @@
 #!/usr/bin/env python3
 import smach
 from tiago_controllers.helpers.pose_helpers import get_pose_from_param
+
+
 class StartPhase2(smach.State):
     def __init__(self, default):
-        smach.State.__init__(self, outcomes=['success'])
+        smach.State.__init__(self, outcomes=["success"])
         self.default = default

     def execute(self, userdata):
         # self.default.datahub_stop_phase.publish(Int16(1))
-        self.default.voice.speak("Quickest update ever... I am starting Phase 2. I am going to the lift.")
+        self.default.voice.speak(
+            "Quickest update ever... I am starting Phase 2. I am going to the lift."
+        )

-        self.default.controllers.base_controller.sync_to_pose(get_pose_from_param('/phase2/pose'))
+        self.default.controllers.base_controller.sync_to_pose(
+            get_pose_from_param("/phase2/pose")
+        )

-        return 'success'
\ No newline at end of file
+        return "success"
diff --git a/tasks/lift/src/lift/phases/phase2/states/wait_for_people.py b/tasks/lift/src/lift/phases/phase2/states/wait_for_people.py
index 863fa4a69..1ce4ea140 100644
--- a/tasks/lift/src/lift/phases/phase2/states/wait_for_people.py
+++ b/tasks/lift/src/lift/phases/phase2/states/wait_for_people.py
@@ -4,17 +4,20 @@
 import json
 from lift.defaults import TEST, PLOT_SHOW, PLOT_SAVE, DEBUG_PATH, DEBUG, RASA
 from sensor_msgs.msg import PointCloud2
-from lasr_object_detection_yolo.detect_objects_v8 import detect_objects, perform_detection, debug
-#from speech_helper import listen, affirm, hear_wait, get_people_number
+from lasr_object_detection_yolo.detect_objects_v8 import (
+    detect_objects,
+    perform_detection,
+    debug,
+)
+
+# from speech_helper import listen, affirm, hear_wait, get_people_number


 class WaitForPeople(smach.State):
     def __init__(self, default):
-        smach.State.__init__(self, outcomes=['success', 'failed'])
+        smach.State.__init__(self, outcomes=["success", "failed"])
         self.default = default

-
     def safe_seg_info(self, detections):
         pos_people = []
         for i, person in detections:
@@ -40,23 +43,26 @@ def execute(self, userdata):
         # count = 2
         # self.default.voice.speak("I couldn't hear how many people, so I'm going to guess 2")

-        self.default.voice.speak("I will wait a bit here until you go inside. You see, I am a very very good robot!")
+        self.default.voice.speak(
+            "I will wait a bit here until you go inside. You see, I am a very very good robot!"
+        )
         rospy.sleep(5)

-        self.default.voice.speak("I will now move to the center of the lift waiting area")
-        state = self.default.controllers.base_controller.ensure_sync_to_pose(get_pose_from_param('/wait_in_front_lift_centre/pose'))
+        self.default.voice.speak(
+            "I will now move to the center of the lift waiting area"
+        )
+        state = self.default.controllers.base_controller.ensure_sync_to_pose(
+            get_pose_from_param("/wait_in_front_lift_centre/pose")
+        )

-        polygon = rospy.get_param('test_lift_points')
+        polygon = rospy.get_param("test_lift_points")
         pcl_msg = rospy.wait_for_message("/xtion/depth_registered/points", PointCloud2)
-        detections, im = perform_detection(self.default, pcl_msg, None, ["person"], "yolov8n-seg.pt")
+        detections, im = perform_detection(
+            self.default, pcl_msg, None, ["person"], "yolov8n-seg.pt"
+        )
         print("len detections")
         print(len(detections))
-
         self.safe_seg_info(detections)
-
-        return 'success'
-
-
-
+        return "success"
diff --git a/tasks/lift/src/lift/phases/phase3/phase3.py b/tasks/lift/src/lift/phases/phase3/phase3.py
index 1b70fb6df..63239f0df 100644
--- a/tasks/lift/src/lift/phases/phase3/phase3.py
+++ b/tasks/lift/src/lift/phases/phase3/phase3.py
@@ -1,11 +1,22 @@
 import smach
 from .states import GoToFinish, StartPhase3, Encounter

+
 class Phase3(smach.StateMachine):
     def __init__(self, default):
-        smach.StateMachine.__init__(self, outcomes=['success'])
+        smach.StateMachine.__init__(self, outcomes=["success"])
         with self:
-            smach.StateMachine.add('START_PHASE_3', StartPhase3(default), transitions={'success' : 'Encounter'})
-            smach.StateMachine.add('Encounter', Encounter(default), transitions={'success' : 'GO_TO_FINISH', 'failed' : 'Encounter'})
-            smach.StateMachine.add('GO_TO_FINISH', GoToFinish(default), transitions={'success' : 'success'})
+            smach.StateMachine.add(
+                "START_PHASE_3",
+                StartPhase3(default),
+                transitions={"success": "Encounter"},
+            )
+            smach.StateMachine.add(
+                "Encounter",
+                Encounter(default),
+                transitions={"success": "GO_TO_FINISH", "failed": "Encounter"},
+            )
+            smach.StateMachine.add(
+                "GO_TO_FINISH", GoToFinish(default), transitions={"success": "success"}
+            )
diff --git a/tasks/lift/src/lift/phases/phase3/states/__init__.py b/tasks/lift/src/lift/phases/phase3/states/__init__.py
index db9c5b558..2823fae5c 100644
--- a/tasks/lift/src/lift/phases/phase3/states/__init__.py
+++ b/tasks/lift/src/lift/phases/phase3/states/__init__.py
@@ -1,3 +1,3 @@
 from .start_phase_3 import StartPhase3
 from .go_to_finish import GoToFinish
-from .encounter import Encounter
\ No newline at end of file
+from .encounter import Encounter
diff --git a/tasks/lift/src/lift/phases/phase3/states/encounter.py b/tasks/lift/src/lift/phases/phase3/states/encounter.py
index 7812eb5a3..c40f95451 100755
--- a/tasks/lift/src/lift/phases/phase3/states/encounter.py
+++ b/tasks/lift/src/lift/phases/phase3/states/encounter.py
@@ -6,23 +6,39 @@
 import numpy as np
 from sensor_msgs.msg import PointCloud2
 from tiago_controllers.helpers.pose_helpers import get_pose_from_param
-from lasr_object_detection_yolo.detect_objects_v8 import detect_objects, perform_detection, debug, estimate_pose
+from lasr_object_detection_yolo.detect_objects_v8 import (
+    detect_objects,
+    perform_detection,
+    debug,
+    estimate_pose,
+)

 HORIZONTAL = 0.8
 VERTICAL = 0.3

+
 def euclidean_distance(point1, point2):
-    return np.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2 + (point1[2] - point2[2]) ** 2)
+    return np.sqrt(
+        (point1[0] - point2[0]) ** 2
+        + (point1[1] - point2[1]) ** 2
+        + (point1[2] - point2[2]) ** 2
+    )
+
+
 class Encounter(smach.State):
     def __init__(self, default):
-        smach.State.__init__(self, outcomes=['success', 'failed'])
+        smach.State.__init__(self, outcomes=["success", "failed"])
         self.default = default
         # stop head manager
         if "/pal_startup_control/stop" in rosservice.get_service_list():
-            self.stop_head_manager = rospy.ServiceProxy("/pal_startup_control/stop", StartupStop)
-            self.start_head_manager = rospy.ServiceProxy("/pal_startup_control/start", StartupStart)
+            self.stop_head_manager = rospy.ServiceProxy(
+                "/pal_startup_control/stop", StartupStop
+            )
+            self.start_head_manager = rospy.ServiceProxy(
+                "/pal_startup_control/start", StartupStart
+            )

     def order_indices(self, current, previous):
         if len(current) == 1 and len(previous) == 1:
@@ -31,7 +47,14 @@ def order_indices(self, current, previous):
         indices = []
         if len(current) == len(previous):
             for person_i in range(len(current)):
-                diffs = list(map(lambda lo: np.linalg.norm(np.array(current[person_i]) - np.array(lo)), previous))
+                diffs = list(
+                    map(
+                        lambda lo: np.linalg.norm(
+                            np.array(current[person_i]) - np.array(lo)
+                        ),
+                        previous,
+                    )
+                )
                 indices.append(diffs.index(min(diffs)))

         return indices
@@ -40,11 +63,15 @@ def execute(self, userdata):
         self.default.controllers.head_controller.look_straight()
         self.default.controllers.torso_controller.sync_reach_to(0.25)

-        movement = [self.default.controllers.head_controller.look_straight,
-                    self.default.controllers.head_controller.look_right,
-                    self.default.controllers.head_controller.look_left]
+        movement = [
+            self.default.controllers.head_controller.look_straight,
+            self.default.controllers.head_controller.look_right,
+            self.default.controllers.head_controller.look_left,
+        ]

-        self.default.voice.speak("This is the encounter situation. I will be looking for someone who is excited to see me!!")
+        self.default.voice.speak(
+            "This is the encounter situation. I will be looking for someone who is excited to see me!!"
+        )

         pose = get_pose_from_param("/phase3_lift/pose")
         polygon = rospy.get_param("/corners_arena")
@@ -58,8 +85,12 @@

         while not is_found:
             rospy.sleep(2)
-            pcl_msg = rospy.wait_for_message('/xtion/depth_registered/points', PointCloud2)
-            detections, im = perform_detection(self.default, pcl_msg, polygon, ['person'])
+            pcl_msg = rospy.wait_for_message(
+                "/xtion/depth_registered/points", PointCloud2
+            )
+            detections, im = perform_detection(
+                self.default, pcl_msg, polygon, ["person"]
+            )

             pos_people = []
             for j, person in detections:
@@ -68,19 +99,28 @@
             frames_locations.append(pos_people)
             rospy.logwarn(frames_locations)

-            if len(frames_locations) == 2 and len(frames_locations[0]) > 0 and\
-                    len(frames_locations[0]) == len(frames_locations[1]):
+            if (
+                len(frames_locations) == 2
+                and len(frames_locations[0]) > 0
+                and len(frames_locations[0]) == len(frames_locations[1])
+            ):
                 rospy.loginfo("TRUE")
                 # swap if needed
-                match_indices = self.order_indices(frames_locations[1], frames_locations[0])
+                match_indices = self.order_indices(
+                    frames_locations[1], frames_locations[0]
+                )
                 if not (match_indices == [0]):
-                    frames_locations[1] = [frames_locations[1][match_i] for match_i in match_indices]
+                    frames_locations[1] = [
+                        frames_locations[1][match_i] for match_i in match_indices
+                    ]

                 # CALC VECS
                 rospy.loginfo(len(frames_locations[1]))
                 for loc_vect in range(len(frames_locations[1])):
-                    a2 = np.array(frames_locations[1][loc_vect]) - np.array(frames_locations[0][loc_vect])
+                    a2 = np.array(frames_locations[1][loc_vect]) - np.array(
+                        frames_locations[0][loc_vect]
+                    )
                     if np.linalg.norm(a2) > 0.05:
                         rospy.loginfo(f"Norm { np.linalg.norm(a2)}")
                         headPoint = np.array(frames_locations[1][loc_vect])

@@ -99,13 +139,18 @@
             currentFrame += 1

         print(headPoint)
-        self.default.controllers.base_controller.sync_face_to(headPoint[0], headPoint[1])
+        self.default.controllers.base_controller.sync_face_to(
+            headPoint[0], headPoint[1]
+        )
         self.default.controllers.torso_controller.sync_reach_to(0.2)
         self.default.controllers.head_controller.look_straight()
-        self.default.voice.speak("You seem very excited to see me. I am also excited to see you.")
-        return 'success'
+        self.default.voice.speak(
+            "You seem very excited to see me. I am also excited to see you."
+        )
+        return "success"
+


-if __name__ == '__main__':
-    rospy.init_node("encounter", anonymous=True)
\ No newline at end of file
+if __name__ == "__main__":
+    rospy.init_node("encounter", anonymous=True)
diff --git a/tasks/lift/src/lift/phases/phase3/states/go_to_finish.py b/tasks/lift/src/lift/phases/phase3/states/go_to_finish.py
index 02df42398..d6b2fe43a 100755
--- a/tasks/lift/src/lift/phases/phase3/states/go_to_finish.py
+++ b/tasks/lift/src/lift/phases/phase3/states/go_to_finish.py
@@ -9,17 +9,20 @@

 class GoToFinish(smach.State):
     def __init__(self, default):
-        smach.State.__init__(self, outcomes=['success'])
+        smach.State.__init__(self, outcomes=["success"])
         self.default = default

     def execute(self, userdata):
-        rospy.loginfo('Executing state GoToFinish')
-        self.default.controllers.base_controller.sync_to_pose(get_pose_from_param('/starting/pose'))
+        rospy.loginfo("Executing state GoToFinish")
+        self.default.controllers.base_controller.sync_to_pose(
+            get_pose_from_param("/starting/pose")
+        )
         self.default.voice.speak("Thank you for the ride!")
         # self.default.datahub_stop_phase.publish(Int16(3))
         # self.default.datahub_stop_episode.publish(Empty())

-        return 'success'
+        return "success"

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     rospy.init_node("go_to_finish", anonymous=True)
diff --git a/tasks/lift/src/lift/phases/phase3/states/start_phase_3.py b/tasks/lift/src/lift/phases/phase3/states/start_phase_3.py
index d64f7d601..b050e48f0 100644
--- a/tasks/lift/src/lift/phases/phase3/states/start_phase_3.py
+++ b/tasks/lift/src/lift/phases/phase3/states/start_phase_3.py
@@ -3,16 +3,19 @@
 from std_msgs.msg import Int16
 from tiago_controllers.helpers.pose_helpers import get_pose_from_param

+
 class StartPhase3(smach.State):
     def __init__(self, default):
-        smach.State.__init__(self, outcomes=['success'])
+        smach.State.__init__(self, outcomes=["success"])
         self.default = default

     def execute(self, userdata):
         self.default.voice.speak("A final update - I am starting Phase 3.")
-        res = self.default.controllers.base_controller.sync_to_pose(get_pose_from_param('/wait/pose'))
+        res = self.default.controllers.base_controller.sync_to_pose(
+            get_pose_from_param("/wait/pose")
+        )

         # self.default.datahub_start_phase.publish(Int16(3))


-        return 'success'
\ No newline at end of file
+        return "success"
diff --git a/tasks/lift/src/lift/sm.py b/tasks/lift/src/lift/sm.py
index df272f32e..1be68d213 100755
--- a/tasks/lift/src/lift/sm.py
+++ b/tasks/lift/src/lift/sm.py
@@ -3,12 +3,19 @@
 from lift.phases import Phase1, Phase2, Phase3
 from lift.default import Default

+
 class Lift(smach.StateMachine):
     def __init__(self):
-        smach.StateMachine.__init__(self, outcomes=['success'])
+        smach.StateMachine.__init__(self, outcomes=["success"])
         self.default = Default()

         with self:
-            smach.StateMachine.add('PHASE1', Phase1(self.default), transitions={'success' : 'PHASE2'})
-            smach.StateMachine.add('PHASE2', Phase2(self.default), transitions={'success' : 'PHASE3'})
-            smach.StateMachine.add('PHASE3', Phase3(self.default), transitions={'success' : 'success'})
\ No newline at end of file
+            smach.StateMachine.add(
+                "PHASE1", Phase1(self.default), transitions={"success": "PHASE2"}
+            )
+            smach.StateMachine.add(
+                "PHASE2", Phase2(self.default), transitions={"success": "PHASE3"}
+            )
+            smach.StateMachine.add(
+                "PHASE3", Phase3(self.default), transitions={"success": "success"}
+            )
diff --git a/tasks/lift/src/lift/speech_helper.py b/tasks/lift/src/lift/speech_helper.py
index 7d049fe0f..f2579e14c 100755
--- a/tasks/lift/src/lift/speech_helper.py
+++ b/tasks/lift/src/lift/speech_helper.py
@@ -15,7 +15,7 @@ def listen(default):

 def affirm(default):
     resp = listen(default)
-    if resp['intent']['name'] != 'affirm':
+    if resp["intent"]["name"] != "affirm":
         default.voice.speak("Sorry, I didn't get that, please say yes or no")
         return affirm(default)
     choices = resp["entities"].get("choice", None)
@@ -39,14 +39,16 @@ def hear_wait(default):
         else:
             return True
     else:
         return False


 def get_people_number(default):
     resp = listen(default)
     if resp["intent"]["name"] != "negotiate_lift":
-        default.voice.speak("Sorry, I misheard you, could you say again how many people?")
+        default.voice.speak(
+            "Sorry, I misheard you, could you say again how many people?"
+        )
         return get_people_number(default)
     people = resp["entities"].get("people", [])
     if not people:
@@ -68,11 +70,13 @@ def get_floor(default):
         return get_floor()
     floor_number = int(floor[0]["value"])
     default.voice.speak("I heard that we are on floor {}".format(floor_number))
-    default.voice.speak("Is this correct? Please answer yes, that is correct or no, that is wrong")
+    default.voice.speak(
+        "Is this correct? Please answer yes, that is correct or no, that is wrong"
+    )
     answer = affirm(default)
     if answer == "yes":
         default.voice.speak("Cool stuff!")
         return floor_number
     else:
-        return 'failed'
+        return "failed"
diff --git a/tasks/receptionist/setup.py b/tasks/receptionist/setup.py
index 93df63a41..ee0a8ad5d 100644
--- a/tasks/receptionist/setup.py
+++ b/tasks/receptionist/setup.py
@@ -5,8 +5,7 @@

 # fetch values from package.xml
 setup_args = generate_distutils_setup(
-    packages=['receptionist'],
-    package_dir={'': 'src'}
+    packages=["receptionist"], package_dir={"": "src"}
 )

-setup(**setup_args)
\ No newline at end of file
+setup(**setup_args)
diff --git a/tasks/receptionist/src/receptionist/state_machine.py b/tasks/receptionist/src/receptionist/state_machine.py
index 05e038bfb..8ab24d0d2 100755
--- a/tasks/receptionist/src/receptionist/state_machine.py
+++ b/tasks/receptionist/src/receptionist/state_machine.py
@@ -12,7 +12,6 @@


 class Receptionist(smach.StateMachine):
-
     def __init__(
         self,
         wait_pose: Pose,
@@ -21,11 +20,9 @@ def __init__(
         seat_area: Polygon,
         host_data: dict,
     ):
-
         smach.StateMachine.__init__(self, outcomes=["succeeded", "failed"])

         with self:
-
             self.userdata.guest_data = {"host": host_data, "guest1": {}, "guest2": {}}

             smach.StateMachine.add(
diff --git a/tasks/receptionist/src/receptionist/states/seat_guest.py b/tasks/receptionist/src/receptionist/states/seat_guest.py
index 4090ea318..11da964fe 100755
--- a/tasks/receptionist/src/receptionist/states/seat_guest.py
+++ b/tasks/receptionist/src/receptionist/states/seat_guest.py
@@ -9,11 +9,9 @@


 class SeatGuest(smach.StateMachine):
-
     _motions: List[str] = ["look_down_left", "look_down_right", "look_down_centre"]

     class ProcessDetections(smach.State):
-
         def __init__(self):
             smach.State.__init__(
                 self,
@@ -42,7 +40,6 @@ def execute(self, userdata) -> str:
             )

             for seat in seat_detections:
-
                 seat_polygon: Polygon = Polygon(np.array(seat.xyseg).reshape(-1, 2))
                 seat_is_empty: bool = True
                 for person_polygon in person_polygons:
@@ -64,7 +61,6 @@ def __init__(
     ):
         smach.StateMachine.__init__(self, outcomes=["succeeded", "failed"])
         with self:
-
             # TODO: stop doing this
             self.userdata.people_detections = []
             self.userdata.seat_detections = []
@@ -79,7 +75,6 @@ def __init__(
             )

             with motion_iterator:
-
                 container_sm = smach.StateMachine(
                     outcomes=["succeeded", "failed", "continue"],
                     input_keys=["motion", "people_detections", "seat_detections"],