
Commit 768ad46

Change

Benteng Ma committed Dec 6, 2023
1 parent 5fe6a93
Showing 6 changed files with 124 additions and 2 deletions.
1 change: 1 addition & 0 deletions common/helpers/numpy2message/doc/usage.md
@@ -0,0 +1 @@
Helpers for sending numpy arrays within ROS messages.
19 changes: 19 additions & 0 deletions common/helpers/numpy2message/src/__init__.py
@@ -0,0 +1,19 @@
import numpy as np


def numpy2message(np_array: np.ndarray) -> tuple[bytes, list[int], str]:
    # serialize an array into raw bytes plus the shape and dtype needed to rebuild it
    data = np_array.tobytes()
    shape = list(np_array.shape)
    dtype = str(np_array.dtype)
    return data, shape, dtype


def message2numpy(data: bytes, shape: list[int], dtype: str) -> np.ndarray:
    # rebuild the array from raw bytes using the transmitted shape and dtype
    array_shape = tuple(shape)
    array_dtype = np.dtype(dtype)

    deserialized_array = np.frombuffer(data, dtype=array_dtype)
    deserialized_array = deserialized_array.reshape(array_shape)

    return deserialized_array
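For illustration, a minimal round-trip sketch of these helpers (not part of the commit; the array size and dtype are arbitrary placeholders):

import numpy as np
from numpy2message import numpy2message, message2numpy

# serialize an arbitrary mask-like array into message-friendly fields
mask = np.random.randint(0, 2, size=(480, 640), dtype=np.uint8)
data, shape, dtype = numpy2message(mask)

# ...place data, shape and dtype into the message fields, transport them, then rebuild...
recovered = message2numpy(data, shape, dtype)
assert recovered.shape == mask.shape and (recovered == mask).all()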

@@ -1,6 +1,13 @@
# Image to run inference on
sensor_msgs/Image image_raw

uint8[] head_mask_data # For serialized array data
uint32[] head_mask_shape # To store the shape of the array
string head_mask_dtype # Data type of the array elements

uint8[] torso_mask_data
uint32[] torso_mask_shape
string torso_mask_dtype
---

# Detection result
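As an illustrative sketch only (not part of this commit), a client could fill the serialized-mask request fields above with numpy2message before calling the service that the node below registers as /torch/detect/face_features; the blank image and zero masks are placeholders for real camera and BodyPix output:

import rospy
import numpy as np
from numpy2message import numpy2message
from sensor_msgs.msg import Image
from lasr_vision_msgs.srv import TorchFaceFeatureDetection

rospy.wait_for_service('/torch/detect/face_features')
face_features = rospy.ServiceProxy('/torch/detect/face_features', TorchFaceFeatureDetection)

image_msg = Image()  # placeholder; in practice the camera frame to run inference on
head_mask = np.zeros((480, 640), dtype=np.uint8)   # placeholder BodyPix head mask
torso_mask = np.zeros((480, 640), dtype=np.uint8)  # placeholder BodyPix torso mask

head_data, head_shape, head_dtype = numpy2message(head_mask)
torso_data, torso_shape, torso_dtype = numpy2message(torso_mask)

# positional arguments follow the field order of the service request above
response = face_features(image_msg, head_data, head_shape, head_dtype,
                         torso_data, torso_shape, torso_dtype)
print(response.detected_features)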
62 changes: 61 additions & 1 deletion common/vision/lasr_vision_torch/nodes/service
@@ -3,19 +3,27 @@ from lasr_vision_msgs.msg import FeatureWithColour, ColourPrediction
from colour_estimation import closest_colours, RGB_COLOURS, RGB_HAIR_COLOURS
from cv2_img import msg_to_cv2_img
from torch_module.helpers import binary_erosion_dilation, median_color_float
from numpy2message import message2numpy

import numpy as np
import torch
import rospy
import lasr_vision_torch


model = lasr_vision_torch.load_face_classifier_model()


def detect(request: TorchFaceFeatureDetectionRequest) -> TorchFaceFeatureDetectionResponse:
    # decode the image
    rospy.loginfo('Decoding')
    frame = msg_to_cv2_img(request.image_raw)
    torso_mask_data, torso_mask_shape, torso_mask_dtype = request.torso_mask_data, request.torso_mask_shape, request.torso_mask_dtype
    head_mask_data, head_mask_shape, head_mask_dtype = request.head_mask_data, request.head_mask_shape, request.head_mask_dtype
    torsal_mask = message2numpy(torso_mask_data, torso_mask_shape, torso_mask_dtype)
    head_mask = message2numpy(head_mask_data, head_mask_shape, head_mask_dtype)
    rospy.logdebug(f'Torso mask shape: {torso_mask_shape}')
    rospy.logdebug(f'Head mask shape: {head_mask_shape}')

    # 'hair', 'hat', 'glasses', 'face'
    input_image = torch.from_numpy(frame).permute(2, 0, 1).unsqueeze(0).float()
@@ -62,7 +70,59 @@ def detect(request: TorchFaceFeatureDetectionRequest) -> TorchFaceFeatureDetectionResponse:
    ]

    return response

rospy.init_node('torch_service')
rospy.Service('/torch/detect/face_features', TorchFaceFeatureDetection, detect)
2 changes: 1 addition & 1 deletion skills/scripts/unit_test_describe_people.py
@@ -16,6 +16,6 @@

sm.execute()

print('\n\nDetected people:', sm.userdata['people'])
# print('\n\nDetected people:', sm.userdata['people'])

rospy.signal_shutdown("down")
35 changes: 35 additions & 0 deletions skills/src/lasr_skills/describe_people.py
@@ -9,6 +9,7 @@
from colour_estimation import closest_colours, RGB_COLOURS
from lasr_vision_msgs.msg import BodyPixMaskRequest, ColourPrediction, FeatureWithColour
from lasr_vision_msgs.srv import YoloDetection, BodyPixDetection, TorchFaceFeatureDetection
from numpy2message import numpy2message

from .vision import GetImage, ImageMsgToCv2

@@ -136,6 +137,40 @@ def execute(self, userdata):
        # keep track
        features = []

        # process part masks
        for (bodypix_mask, part) in zip(userdata.bodypix_masks, ['torso', 'head']):
            part_mask = np.array(bodypix_mask.mask).reshape(
                bodypix_mask.shape[0], bodypix_mask.shape[1])

            # filter out part for current person segmentation
            try:
                part_mask[mask_bin == 0] = 0
            except Exception:
                rospy.logdebug(f'|> Failed to check {part} is visible')
                continue

            if part_mask.any():
                rospy.logdebug(f'|> Person has {part} visible')
            else:
                rospy.logdebug(
                    f'|> Person does not have {part} visible')
                continue

            if part == 'torso':
                torso_mask = part_mask
            elif part == 'head':
                head_mask = part_mask

        # assumes both masks were assigned above (i.e. torso and head are visible)
        torso_mask_data, torso_mask_shape, torso_mask_dtype = numpy2message(torso_mask)
        head_mask_data, head_mask_shape, head_mask_dtype = numpy2message(head_mask)

        full_frame = cv2_img.cv2_img_to_msg(face_region)
        # argument order follows the TorchFaceFeatureDetection request definition:
        # image_raw, then the head mask fields, then the torso mask fields
        features.extend(self.torch_face_features(
            full_frame,
            head_mask_data, head_mask_shape, head_mask_dtype,
            torso_mask_data, torso_mask_shape, torso_mask_dtype,
        ).detected_features)

        # process part masks
        for (bodypix_mask, part) in zip(userdata.bodypix_masks, ['torso', 'head']):
            part_mask = np.array(bodypix_mask.mask).reshape(
