fixed issues in the service
Benteng Ma committed Feb 23, 2024
1 parent aa8ec6e commit c65ccd1
Showing 4 changed files with 142 additions and 143 deletions.
1 change: 0 additions & 1 deletion common/vision/lasr_vision_msgs/msg/Description.msg

This file was deleted.

6 changes: 3 additions & 3 deletions common/vision/lasr_vision_torch/nodes/service
@@ -1,4 +1,4 @@
-from lasr_vision_msgs.srv import TorchFaceFeatureDetection, TorchFaceFeatureDetectionRequest, TorchFaceFeatureDetectionResponse, TorchFaceFeatureDetectionDescriptionRequest, TorchFaceFeatureDetectionDescriptionResponse
+from lasr_vision_msgs.srv import TorchFaceFeatureDetection, TorchFaceFeatureDetectionRequest, TorchFaceFeatureDetectionResponse, TorchFaceFeatureDetectionDescription, TorchFaceFeatureDetectionDescriptionRequest, TorchFaceFeatureDetectionDescriptionResponse
from lasr_vision_msgs.msg import FeatureWithColour, ColourPrediction
from cv2_img import msg_to_cv2_img
from torch_module.helpers import binary_erosion_dilation, median_color_float
@@ -25,7 +25,7 @@ def detect(request: TorchFaceFeatureDetectionDescriptionRequest) -> TorchFaceFea
torso_frame = lasr_vision_torch.extract_mask_region(full_frame, torso_mask.astype(np.uint8), expand_x=0.2, expand_y=0.0)

# class_pred, colour_pred = lasr_vision_torch.predict_frame(head_frame, torso_frame, full_frame, head_mask, torso_mask, lasr_vision_torch.model, lasr_vision_torch.thresholds_mask, lasr_vision_torch.erosion_iterations, lasr_vision_torch.dilation_iterations, lasr_vision_torch.thresholds_pred)
-rst_str = lasr_vision_torch.predict_frame(head_frame, torso_frame, full_frame, head_mask, torso_mask, lasr_vision_torch.model, lasr_vision_torch.thresholds_mask, lasr_vision_torch.erosion_iterations, lasr_vision_torch.dilation_iterations, lasr_vision_torch.thresholds_pred)
+rst_str = lasr_vision_torch.predict_frame(head_frame, torso_frame, full_frame, head_mask, torso_mask,)

response = TorchFaceFeatureDetectionDescriptionRequest()
response.description = rst_str
@@ -97,6 +97,6 @@ def detect(request: TorchFaceFeatureDetectionDescriptionRequest) -> TorchFaceFea
# test test

rospy.init_node('torch_service')
-rospy.Service('/torch/detect/face_features', TorchFaceFeatureDetection, detect)
+rospy.Service('/torch/detect/face_features', TorchFaceFeatureDetectionDescription, detect)
rospy.loginfo('Torch service started')
rospy.spin()
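
For reference, a minimal client-side sketch of calling the renamed service. The request layout (full image followed by the serialised head and torso masks with their shapes and dtypes) and the positional field order are assumptions inferred from the call site in describe_people.py; the .srv definition itself is not part of this diff.

import numpy as np
import rospy
import cv2_img  # image <-> ROS message helpers, as used in describe_people.py
from lasr_vision_msgs.srv import TorchFaceFeatureDetectionDescription

rospy.init_node('face_features_client')
rospy.wait_for_service('/torch/detect/face_features')
face_features = rospy.ServiceProxy(
    '/torch/detect/face_features', TorchFaceFeatureDetectionDescription)

# Dummy inputs purely for illustration; in the skill these come from the YOLO
# person segmentation and the BodyPix part masks.
img = np.zeros((480, 640, 3), dtype=np.uint8)
head_mask = np.zeros((480, 640), dtype=np.uint8)
torso_mask = np.zeros((480, 640), dtype=np.uint8)

full_frame = cv2_img.cv2_img_to_msg(img)
# Serialise the masks the way numpy2message is assumed to (bytes, shape, dtype name).
head_mask_data, head_mask_shape, head_mask_dtype = head_mask.tobytes(), list(head_mask.shape), str(head_mask.dtype)
torso_mask_data, torso_mask_shape, torso_mask_dtype = torso_mask.tobytes(), list(torso_mask.shape), str(torso_mask.dtype)

response = face_features(full_frame,
                         head_mask_data, head_mask_shape, head_mask_dtype,
                         torso_mask_data, torso_mask_shape, torso_mask_dtype)
rospy.loginfo(response.description)
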
@@ -251,7 +251,7 @@ def extract_mask_region(frame, mask, expand_x=0.5, expand_y=0.5):
p = Predictor(model, torch.device('cpu'), CelebAMaskHQCategoriesAndAttributes)


-def predict_frame(head_frame, torso_frame, full_frame, head_mask, torso_mask, model, thresholds_mask, erosion_iterations, dilation_iterations, thresholds_pred):
+def predict_frame(head_frame, torso_frame, full_frame, head_mask, torso_mask,):
full_frame = cv2.cvtColor(full_frame, cv2.COLOR_BGR2RGB)
head_frame = cv2.cvtColor(head_frame, cv2.COLOR_BGR2RGB)
torso_frame = cv2.cvtColor(torso_frame, cv2.COLOR_BGR2RGB)
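
Dropping the model, threshold and erosion/dilation parameters suggests predict_frame now relies on module-level state (for example the Predictor p built above) instead of per-call arguments. A minimal sketch of that pattern, assuming placeholder names for anything this diff does not show:

import cv2

# Placeholder module-level default; the repository's real globals (model,
# thresholds, erosion/dilation settings, p) are configured at import time.
_PREDICTOR = None  # stands in for p = Predictor(...)

def predict_frame(head_frame, torso_frame, full_frame, head_mask, torso_mask):
    # Convert OpenCV BGR frames to RGB before inference, as in the diff above.
    full_frame = cv2.cvtColor(full_frame, cv2.COLOR_BGR2RGB)
    head_frame = cv2.cvtColor(head_frame, cv2.COLOR_BGR2RGB)
    torso_frame = cv2.cvtColor(torso_frame, cv2.COLOR_BGR2RGB)
    # Downstream inference would use _PREDICTOR and the module-level settings;
    # a placeholder result stands in for the real description string here.
    return 'placeholder description'
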
276 changes: 138 additions & 138 deletions skills/src/lasr_skills/describe_people.py
@@ -83,7 +83,7 @@ def __init__(self):
'succeeded': 'FEATURE_EXTRACTION'})
smach.StateMachine.add('FEATURE_EXTRACTION', self.FeatureExtraction(), transitions={
'succeeded': 'succeeded'})

class SegmentYolo(smach.State):
'''
Segment using YOLO
@@ -140,7 +140,7 @@ def execute(self, userdata):
rospy.loginfo("COORD_XY:::%s" % str(neck_coord))
xyz = userdata.xyz
# xyz = np.nanmean(xyz, axis=2)
rospy.loginfo("COORD_Z:::%s" % str(xyz[neck_coord[0]][neck_coord[1]]))
# rospy.loginfo("COORD_Z:::%s" % str(xyz[neck_coord[0]][neck_coord[1]]))
# point_head_client(xyz, neck_coord[0], neck_coord[1], client)
return 'succeeded'
except rospy.ServiceException as e:
@@ -161,140 +161,140 @@ def __init__(self):
'/torch/detect/face_features', TorchFaceFeatureDetectionDescription)

def execute(self, userdata):
try:
if len(userdata.people_detections) == 0:
rospy.logerr("Couldn't find anyone!")
return 'failed'
elif len(userdata.people_detections) == 1:
rospy.logdebug("There is one person.")
else:
rospy.logdebug(
f"There are {len(userdata.people_detections)} people.")

img = userdata.img
height, width, _ = img.shape

people = []

for person in userdata.people_detections:
rospy.logdebug(
f"\n\nFound person with confidence {person.confidence}!")

# mask for this person
mask_image = np.zeros((height, width), np.uint8)
contours = np.array(person.xyseg).reshape(-1, 2)
cv2.fillPoly(mask_image, pts=np.int32(
[contours]), color=(255, 255, 255))
mask_bin = mask_image > 128

# # keep track
# features = []

# process part masks
for (bodypix_mask, part) in zip(userdata.bodypix_masks, ['torso', 'head']):
part_mask = np.array(bodypix_mask.mask).reshape(
bodypix_mask.shape[0], bodypix_mask.shape[1])

# filter out part for current person segmentation
try:
part_mask[mask_bin == 0] = 0
except Exception:
rospy.logdebug('|> Failed to check {part} is visible')
continue

if part_mask.any():
rospy.logdebug(f'|> Person has {part} visible')
else:
rospy.logdebug(
f'|> Person does not have {part} visible')
continue

if part == 'torso':
torso_mask = part_mask
elif part == 'head':
head_mask = part_mask

torso_mask_data, torso_mask_shape, torso_mask_dtype = numpy2message(torso_mask)
head_mask_data, head_mask_shape, head_mask_dtype = numpy2message(head_mask)

full_frame = cv2_img.cv2_img_to_msg(img)
# features.extend(self.torch_face_features(
# full_frame,
# head_mask_data, head_mask_shape, head_mask_dtype,
# torso_mask_data, torso_mask_shape, torso_mask_dtype,
# ).detected_features)

rst = self.torch_face_features(
full_frame,
head_mask_data, head_mask_shape, head_mask_dtype,
torso_mask_data, torso_mask_shape, torso_mask_dtype,
).description

# # process part masks
# for (bodypix_mask, part) in zip(userdata.bodypix_masks, ['torso', 'head']):
# part_mask = np.array(bodypix_mask.mask).reshape(
# bodypix_mask.shape[0], bodypix_mask.shape[1])

# # filter out part for current person segmentation
# try:
# part_mask[mask_bin == 0] = 0
# except Exception:
# rospy.logdebug('|> Failed to check {part} is visible')
# continue

# if part_mask.any():
# rospy.logdebug(f'|> Person has {part} visible')
# else:
# rospy.logdebug(
# f'|> Person does not have {part} visible')
# continue

# # do colour processing on the torso
# if part == 'torso':
# try:
# features.append(FeatureWithColour("torso", [
# ColourPrediction(colour, distance)
# for colour, distance
# in closest_colours(np.median(img[part_mask == 1], axis=0), RGB_COLOURS)
# ]))
# except Exception as e:
# rospy.logerr(f"Failed to process colour: {e}")

# # do feature extraction on the head
# if part == 'head':
# try:
# # crop out face
# face_mask = np.array(userdata.bodypix_masks[1].mask).reshape(
# userdata.bodypix_masks[1].shape[0], userdata.bodypix_masks[1].shape[1])

# mask_image_only_face = mask_image.copy()
# mask_image_only_face[face_mask == 0] = 0

# face_region = cv2_img.extract_mask_region(
# img, mask_image_only_face)
# if face_region is None:
# raise Exception(
# "Failed to extract mask region")

# msg = cv2_img.cv2_img_to_msg(face_region)
# features.extend(self.torch_face_features(
# msg, False).detected_features)
# except Exception as e:
# rospy.logerr(f"Failed to process extraction: {e}")

people.append({
'detection': person,
'features': rst
})

# Userdata:
# - people
# - - detection (YOLO)
# - parts
# - - part
# - mask

userdata['people'] = people
except Exception:
#try:
if len(userdata.people_detections) == 0:
rospy.logerr("Couldn't find anyone!")
return 'failed'
return 'succeeded'
elif len(userdata.people_detections) == 1:
rospy.logdebug("There is one person.")
else:
rospy.logdebug(
f"There are {len(userdata.people_detections)} people.")

img = userdata.img
height, width, _ = img.shape

people = []

for person in userdata.people_detections:
rospy.logdebug(
f"\n\nFound person with confidence {person.confidence}!")

# mask for this person
mask_image = np.zeros((height, width), np.uint8)
contours = np.array(person.xyseg).reshape(-1, 2)
cv2.fillPoly(mask_image, pts=np.int32(
[contours]), color=(255, 255, 255))
mask_bin = mask_image > 128

# # keep track
# features = []

# process part masks
for (bodypix_mask, part) in zip(userdata.bodypix_masks, ['torso', 'head']):
part_mask = np.array(bodypix_mask.mask).reshape(
bodypix_mask.shape[0], bodypix_mask.shape[1])

# filter out part for current person segmentation
try:
part_mask[mask_bin == 0] = 0
except Exception:
rospy.logdebug('|> Failed to check {part} is visible')
continue

if part_mask.any():
rospy.logdebug(f'|> Person has {part} visible')
else:
rospy.logdebug(
f'|> Person does not have {part} visible')
continue

if part == 'torso':
torso_mask = part_mask
elif part == 'head':
head_mask = part_mask

torso_mask_data, torso_mask_shape, torso_mask_dtype = numpy2message(torso_mask)
head_mask_data, head_mask_shape, head_mask_dtype = numpy2message(head_mask)

full_frame = cv2_img.cv2_img_to_msg(img)
# features.extend(self.torch_face_features(
# full_frame,
# head_mask_data, head_mask_shape, head_mask_dtype,
# torso_mask_data, torso_mask_shape, torso_mask_dtype,
# ).detected_features)

rst = self.torch_face_features(
full_frame,
head_mask_data, head_mask_shape, head_mask_dtype,
torso_mask_data, torso_mask_shape, torso_mask_dtype,
).description

# # process part masks
# for (bodypix_mask, part) in zip(userdata.bodypix_masks, ['torso', 'head']):
# part_mask = np.array(bodypix_mask.mask).reshape(
# bodypix_mask.shape[0], bodypix_mask.shape[1])

# # filter out part for current person segmentation
# try:
# part_mask[mask_bin == 0] = 0
# except Exception:
# rospy.logdebug('|> Failed to check {part} is visible')
# continue

# if part_mask.any():
# rospy.logdebug(f'|> Person has {part} visible')
# else:
# rospy.logdebug(
# f'|> Person does not have {part} visible')
# continue

# # do colour processing on the torso
# if part == 'torso':
# try:
# features.append(FeatureWithColour("torso", [
# ColourPrediction(colour, distance)
# for colour, distance
# in closest_colours(np.median(img[part_mask == 1], axis=0), RGB_COLOURS)
# ]))
# except Exception as e:
# rospy.logerr(f"Failed to process colour: {e}")

# # do feature extraction on the head
# if part == 'head':
# try:
# # crop out face
# face_mask = np.array(userdata.bodypix_masks[1].mask).reshape(
# userdata.bodypix_masks[1].shape[0], userdata.bodypix_masks[1].shape[1])

# mask_image_only_face = mask_image.copy()
# mask_image_only_face[face_mask == 0] = 0

# face_region = cv2_img.extract_mask_region(
# img, mask_image_only_face)
# if face_region is None:
# raise Exception(
# "Failed to extract mask region")

# msg = cv2_img.cv2_img_to_msg(face_region)
# features.extend(self.torch_face_features(
# msg, False).detected_features)
# except Exception as e:
# rospy.logerr(f"Failed to process extraction: {e}")

people.append({
'detection': person,
'features': rst
})

# Userdata:
# - people
# - - detection (YOLO)
# - parts
# - - part
# - mask

userdata['people'] = people
# except Exception:
# return 'failed'
# return 'succeeded'
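
The skill hands each mask to the torch service as a flat byte buffer plus its shape and dtype. numpy2message itself is not shown in this diff; the following round-trip pair is purely an assumption about what that helper and its counterpart on the service side do:

import numpy as np

def numpy2message(arr):
    # Assumed behaviour: flatten an array into (bytes, shape, dtype name) so it
    # can travel in plain ROS message fields.
    return arr.tobytes(), list(arr.shape), str(arr.dtype)

def message2numpy(data, shape, dtype):
    # Assumed inverse applied by the service before rebuilding the masks.
    return np.frombuffer(data, dtype=np.dtype(dtype)).reshape(shape)

# Round trip on a dummy mask.
mask = np.zeros((480, 640), dtype=np.uint8)
data, shape, dtype = numpy2message(mask)
restored = message2numpy(data, shape, dtype)
assert (restored == mask).all()
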
