Skip to content

Commit

Permalink
cleaned unused comments
Browse files Browse the repository at this point in the history
  • Loading branch information
Benteng Ma committed Mar 11, 2024
1 parent d43e5ab commit feb4e80
Showing 1 changed file with 0 additions and 64 deletions.
64 changes: 0 additions & 64 deletions common/vision/lasr_vision_feature_extraction/nodes/service
Original file line number Diff line number Diff line change
Expand Up @@ -29,73 +29,9 @@ def detect(request: TorchFaceFeatureDetectionDescriptionRequest) -> TorchFaceFea

response = TorchFaceFeatureDetectionDescriptionResponse()
response.description = rst_str
# response.detected_features = str(class_pred) + str(colour_pred)
# response.detected_features = []
# for c in ['hair', 'hat', 'glasses', 'cloth',]:
# # colour_pred[c] = {k: v[0] for k, v in colour_pred[c].items()}
# sorted_list = sorted(colour_pred[c].items(), key=lambda item: item[1], reverse=True)
# # rospy.loginfo(str(sorted_list))
# if len(sorted_list) > 3:
# sorted_list = sorted_list[0:3]
# sorted_list = [k for k, v in sorted_list]
# # rospy.loginfo(str(colour_pred[c]))
# response.detected_features.append(FeatureWithColour(c, class_pred[c], sorted_list))
return response


# def detect(request: TorchFaceFeatureDetectionRequest) -> TorchFaceFeatureDetectionResponse:
# # decode the image
# rospy.loginfo('Decoding')
# frame = msg_to_cv2_img(request.image_raw)

# # 'hair', 'hat', 'glasses', 'face'
# input_image = torch.from_numpy(frame).permute(2, 0, 1).unsqueeze(0).float()
# input_image /= 255.0
# masks_batch_pred, pred_classes = lasr_vision_feature_extraction.model(input_image)

# thresholds_mask = [
# 0.5, 0.75, 0.25, 0.5, # 0.5, 0.5, 0.5, 0.5,
# ]
# thresholds_pred = [
# 0.6, 0.8, 0.1, 0.5,
# ]
# erosion_iterations = 1
# dilation_iterations = 1
# categories = ['hair', 'hat', 'glasses', 'face',]

# masks_batch_pred = binary_erosion_dilation(
# masks_batch_pred, thresholds=thresholds_mask,
# erosion_iterations=erosion_iterations, dilation_iterations=dilation_iterations
# )

# median_colours = (median_color_float(
# input_image, masks_batch_pred).detach().squeeze(0)*255).numpy().astype(np.uint8)

# # discarded: masks = masks_batch_pred.detach().squeeze(0).numpy().astype(np.uint8)
# # discarded: mask_list = [masks[i,:,:] for i in range(masks.shape[0])]

# pred_classes = pred_classes.detach().squeeze(0).numpy()
# # discarded: class_list = [categories[i] for i in range(
# # pred_classes.shape[0]) if pred_classes[i].item() > thresholds_pred[i]]
# colour_list = [median_colours[i, :]
# for i in range(median_colours.shape[0])]

# response = TorchFaceFeatureDetectionResponse()
# # response.detected_features = [
# # FeatureWithColour(categories[i], [
# # ColourPrediction(colour, distance)
# # for colour, distance
# # in closest_colours(colour_list[i], HAIR_COLOURS if categories[i] == 'hair' else COLOURS)
# # ])
# # for i
# # in range(pred_classes.shape[0])
# # if pred_classes[i].item() > thresholds_pred[i]
# # ]
# response.detected_features = "feature"

# return response
# test test

# Module-level bootstrap: start the ROS node and block-free registration of the
# face-feature description service. Runs when this script is launched as a node.
rospy.init_node('torch_service')
# Expose `detect` (defined above) as the handler for face-feature description
# requests; service type is TorchFaceFeatureDetectionDescription.
rospy.Service('/torch/detect/face_features', TorchFaceFeatureDetectionDescription, detect)
rospy.loginfo('Torch service started')
Expand Down

0 comments on commit feb4e80

Please sign in to comment.