Don't do ego compensation
holger-motional committed Jul 27, 2020
1 parent eaf012a commit d51f0be
Showing 1 changed file with 9 additions and 26 deletions.
python-sdk/nuimages/nuimages.py (35 changes: 9 additions & 26 deletions)
@@ -522,33 +522,16 @@ def get_depth(self,
         pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
         pc.translate(np.array(cs_record['translation']))
 
-        # Second step: transform from ego to the global frame.
-        poserecord = self.get('ego_pose', pointsensor['ego_pose_token'])
-        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
-        pc.translate(np.array(poserecord['translation']))
+        if False:  # TODO: For debugging purposes, stay in the ego vehicle frame
+            # Second step: transform from ego to the global frame.
+            poserecord = self.get('ego_pose', pointsensor['ego_pose_token'])
+            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
+            pc.translate(np.array(poserecord['translation']))
 
-        # TODO: DEBUG: Print time offset between cam and lidar
-        cam_name = cam['filename'].split('__')[1]
-        time_offset_ego = self.get('ego_pose', cam['ego_pose_token'])['timestamp'] - cam['timestamp']
-        time_offset_lidar = self.get('ego_pose', pointsensor['ego_pose_token'])['timestamp'] - cam['timestamp']
-        print('%s\t%f\t%f' % (cam_name, time_offset_ego, time_offset_lidar))
-
-        # Third step: transform from global into the ego vehicle frame for the timestamp of the image.
-        if False:  # TODO: Debug only: Try mapping into a future ego pose
-            cur_cam = cam
-            target_time = int(cur_cam['timestamp'] + 2e5)  # Move 200ms forward
-            cur_target_diff = np.abs(target_time - cur_cam['timestamp'])
-            poserecord = self.get('ego_pose', cur_cam['ego_pose_token'])
-            while cur_cam['next'] != '':
-                cur_cam = self.get('sample_data', cur_cam['next'])
-                new_target_diff = np.abs(target_time - cur_cam['timestamp'])
-                if new_target_diff < cur_target_diff:
-                    poserecord = self.get('ego_pose', cur_cam['ego_pose_token'])
-                    cur_target_diff = new_target_diff
-        else:
-            poserecord = self.get('ego_pose', cam['ego_pose_token'])
-        pc.translate(-np.array(poserecord['translation']))
-        pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)
+            # Third step: transform from global into the ego vehicle frame for the timestamp of the image.
+            poserecord = self.get('ego_pose', cam['ego_pose_token'])
+            pc.translate(-np.array(poserecord['translation']))
+            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)
 
         # Fourth step: transform from ego into the camera.
         cs_record = self.get('calibrated_sensor', cam['calibrated_sensor_token'])
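
For context, the four steps that get_depth walks through can be sketched in isolation. The sketch below is illustrative and not part of the commit: it uses numpy and pyquaternion directly instead of the pc.rotate / pc.translate point-cloud helpers seen in the diff, the record arguments are assumed to be plain dicts with 'rotation' ((w, x, y, z) quaternion) and 'translation' ((x, y, z)) entries as in the nuScenes/nuImages schema, and apply_ego_compensation is a hypothetical flag standing in for the hard-coded if False: above.

import numpy as np
from pyquaternion import Quaternion


def lidar_to_camera_frame(points, lidar_cs, lidar_pose, cam_cs, cam_pose,
                          apply_ego_compensation=False):
    """Sketch of the four-step transform from the lidar frame to the camera frame.

    points is a 3 x N array of lidar points; lidar_cs / cam_cs are
    calibrated_sensor records and lidar_pose / cam_pose are ego_pose records.
    """
    def rotate(pts, rot):
        # Apply a 3 x 3 rotation matrix to all points at once.
        return rot @ pts

    def translate(pts, t):
        # Add a translation vector to every point.
        return pts + np.asarray(t).reshape(3, 1)

    # First step: lidar frame -> ego vehicle frame at the lidar timestamp.
    pts = rotate(points, Quaternion(lidar_cs['rotation']).rotation_matrix)
    pts = translate(pts, lidar_cs['translation'])

    if apply_ego_compensation:
        # Second step: ego frame (lidar timestamp) -> global frame.
        pts = rotate(pts, Quaternion(lidar_pose['rotation']).rotation_matrix)
        pts = translate(pts, lidar_pose['translation'])

        # Third step: global frame -> ego frame at the camera timestamp
        # (inverse of the camera ego pose).
        pts = translate(pts, -np.asarray(cam_pose['translation']))
        pts = rotate(pts, Quaternion(cam_pose['rotation']).rotation_matrix.T)
    # With ego compensation disabled, as in this commit, the lidar-time and
    # camera-time ego frames are treated as identical and steps two and
    # three are skipped.

    # Fourth step: ego vehicle frame -> camera frame (inverse camera extrinsics).
    pts = translate(pts, -np.asarray(cam_cs['translation']))
    pts = rotate(pts, Quaternion(cam_cs['rotation']).rotation_matrix.T)
    return pts

Calling the sketch with apply_ego_compensation=True reproduces the original second and third steps; the default False corresponds to the behaviour after this commit.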