Using consistent torch.int32 when casting in YOLOTransform
zhiqwang committed Feb 2, 2022
1 parent 36ba3aa · commit 0338fb8
Showing 1 changed file with 3 additions and 3 deletions.
yolort/models/transform.py (3 additions, 3 deletions):

```diff
@@ -169,10 +169,10 @@ def _onnx_batch_images(self, images: List[Tensor]) -> Tensor:
         max_size = []
         for i in range(1, images[0].dim()):
             max_size_i = torch.max(torch.stack([img.shape[i] for img in images]).to(torch.float32))
-            max_size.append(max_size_i.to(torch.int64))
+            max_size.append(max_size_i.to(torch.int32))
         stride = self.size_divisible
-        max_size[0] = (torch.ceil((max_size[0].to(torch.float32)) / stride) * stride).to(torch.int64)
-        max_size[1] = (torch.ceil((max_size[1].to(torch.float32)) / stride) * stride).to(torch.int64)
+        max_size[0] = (torch.ceil((max_size[0].to(torch.float32)) / stride) * stride).to(torch.int32)
+        max_size[1] = (torch.ceil((max_size[1].to(torch.float32)) / stride) * stride).to(torch.int32)
 
         # work around for
         # batched_imgs[i, :channel, dh : dh + img_h, dw : dw + img_w].copy_(img)
```
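For context, the hunk above rounds each spatial dimension of the batch up to the nearest multiple of `size_divisible` when padding images for ONNX export; the commit simply keeps every intermediate cast at `torch.int32` instead of mixing in `torch.int64`. Below is a minimal, eager-mode sketch of that computation, not the yolort source: the real `_onnx_batch_images` runs under tracing, where `img.shape[i]` is captured as a tensor, so the explicit `torch.tensor(...)` wrapping and the `padded_batch_hw` helper name here are assumptions made for plain Python.

```python
from typing import List

import torch
from torch import Tensor


def padded_batch_hw(images: List[Tensor], size_divisible: int = 32) -> List[Tensor]:
    """Sketch of the padded (H, W) computation in _onnx_batch_images.

    Hypothetical standalone helper; the torch.tensor(...) wrapping is an
    eager-mode adaptation, not part of the yolort API.
    """
    max_size = []
    for i in range(1, images[0].dim()):
        # Largest extent of dimension i across the batch; cast through
        # float32, then down to int32 (the dtype this commit standardizes on).
        sizes = torch.stack([torch.tensor(img.shape[i]) for img in images])
        max_size.append(torch.max(sizes.to(torch.float32)).to(torch.int32))
    stride = size_divisible
    # Round height and width up to the next multiple of the stride.
    max_size[0] = (torch.ceil(max_size[0].to(torch.float32) / stride) * stride).to(torch.int32)
    max_size[1] = (torch.ceil(max_size[1].to(torch.float32) / stride) * stride).to(torch.int32)
    return max_size


# Two differently sized images: heights 420/400 round up to 448; widths
# 630/640 already reach a multiple of 32, so the padded size is (448, 640).
imgs = [torch.rand(3, 420, 630), torch.rand(3, 400, 640)]
print([int(s) for s in padded_batch_hw(imgs)])  # [448, 640]
```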
