diff --git a/models/autoencoder_kl.py b/models/autoencoder_kl.py index 21c8f64fd916..7e3b925df714 100644 --- a/models/autoencoder_kl.py +++ b/models/autoencoder_kl.py @@ -281,6 +281,20 @@ def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[Decod @apply_forward_hook def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: + """ + Decode a batch of images. + + Args: + z (`torch.FloatTensor`): Input batch of latent vectors. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. + + Returns: + [`~models.vae.DecoderOutput`] or `tuple`: + If `return_dict` is `True`, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is + returned. + + """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices)