From 3465edde47efa64b615aa12c9c6d6c659319b75a Mon Sep 17 00:00:00 2001
From: stano
Date: Mon, 2 Oct 2023 19:42:32 +0300
Subject: [PATCH] Add docstring for the AutoencoderKL's decode (#5242)

* Add docstring for the AutoencoderKL's decode #5230

* Follow the style guidelines in AutoencoderKL's decode #5230

---------

Co-authored-by: stano <>
---
 models/autoencoder_kl.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/models/autoencoder_kl.py b/models/autoencoder_kl.py
index 21c8f64fd916..7e3b925df714 100644
--- a/models/autoencoder_kl.py
+++ b/models/autoencoder_kl.py
@@ -281,6 +281,20 @@ def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[Decod
 
     @apply_forward_hook
     def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
+        """
+        Decode a batch of images.
+
+        Args:
+            z (`torch.FloatTensor`): Input batch of latent vectors.
+            return_dict (`bool`, *optional*, defaults to `True`):
+                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
+
+        Returns:
+            [`~models.vae.DecoderOutput`] or `tuple`:
+                If `return_dict` is `True`, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
+                returned.
+
+        """
         if self.use_slicing and z.shape[0] > 1:
             decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
             decoded = torch.cat(decoded_slices)