Skip to content

Commit

Permalink
[Community Pipelines] add textual inversion support for stable_diffus…
Browse files Browse the repository at this point in the history
…ion_ipex (huggingface#5571)
  • Loading branch information
miaojinc authored and Jimmy committed Apr 26, 2024
1 parent 4dbd8b2 commit 3e50f5f
Showing 1 changed file with 10 additions and 1 deletion.
11 changes: 10 additions & 1 deletion examples/community/stable_diffusion_ipex.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

from diffusers.configuration_utils import FrozenDict
from diffusers.loaders import TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
Expand Down Expand Up @@ -61,7 +62,7 @@
"""


class StableDiffusionIPEXPipeline(DiffusionPipeline):
class StableDiffusionIPEXPipeline(DiffusionPipeline, TextualInversionLoaderMixin):
r"""
Pipeline for text-to-image generation using Stable Diffusion on IPEX.
Expand Down Expand Up @@ -454,6 +455,10 @@ def _encode_prompt(
batch_size = prompt_embeds.shape[0]

if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

text_inputs = self.tokenizer(
prompt,
padding="max_length",
Expand Down Expand Up @@ -514,6 +519,10 @@ def _encode_prompt(
else:
uncond_tokens = negative_prompt

# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
Expand Down

0 comments on commit 3e50f5f

Please sign in to comment.