From d238e5a02afdb49b30722721db6a2440376db7e2 Mon Sep 17 00:00:00 2001
From: Tanuj Rai
Date: Tue, 27 May 2025 08:31:22 +0530
Subject: [PATCH 1/2] Update pipeline_flux_fill.py

---
 src/diffusers/pipelines/flux/pipeline_flux_fill.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/diffusers/pipelines/flux/pipeline_flux_fill.py b/src/diffusers/pipelines/flux/pipeline_flux_fill.py
index 3c3e92c7d2a7..d1720312ccd4 100644
--- a/src/diffusers/pipelines/flux/pipeline_flux_fill.py
+++ b/src/diffusers/pipelines/flux/pipeline_flux_fill.py
@@ -219,7 +219,8 @@ def __init__(
             tokenizer=tokenizer,
             tokenizer_2=tokenizer_2,
             transformer=transformer,
-            scheduler=scheduler,
+            scheduler=scheduler
+            ip_adapter=ip_adapter,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
         # Flux latents are turned into 2x2 patches and packed. This means the latent width and height has to be divisible
@@ -240,6 +241,14 @@ def __init__(
         )
         self.default_sample_size = 128
 
+    def load_ip_adapter(self, ip_adapter_path):
+        try:
+            self.ip_adapter = torch.load(ip_adapter_path)
+            print(f"[FluxFillPipeline] IP Adapter loaded from: {ip_adapter_path}")
+        except Exception as e:
+            print(f"[FluxFillPipeline] Failed to load IP Adapter: {str(e)}")
+            raise
+
     # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_t5_prompt_embeds
     def _get_t5_prompt_embeds(
         self,

From 050cd10dbdf8efe5142b3f86545a628b7e419c79 Mon Sep 17 00:00:00 2001
From: Tanuj Rai
Date: Sat, 14 Jun 2025 10:49:21 +0530
Subject: [PATCH 2/2] Update pipeline_flux_fill.py

---
 src/diffusers/pipelines/flux/pipeline_flux_fill.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/diffusers/pipelines/flux/pipeline_flux_fill.py b/src/diffusers/pipelines/flux/pipeline_flux_fill.py
index d1720312ccd4..7d05ae2b383c 100644
--- a/src/diffusers/pipelines/flux/pipeline_flux_fill.py
+++ b/src/diffusers/pipelines/flux/pipeline_flux_fill.py
@@ -219,7 +219,7 @@ def __init__(
             tokenizer=tokenizer,
             tokenizer_2=tokenizer_2,
             transformer=transformer,
-            scheduler=scheduler
+            scheduler=scheduler,
             ip_adapter=ip_adapter,
         )
         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
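
A minimal usage sketch of the patched pipeline, not part of the patch itself: the checkpoint id and the adapter path below are placeholders, and it assumes the __init__ signature was also extended to accept the ip_adapter module registered above, which is not shown in these hunks. load_ip_adapter simply calls torch.load on the given path, so the file is expected to be a torch-serialized adapter object.

    import torch
    from diffusers import FluxFillPipeline

    # Placeholder checkpoint id; any checkpoint compatible with FluxFillPipeline would do.
    pipe = FluxFillPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-Fill-dev",
        torch_dtype=torch.bfloat16,
    ).to("cuda")

    # New method from this patch: torch.load the serialized adapter and keep it on the pipeline.
    # The path is a placeholder.
    pipe.load_ip_adapter("path/to/ip_adapter.bin")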