From dd8cb2b9cfc8a229aff4857e98b2a1f30bc0273d Mon Sep 17 00:00:00 2001 From: stevhliu Date: Tue, 1 Jul 2025 12:41:55 -0700 Subject: [PATCH 1/4] draft --- .../en/using-diffusers/other-formats.md | 52 ++++++++++--------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/docs/source/en/using-diffusers/other-formats.md b/docs/source/en/using-diffusers/other-formats.md index df3df92f0693..1b271cc4a5cc 100644 --- a/docs/source/en/using-diffusers/other-formats.md +++ b/docs/source/en/using-diffusers/other-formats.md @@ -70,40 +70,44 @@ pipeline = StableDiffusionPipeline.from_single_file( -#### LoRA files +#### LoRAs -[LoRA](https://hf.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) is a lightweight adapter that is fast and easy to train, making them especially popular for generating images in a certain way or style. These adapters are commonly stored in a safetensors file, and are widely popular on model sharing platforms like [civitai](https://civitai.com/). - -LoRAs are loaded into a base model with the [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] method. +[LoRAs](../tutorials/using_peft_for_inference) are lightweight checkpoints fine-tuned to generate images or video in a specific style. If you are using a checkpoint trained with a Diffusers training script, the LoRA configuration is automatically saved as metadata in a safetensors file. When the safetensors file is loaded, the metadata is parsed to correctly configure the LoRA and avoids missing or incorrect LoRA configurations. 
```py -from diffusers import StableDiffusionXLPipeline import torch +from diffusers import FluxPipeline -# base model -pipeline = StableDiffusionXLPipeline.from_pretrained( - "Lykon/dreamshaper-xl-1-0", torch_dtype=torch.float16, variant="fp16" +pipeline = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16 ).to("cuda") +pipeline.load_lora_weights("linoyts/yarn_art_Flux_LoRA") -# download LoRA weights -!wget https://civitai.com/api/download/models/168776 -O blueprintify.safetensors - -# load LoRA weights -pipeline.load_lora_weights(".", weight_name="blueprintify.safetensors") -prompt = "bl3uprint, a highly detailed blueprint of the empire state building, explaining how to build all parts, many txt, blueprint grid backdrop" -negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture" +network_alphas, metadata = FluxPipeline.lora_state_dict( + "linoyts/yarn_art_Flux_LoRA", + return_lora_metadata=True +) -image = pipeline( - prompt=prompt, - negative_prompt=negative_prompt, - generator=torch.manual_seed(0), -).images[0] -image +print("LoRA metadata:") +for key, value in metadata.items(): + print(f" {key}: {value}") ``` -
- -
+For LoRAs that aren't trained with Diffusers, you can still inject the metadata as long as it is a safetensors file. + +```py +import torch +from diffusers import FluxPipeline + +pipeline = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16 +).to("cuda") +pipeline.load_lora_weights("linoyts/yarn_art_Flux_LoRA") +pipeline.save_lora_weights( + transformer_lora_adapter_metadata={"r": 16, "lora_alpha": 16}, + text_encoder_lora_adapter_metadata={"r": 8, "lora_alpha": 8} +) +``` ### ckpt From b188ca581aafc748b3943970f205c4aa228ccfba Mon Sep 17 00:00:00 2001 From: stevhliu Date: Wed, 2 Jul 2025 14:47:48 -0700 Subject: [PATCH 2/4] hub image --- .../en/using-diffusers/other-formats.md | 23 ++++--------------- 1 file changed, 5 insertions(+), 18 deletions(-) diff --git a/docs/source/en/using-diffusers/other-formats.md b/docs/source/en/using-diffusers/other-formats.md index 1b271cc4a5cc..309c9a34b934 100644 --- a/docs/source/en/using-diffusers/other-formats.md +++ b/docs/source/en/using-diffusers/other-formats.md @@ -74,26 +74,13 @@ pipeline = StableDiffusionPipeline.from_single_file( [LoRAs](../tutorials/using_peft_for_inference) are lightweight checkpoints fine-tuned to generate images or video in a specific style. If you are using a checkpoint trained with a Diffusers training script, the LoRA configuration is automatically saved as metadata in a safetensors file. When the safetensors file is loaded, the metadata is parsed to correctly configure the LoRA and avoids missing or incorrect LoRA configurations. -```py -import torch -from diffusers import FluxPipeline +The easiest way to inspect the metadata, if available, is by clicking on the Safetensors logo next to the weights. 
-pipeline = FluxPipeline.from_pretrained( - "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16 -).to("cuda") -pipeline.load_lora_weights("linoyts/yarn_art_Flux_LoRA") - -network_alphas, metadata = FluxPipeline.lora_state_dict( - "linoyts/yarn_art_Flux_LoRA", - return_lora_metadata=True -) - -print("LoRA metadata:") -for key, value in metadata.items(): - print(f" {key}: {value}") -``` +
+ +
-For LoRAs that aren't trained with Diffusers, you can still inject the metadata as long as it is a safetensors file. +For LoRAs that aren't trained with Diffusers, you can still save the metadata as long as it is a safetensors file. ```py import torch From 22c13d46314b6942d30bca542a5acd92bd37948a Mon Sep 17 00:00:00 2001 From: stevhliu Date: Wed, 2 Jul 2025 15:03:40 -0700 Subject: [PATCH 3/4] update --- docs/source/en/using-diffusers/other-formats.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/using-diffusers/other-formats.md b/docs/source/en/using-diffusers/other-formats.md index 309c9a34b934..29f5f37009b4 100644 --- a/docs/source/en/using-diffusers/other-formats.md +++ b/docs/source/en/using-diffusers/other-formats.md @@ -80,7 +80,7 @@ The easiest way to inspect the metadata, if available, is by clicking on the Saf -For LoRAs that aren't trained with Diffusers, you can still save the metadata as long as it is a safetensors file. +For LoRAs that aren't trained with Diffusers, you can still save metadata with the `transformer_lora_adapter_metadata` and `text_encoder_lora_adapter_metadata` arguments in [`~FluxLoraLoaderMixin.save_lora_weights`] as long as it is a safetensors file. 
```py import torch From 428a3d5e6a81206135f45092149d3d97e9c7bf3f Mon Sep 17 00:00:00 2001 From: stevhliu Date: Wed, 2 Jul 2025 15:15:15 -0700 Subject: [PATCH 4/4] fix --- docs/source/en/using-diffusers/other-formats.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/using-diffusers/other-formats.md b/docs/source/en/using-diffusers/other-formats.md index 29f5f37009b4..11afbf29d3f2 100644 --- a/docs/source/en/using-diffusers/other-formats.md +++ b/docs/source/en/using-diffusers/other-formats.md @@ -80,7 +80,7 @@ The easiest way to inspect the metadata, if available, is by clicking on the Saf -For LoRAs that aren't trained with Diffusers, you can still save metadata with the `transformer_lora_adapter_metadata` and `text_encoder_lora_adapter_metadata` arguments in [`~FluxLoraLoaderMixin.save_lora_weights`] as long as it is a safetensors file. +For LoRAs that aren't trained with Diffusers, you can still save the metadata with the `transformer_lora_adapter_metadata` and `text_encoder_lora_adapter_metadata` arguments in [`~loaders.FluxLoraLoaderMixin.save_lora_weights`], as long as the LoRA is saved as a safetensors file. ```py import torch