Python script
import gc

import torch
from diffusers import FluxPipeline


def flush():
    gc.collect()
    torch.cuda.empty_cache()


model_id = "black-forest-labs/FLUX.1-dev"
prompt = "a photo of f5h8_woman holding a sign that says 'I LOVE LoRA!'"

# Stage 1: load only the text encoders (no transformer, no VAE) and encode the prompt.
pipeline = FluxPipeline.from_pretrained(
    model_id,
    transformer=None,
    vae=None,
).to("cuda")

with torch.no_grad():
    prompt_embeds, pooled_prompt_embeds, text_ids = pipeline.encode_prompt(
        prompt=prompt,
        prompt_2=None,
    )

# Free the text encoders before loading the transformer and VAE.
del pipeline
flush()

# Stage 2: load the transformer and VAE without the text encoders, then apply the LoRA.
pipeline = FluxPipeline.from_pretrained(
    model_id,
    text_encoder=None,
    text_encoder_2=None,
    tokenizer=None,
    tokenizer_2=None,
    torch_dtype=torch.bfloat16,
)
pipeline.load_lora_weights("anzu-flux-LoRA_v22.safetensors")
pipeline.enable_sequential_cpu_offload()

seed = 12345
generator = torch.Generator().manual_seed(seed)

image = pipeline(
    prompt_embeds=prompt_embeds.bfloat16(),
    pooled_prompt_embeds=pooled_prompt_embeds.bfloat16(),
    width=1024,
    height=1024,
    num_inference_steps=27,
    generator=generator,
    guidance_scale=3.5,
    joint_attention_kwargs={"scale": 1.0},  # LoRA strength
).images[0]

image.save(f"lora_result_seed{seed}.jpg")
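As an optional extension (not part of the script above), the encoded prompt can be cached to disk so that later runs with the same prompt can skip Stage 1 and never load the text encoders at all. This is a minimal sketch using plain torch.save/torch.load; the cache file name is a placeholder chosen for illustration.

# Optional: cache the prompt embeddings so the text encoders don't have to be
# reloaded for repeat generations of the same prompt. File name is illustrative.
torch.save(
    {
        "prompt_embeds": prompt_embeds.cpu(),
        "pooled_prompt_embeds": pooled_prompt_embeds.cpu(),
    },
    "prompt_embeds_cache.pt",
)

# On a later run, skip Stage 1 and load the cached embeddings instead:
cache = torch.load("prompt_embeds_cache.pt")
prompt_embeds = cache["prompt_embeds"]
pooled_prompt_embeds = cache["pooled_prompt_embeds"]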
Results