【Diffusers】I wrote a Python script to make it easy to compare image-generation models.

Introduction

The goal is exactly what the title says.

This time I compared three versions of Beautiful Realistic Asians: Version 5, 6, and 7.

Results

From left to right: version 5 → 6 → 7.

The script automates the whole process, up to and including arranging the images side by side.

Python script

The script is fairly general-purpose: to compare a different set of models, you only need to edit the model_safetensors dictionary.

I think it is very useful when you fix a prompt and compare models against it.
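For reference, a full run leaves behind the following directory layout (a sketch derived from the script below; filenames follow its f"{i}.png" pattern):

version5/0.png ... version5/4.png        # n_samples images per model
version6/0.png ... version6/4.png
version7/0.png ... version7/4.png
stack_image/0.png ... stack_image/4.png  # each file: version5 | version6 | version7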

from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
import torch
from compel import Compel, DiffusersTextualInversionManager
import os
import cv2
import numpy as np
import time

n_samples = 5
model_safetensors = {
    "version5": "beautifulRealistic_brav5.safetensors",
    "version6": "beautifulRealistic_v60.safetensors",
    "version7": "beautifulRealistic_v7.safetensors"
}

def txt2img(model_name: str) -> None:
    """Generate n_samples images with the given model and save them under <model_name>/."""
    model_fname = model_safetensors[model_name]
    pipe = StableDiffusionPipeline.from_single_file(
        f"safetensors/{model_fname}",
        load_safety_checker=False,
        #extract_ema=True,
        torch_dtype=torch.float16
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.load_textual_inversion("embeddings/easynegative.safetensors", token="EasyNegative")
    pipe.to("cuda")
    #pipe.enable_model_cpu_offload()

    prompt = "(high resolution)++, 8k+, photorealistic+, attractive, highly detailed, photo of pretty Japanese woman, short hair"
    negative_prompt = "EasyNegative, (Worst Quality)++, (low quality)+"

    textual_inversion_manager = DiffusersTextualInversionManager(pipe)
    compel_proc = Compel(
        tokenizer=pipe.tokenizer,
        text_encoder=pipe.text_encoder,
        textual_inversion_manager=textual_inversion_manager,
        truncate_long_prompts=False)

    prompt_embeds = compel_proc([prompt])
    negative_prompt_embeds = compel_proc([negative_prompt])
    # With truncate_long_prompts=False the two embeddings can end up with
    # different lengths, so pad them to match before calling the pipeline.
    [prompt_embeds, negative_prompt_embeds] = compel_proc.pad_conditioning_tensors_to_same_length(
        [prompt_embeds, negative_prompt_embeds])

    os.makedirs(model_name, exist_ok=True)

    for i in range(n_samples):
        seed = 1000000 * (i + 1)  # fixed seeds so every model sees the same latent noise
        generator = torch.manual_seed(seed)
        image = pipe(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            generator=generator,
            num_inference_steps=25,
            width=768,
            height=768,
        ).images[0]
        image.save(os.path.join(model_name, f"{i}.png"))

def stack() -> None:
    """Horizontally stack the i-th image from every model and save the result."""
    os.makedirs("stack_image", exist_ok=True)
    print(" -> ".join(model_safetensors.keys()))
    for i in range(n_samples):
        images_list = []
        for key in model_safetensors.keys():
            images_list.append(cv2.imread(os.path.join(key, f"{i}.png")))
        stack_image = np.hstack(images_list)
        cv2.imwrite(os.path.join("stack_image", f"{i}.png"), stack_image)

if __name__ == "__main__":
    start = time.time()

    for model in model_safetensors:
        txt2img(model)
    stack()
    
    end = time.time()
    print(f"処理時間: {end - start:.5f}秒")

I also tried it with SDXL

kotajiro001, who has published the hadukiMix and fudukiMix models, has released a new model called nagatsukiMix.

I ran the same kind of comparison on those models as well.

The results are posted on Google Blogger:
support-touchsp.blogspot.com

from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler
import torch
import os
import cv2
import numpy as np
import time


n_samples = 5
model_safetensors = {
    "hadukiMix": "hadukiMix_v15.safetensors",
    "fudukiMix": "fudukiMix_v15.safetensors",
    "nagatsukiMix": "nagatsukiMix_v10.safetensors"
}

def txt2img(model_name: str) -> None:
    """Generate n_samples images with the given SDXL model and save them under <model_name>/."""
    model_fname = model_safetensors[model_name]
    pipe = StableDiffusionXLPipeline.from_single_file(
        f"safetensors/{model_fname}",
        load_safety_checker=False,
        extract_ema=True,
        torch_dtype=torch.float16
    )

    # DPM++ SDE sampler with Karras sigmas (shown as "DPM++ SDE Karras" in most UIs)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        pipe.scheduler.config,
        algorithm_type="sde-dpmsolver++",
        use_karras_sigmas=True
    )
    
    pipe.to("cuda")

    prompt = "portrait of young japanese girl, 25yo, 8k, detailed, standing on street, smiling, plain white t-shirt, eye level angle"

    os.makedirs(model_name, exist_ok=True)

    for i in range(n_samples):
        seed = 100000 * (i + 1)  # fixed seeds so every model sees the same latent noise
        generator = torch.manual_seed(seed)
        image = pipe(
            prompt=prompt,
            generator=generator,
            num_inference_steps=30,
            width=1152,
            height=896
        ).images[0]

        image.save(os.path.join(model_name, f"{i}.png"))

def stack() -> None:
    """Horizontally stack the i-th image from every model and save the result."""
    os.makedirs("stack_image", exist_ok=True)
    print(" -> ".join(model_safetensors.keys()))
    for i in range(n_samples):
        images_list = []
        for key in model_safetensors.keys():
            images_list.append(cv2.imread(os.path.join(key, f"{i}.png")))
        stack_image = np.hstack(images_list)
        cv2.imwrite(os.path.join("stack_image", f"{i}.png"), stack_image)

if __name__ == "__main__":
    start = time.time()
    
    for model in model_safetensors:
        txt2img(model)
    stack()
    
    end = time.time()
    print(f"処理時間: {end - start:.5f}秒")
