【Magic Clothing】Virtual try-on from a face photo and a clothing image, using the newly released weights trained at size 768

Introduction

I have written about Magic Clothing before.
touch-sp.hatenablog.com
This time, new weights were released, so I tried them out.

Goal

Task: "Dress the person with this face in these clothes."

(input images: a face photo and a clothing image)

These two images were all I prepared.

Results

(generated images)

The image quality is clearly better than before.

To reproduce the face, I used IP-Adapter FaceIDPlusV2.
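
For reference, this is roughly how a FaceID-style identity embedding is extracted with insightface under the hood. This is a minimal sketch of my own, assuming the buffalo_l model bundle and a local face.jpg; neither appears in the script below.

import cv2
import torch
from insightface.app import FaceAnalysis

# Load the insightface detector/recognizer (buffalo_l is an assumed bundle name).
app = FaceAnalysis(name="buffalo_l", providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
app.prepare(ctx_id=0, det_size=(640, 640))

# Detect the face and take its 512-dimensional identity embedding.
faces = app.get(cv2.imread("face.jpg"))  # hypothetical input file
faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
print(faceid_embeds.shape)  # torch.Size([1, 512])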

PC environment

It ran without problems on Windows.

Windows 11
CUDA 11.8
Python 3.11

Python environment setup

pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118
pip install numpy==1.25.1 diffusers==0.25.1 opencv-python==4.8.0.76 transformers==4.31.0 gradio==4.16.0 safetensors==0.3.1 controlnet-aux==0.0.6 accelerate==0.21.0
pip install onnxruntime-gpu==1.17.1 insightface==0.7.3
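
A quick sanity check (my own addition, not part of the original setup) confirms that the CUDA builds of torch and onnxruntime were picked up:

import torch
import onnxruntime

# Both lines should report CUDA if the cu118 wheels installed correctly.
print(torch.__version__, torch.cuda.is_available())  # e.g. 2.0.1+cu118 True
print(onnxruntime.get_available_providers())         # should include CUDAExecutionProvider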

Python script used

import torch
from diffusers import UniPCMultistepScheduler, AutoencoderKL
from diffusers.pipelines import StableDiffusionPipeline
import gradio as gr
import argparse

parser = argparse.ArgumentParser(description='oms diffusion')
parser.add_argument('--model_path', type=str, default="oms_diffusion_768.safetensors")
parser.add_argument('--pipe_path', type=str, default="model/Realistic_Vision_V4.0_noVAE")
parser.add_argument('--faceid_version', type=str, default="FaceIDPlusV2", choices=['FaceID', 'FaceIDPlus', 'FaceIDPlusV2'])

args = parser.parse_args()

device = "cuda"

# Use the ft-mse VAE and a UniPC scheduler with the base SD 1.5 pipeline.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(args.pipe_path, vae=vae, torch_dtype=torch.float16)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Plain FaceID needs only the identity embedding; the Plus variants also
# use a CLIP image encoder, so they take an extra image_encoder_path.
if args.faceid_version == "FaceID":
    ip_lora = "./checkpoints/ipadapter_faceid/ip-adapter-faceid_sd15_lora.safetensors"
    ip_ckpt = "./checkpoints/ipadapter_faceid/ip-adapter-faceid_sd15.bin"
    pipe.load_lora_weights(ip_lora)
    pipe.fuse_lora()
    from garment_adapter.garment_ipadapter_faceid import IPAdapterFaceID

    ip_model = IPAdapterFaceID(pipe, args.model_path, ip_ckpt, device)
else:
    if args.faceid_version == "FaceIDPlus":
        ip_ckpt = "./checkpoints/ipadapter_faceid/ip-adapter-faceid-plus_sd15.bin"
        ip_lora = "./checkpoints/ipadapter_faceid/ip-adapter-faceid-plus_sd15_lora.safetensors"
        v2 = False
    else:
        ip_ckpt = "./checkpoints/ipadapter_faceid/ip-adapter-faceid-plusv2_sd15.bin"
        ip_lora = "./checkpoints/ipadapter_faceid/ip-adapter-faceid-plusv2_sd15_lora.safetensors"
        v2 = True

    pipe.load_lora_weights(ip_lora)
    pipe.fuse_lora()
    image_encoder_path = "clip/CLIP-ViT-H-14-laion2B-s32B-b79K"
    from garment_adapter.garment_ipadapter_faceid import IPAdapterFaceIDPlus as IPAdapterFaceID

    ip_model = IPAdapterFaceID(pipe, args.model_path, image_encoder_path, ip_ckpt, device)


# Gradio callback: runs the garment adapter; the Plus variants take an
# additional shortcut flag (v2) on top of the plain FaceID arguments.
def process(cloth_image, face_img, cloth_mask_image, prompt, a_prompt, n_prompt, num_samples, width, height, sample_steps, scale, seed):
    if args.faceid_version == "FaceID":
        result = ip_model.generate(cloth_image, face_img, cloth_mask_image, prompt, a_prompt, n_prompt, num_samples, seed, scale, sample_steps, height, width)
    else:
        result = ip_model.generate(cloth_image, face_img, cloth_mask_image, prompt, a_prompt, n_prompt, num_samples, seed, scale, sample_steps, height, width, shortcut=v2)
    if result is None:
        raise gr.Error("Face detection failed.")
    else:
        images, cloth_mask_image = result
    return images, cloth_mask_image


block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## You can increase the image resolution to get a better face, but control over the cloth may be lost; a high-resolution checkpoint will be released soon")
    with gr.Row():
        with gr.Column():
            face_img = gr.Image(label="Face image", type="pil")
            cloth_image = gr.Image(label="Cloth image", type="pil")
            cloth_mask_image = gr.Image(label="Cloth mask image (optional; if omitted, it is produced by the internal segmentation algorithm)", type="pil")
            prompt = gr.Textbox(label="Prompt", value='a photography of a model')
            run_button = gr.Button(value="Run")
            with gr.Accordion("Advanced options", open=False):
                num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=4, step=1)
                height = gr.Slider(label="Height", minimum=256, maximum=768, value=640, step=64)
                width = gr.Slider(label="Width", minimum=192, maximum=576, value=384, step=64)
                sample_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=10., value=2.5, step=0.1)
                seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=1234)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality, high quality')
                n_prompt = gr.Textbox(label="Negative Prompt", value='bare, monochrome, lowres, bad anatomy, worst quality, low quality')

        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery")
            cloth_seg_image = gr.Image(label="cloth mask", type="pil", width=192, height=256)

    ips = [cloth_image, face_img, cloth_mask_image, prompt, a_prompt, n_prompt, num_samples, width, height, sample_steps, scale, seed]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery, cloth_seg_image])

block.launch()
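
The same pipeline can also be driven without the Gradio UI. The following is a minimal sketch of my own, assuming the script above has already run as far as creating ip_model with FaceIDPlusV2, and that face.jpg and cloth.jpg are your two input images; the argument order mirrors the process() callback above.

from PIL import Image

face_img = Image.open("face.jpg")    # hypothetical input paths
cloth_image = Image.open("cloth.jpg")

# Arguments mirror the Gradio defaults: prompt, added prompt, negative prompt,
# num_samples, seed, scale, sample_steps, height, width.
images, cloth_mask = ip_model.generate(
    cloth_image, face_img, None,     # None: let the internal segmenter build the cloth mask
    "a photography of a model",
    "best quality, high quality",
    "bare, monochrome, lowres, bad anatomy, worst quality, low quality",
    4, 1234, 2.5, 20, 640, 384,
    shortcut=True,                   # the v2 shortcut used for FaceIDPlusV2
)
images[0].save("result.png")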




