Qing
2023-12-19 13:16:30 +08:00
parent f27fc51e34
commit 141936a937
18 changed files with 479 additions and 358 deletions

@@ -7,7 +7,7 @@ import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler
from lama_cleaner.schema import HDStrategy, SDSampler, FREEUConfig
from lama_cleaner.tests.test_model import get_config, assert_equal
current_dir = Path(__file__).parent.absolute().resolve()
@@ -16,178 +16,127 @@ save_dir.mkdir(exist_ok=True, parents=True)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
@pytest.mark.parametrize("cpu_textencoder", [True, False])
@pytest.mark.parametrize("disable_nsfw", [True, False])
def test_runway_sd_1_5_ddim(
sd_device, strategy, sampler, cpu_textencoder, disable_nsfw
):
def callback(i, t, latents):
pass
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 50 if sd_device == "cuda" else 1
model = ModelManager(
name="sd1.5",
device=torch.device(sd_device),
hf_access_token="",
sd_run_local=True,
disable_nsfw=disable_nsfw,
sd_cpu_textencoder=cpu_textencoder,
callback=callback,
)
cfg = get_config(strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"
assert_equal(
model,
cfg,
f"runway_sd_{strategy.capitalize()}_{name}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1.3,
)
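# --- Illustrative aside (not part of this commit) ---
# A standalone sketch of what this DDIM inpainting test exercises through
# ModelManager, written against the public diffusers API only; the model id and
# fixture file names appear elsewhere in this file, the rest is illustrative.
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline, DDIMScheduler

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
result = pipe(
    prompt="a fox sitting on a bench",
    image=Image.open("overture-creations-5sI6fQgYIuo.png").convert("RGB"),
    mask_image=Image.open("overture-creations-5sI6fQgYIuo_mask.png").convert("L"),
    num_inference_steps=50,
).images[0]
result.save("runway_sd_ddim_sketch.png")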
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize(
"sampler", [SDSampler.pndm, SDSampler.k_lms, SDSampler.k_euler, SDSampler.k_euler_a]
"sampler",
[
SDSampler.ddim,
SDSampler.pndm,
SDSampler.k_lms,
SDSampler.k_euler,
SDSampler.k_euler_a,
SDSampler.lcm,
],
)
@pytest.mark.parametrize("cpu_textencoder", [False])
@pytest.mark.parametrize("disable_nsfw", [True])
def test_runway_sd_1_5(sd_device, strategy, sampler, cpu_textencoder, disable_nsfw):
def callback(i, t, latents):
print(f"sd_step_{i}")
def test_runway_sd_1_5_all_samplers(
sd_device,
sampler,
):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 50 if sd_device == "cuda" else 1
sd_steps = 30
model = ModelManager(
name="sd1.5",
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
hf_access_token="",
sd_run_local=True,
disable_nsfw=disable_nsfw,
sd_cpu_textencoder=cpu_textencoder,
callback=callback,
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(
HDStrategy.ORIGINAL, prompt="a fox sitting on a bench", sd_steps=sd_steps
)
cfg = get_config(strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"
name = f"device_{sd_device}_{sampler}"
assert_equal(
model,
cfg,
f"runway_sd_{strategy.capitalize()}_{name}.png",
f"runway_sd_{name}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1.3,
)
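# --- Illustrative aside (not part of this commit) ---
# The SDSampler ids exercised above roughly correspond to diffusers scheduler
# classes. The mapping and helper below are an illustrative assumption for
# reference, not lama_cleaner's own sampler table.
from diffusers import (
    DDIMScheduler,
    PNDMScheduler,
    LMSDiscreteScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    LCMScheduler,
)

SAMPLER_TO_SCHEDULER = {
    "ddim": DDIMScheduler,
    "pndm": PNDMScheduler,
    "k_lms": LMSDiscreteScheduler,
    "k_euler": EulerDiscreteScheduler,
    "k_euler_a": EulerAncestralDiscreteScheduler,
    "lcm": LCMScheduler,
}

def set_sampler(pipe, name: str):
    # Swap the scheduler in place, reusing the pipeline's existing config.
    pipe.scheduler = SAMPLER_TO_SCHEDULER[name].from_config(pipe.scheduler.config)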
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
@pytest.mark.parametrize("sd_prevent_unmasked_area", [False, True])
def test_runway_sd_1_5_negative_prompt(
sd_device, strategy, sampler, sd_prevent_unmasked_area
):
def callback(i, t, latents):
pass
@pytest.mark.parametrize("sampler", [SDSampler.lcm])
def test_runway_sd_lcm_lora(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 50 if sd_device == "cuda" else 20
sd_steps = 5
model = ModelManager(
name="sd1.5",
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
hf_access_token="",
sd_run_local=True,
disable_nsfw=False,
disable_nsfw=True,
sd_cpu_textencoder=False,
callback=callback,
)
cfg = get_config(
strategy,
prompt="face of a fox, sitting on a bench",
sd_steps=sd_steps,
prompt="Face of a fox, high resolution, sitting on a park bench",
negative_prompt="orange, yellow, small",
sd_sampler=sampler,
sd_match_histograms=True,
sd_prevent_unmasked_area=sd_prevent_unmasked_area,
)
name = f"{sampler}_negative_prompt"
assert_equal(
model,
cfg,
f"runway_sd_{strategy.capitalize()}_{name}_prevent_unmasked_area_{sd_prevent_unmasked_area}.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1,
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
@pytest.mark.parametrize("cpu_textencoder", [False])
@pytest.mark.parametrize("disable_nsfw", [False])
def test_runway_sd_1_5_sd_scale(
sd_device, strategy, sampler, cpu_textencoder, disable_nsfw
):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 50 if sd_device == "cuda" else 20
model = ModelManager(
name="sd1.5",
device=torch.device(sd_device),
hf_access_token="",
sd_run_local=True,
disable_nsfw=disable_nsfw,
sd_cpu_textencoder=cpu_textencoder,
)
cfg = get_config(
strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps, sd_scale=0.85
sd_guidance_scale=2,
sd_lcm_lora=True,
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"
assert_equal(
model,
cfg,
f"runway_sd_{strategy.capitalize()}_{name}_sdscale.png",
f"runway_sd_1_5_lcm_lora.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
fx=1.3,
)
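# --- Illustrative aside (not part of this commit) ---
# sd_lcm_lora roughly corresponds to the documented diffusers LCM-LoRA recipe:
# switch to LCMScheduler, load the SD1.5 LCM-LoRA adapter, then run very few
# steps at a low guidance scale. The adapter repo id and the step/guidance
# numbers here are illustrative assumptions, not values read from lama_cleaner.
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline, LCMScheduler

pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
result = pipe(
    prompt="Face of a fox, high resolution, sitting on a park bench",
    image=Image.open("overture-creations-5sI6fQgYIuo.png").convert("RGB"),
    mask_image=Image.open("overture-creations-5sI6fQgYIuo_mask.png").convert("L"),
    num_inference_steps=5,
    guidance_scale=2,
).images[0]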
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_sd_freeu(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 30
model = ModelManager(
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
hf_access_token="",
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(
strategy,
prompt="face of a fox, sitting on a bench",
sd_steps=sd_steps,
sd_guidance_scale=7.5,
sd_freeu=True,
sd_freeu_config=FREEUConfig(),
)
cfg.sd_sampler = sampler
assert_equal(
model,
cfg,
f"runway_sd_1_5_freeu.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
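# --- Illustrative aside (not part of this commit) ---
# sd_freeu / FREEUConfig map onto the FreeU switch that diffusers exposes on its
# pipelines. enable_freeu/disable_freeu are the documented diffusers calls; the
# scaling factors below are the values commonly suggested for SD1.x and are an
# assumption here, not read from FREEUConfig's defaults. pipe/image/mask are the
# objects created in the earlier sketches.
pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)  # reweight UNet backbone/skip features
result = pipe(
    prompt="face of a fox, sitting on a bench",
    image=image,
    mask_image=mask,
    num_inference_steps=30,
    guidance_scale=7.5,
).images[0]
pipe.disable_freeu()  # restore the unmodified UNet behaviour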
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_sd_sd_strength(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 50 if sd_device == "cuda" else 20
sd_steps = 30
model = ModelManager(
name="sd1.5",
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
hf_access_token="",
sd_run_local=True,
disable_nsfw=True,
sd_cpu_textencoder=False,
)
@@ -205,6 +154,33 @@ def test_runway_sd_sd_strength(sd_device, strategy, sampler):
)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_norm_sd_model(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 30
model = ModelManager(
name="runwayml/stable-diffusion-v1-5",
device=torch.device(sd_device),
hf_access_token="",
disable_nsfw=True,
sd_cpu_textencoder=False,
)
cfg = get_config(strategy, prompt="face of a fox, sitting on a bench", sd_steps=sd_steps)
cfg.sd_sampler = sampler
assert_equal(
model,
cfg,
f"runway_{sd_device}_norm_sd_model.png",
img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
)
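# --- Illustrative aside (not part of this commit) ---
# "runwayml/stable-diffusion-v1-5" is the plain text-to-image checkpoint rather
# than the dedicated 9-channel inpainting model used above. A minimal loading
# sketch with diffusers' auto pipeline; that recent diffusers versions resolve
# this checkpoint to an inpainting-capable pipeline is an assumption.
import torch
from diffusers import AutoPipelineForInpainting

pipe = AutoPipelineForInpainting.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")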
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
@@ -212,19 +188,16 @@ def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 50 if sd_device == "cuda" else 20
sd_steps = 30
model = ModelManager(
name="sd1.5",
name="runwayml/stable-diffusion-inpainting",
device=torch.device(sd_device),
hf_access_token="",
sd_run_local=True,
disable_nsfw=True,
sd_cpu_textencoder=False,
cpu_offload=True,
)
cfg = get_config(
strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps, sd_scale=0.85
)
cfg = get_config(strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}"
@@ -239,28 +212,27 @@ def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
@pytest.mark.parametrize(
"local_model_path",
"name",
[
"/Users/cwq/data/models/sd-v1-5-inpainting.ckpt",
"/Users/cwq/data/models/sd-v1-5-inpainting.safetensors",
"sd-v1-5-inpainting.ckpt",
"sd-v1-5-inpainting.safetensors",
"v1-5-pruned-emaonly.safetensors",
],
)
def test_local_file_path(sd_device, sampler, local_model_path):
def test_local_file_path(sd_device, sampler, name):
if sd_device == "cuda" and not torch.cuda.is_available():
return
sd_steps = 1 if sd_device == "cpu" else 30
sd_steps = 30
model = ModelManager(
name="sd1.5",
name=name,
device=torch.device(sd_device),
hf_access_token="",
sd_run_local=True,
disable_nsfw=True,
sd_cpu_textencoder=False,
cpu_offload=True,
sd_local_model_path=local_model_path,
cpu_offload=False,
)
cfg = get_config(
HDStrategy.ORIGINAL,
@@ -269,7 +241,7 @@ def test_local_file_path(sd_device, sampler, local_model_path):
)
cfg.sd_sampler = sampler
name = f"device_{sd_device}_{sampler}_{Path(local_model_path).stem}"
name = f"device_{sd_device}_{sampler}_{name}"
assert_equal(
model,