SDXL: support cpu_text_encoder

This commit is contained in:
Qing
2024-01-10 13:34:11 +08:00
parent 05a15b2e1f
commit 38b6edacf0
5 changed files with 80 additions and 9 deletions

View File

@@ -8,6 +8,7 @@ class CPUTextEncoderWrapper(PreTrainedModel):
def __init__(self, text_encoder, torch_dtype):
super().__init__(text_encoder.config)
self.config = text_encoder.config
# CPU does not support float16
self.text_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True)
self.text_encoder = self.text_encoder.to(torch.float32, non_blocking=True)
self.torch_dtype = torch_dtype
@@ -16,11 +17,15 @@ class CPUTextEncoderWrapper(PreTrainedModel):
def __call__(self, x, **kwargs):
input_device = x.device
return [
self.text_encoder(x.to(self.text_encoder.device), **kwargs)[0]
.to(input_device)
.to(self.torch_dtype)
]
original_output = self.text_encoder(x.to(self.text_encoder.device), **kwargs)
for k, v in original_output.items():
if isinstance(v, tuple):
original_output[k] = [
v[i].to(input_device).to(self.torch_dtype) for i in range(len(v))
]
else:
original_output[k] = v.to(input_device).to(self.torch_dtype)
return original_output
@property
def dtype(self):

View File

@@ -9,6 +9,7 @@ from loguru import logger
from iopaint.schema import InpaintRequest, ModelType
from .base import DiffusionInpaintModel
from .helper.cpu_text_encoder import CPUTextEncoderWrapper
from .utils import handle_from_pretrained_exceptions, get_torch_dtype, enable_low_mem
@@ -37,11 +38,11 @@ class SDXL(DiffusionInpaintModel):
)
else:
model_kwargs = {**kwargs.get("pipe_components", {})}
if 'vae' not in model_kwargs:
if "vae" not in model_kwargs:
vae = AutoencoderKL.from_pretrained(
"madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype
)
model_kwargs['vae'] = vae
model_kwargs["vae"] = vae
self.model = handle_from_pretrained_exceptions(
StableDiffusionXLInpaintPipeline.from_pretrained,
pretrained_model_name_or_path=self.model_id_or_path,
@@ -58,7 +59,13 @@ class SDXL(DiffusionInpaintModel):
else:
self.model = self.model.to(device)
if kwargs["sd_cpu_textencoder"]:
logger.warning("Stable Diffusion XL not support run TextEncoder on CPU")
logger.info("Run Stable Diffusion TextEncoder on CPU")
self.model.text_encoder = CPUTextEncoderWrapper(
self.model.text_encoder, torch_dtype
)
self.model.text_encoder_2 = CPUTextEncoderWrapper(
self.model.text_encoder_2, torch_dtype
)
self.callback = kwargs.pop("callback", None)