Add back enable_attention_slicing for the MPS device

This commit is contained in:
Qing
2024-01-08 21:49:18 +08:00
parent 3b40671e33
commit 5da47ee035
7 changed files with 20 additions and 2 deletions

View File

@@ -34,6 +34,8 @@ class InstructPix2Pix(DiffusionInpaintModel):
self.model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
self.name, variant="fp16", torch_dtype=torch_dtype, **model_kwargs
)
if torch.backends.mps.is_available():
self.model.enable_attention_slicing()
if kwargs.get("cpu_offload", False) and use_gpu:
logger.info("Enable sequential cpu offload")