add back enable_attention_slicing for mps device

Qing
2024-01-08 21:49:18 +08:00
parent 3b40671e33
commit 5da47ee035
7 changed files with 20 additions and 2 deletions

@@ -96,6 +96,9 @@ class ControlNet(DiffusionInpaintModel):
             **model_kwargs,
         )
+        if torch.backends.mps.is_available():
+            self.model.enable_attention_slicing()
         if kwargs.get("cpu_offload", False) and use_gpu:
             logger.info("Enable sequential cpu offload")
             self.model.enable_sequential_cpu_offload(gpu_id=0)
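
For context, a minimal sketch of what the restored MPS branch does when applied to a standalone diffusers pipeline. The pipeline class and checkpoint name below are illustrative assumptions and are not taken from this commit; only the enable_attention_slicing() call on an MPS device mirrors the change above.

import torch
from diffusers import StableDiffusionInpaintPipeline

# Load an inpainting pipeline (checkpoint chosen only for illustration).
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float32,
)

if torch.backends.mps.is_available():
    # On Apple Silicon, move the pipeline to the MPS device and enable
    # attention slicing, which computes attention in smaller chunks to
    # reduce peak memory use at a small cost in speed.
    pipe = pipe.to("mps")
    pipe.enable_attention_slicing()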