Add back enable_attention_slicing for the MPS device

This commit is contained in:
Qing
2024-01-08 21:49:18 +08:00
parent 3b40671e33
commit 5da47ee035
7 changed files with 20 additions and 2 deletions

View File

@@ -32,6 +32,9 @@ class PaintByExample(DiffusionInpaintModel):
self.name, torch_dtype=torch_dtype, **model_kwargs
)
if torch.backends.mps.is_available():
self.model.enable_attention_slicing()
# TODO: gpu_id
if kwargs.get("cpu_offload", False) and use_gpu:
self.model.image_encoder = self.model.image_encoder.to(device)