add enable_low_mem
@@ -9,7 +9,7 @@ from loguru import logger
 from iopaint.schema import InpaintRequest, ModelType
 
 from .base import DiffusionInpaintModel
-from .utils import handle_from_pretrained_exceptions, get_torch_dtype
+from .utils import handle_from_pretrained_exceptions, get_torch_dtype, enable_low_mem
 
 
 class SDXL(DiffusionInpaintModel):
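The .utils helper itself is not part of this diff. A plausible sketch, assuming it centralizes the MPS attention-slicing check that the next hunk removes from SDXL and gates extra savings behind the new flag; the real body in iopaint/model/utils.py may differ:

import torch

def enable_low_mem(pipe, enable: bool):
    # Sketch only: inferred from the call site below and the comments this
    # commit deletes; not copied from iopaint/model/utils.py.
    if torch.backends.mps.is_available():
        # MPS: recommended when RAM < 64 GB
        # https://huggingface.co/docs/diffusers/optimization/mps
        # On CUDA, skip slicing if SDPA (PyTorch 2.0) or xFormers is active.
        pipe.enable_attention_slicing()
    if enable:
        # Assumption: tiled VAE decode cuts peak memory on large SDXL outputs
        # at some speed cost; enable_vae_tiling() is a standard diffusers API.
        pipe.enable_vae_tiling()

Moving this logic into a shared helper means every diffusion model gets the same MPS handling and low_mem behavior from one place, instead of repeating the check per model class.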
@@ -47,10 +47,7 @@ class SDXL(DiffusionInpaintModel):
                 variant="fp16",
             )
 
-        if torch.backends.mps.is_available():
-            # MPS: Recommended RAM < 64 GB https://huggingface.co/docs/diffusers/optimization/mps
-            # CUDA: Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch 2.0 or xFormers. https://huggingface.co/docs/diffusers/v0.25.0/en/api/pipelines/stable_diffusion/image_variation#diffusers.StableDiffusionImageVariationPipeline.enable_attention_slicing
-            self.model.enable_attention_slicing()
+        enable_low_mem(self.model, kwargs.get("low_mem", False))
 
         if kwargs.get("cpu_offload", False) and use_gpu:
             logger.info("Enable sequential cpu offload")
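The cpu_offload branch continues past the end of this hunk; the log message suggests it invokes diffusers' sequential offload. A minimal standalone sketch of that API, shown on a bare pipeline since the rest of the method is not in the diff (the pipeline id matches what IOPaint uses for SDXL inpainting; the exact call in this file is an assumption):

import torch
from diffusers import StableDiffusionXLInpaintPipeline

pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    torch_dtype=torch.float16,
    variant="fp16",
)
# Moves each submodule to the GPU only while it executes (requires
# accelerate); minimizes VRAM usage at a significant speed cost.
pipe.enable_sequential_cpu_offload()

Sequential offload and low_mem address different bottlenecks: offload trades speed for lower VRAM residency, while attention slicing and VAE tiling reduce peak allocations within a single forward pass.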