clean code: get_torch_dtype; mps use float16 by default
@@ -9,7 +9,7 @@ from loguru import logger
 from iopaint.schema import InpaintRequest, ModelType
 
 from .base import DiffusionInpaintModel
-from .utils import handle_from_pretrained_exceptions
+from .utils import handle_from_pretrained_exceptions, get_torch_dtype
 
 
 class SDXL(DiffusionInpaintModel):
@@ -22,10 +22,7 @@ class SDXL(DiffusionInpaintModel):
     def init_model(self, device: torch.device, **kwargs):
         from diffusers.pipelines import StableDiffusionXLInpaintPipeline
 
-        fp16 = not kwargs.get("no_half", False)
-
-        use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
-        torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
+        use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False))
 
         if self.model_info.model_type == ModelType.DIFFUSERS_SDXL:
             num_in_channels = 4
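For context, a minimal sketch of what a helper like get_torch_dtype could look like, consistent with the call site above and the commit message ("mps use float16 by default"). The signature and the (use_gpu, torch_dtype) return pair are taken from the diff; the body is an assumption and may differ from the actual implementation in .utils:

import torch


def get_torch_dtype(device, no_half: bool):
    # Hypothetical sketch, not the actual .utils implementation.
    # Returns (use_gpu, torch_dtype) as unpacked in init_model above.
    device = str(device)
    use_fp16 = not no_half
    use_gpu = device == "cuda"
    # Default to half precision on CUDA and Apple MPS unless no_half is set.
    if device in ["cuda", "mps"] and use_fp16:
        return use_gpu, torch.float16
    return use_gpu, torch.float32

With a helper like this, init_model no longer branches on the backend itself: the single call use_gpu, torch_dtype = get_torch_dtype(device, kwargs.get("no_half", False)) covers CPU, CUDA, and MPS.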