sdxl support cpu_text_encoder
@@ -8,6 +8,7 @@ class CPUTextEncoderWrapper(PreTrainedModel):
    def __init__(self, text_encoder, torch_dtype):
        super().__init__(text_encoder.config)
        self.config = text_encoder.config
        # cpu not support float16
        self.text_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True)
        self.text_encoder = self.text_encoder.to(torch.float32, non_blocking=True)
        self.torch_dtype = torch_dtype
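In this form the wrapper holds the text encoder on the CPU in float32, while torch_dtype records the precision the rest of the (typically GPU, half-precision) pipeline expects embeddings in. A minimal sketch of how such a wrapper might be attached to an SDXL pipeline follows; the diffusers pipeline class, checkpoint name and the direct attribute assignment are illustrative assumptions, not code from this commit:

import torch
from diffusers import StableDiffusionXLInpaintPipeline

# Assumed example setup: pipeline weights live on the GPU in float16.
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# SDXL has two text encoders; wrap both so their weights stay on the CPU
# in float32 while their outputs are cast back to float16 for the GPU pipeline.
pipe.text_encoder = CPUTextEncoderWrapper(pipe.text_encoder, torch.float16)
pipe.text_encoder_2 = CPUTextEncoderWrapper(pipe.text_encoder_2, torch.float16)

Keeping both CLIP encoders on the CPU trades some prompt-encoding latency for the VRAM they would otherwise occupy.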
@@ -16,11 +17,15 @@ class CPUTextEncoderWrapper(PreTrainedModel):

    def __call__(self, x, **kwargs):
        input_device = x.device
        return [
            self.text_encoder(x.to(self.text_encoder.device), **kwargs)[0]
            .to(input_device)
            .to(self.torch_dtype)
        ]
        original_output = self.text_encoder(x.to(self.text_encoder.device), **kwargs)
        for k, v in original_output.items():
            if isinstance(v, tuple):
                original_output[k] = [
                    v[i].to(input_device).to(self.torch_dtype) for i in range(len(v))
                ]
            else:
                original_output[k] = v.to(input_device).to(self.torch_dtype)
        return original_output

    @property
    def dtype(self):
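The old body returned a single-element list holding only output[0] of the text encoder, which suffices for SD 1.x/2.x prompt encoding; SDXL's prompt encoding, however, also reads the pooled embeddings and the hidden_states tuple (it takes the penultimate hidden state), so the replacement returns the encoder's full output with every field, tuple-valued ones included, moved back to the caller's device and the configured dtype. A rough consumption sketch, assuming SDXL's second text encoder loaded from the SDXL base repo; the variable names and indexing mirror diffusers' SDXL-style prompt encoding and are illustrative, not this project's code:

import torch
from transformers import CLIPTextModelWithProjection, CLIPTokenizer

# Illustrative sketch: load SDXL's second text encoder on its own and wrap it.
tokenizer = CLIPTokenizer.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="tokenizer_2"
)
encoder = CLIPTextModelWithProjection.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder_2"
)
wrapped = CPUTextEncoderWrapper(encoder, torch.float16)

tokens = tokenizer(
    "a photo of an astronaut",
    padding="max_length",
    max_length=tokenizer.model_max_length,
    truncation=True,
    return_tensors="pt",
).input_ids

out = wrapped(tokens, output_hidden_states=True)
pooled_prompt_embeds = out[0]          # pooled text embeddings (SDXL conditioning)
prompt_embeds = out.hidden_states[-2]  # penultimate hidden state, SDXL-style
# With the old single-element-list return, neither of these lookups was possible.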