## Key updates

- ✨ Updated all dependencies to their latest stable versions
- 📝 Added detailed project documentation and model recommendations
- 🔧 Configured the VSCode Cloud Studio preview feature
- 🐛 Fixed a PyTorch API deprecation warning

## Dependency updates

- diffusers: 0.27.2 → 0.35.2
- gradio: 4.21.0 → 5.46.0
- peft: 0.7.1 → 0.18.0
- Pillow: 9.5.0 → 11.3.0
- fastapi: 0.108.0 → 0.116.2

## New files

- CLAUDE.md - project architecture and development guide
- UPGRADE_NOTES.md - detailed upgrade notes
- .vscode/preview.yml - preview configuration
- .vscode/LAUNCH_GUIDE.md - launch guide
- .gitignore - updated ignore rules

## Code fixes

- Fixed the torch.cuda.amp.autocast() deprecation warning in iopaint/model/ldm.py (see the sketch below)

## Documentation updates

- README.md - added model recommendations and a usage guide
- Full project source (iopaint/)
- Web frontend source (web_app/)

🤖 Generated with Claude Code
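The diff for the deprecation fix isn't shown here, so this is only a minimal sketch of the usual migration; the actual change in iopaint/model/ldm.py may differ. Since PyTorch 2.4, the CUDA-specific context manager is deprecated in favor of the device-agnostic `torch.amp.autocast` with an explicit device type (the `model` and `x` below are placeholders):

```python
import torch
import torch.nn as nn

model = nn.Linear(8, 8).cuda()        # placeholder module
x = torch.randn(4, 8, device="cuda")  # placeholder input

# Before - deprecated since PyTorch 2.4, emits a FutureWarning:
#   with torch.cuda.amp.autocast():
#       out = model(x)

# After - device-agnostic API, device type passed explicitly:
with torch.amp.autocast("cuda"):
    out = model(x)  # forward pass runs in mixed precision inside the region
```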
Bundled source file from the project (Python · 93 lines · 2.9 KiB): the LDM Gaussian distribution helpers.
```python
import torch
import numpy as np


class AbstractDistribution:
    def sample(self):
        raise NotImplementedError()

    def mode(self):
        raise NotImplementedError()


class DiracDistribution(AbstractDistribution):
    # Point mass: sampling and the mode both return the fixed value.
    def __init__(self, value):
        self.value = value

    def sample(self):
        return self.value

    def mode(self):
        return self.value


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        # Split the channel dimension into mean and log-variance halves.
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp log-variance for numerical stability.
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # Deterministic mode: zero variance, so sample() returns the mean.
            self.var = self.std = torch.zeros_like(self.mean).to(
                device=self.parameters.device
            )

    def sample(self):
        # Reparameterization trick: mean + std * eps, with eps ~ N(0, I).
        x = self.mean + self.std * torch.randn(self.mean.shape).to(
            device=self.parameters.device
        )
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                # KL divergence to a standard normal N(0, I).
                return 0.5 * torch.sum(
                    torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                    dim=[1, 2, 3],
                )
            else:
                # KL divergence between two diagonal Gaussians.
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        # Negative log-likelihood of `sample` under this Gaussian.
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims,
        )

    def mode(self):
        return self.mean


def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    tensor = None
    for obj in (mean1, logvar1, mean2, logvar2):
        if isinstance(obj, torch.Tensor):
            tensor = obj
            break
    assert tensor is not None, "at least one argument must be a Tensor"

    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for torch.exp().
    logvar1, logvar2 = [
        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    ]

    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + torch.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
    )
```
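For orientation, here is a minimal usage sketch assuming the definitions above; the shapes and variable names are illustrative, not taken from the project. A VAE-style encoder emits a `parameters` tensor whose channel dimension stacks means and log-variances, and `DiagonalGaussianDistribution` turns it into latent samples plus a KL regularizer:

```python
import torch

# Illustrative shapes: batch 2, 8 channels = 4 mean + 4 logvar, 16x16 latents.
parameters = torch.randn(2, 8, 16, 16)
dist = DiagonalGaussianDistribution(parameters)

z = dist.sample()           # (2, 4, 16, 16), reparameterized draw
kl_loss = dist.kl().mean()  # KL to N(0, I), summed over C/H/W per sample

# Cross-check against normal_kl: with mean2 = logvar2 = 0 it reduces to
# 0.5 * (mean^2 + exp(logvar) - 1 - logvar), the same term kl() sums up.
per_element = normal_kl(dist.mean, dist.logvar,
                        torch.tensor(0.0), torch.tensor(0.0))
assert torch.allclose(per_element.sum(dim=[1, 2, 3]), dist.kl())
```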