forward_noising.py
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

from dataloader import load_transformed_dataset, show_tensor_image


def linear_beta_schedule(timesteps, start=0.0001, end=0.02):
    """Return a linearly spaced beta (noise variance) schedule from start to end."""
    return torch.linspace(start, end, timesteps)
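
# Illustrative note (added, not in the original file): with the defaults above and
# timesteps=300, the betas increase linearly from 1e-4 to 2e-2, so later timesteps
# add progressively more noise per step.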


def get_index_from_list(vals, time_step, x_shape):
    """
    Returns a specific index t of a passed list of values vals
    while considering the batch dimension.
    """
    batch_size = time_step.shape[0]
    out = vals.gather(-1, time_step.cpu())
    return out.reshape(batch_size, *((1,) * (len(x_shape) - 1))).to(time_step.device)
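
# Illustrative note (added): for vals of shape (T,), a time_step batch of shape (B,),
# and images of shape (B, C, H, W), get_index_from_list returns shape (B, 1, 1, 1),
# so each sample's coefficient broadcasts over its image, e.g. (hypothetical shapes):
#
#     coeffs = get_index_from_list(sqrt_alphas_cumprod, torch.tensor([0, 5]), (2, 3, 64, 64))
#     # coeffs.shape == torch.Size([2, 1, 1, 1])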


def forward_diffusion_sample(x_0, time_step, device="cpu"):
    """
    Takes an image and a timestep as input and
    returns the noisy version of it.
    """
    noise = torch.randn_like(x_0)
    sqrt_alphas_cumprod_t = get_index_from_list(sqrt_alphas_cumprod, time_step, x_0.shape)
    sqrt_one_minus_alphas_cumprod_t = get_index_from_list(sqrt_one_minus_alphas_cumprod, time_step, x_0.shape)
    # mean + variance
    return sqrt_alphas_cumprod_t.to(device) * x_0.to(device) + sqrt_one_minus_alphas_cumprod_t.to(device) * noise.to(
        device
    ), noise.to(device)
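
# Added note: the return statement above is the standard DDPM closed-form forward step,
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,   eps ~ N(0, I),
# i.e. q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I), where
# alpha_bar_t is alphas_cumprod[t] computed below.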


# Define beta schedule
T = 300
betas = linear_beta_schedule(timesteps=T)

# Pre-calculate the alpha- and beta-derived terms used by the closed-form forward process
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.0)
sqrt_recip_alphas = torch.sqrt(1.0 / alphas)
sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)
sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod)
posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
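
# Note (added): sqrt_recip_alphas and posterior_variance are not used in this
# forward-noising script; in a typical DDPM implementation they are consumed by the
# reverse (denoising) sampling step.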


if __name__ == "__main__":
    # Simulate forward diffusion on a single image and plot it every T / num_images steps
    dataloader = load_transformed_dataset()
    image = next(iter(dataloader))[0]

    plt.figure(figsize=(15, 15))
    plt.axis("off")
    num_images = 10
    stepsize = int(T / num_images)

    for idx in range(0, T, stepsize):
        time_step = torch.tensor([idx], dtype=torch.int64)
        plt.subplot(1, num_images + 1, int(idx / stepsize) + 1)
        img, noise = forward_diffusion_sample(image, time_step)
        show_tensor_image(img)

    plt.show()
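
# Minimal usage sketch (added; batch shapes below are hypothetical, function and
# variable names are from this file): forward_diffusion_sample also accepts a batch
# of images with one timestep per sample, since the gathered coefficients are
# reshaped to broadcast over the image dimensions.
#
#     x_0 = torch.randn(4, 3, 64, 64)                   # hypothetical batch of 4 RGB images
#     t = torch.randint(0, T, (4,), dtype=torch.int64)  # one timestep per sample
#     x_t, eps = forward_diffusion_sample(x_0, t)
#     assert x_t.shape == eps.shape == x_0.shape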