-
Notifications
You must be signed in to change notification settings - Fork 104
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #65 from atong01/cifar_10_FID_5
Provide a new Cifar10 example, FID computation and trained model weights
- Loading branch information
Showing
12 changed files
with
811 additions
and
521 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -171,3 +171,5 @@ slurm*.out | |
*.jpg | ||
|
||
notebooks/figures/ | ||
|
||
.DS_Store |
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,105 @@ | ||
# Inspired from https://github.com/w86763777/pytorch-ddpm/tree/master. | ||
|
||
# Authors: Kilian Fatras | ||
# Alexander Tong | ||
|
||
import os | ||
import sys | ||
|
||
import matplotlib.pyplot as plt | ||
import torch | ||
from absl import app, flags | ||
from cleanfid import fid | ||
from torchdiffeq import odeint | ||
from torchdyn.core import NeuralODE | ||
|
||
from torchcfm.models.unet.unet import UNetModelWrapper | ||
|
||
# Command-line configuration (absl). FLAGS(sys.argv) at the bottom parses the
# arguments eagerly so flag values are usable at module scope below.
FLAGS = flags.FLAGS

# UNet architecture
flags.DEFINE_integer("num_channel", 128, help="base channel of UNet")

# Evaluation options
flags.DEFINE_bool("parallel", False, help="multi gpu training")
# Fix: this flag is an *input* directory (where trained checkpoints live);
# the previous help text ("output_directory") was misleading.
flags.DEFINE_string("input_dir", "./results", help="directory containing the trained model checkpoints")
flags.DEFINE_string("model", "otcfm", help="flow matching model type")
flags.DEFINE_integer("integration_steps", 100, help="number of inference steps")
flags.DEFINE_string("integration_method", "dopri5", help="integration method to use")
flags.DEFINE_integer("step", 400000, help="training steps")
flags.DEFINE_integer("num_gen", 50000, help="number of samples to generate")
flags.DEFINE_float("tol", 1e-5, help="Integrator tolerance (absolute and relative)")
FLAGS(sys.argv)
|
||
|
||
# Pick the compute device: first CUDA GPU when one is visible, else CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")

# CIFAR-10 UNet: 3x32x32 inputs, base width set by --num_channel.
# NOTE(review): the remaining hyperparameters must match the training run
# whose checkpoint is loaded below — confirm against the training script.
unet_config = dict(
    dim=(3, 32, 32),
    num_res_blocks=2,
    num_channels=FLAGS.num_channel,
    channel_mult=[1, 2, 2, 2],
    num_heads=4,
    num_head_channels=64,
    attention_resolutions="16",
    dropout=0.1,
)
new_net = UNetModelWrapper(**unet_config).to(device)
|
||
|
||
# Load the EMA weights saved at the requested training step.
PATH = f"{FLAGS.input_dir}/{FLAGS.model}/cifar10_weights_step_{FLAGS.step}.pt"
print("path: ", PATH)
# map_location=device lets a checkpoint saved on GPU be loaded on a
# CPU-only machine (plain torch.load would raise on deserialization).
checkpoint = torch.load(PATH, map_location=device)
state_dict = checkpoint["ema_model"]
try:
    new_net.load_state_dict(state_dict)
except RuntimeError:
    # Checkpoints written from a torch.nn.DataParallel model prefix every
    # parameter key with "module." (7 characters); strip it and retry.
    from collections import OrderedDict

    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        new_state_dict[k[7:]] = v
    new_net.load_state_dict(new_state_dict)
# Inference only: disable dropout / switch norm layers to eval statistics.
new_net.eval()
|
||
|
||
# torchdyn's NeuralODE wrapper is only needed for the fixed-step euler
# solver; every other method goes through torchdiffeq.odeint in gen_1_img.
if FLAGS.integration_method == "euler":
    node = NeuralODE(new_net, solver="euler")
|
||
|
||
def gen_1_img(unused_latent):
    """Generate one batch of 500 CIFAR-10 samples by integrating the flow ODE.

    `unused_latent` exists only to satisfy cleanfid's generator callback
    signature; the starting noise is drawn internally. Returns a uint8
    image tensor of shape (500, 3, 32, 32).
    """
    method = FLAGS.integration_method
    print("Use method: ", method)
    with torch.no_grad():
        noise = torch.randn(500, 3, 32, 32).to(device)
        if method == "euler":
            # Fixed-step Euler (torchdyn): integration_steps uniform steps on [0, 1].
            times = torch.linspace(0, 1, FLAGS.integration_steps + 1).to(device)
            traj = node.trajectory(noise, t_span=times)
        else:
            # Adaptive solver (torchdiffeq): only the two endpoints are requested.
            times = torch.linspace(0, 1, 2).to(device)
            traj = odeint(
                new_net, noise, times, rtol=FLAGS.tol, atol=FLAGS.tol, method=method
            )
        # Keep the terminal state and map [-1, 1] floats onto uint8 pixel values.
        final_state = traj[-1, :]
        return (final_state * 127.5 + 128).clip(0, 255).to(torch.uint8)
|
||
|
||
print("Start computing FID")
# cleanfid pulls real-image statistics for CIFAR-10 (train split, 32x32,
# legacy_tensorflow mode) and calls gen_1_img repeatedly for fake batches.
fid_kwargs = dict(
    gen=gen_1_img,
    dataset_name="cifar10",
    batch_size=500,
    dataset_res=32,
    num_gen=FLAGS.num_gen,
    dataset_split="train",
    mode="legacy_tensorflow",
)
score = fid.compute_fid(**fid_kwargs)
print()
print("FID has been computed")
print()
print("FID: ", score)
Oops, something went wrong.