-
Notifications
You must be signed in to change notification settings - Fork 29
/
fit_scaling.py
187 lines (162 loc) · 4.89 KB
/
fit_scaling.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
import numpy as np
import os

# Silence low-severity TensorFlow / AutoGraph log spam.
# Must be set before any TF-dependent module is imported.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
os.environ["AUTOGRAPH_VERBOSITY"] = "1"

import logging

# Set up logger
# NOTE(review): this grabs the *root* logger and discards any handlers other
# code may have installed — acceptable for a standalone script, but would
# clobber a host application's logging config if this module were imported.
logger = logging.getLogger()
logger.handlers = []
ch = logging.StreamHandler()
formatter = logging.Formatter(
    fmt="%(asctime)s (%(levelname)s): %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel("INFO")

import torch

# Project-local (GemNet) model and training utilities.
from gemnet.model.gemnet import GemNet
from gemnet.training.trainer import Trainer
from gemnet.training.metrics import Metrics
from gemnet.training.data_container import DataContainer
from gemnet.training.data_provider import DataProvider

import yaml
import ast
from tqdm import trange

from gemnet.model.utils import write_json
from gemnet.model.layers.scaling import AutomaticFit
def run(
    nBatches,
    num_spherical,
    num_radial,
    num_blocks,
    emb_size_atom,
    emb_size_edge,
    emb_size_trip,
    emb_size_quad,
    emb_size_rbf,
    emb_size_cbf,
    emb_size_sbf,
    num_before_skip,
    num_after_skip,
    num_concat,
    num_atom,
    emb_size_bil_quad,
    emb_size_bil_trip,
    triplets_only,
    forces_coupled,
    direct_forces,
    mve,
    cutoff,
    int_cutoff,
    envelope_exponent,
    extensive,
    output_init,
    scale_file,
    data_seed,
    val_dataset,
    tfseed,
    batch_size,
    comment,
    overwrite_mode=1,
    **kwargs,
):
    """
    Run this function to automatically fit all scaling factors in the network.

    Builds a GemNet model in "fitting" mode, then repeatedly evaluates it on
    batches drawn from the validation dataset. After each pass of ``nBatches``
    batches, the currently active scaling variable (tracked globally by
    ``AutomaticFit``) is fitted; this repeats until ``AutomaticFit`` reports
    that every variable has been fitted. Fitted factors are written to
    ``scale_file`` as JSON.

    Parameters (selection)
    ----------------------
    nBatches: int
        Number of validation batches evaluated per scaling variable.
    scale_file: str
        Path of the JSON file the fitted scaling factors are written to.
    overwrite_mode: int or str, default 1
        Behavior when ``scale_file`` already exists: "1" overwrites the file,
        "2" only fits variables not yet present, anything else aborts.
    val_dataset:
        Path/identifier of the validation dataset passed to ``DataContainer``.
    **kwargs:
        Ignored; allows passing a full config dict without filtering keys.

    The remaining parameters are GemNet architecture/hyperparameter settings
    forwarded to the ``GemNet`` constructor (see that class for details).

    NOTE(review): ``direct_forces`` is accepted but intentionally NOT
    forwarded — the model is always built with ``direct_forces=True`` because
    it evaluates faster and fitting only needs forward passes.
    """
    torch.manual_seed(tfseed)

    def init(scale_file):
        # initialize file
        # same for all models
        preset = {"comment": comment}
        write_json(scale_file, preset)

    if os.path.exists(scale_file):
        print(f"Already found existing file: {scale_file}")
        # str() comparison lets overwrite_mode be passed as int or string.
        if str(overwrite_mode) == "1":
            print("Selected: Overwrite the current file.")
            init(scale_file)
        elif str(overwrite_mode) == "2":
            print("Selected: Only fit unfitted variables.")
        else:
            print("Selected: Exit script")
            return
    else:
        init(scale_file)

    # Switch the scaling layers into fitting mode BEFORE constructing the
    # model, so the GemNet constructor registers its scaling variables with
    # AutomaticFit's fitting queue.
    AutomaticFit.set2fitmode()

    logging.info("Initialize model")
    model = GemNet(
        num_spherical=num_spherical,
        num_radial=num_radial,
        num_blocks=num_blocks,
        emb_size_atom=emb_size_atom,
        emb_size_edge=emb_size_edge,
        emb_size_trip=emb_size_trip,
        emb_size_quad=emb_size_quad,
        emb_size_rbf=emb_size_rbf,
        emb_size_cbf=emb_size_cbf,
        emb_size_sbf=emb_size_sbf,
        num_before_skip=num_before_skip,
        num_after_skip=num_after_skip,
        num_concat=num_concat,
        num_atom=num_atom,
        emb_size_bil_quad=emb_size_bil_quad,
        emb_size_bil_trip=emb_size_bil_trip,
        num_targets=2 if mve else 1,  # mean-variance estimation doubles outputs
        cutoff=cutoff,
        int_cutoff=int_cutoff,
        envelope_exponent=envelope_exponent,
        forces_coupled=forces_coupled,
        direct_forces=True,  # evaluates faster
        triplets_only=triplets_only,
        activation="swish",
        extensive=extensive,
        output_init=output_init,
        scale_file=scale_file,
    )

    logging.info("Load dataset")
    # Initialize validation datasets
    val_data_container = DataContainer(
        val_dataset, cutoff=cutoff, int_cutoff=int_cutoff, triplets_only=triplets_only
    )
    # 0 training samples; take nBatches * batch_size samples for validation.
    val_data_provider = DataProvider(
        val_data_container,
        0,
        nBatches * batch_size,
        batch_size,
        seed=data_seed,
        shuffle=True,
        random_split=True,
    )
    # Initialize datasets
    dataset_iter = val_data_provider.get_dataset("val")

    logging.info("Prepare training")
    # Initialize trainer
    trainer = Trainer(model, mve=mve)
    metrics = Metrics("train", trainer.tracked_metrics, None)

    # Training loop
    logging.info("Start training")
    while not AutomaticFit.fitting_completed():
        # Accumulate statistics for the active variable over nBatches batches.
        for step in trange(0, nBatches, desc="Training..."):
            trainer.test_on_batch(dataset_iter, metrics)

        current_var = AutomaticFit.activeVar
        if current_var is not None:
            current_var.fit()  # fit current variable
        else:
            print("Found no variable to fit. Something went wrong!")

    logging.info(f"\n Fitting done. Results saved to: {scale_file}")
if __name__ == "__main__":
    config_path = "config.yaml"
    # Use config_path so the filename is defined in exactly one place
    # (the original re-hardcoded the 'config.yaml' literal in open()).
    with open(config_path, "r") as c:
        config = yaml.safe_load(c)

    # For strings that yaml doesn't parse (e.g. None): re-interpret them as
    # Python literals where possible, leaving genuine strings untouched.
    for key, val in config.items():
        if isinstance(val, str):
            try:
                config[key] = ast.literal_eval(val)
            except (ValueError, SyntaxError):
                pass

    nBatches = 25  ## number of batches to use to fit a single variable

    # Script-level overrides of the loaded configuration.
    config["scale_file"] = "scaling_factors.json"
    config["batch_size"] = 32
    config["direct_forces"] = True
    config["triplets_only"] = False

    run(nBatches, **config)