-
Notifications
You must be signed in to change notification settings - Fork 39
/
train.py
447 lines (379 loc) · 16.5 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
import os
import gc
import random
import pprint
from six.moves import range
from markdown2 import markdown
from time import gmtime, strftime
from timeit import default_timer as timer
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import options
from dataloader import VisDialDataset
from torch.utils.data import DataLoader
from eval_utils.rank_answerer import rankABot
from eval_utils.rank_questioner import rankQBot
from utils import utilities as utils
from utils.visualize import VisdomVisualize
#---------------------------------------------------------------------------------------
# Setup
#---------------------------------------------------------------------------------------

# Read the command line options
params = options.readCommandLine()

# Seed rng for reproducibility
random.seed(params['randomSeed'])
torch.manual_seed(params['randomSeed'])
if params['useGPU']:
    torch.cuda.manual_seed_all(params['randomSeed'])

# Setup dataloader
splits = ['train', 'val', 'test']
dataset = VisDialDataset(params, splits)

# Params to transfer from dataset
transfer = ['vocabSize', 'numOptions', 'numRounds']
for key in transfer:
    if hasattr(dataset, key):
        params[key] = getattr(dataset, key)

# Create save path and checkpoints folder.
# Deliberately plain os.mkdir (no exist_ok) for savePath: fail loudly rather
# than silently overwrite checkpoints from a previous run with the same name.
os.makedirs('checkpoints', exist_ok=True)
os.mkdir(params['savePath'])

# Loading Modules
parameters = []
aBot = None
qBot = None

# Loading A-Bot (needed for supervised A-Bot training and full RL fine-tuning)
if params['trainMode'] in ['sl-abot', 'rl-full-QAf']:
    aBot, loadedParams, optim_state = utils.loadModel(params, 'abot')
    for key in loadedParams:
        params[key] = loadedParams[key]
    parameters.extend(aBot.parameters())

# Loading Q-Bot (needed for supervised Q-Bot training and full RL fine-tuning)
if params['trainMode'] in ['sl-qbot', 'rl-full-QAf']:
    qBot, loadedParams, optim_state = utils.loadModel(params, 'qbot')
    for key in loadedParams:
        params[key] = loadedParams[key]
    if params['trainMode'] == 'rl-full-QAf' and params['freezeQFeatNet']:
        qBot.freezeFeatNet()
    # Filtering parameters which require a gradient update (frozen feature
    # net parameters are excluded from the optimizer)
    parameters.extend(filter(lambda p: p.requires_grad, qBot.parameters()))

# Setup pytorch dataloader
dataset.split = 'train'
dataloader = DataLoader(
    dataset,
    batch_size=params['batchSize'],
    shuffle=False,
    num_workers=params['numWorkers'],
    drop_last=True,
    collate_fn=dataset.collate_fn,
    pin_memory=False)

# Initializing visdom environment for plotting data
viz = VisdomVisualize(
    enable=bool(params['enableVisdom']),
    env_name=params['visdomEnv'],
    server=params['visdomServer'],
    port=params['visdomServerPort'])
pprint.pprint(params)
viz.addText(pprint.pformat(params, indent=4))

# Setup optimizer
if params['continue']:
    # Continuing from a loaded checkpoint restores the following
    startIterID = params['ckpt_iterid'] + 1  # Iteration ID
    lRate = params['ckpt_lRate']  # Learning rate
    print("Continuing training from iterId[%d]" % startIterID)
else:
    # Beginning training normally, without any checkpoint
    lRate = params['learningRate']
    startIterID = 0

optimizer = optim.Adam(parameters, lr=lRate)
if params['continue']:  # Restoring optimizer state
    print("Restoring optimizer state dict from checkpoint")
    optimizer.load_state_dict(optim_state)
runningLoss = None

# Per-element MSE (no reduction) — used both for the feature-regression loss
# and the per-example RL reward. NOTE(review): `reduce=False` is the old
# (pre-1.0) PyTorch spelling; on modern PyTorch this should be
# `reduction='none'` — the rest of this file uses 0.3-era idioms
# (Variable, .data[0]), so the legacy argument is kept for compatibility.
mse_criterion = nn.MSELoss(reduce=False)

numIterPerEpoch = dataset.numDataPoints['train'] // params['batchSize']
print('\n%d iter per epoch.' % numIterPerEpoch)

# Curriculum for RL fine-tuning: rlRound is the first dialog round that uses
# the RL objective; earlier rounds use supervised objectives.
if params['useCurriculum']:
    if params['continue']:
        # Resume annealing from where the checkpointed run left off.
        # NOTE(review): the hard-coded 9 assumes 10 dialog rounds — confirm
        # against params['numRounds'] if the dataset ever changes.
        rlRound = max(0, 9 - (startIterID // numIterPerEpoch))
    else:
        rlRound = params['numRounds'] - 1
else:
    rlRound = 0
#---------------------------------------------------------------------------------------
# Training
#---------------------------------------------------------------------------------------
def batch_iter(dataloader, numEpochs=None):
    """Yield (epochId, idx, batch) triples over repeated passes of dataloader.

    The dataloader is re-iterated once per epoch, so a PyTorch DataLoader
    gets a fresh pass (and fresh worker shuffling, if enabled) each time.

    Args:
        dataloader: any iterable of batches.
        numEpochs: number of full passes to make. Defaults to
            params['numEpochs'] to stay backward-compatible with the
            original single-argument call site.

    Yields:
        (epochId, idx, batch) where epochId is the 0-based pass number and
        idx is the 0-based batch index within that pass.
    """
    if numEpochs is None:
        numEpochs = params['numEpochs']
    for epochId in range(numEpochs):
        for idx, batch in enumerate(dataloader):
            yield epochId, idx, batch
start_t = timer()
# Main training loop: one iteration per mini-batch, across all epochs.
# Supports three modes (params['trainMode']): 'sl-abot', 'sl-qbot', and
# 'rl-full-QAf' (RL fine-tuning of both bots with curriculum annealing).
for epochId, idx, batch in batch_iter(dataloader):
    # Keeping track of iterId and epoch
    iterId = startIterID + idx + (epochId * numIterPerEpoch)
    epoch = iterId // numIterPerEpoch
    gc.collect()

    # Moving current batch to GPU, if available
    if dataset.useGPU:
        batch = {
            key: v.cuda() if hasattr(v, 'cuda') else v
            for key, v in batch.items()
        }

    # Wrap batch tensors; none of the inputs need gradients.
    # NOTE(review): Variable is the PyTorch 0.3-era API kept throughout
    # this file; on >=0.4 tensors can be used directly.
    image = Variable(batch['img_feat'], requires_grad=False)
    caption = Variable(batch['cap'], requires_grad=False)
    captionLens = Variable(batch['cap_len'], requires_grad=False)
    gtQuestions = Variable(batch['ques'], requires_grad=False)
    gtQuesLens = Variable(batch['ques_len'], requires_grad=False)
    gtAnswers = Variable(batch['ans'], requires_grad=False)
    gtAnsLens = Variable(batch['ans_len'], requires_grad=False)
    # NOTE: this shadows the imported `options` module, which is only used
    # before the loop (options.readCommandLine), so no harm — but beware.
    options = Variable(batch['opt'], requires_grad=False)
    optionLens = Variable(batch['opt_len'], requires_grad=False)
    gtAnsId = Variable(batch['ans_id'], requires_grad=False)

    # Initializing optimizer and losses
    optimizer.zero_grad()
    loss = 0
    qBotLoss = 0
    aBotLoss = 0
    rlLoss = 0
    featLoss = 0
    qBotRLLoss = 0
    aBotRLLoss = 0
    predFeatures = None
    initialGuess = None
    numRounds = params['numRounds']
    # numRounds = 1 # Override for debugging lesser rounds of dialog

    # Setting training modes for both bots and observing captions, images where needed
    if aBot:
        aBot.train(), aBot.reset()
        aBot.observe(-1, image=image, caption=caption, captionLens=captionLens)
    if qBot:
        qBot.train(), qBot.reset()
        qBot.observe(-1, caption=caption, captionLens=captionLens)

    # Q-Bot image feature regression ('guessing') only occurs if Q-Bot is present.
    # The round-0 guess (before any dialog) seeds prevFeatDist, the baseline
    # for the per-round RL reward below.
    if params['trainMode'] in ['sl-qbot', 'rl-full-QAf']:
        initialGuess = qBot.predictImage()
        prevFeatDist = mse_criterion(initialGuess, image)
        featLoss += torch.mean(prevFeatDist)
        prevFeatDist = torch.mean(prevFeatDist, 1)

    # Iterating over dialog rounds
    for round in range(numRounds):
        '''
        Loop over rounds of dialog. Currently three modes of training are
        supported:

            sl-abot :
                Supervised pre-training of A-Bot model using cross
                entropy loss with ground truth answers

            sl-qbot :
                Supervised pre-training of Q-Bot model using cross
                entropy loss with ground truth questions for the
                dialog model and mean squared error loss for image
                feature regression (i.e. image prediction)

            rl-full-QAf :
                RL-finetuning of A-Bot and Q-Bot in a cooperative
                setting where the common reward is the difference
                in mean squared error between the current and
                previous round of Q-Bot's image prediction.

                Annealing: In order to ease in the RL objective,
                fine-tuning starts with first N-1 rounds of SL
                objective and last round of RL objective - the
                number of RL rounds are increased by 1 after
                every epoch until only RL objective is used for
                all rounds of dialog.
        '''
        # Tracking components which require a forward pass
        # A-Bot dialog model
        forwardABot = (params['trainMode'] == 'sl-abot'
                       or (params['trainMode'] == 'rl-full-QAf'
                           and round < rlRound))
        # Q-Bot dialog model
        forwardQBot = (params['trainMode'] == 'sl-qbot'
                       or (params['trainMode'] == 'rl-full-QAf'
                           and round < rlRound))
        # Q-Bot feature regression network
        forwardFeatNet = (forwardQBot or params['trainMode'] == 'rl-full-QAf')

        # Answerer Forward Pass
        if forwardABot:
            # Observe Ground Truth (GT) question
            aBot.observe(
                round,
                ques=gtQuestions[:, round],
                quesLens=gtQuesLens[:, round])
            # Observe GT answer for teacher forcing
            aBot.observe(
                round,
                ans=gtAnswers[:, round],
                ansLens=gtAnsLens[:, round])
            ansLogProbs = aBot.forward()
            # Cross Entropy (CE) Loss for Ground Truth Answers
            aBotLoss += utils.maskedNll(ansLogProbs,
                                        gtAnswers[:, round].contiguous())

        # Questioner Forward Pass (dialog model)
        if forwardQBot:
            # Observe GT question for teacher forcing
            qBot.observe(
                round,
                ques=gtQuestions[:, round],
                quesLens=gtQuesLens[:, round])
            quesLogProbs = qBot.forward()
            # Cross Entropy (CE) Loss for Ground Truth Questions
            qBotLoss += utils.maskedNll(quesLogProbs,
                                        gtQuestions[:, round].contiguous())
            # Observe GT answer for updating dialog history
            qBot.observe(
                round,
                ans=gtAnswers[:, round],
                ansLens=gtAnsLens[:, round])

        # In order to stay true to the original implementation, the feature
        # regression network makes predictions before dialog begins and for
        # the first 9 rounds of dialog. This can be set to 10 if needed.
        MAX_FEAT_ROUNDS = 9

        # Questioner feature regression network forward pass
        if forwardFeatNet and round < MAX_FEAT_ROUNDS:
            # Make an image prediction after each round
            predFeatures = qBot.predictImage()
            featDist = mse_criterion(predFeatures, image)
            featDist = torch.mean(featDist)
            featLoss += featDist

        # A-Bot and Q-Bot interacting in RL rounds
        if params['trainMode'] == 'rl-full-QAf' and round >= rlRound:
            # Run one round of conversation (sampled, not teacher-forced)
            questions, quesLens = qBot.forwardDecode(inference='sample')
            qBot.observe(round, ques=questions, quesLens=quesLens)
            aBot.observe(round, ques=questions, quesLens=quesLens)
            answers, ansLens = aBot.forwardDecode(inference='sample')
            aBot.observe(round, ans=answers, ansLens=ansLens)
            qBot.observe(round, ans=answers, ansLens=ansLens)

            # Q-Bot makes a guess at the end of each round
            predFeatures = qBot.predictImage()

            # Computing reward based on Q-Bot's predicted image: positive
            # when this round's guess got closer to the true image features
            # than the previous round's guess.
            featDist = mse_criterion(predFeatures, image)
            featDist = torch.mean(featDist, 1)
            reward = prevFeatDist.detach() - featDist
            prevFeatDist = featDist

            qBotRLLoss = qBot.reinforce(reward)
            if params['rlAbotReward']:
                # A-Bot shares the cooperative reward
                aBotRLLoss = aBot.reinforce(reward)
            rlLoss += torch.mean(aBotRLLoss)
            rlLoss += torch.mean(qBotRLLoss)

    # Loss coefficients
    rlCoeff = 1
    rlLoss = rlLoss * rlCoeff
    featLoss = featLoss * params['featLossCoeff']
    # Averaging over rounds
    qBotLoss = (params['CELossCoeff'] * qBotLoss) / numRounds
    aBotLoss = (params['CELossCoeff'] * aBotLoss) / numRounds
    featLoss = featLoss / numRounds  #/ (numRounds+1)
    rlLoss = rlLoss / numRounds
    # Total loss
    loss = qBotLoss + aBotLoss + rlLoss + featLoss
    loss.backward()
    optimizer.step()

    # Tracking a running (exponential moving) average of loss.
    # NOTE(review): .data[0] is the 0.3-era scalar accessor; use .item()
    # on modern PyTorch.
    if runningLoss is None:
        runningLoss = loss.data[0]
    else:
        runningLoss = 0.95 * runningLoss + 0.05 * loss.data[0]

    # Decay learning rate until it reaches the configured floor
    if lRate > params['minLRate']:
        for gId, group in enumerate(optimizer.param_groups):
            optimizer.param_groups[gId]['lr'] *= params['lrDecayRate']
        lRate *= params['lrDecayRate']
        if iterId % 10 == 0:  # Plot learning rate till saturation
            viz.linePlot(iterId, lRate, 'learning rate', 'learning rate')

    # RL Annealing: Every epoch after the first, decrease rlRound so one
    # more (earlier) dialog round switches from SL to the RL objective
    if iterId % numIterPerEpoch == 0 and iterId > 0:
        if params['trainMode'] == 'rl-full-QAf':
            rlRound = max(0, rlRound - 1)
            print('Using rl starting at round {}'.format(rlRound))

    # Print every now and then
    if iterId % 10 == 0:
        end_t = timer()  # Keeping track of iteration(s) time
        curEpoch = float(iterId) / numIterPerEpoch
        timeStamp = strftime('%a %d %b %y %X', gmtime())
        printFormat = '[%s][Ep: %.2f][Iter: %d][Time: %5.2fs][Loss: %.3g]'
        printFormat += '[lr: %.3g]'
        printInfo = [
            timeStamp, curEpoch, iterId, end_t - start_t, loss.data[0], lRate
        ]
        start_t = end_t
        print(printFormat % tuple(printInfo))

        # Update line plots (a loss component is a Variable only if its
        # branch actually ran this iteration; otherwise it is still int 0)
        if isinstance(aBotLoss, Variable):
            viz.linePlot(iterId, aBotLoss.data[0], 'aBotLoss', 'train CE')
        if isinstance(qBotLoss, Variable):
            viz.linePlot(iterId, qBotLoss.data[0], 'qBotLoss', 'train CE')
        if isinstance(rlLoss, Variable):
            viz.linePlot(iterId, rlLoss.data[0], 'rlLoss', 'train')
        if isinstance(featLoss, Variable):
            viz.linePlot(iterId, featLoss.data[0], 'featLoss',
                         'train FeatureRegressionLoss')
        viz.linePlot(iterId, loss.data[0], 'loss', 'train loss')
        viz.linePlot(iterId, runningLoss, 'loss', 'running train loss')

    # Evaluate every epoch (the `// 1` is a leftover knob for evaluating
    # k times per epoch; change the divisor to evaluate more often)
    if iterId % (numIterPerEpoch // 1) == 0:
        # Keeping track of epochID (shadows the loop's epochId on purpose
        # in the original; preserved as-is)
        curEpoch = float(iterId) / numIterPerEpoch
        epochId = (1.0 * iterId / numIterPerEpoch) + 1

        # Set eval mode
        if aBot:
            aBot.eval()
        if qBot:
            qBot.eval()

        if params['enableVisdom']:
            # Printing visdom environment name in terminal
            print("Currently on visdom env [%s]" % (params['visdomEnv']))

        # Mapping iteration count to epoch count
        viz.linePlot(iterId, epochId, 'iter x epoch', 'epochs')

        print('Performing validation...')
        if aBot and 'ques' in batch:
            print("aBot Validation:")
            # NOTE: A-Bot validation is slow, so adjust exampleLimit as needed
            rankMetrics = rankABot(
                aBot,
                dataset,
                'val',
                scoringFunction=utils.maskedNll,
                exampleLimit=25 * params['batchSize'])

            for metric, value in rankMetrics.items():
                viz.linePlot(
                    epochId, value, 'val - aBot', metric, xlabel='Epochs')

            if 'logProbsMean' in rankMetrics:
                logProbsMean = params['CELossCoeff'] * rankMetrics[
                    'logProbsMean']
                viz.linePlot(iterId, logProbsMean, 'aBotLoss', 'val CE')

                if params['trainMode'] == 'sl-abot':
                    valLoss = logProbsMean
                    viz.linePlot(iterId, valLoss, 'loss', 'val loss')

        if qBot:
            print("qBot Validation:")
            rankMetrics, roundMetrics = rankQBot(qBot, dataset, 'val')

            for metric, value in rankMetrics.items():
                viz.linePlot(
                    epochId, value, 'val - qBot', metric, xlabel='Epochs')

            viz.linePlot(iterId, epochId, 'iter x epoch', 'epochs')

            if 'logProbsMean' in rankMetrics:
                logProbsMean = params['CELossCoeff'] * rankMetrics[
                    'logProbsMean']
                viz.linePlot(iterId, logProbsMean, 'qBotLoss', 'val CE')

            if 'featLossMean' in rankMetrics:
                featLossMean = params['featLossCoeff'] * (
                    rankMetrics['featLossMean'])
                viz.linePlot(iterId, featLossMean, 'featLoss',
                             'val FeatureRegressionLoss')

            if 'logProbsMean' in rankMetrics and 'featLossMean' in rankMetrics:
                if params['trainMode'] == 'sl-qbot':
                    valLoss = logProbsMean + featLossMean
                    viz.linePlot(iterId, valLoss, 'loss', 'val loss')

    # Save the model after every epoch (ckpt_iterid/ckpt_lRate are stored in
    # params so a '--continue' run can restore iteration count and lr)
    if iterId % numIterPerEpoch == 0:
        params['ckpt_iterid'] = iterId
        params['ckpt_lRate'] = lRate

        if aBot:
            saveFile = os.path.join(params['savePath'],
                                    'abot_ep_%d.vd' % curEpoch)
            print('Saving model: ' + saveFile)
            utils.saveModel(aBot, optimizer, saveFile, params)
        if qBot:
            saveFile = os.path.join(params['savePath'],
                                    'qbot_ep_%d.vd' % curEpoch)
            print('Saving model: ' + saveFile)
            utils.saveModel(qBot, optimizer, saveFile, params)