forked from bbrattoli/JigsawPuzzlePytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
JigsawTrain.py
207 lines (168 loc) · 7.62 KB
/
JigsawTrain.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 14 12:16:31 2017
@author: Biagio Brattoli
"""
import os, sys, numpy as np
import argparse
from time import time
from tqdm import tqdm
import tensorflow # needs to call tensorflow before torch, otherwise crush
sys.path.append('Utils')
from logger import Logger
import torch
import torch.nn as nn
from torch.autograd import Variable
sys.path.append('Dataset')
from JigsawNetwork import Network
from TrainingUtils import adjust_learning_rate, compute_accuracy
# Command-line interface for the Jigsaw pretext-task trainer.
parser = argparse.ArgumentParser(description='Train JigsawPuzzleSolver on Imagenet')
parser.add_argument('data', type=str, help='Path to Imagenet folder')
parser.add_argument('--model', default=None, type=str, help='Path to pretrained model')
# "classes" is the number of tile permutations, not ImageNet classes.
parser.add_argument('--classes', default=1000, type=int, help='Number of permutation to use')
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--epochs', default=70, type=int, help='number of total epochs for training')
parser.add_argument('--iter_start', default=0, type=int, help='Starting iteration count')
parser.add_argument('--batch', default=256, type=int, help='batch size')
parser.add_argument('--checkpoint', default='checkpoints/', type=str, help='checkpoint folder')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate for SGD optimizer')
parser.add_argument('--cores', default=0, type=int, help='number of CPU core for loading')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set, No training')
args = parser.parse_args()

#from ImageDataLoader import DataLoader
# NOTE(review): imported after argument parsing rather than at the top of the
# file — presumably deliberate in the original; kept as-is.
from JigsawImageLoader import DataLoader
def main():
    """Train (or evaluate) the Jigsaw puzzle network on ImageNet.

    All configuration comes from the module-level ``args`` namespace.
    Training resumes from the newest ``*.pth*`` file in ``args.checkpoint``
    when one exists; otherwise the optional pretrained ``args.model`` is
    loaded.  Scalar and image summaries are written through ``Logger``.
    """
    if args.gpu is not None:
        print(('Using GPU %d' % args.gpu))
        # Pin this process to the requested physical device.
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    else:
        print('CPU mode')
    print('Process number: %d' % (os.getpid()))

    ## DataLoader initialize ILSVRC2012_train_processed
    # Prefer the pre-resized 255x255 copies of the datasets when they exist.
    trainpath = args.data + '/ILSVRC2012_img_train'
    if os.path.exists(trainpath + '_255x255'):
        trainpath += '_255x255'
    train_data = DataLoader(trainpath, args.data + '/ilsvrc12_train.txt',
                            classes=args.classes)
    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                               batch_size=args.batch,
                                               shuffle=True,
                                               num_workers=args.cores)

    valpath = args.data + '/ILSVRC2012_img_val'
    if os.path.exists(valpath + '_255x255'):
        valpath += '_255x255'
    val_data = DataLoader(valpath, args.data + '/ilsvrc12_val.txt',
                          classes=args.classes)
    val_loader = torch.utils.data.DataLoader(dataset=val_data,
                                             batch_size=args.batch,
                                             shuffle=True,
                                             num_workers=args.cores)
    # Float under Python 3 division; only ever used inside int(...) below.
    iter_per_epoch = train_data.N / args.batch
    print('Images: train %d, validation %d' % (train_data.N, val_data.N))

    # Network initialize
    net = Network(args.classes)
    if args.gpu is not None:
        net.cuda()

    ############## Load from checkpoint if exists, otherwise from model ###############
    # (The original duplicated the load-from-pretrained fallback in two
    # branches; collapsed here into one elif with identical behavior.)
    ckp_files = []
    if os.path.exists(args.checkpoint):
        ckp_files = sorted(f for f in os.listdir(args.checkpoint) if 'pth' in f)
    if ckp_files:
        ckp = ckp_files[-1]  # lexicographically newest checkpoint
        net.load_state_dict(torch.load(args.checkpoint + '/' + ckp))
        # Checkpoint names look like jps_<epoch>_<step>.pth.tar; recover <step>.
        args.iter_start = int(ckp.split(".")[-3].split("_")[-1])
        print('Starting from: ', ckp)
    elif args.model is not None:
        net.load(args.model)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)

    logger = Logger(args.checkpoint + '/train')
    logger_test = Logger(args.checkpoint + '/test')

    ############## TESTING ###############
    if args.evaluate:
        test(net, criterion, None, val_loader, 0)
        return

    ############## TRAINING ###############
    print(('Start training: lr %f, batch size %d, classes %d' % (args.lr, args.batch, args.classes)))
    print(('Checkpoint: ' + args.checkpoint))

    # Train the Model
    batch_time, net_time = [], []  # sliding windows (last 100 values) of timing stats
    steps = args.iter_start
    for epoch in range(int(args.iter_start / iter_per_epoch), args.epochs):
        # Periodic validation every 10 epochs (skipping epoch 0).
        if epoch % 10 == 0 and epoch > 0:
            test(net, criterion, logger_test, val_loader, steps)
        lr = adjust_learning_rate(optimizer, epoch, init_lr=args.lr, step=20, decay=0.1)

        end = time()
        for i, (images, labels, original) in enumerate(train_loader):
            batch_time.append(time() - end)
            if len(batch_time) > 100:
                del batch_time[0]

            images = Variable(images)
            labels = Variable(labels)
            if args.gpu is not None:
                images = images.cuda()
                labels = labels.cuda()

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            t = time()
            outputs = net(images)
            net_time.append(time() - t)
            if len(net_time) > 100:
                del net_time[0]

            prec1, prec5 = compute_accuracy(outputs.cpu().data, labels.cpu().data, topk=(1, 5))
            acc = prec1[0]

            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            loss = float(loss.cpu().data.numpy())

            # Console + summary logging every 20 steps (the original tested the
            # same condition twice back-to-back; merged with unchanged ordering).
            if steps % 20 == 0:
                print(('[%2d/%2d] %5d) [batch load % 2.3fsec, net %1.2fsec], LR %.5f, Loss: % 1.3f, Accuracy % 2.2f%%' % (
                    epoch + 1, args.epochs, steps,
                    np.mean(batch_time), np.mean(net_time),
                    lr, loss, acc)))
                logger.scalar_summary('accuracy', acc, steps)
                logger.scalar_summary('loss', loss, steps)

                # Log the 9 tiles of the first sample in the batch,
                # min-max normalized per channel for display.
                original = [im[0] for im in original]
                imgs = np.zeros([9, 75, 75, 3])
                for ti, img in enumerate(original):
                    img = img.numpy()
                    imgs[ti] = np.stack([(im - im.min()) / (im.max() - im.min())
                                         for im in img], axis=2)
                logger.image_summary('input', imgs, steps)

            steps += 1

            if steps % 1000 == 0:
                filename = '%s/jps_%03i_%06d.pth.tar' % (args.checkpoint, epoch, steps)
                net.save(filename)
                print('Saved: ' + args.checkpoint)

            end = time()

        # Touching <checkpoint>/stop.txt stops training gracefully.
        if os.path.exists(args.checkpoint + '/stop.txt'):
            # break without using CTRL+C
            break
def test(net, criterion, logger, val_loader, steps):
    """Evaluate ``net`` on the validation set and log mean top-1 accuracy.

    Args:
        net: Jigsaw Network; switched to eval mode for the pass and
            restored to train mode before returning.
        criterion: unused here (validation only measures accuracy);
            kept so the caller's signature remains unchanged.
        logger: Logger for scalar summaries, or None to skip logging.
        val_loader: DataLoader yielding (images, labels, original) tuples.
        steps: global step count used as the x-axis of the summary.
    """
    print('Evaluating network.......')
    accuracy = []
    net.eval()
    # Gradients are never needed during evaluation: no_grad() skips
    # building the autograd graph and reduces memory use.
    with torch.no_grad():
        for images, labels, _ in val_loader:
            images = Variable(images)
            if args.gpu is not None:
                images = images.cuda()

            # Forward only; accuracy is computed on CPU tensors.
            outputs = net(images)
            outputs = outputs.cpu().data

            prec1, _ = compute_accuracy(outputs, labels, topk=(1, 5))
            accuracy.append(prec1[0])

    if logger is not None:
        logger.scalar_summary('accuracy', np.mean(accuracy), steps)
    print('TESTING: %d), Accuracy %.2f%%' % (steps, np.mean(accuracy)))
    net.train()
# Entry point: run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()