-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain.py
More file actions
186 lines (150 loc) · 6.06 KB
/
train.py
File metadata and controls
186 lines (150 loc) · 6.06 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
"""
Train all the baseline networks.
"""
import os
#os.environ['CUDA_VISIBLE_DEVICES'] ='5'
###### Collect all garbage #######
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import keras
sys.stderr = stderr
###################################
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger
from models.models import ResearchModels
from utils.data import DataSet
import time
import os.path
def train(data_type, seq_length, model, saved_model=None, target=None,
          class_limit=None, image_shape=None,
          load_to_memory=False, batch_size=32, nb_epoch=100,
          features=None, features_length=None):
    """Train a single baseline network.

    Args:
        data_type: 'images' (raw frames) or 'features' (pre-extracted CNN features).
        seq_length: number of frames per sample sequence.
        model: model name understood by ResearchModels (e.g. 'lstm', 'mlp', 'conv_3d').
        saved_model: optional weights file to resume from, or None.
        target: label set to train on (e.g. 'views', 'action', 'situation').
        class_limit: optional cap on the number of classes, or None for all.
        image_shape: (H, W, C) for image models; None selects the feature DataSet.
        load_to_memory: if True load every sequence into RAM, else stream via generators.
        batch_size: training batch size.
        nb_epoch: maximum epochs (EarlyStopping may end training sooner).
        features: feature-extractor name ('IV3', 'vgg16', ...) for feature runs.
        features_length: dimensionality of one feature vector.
    """
    # Directory / filename prefixes differ between the two run kinds:
    # feature runs additionally encode the feature-extractor name and target.
    if data_type == 'images':
        checkpoints_dir = 'data/Weights_' + model
        ckpt_name = model + '-' + data_type + '_' + target
        logs_dir = os.path.join('data', model + '_logs')
        csv_name = model + '_' + target
    else:
        checkpoints_dir = 'data/Weights_' + features + '_' + model
        ckpt_name = features + '_' + model + '-' + data_type + '_' + target
        logs_dir = os.path.join('data', features + '_' + model + '_' + target + '_logs')
        csv_name = model

    # Create output directories up front. The original code only created
    # checkpoints_dir; CSVLogger fails if the logs directory is missing.
    for directory in (checkpoints_dir, logs_dir):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Helper: save the model whenever validation loss improves.
    checkpointer = ModelCheckpoint(
        filepath=os.path.join(
            checkpoints_dir, ckpt_name + '.{epoch:03d}-{val_loss:.3f}.hdf5'),
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard.
    tb = TensorBoard(log_dir=os.path.join(logs_dir, model))

    # Helper: save per-epoch metrics to a timestamped CSV log.
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join(
        logs_dir, csv_name + '-' + 'training-' + str(timestamp) + '.log'))

    # Helper: stop when validation loss stops improving.
    early_stopper = EarlyStopping(patience=15)

    # Get the data and process it. image_shape=None means feature sequences.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit, target=target,
            features=features)
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit, target=target,
            image_shape=image_shape)

    # Multiply by 0.7 to approximate how much of data.data is the train split.
    steps_per_epoch = (len(data.data) * 0.7) // batch_size

    if load_to_memory:
        # Load every sequence into RAM.
        X, y = data.get_all_sequences_in_memory('Train', data_type)
        X_test, y_test = data.get_all_sequences_in_memory('test', data_type)
    else:
        # Stream batches from disk.
        generator = data.frame_generator(batch_size, 'Train', data_type)
        val_generator = data.frame_generator(batch_size, 'test', data_type)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model,
                        image_shape=image_shape, batch_size=batch_size,
                        features_length=features_length)

    # Fit!
    if load_to_memory:
        rm.model.fit(
            X,
            y,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            epochs=nb_epoch)
    else:
        rm.model.fit_generator(
            generator=generator,
            steps_per_epoch=steps_per_epoch,
            epochs=nb_epoch,
            verbose=1,
            callbacks=[tb, early_stopper, csv_logger, checkpointer],
            validation_data=val_generator,
            validation_steps=40,
            workers=1)
def main():
    """These are the main training settings. Set each before running
    this file."""
    # model can be one of lstm, lrcn, mlp, conv_3d, c3d, r3d_18, r3d_34,
    # r3d_50, r3d_101, r3d_152, DenseResNet3D, densenet_3d
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # 6,2
    target = 'situation'  # views | action | situation
    model = 'mlp'
    # Options: resnet50, IV3, vgg16, vgg19, IR2, resnet152, densenet169,
    # xception, efficientnetb7
    features = 'IV3'
    saved_model = None  # None or weights file
    class_limit = None
    seq_length = 16
    batch_size = 128
    nb_epoch = 1000

    # Output dimensionality of each supported feature extractor.
    # NOTE: the original if-chain first put densenet169 at 2048 and then
    # overrode it to 1664; the effective value (1664) is kept here.
    feature_lengths = {
        'IV3': 2048,
        'resnet50': 2048,
        'resnet152': 2048,
        'xception': 2048,
        'vgg16': 25088,
        'vgg19': 25088,
        'IR2': 1536,
        'efficientnetb7': 2560,
        'densenet169': 1664,
    }
    # Previously an unknown features value caused a NameError at the train()
    # call; fail early with a clear message instead.
    if features not in feature_lengths:
        raise ValueError("Invalid features. See train.py for options.")
    features_length = feature_lengths[features]

    # Choose images or features and image shape based on network.
    if model in ['conv_3d']:
        data_type = 'images'
        image_shape = (100, 100, 3)
    elif model in ['lstm', 'mlp', 'capsnet', 'gru']:
        data_type = 'features'
        image_shape = None
    elif model in ['i3d', 'r3d_18', 'r3d_34', 'r3d_50', 'r3d_101', 'r3d_152']:
        data_type = 'images'
        image_shape = (224, 224, 3)
    elif model in ['c3d', 'lrcn']:
        data_type = 'images'
        image_shape = (150, 150, 3)
    elif model in ['densenetresnet_3d', 'densenet_3d']:
        data_type = 'images'
        image_shape = (112, 112, 3)
    else:
        raise ValueError("Invalid model. See train.py for options.")

    # The original if/else assigned False in both branches; sequences are
    # always streamed from disk via generators rather than loaded into RAM.
    load_to_memory = False

    print('Model = ', model, ' Features = ', features)
    train(data_type, seq_length, model, saved_model=saved_model, target=target,
          class_limit=class_limit, image_shape=image_shape,
          load_to_memory=load_to_memory, batch_size=batch_size,
          nb_epoch=nb_epoch, features=features, features_length=features_length)
# Script entry point: run training only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()