-
Notifications
You must be signed in to change notification settings - Fork 0
/
trainer.py
51 lines (49 loc) · 2.08 KB
/
trainer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import os
from pytorch_lightning import Trainer
from pytorch_lightning import loggers as pl_loggers
from callbacks import build_callbacks
def build_epoch_trainer(config):
    """
    Build a Trainer that runs for a fixed number of epochs.

    The log directory layout is:
        <log-output>/<dataset>/<model-type>/<model-name>/<tag>
    """
    # Directory hierarchy mirrors the dataset / model-type structure in the config.
    save_dir = os.path.join(config.LOG_OUTPUT, config.DATA.DATASET, config.MODEL.TYPE)
    tb_logger = pl_loggers.TensorBoardLogger(
        save_dir=save_dir,
        name=config.MODEL.NAME,
        version=config.TAG,
    )
    trainer_kwargs = dict(
        max_epochs=config.TRAIN.EPOCHS,
        gpus=config.TRAIN.ACCELERATOR.GPUS,
        accumulate_grad_batches=config.TRAIN.ACCUMULATION_STEPS,
        callbacks=build_callbacks(config),
        precision=config.TRAIN.PRECISION,
        accelerator=config.TRAIN.ACCELERATOR.MODE,
        logger=[tb_logger],
        fast_dev_run=config.DEBUG_MODE,  # quick single-batch run when debugging
    )
    return Trainer(**trainer_kwargs)
def build_finetune_trainer(config):
    """
    Build a Trainer that runs for a fixed number of steps/iterations.

    The log directory layout is:
        <log-output>/<dataset>/<model-type>/<model-name>/<tag>
    """
    # Directory hierarchy mirrors the dataset / model-type structure in the config.
    save_dir = os.path.join(config.LOG_OUTPUT, config.DATA.DATASET, config.MODEL.TYPE)
    tb_logger = pl_loggers.TensorBoardLogger(
        save_dir=save_dir,
        name=config.MODEL.NAME,
        version=config.TAG,
    )
    trainer_kwargs = dict(
        max_steps=config.TRAIN.STEPS,
        gpus=config.TRAIN.ACCELERATOR.GPUS,
        accumulate_grad_batches=config.TRAIN.ACCUMULATION_STEPS,
        callbacks=build_callbacks(config),
        precision=config.TRAIN.PRECISION,
        accelerator=config.TRAIN.ACCELERATOR.MODE,
        logger=[tb_logger],
        fast_dev_run=config.DEBUG_MODE,  # quick single-batch run when debugging
        # Step-based training validates on an interval rather than per epoch.
        val_check_interval=config.TRAIN.VAL_CHECK_INTERVAL,
    )
    return Trainer(**trainer_kwargs)