train.py
import os
import pickle
from typing import List, Literal, Union

import numpy as np
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from pytorch_lightning.loggers import CSVLogger

from stimulus import TaskDataset
from networks import RNN, LSTM
from tasks import SoftmaxCrossEntropy, ActorCritic
from constants import TASKS, train_params, model_params

class Train:
    def __init__(self, network: Union[Literal["LSTM", "RNN"], torch.nn.Module] = "LSTM"):
        # create a dummy loader in order to extract the stimulus properties
        dummy_loader = TaskDataset(tasks=None, n_batches=0, RL=False)
        self.logger = CSVLogger("logs", name="train_sweep", version=0)
        # if network is "LSTM" or "RNN", initialize the corresponding built-in
        # network; otherwise, use the nn.Module instance provided by the caller
        if network in ["LSTM", "RNN"]:
            add_model_params(dummy_loader)
            self.network = LSTM(**model_params) if network == "LSTM" else RNN(**model_params)
        else:
            self.network = network
        self.define_optimizer()
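
    # Note: a custom torch.nn.Module can be passed in place of "LSTM"/"RNN";
    # in that case model_params is left untouched and the module should already
    # be configured with the stimulus dimensions. A hypothetical example
    # (MyCustomRNN is not part of this repo):
    #
    #   trainer = Train(network=MyCustomRNN(**model_params))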

    def define_optimizer(self, sparse_plasticity: bool = False):
        """Create the network optimizer. The learning rate of network parameters not involved
        in top-down context will be set to 0 if sparse_plasticity is True."""
        non_top_down_lr = 0.0 if sparse_plasticity else train_params["learning_rate"]
        context_params = [p for n, p in self.network.named_parameters() if "context" in n or "classifier" in n]
        non_context_params = [p for n, p in self.network.named_parameters() if "context" not in n and "classifier" not in n]
        # the per-group lr/weight_decay settings below override the defaults passed to AdamW
        optim = torch.optim.AdamW(
            [
                {"params": context_params, "lr": train_params["learning_rate"], "weight_decay": 0.0},
                {
                    "params": non_context_params,
                    "lr": non_top_down_lr,
                    "weight_decay": train_params["weight_decay"],
                },
            ],
            lr=train_params["learning_rate"],
            weight_decay=0.0,
        )
        # ensure non-top-down parameters are not trainable if sparse_plasticity is True
        if sparse_plasticity:
            for n, p in self.network.named_parameters():
                if "context" not in n and "classifier" not in n:
                    p.requires_grad = False
        self.task = SoftmaxCrossEntropy(
            network=self.network,
            optim_config=optim,
            n_logits=model_params["n_output"],
            l2_penalty=train_params["l2_penalty"],
        )
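
    # A usage sketch (an assumption, not from the original file): after
    # pretraining, the optimizer can be redefined so that only the top-down
    # context and classifier parameters stay plastic, e.g.:
    #
    #   t = Train(network="LSTM")                  # `t` is a hypothetical name
    #   t.define_optimizer(sparse_plasticity=True)
    #
    # Every other parameter then gets lr=0 and requires_grad=False, freezing it.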

    def create_task_loaders(self, task_set: List, n_batches_per_epoch: int = 50):
        """Create the training and validation stimulus data loaders"""
        self.task_set = task_set
        train = TaskDataset(tasks=task_set, n_batches=n_batches_per_epoch, RL=train_params["RL"])
        val = TaskDataset(tasks=task_set, n_batches=10, RL=train_params["RL"])
        self.train_loader = DataLoader(
            train,
            batch_size=train_params["batch_size"],
            num_workers=train_params["num_workers"],
        )
        self.val_loader = DataLoader(
            val,
            batch_size=train_params["batch_size"],
            num_workers=train_params["num_workers"],
        )

    def train_model(self, n_epochs: int, patience: int = 3):
        """Train the model and return the trained network"""
        # stop early once validation task accuracy plateaus or reaches 99.9%
        early_stopping = pl.callbacks.early_stopping.EarlyStopping(
            monitor='task_acc',
            min_delta=0.001,
            patience=patience,
            verbose=False,
            mode='max',
            stopping_threshold=0.999,
        )
        # create PyTorch Lightning Trainer
        trainer = pl.Trainer(
            accelerator='gpu',
            max_epochs=n_epochs,
            gradient_clip_val=0.5,
            precision=32,
            devices=1,
            logger=self.logger,
            callbacks=[early_stopping],
        )
        trainer.fit(self.task, self.train_loader, self.val_loader)
        save_task_info(trainer, self.task_set)
        return self.task.network


def add_model_params(task_loader, verbose=False):
    """Add the model params needed from the stimulus class"""
    stim_prop = task_loader.get_stim_properties()
    if verbose:
        print("Stimulus and network properties")
        for k, v in stim_prop.items():
            if isinstance(v, (int, float)):
                print(f"{k}: {v}")
    # add network params derived from the stimulus properties
    for k in ["n_stimulus", "n_context", "n_output"]:
        model_params[k] = stim_prop[k]


def save_task_info(trainer, tasks):
    """Save the task set metadata alongside the training logs"""
    if not os.path.exists(trainer.logger.log_dir):
        os.makedirs(trainer.logger.log_dir)
    tasks_fn = os.path.join(trainer.logger.log_dir, "tasks.pkl")
    print(f"Saving task info to {tasks_fn}")
    tasks_info = {"index": [], "name": [], "target_offset": [], "stim_offset": []}
    for n, t in enumerate(tasks):
        tasks_info["index"].append(n)
        tasks_info["name"].append(t[0])
        tasks_info["target_offset"].append(t[1])
        # tasks may omit the stimulus offset; use -1 as a sentinel
        if len(t) == 3:
            tasks_info["stim_offset"].append(t[2])
        else:
            tasks_info["stim_offset"].append(-1)
    with open(tasks_fn, "wb") as f:
        pickle.dump(tasks_info, f)
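

# A minimal end-to-end usage sketch (not part of the original file). It assumes
# that TASKS in constants.py is a list of (name, target_offset[, stim_offset])
# tuples, as expected by save_task_info, and that a single GPU is available for
# the pl.Trainer configured in train_model.
if __name__ == "__main__":
    trainer = Train(network="LSTM")
    trainer.create_task_loaders(task_set=TASKS, n_batches_per_epoch=50)
    trained_network = trainer.train_model(n_epochs=20, patience=3)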