Benchmarks
Benchmarks and Datasets Code Examples
"All MNIST" Example
```python
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

from avalanche.benchmarks.classic import PermutedMNIST, RotatedMNIST, SplitMNIST
from avalanche.evaluation.metrics import (accuracy_metrics, loss_metrics,
                                          timing_metrics, cpu_usage_metrics,
                                          ExperienceForgetting)
from avalanche.logging import InteractiveLogger
from avalanche.models import SimpleMLP
from avalanche.training.plugins import EvaluationPlugin
from avalanche.training.strategies import Naive

# Device config
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Model
model = SimpleMLP(num_classes=10)

# Here we show all the MNIST variations we offer in the "classic" benchmarks
# benchmark = PermutedMNIST(n_experiences=5, seed=1)
benchmark = RotatedMNIST(
    n_experiences=5, rotations_list=[30, 60, 90, 120, 150], seed=1)
# benchmark = SplitMNIST(n_experiences=5, seed=1)

# Choose some metrics and an evaluation method
interactive_logger = InteractiveLogger()

eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=False, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=False, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True, epoch_running=True),
    cpu_usage_metrics(experience=True),
    ExperienceForgetting(),
    loggers=[interactive_logger])

# Then we can extract the parallel train and test streams
train_stream = benchmark.train_stream
test_stream = benchmark.test_stream

# Prepare for training & testing
optimizer = SGD(model.parameters(), lr=0.001, momentum=0.9)
criterion = CrossEntropyLoss()

# Continual learning strategy
cl_strategy = Naive(
    model, optimizer, criterion, train_mb_size=32, train_epochs=2,
    eval_mb_size=32, device=device, evaluator=eval_plugin)

# Train and evaluation loop
results = []
for train_task in train_stream:
    print("Current Classes: ", train_task.classes_in_this_experience)
    cl_strategy.train(train_task, num_workers=4)
    results.append(cl_strategy.eval(test_stream))
```
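
Before (or instead of) training, the same stream API can be used to peek at what each experience contains. The following is a minimal sketch, assuming the `benchmark` object created above; `current_experience`, `classes_in_this_experience`, and `dataset` are standard attributes of an experience.

```python
# Illustrative sketch: inspect each training experience of the
# benchmark built above, without running any training.
for experience in benchmark.train_stream:
    print(f"Experience {experience.current_experience}")
    print(f"  Classes: {experience.classes_in_this_experience}")
    print(f"  Samples: {len(experience.dataset)}")
```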
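Each call to `cl_strategy.eval(test_stream)` returns a dictionary mapping metric-name strings to values, so `results` ends up holding one dictionary per training experience. The exact key format varies across Avalanche versions, so a safe way to explore the collected metrics is simply to print them; this sketch assumes the `results` list populated above.

```python
# Illustrative sketch: dump every metric collected during evaluation.
# Metric-name keys vary across Avalanche versions, so we just print them.
for step, metrics in enumerate(results):
    print(f"--- Evaluation after training experience {step} ---")
    for name, value in metrics.items():
        print(f"{name}: {value}")
```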

๐Ÿค Run it on Google Colab

You can run this chapter and play with it on Google Colaboratory:
Notebook currently unavailable.