While getting open-source code to run, I have always found it hard to come across a really suitable code framework or template, which is no small challenge for beginners in particular. In the course of running code myself, I found a template on GitHub that helps structure the training process. This post mainly documents how to use it and credits its source.
All of the code was run in a Jupyter notebook. Python, numpy, matplotlib, torch, and so on need to be set up in advance.
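Before running anything, it can help to sanity-check the environment in a notebook cell first (a minimal sketch; the exact versions printed will depend on your own install):

import torch
import numpy
import matplotlib
print(torch.__version__)          # confirm torch imports and show its version
print(torch.cuda.is_available())  # True only if torch can see a usable GPU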
nntools source code
"""
Source: UCSD ECE285: Machine Learning and Image Processing
Neural Network tools developed for UCSD ECE285 MLIP.
Copyright 2019. Charles Deledalle, Sneha Gupta, Anurag Paul, Inderjot Saggu.
"""
import os
import time
import torch
from torch import nn
import torch.utils.data as td
from abc import ABC, abstractmethod
class NeuralNetwork(nn.Module, ABC):
"""An abstract class representing a neural network.
    All other neural networks should subclass it. All subclasses should override
``forward``, that makes a prediction for its input argument, and
``criterion``, that evaluates the fit between a prediction and a desired
output. This class inherits from ``nn.Module`` and overloads the method
``named_parameters`` such that only parameters that require gradient
computation are returned. Unlike ``nn.Module``, it also provides a property
``device`` that returns the current device in which the network is stored
(assuming all network parameters are stored on the same device).
"""
def __init__(self):
super(NeuralNetwork, self).__init__()
@property
def device(self):
        # It is important that this is a property and not an attribute, as the
        # device may change at any time if the user does ``net.to(newdevice)``.
return next(self.parameters()).device
def named_parameters(self, recurse=True):
        nps = nn.Module.named_parameters(self, recurse=recurse)
for name, param in nps:
if not param.requires_grad:
continue
yield name, param
@abstractmethod
def forward(self, x):
pass
    @abstractmethod  # methods marked with @abstractmethod must be implemented by every subclass; the base class must inherit from ABC
def criterion(self, y, d):
pass
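# --- Illustrative sketch, not part of the original nntools source. ---
# A NeuralNetwork subclass only has to override ``forward`` and ``criterion``.
# The class name, layer size, and choice of cross-entropy below are
# assumptions for demonstration only.
class TinyClassifier(NeuralNetwork):
    def __init__(self, in_features=784, num_classes=10):
        super(TinyClassifier, self).__init__()
        self.fc = nn.Linear(in_features, num_classes)
        self.cross_entropy = nn.CrossEntropyLoss()
    def forward(self, x):
        # flatten each sample and produce one score per class
        return self.fc(x.view(x.size(0), -1))
    def criterion(self, y, d):
        # evaluate the fit between the prediction y and the desired output d
        return self.cross_entropy(y, d)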
class StatsManager(object):
"""
A class meant to track the loss during a neural network learning experiment.
Though not abstract, this class is meant to be overloaded to compute and
track statistics relevant for a given task. For instance, you may want to
overload its methods to keep track of the accuracy, top-5 accuracy,
    intersection over union, PSNR, etc., when training a classifier, an object
detector, a denoiser, etc.
"""
def __init__(self):
self.init()
def __repr__(self):
"""Pretty printer showing the class name of the stats manager. This is
what is displayed when doing ``print(stats_manager)``.
"""
return self.__class__.__name__
def init(self):
"""Initialize/Reset all the statistics"""
self.running_loss = 0
self.number_update = 0
def accumulate(self, loss, x=None, y=None, d=None):
"""Accumulate statistics
Though the arguments x, y, d are not used in this implementation, they
are meant to be used by any subclasses. For instance they can be used
to compute and track top-5 accuracy when training a classifier.
Arguments:
loss (float): the loss obtained during the last update.
x (Tensor): the input of the network during the last update.
        y (Tensor): the prediction made by the network during the last update.
d (Tensor): the desired output for the last update.
"""
self.running_loss += loss
self.number_update += 1
def summarize(self):
"""Compute statistics based on accumulated ones"""
return self.running_loss / self.number_update
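# --- Illustrative sketch, not part of the original nntools source. ---
# Overloading StatsManager to also track accuracy, as the docstring above
# suggests. It assumes a classification setting where ``y`` holds class
# scores and ``d`` holds integer labels; the class name is hypothetical.
class ClassificationStatsManager(StatsManager):
    def init(self):
        super(ClassificationStatsManager, self).init()
        self.running_accuracy = 0
    def accumulate(self, loss, x=None, y=None, d=None):
        super(ClassificationStatsManager, self).accumulate(loss, x, y, d)
        if y is not None and d is not None:
            _, predicted = torch.max(y, 1)  # predicted class per sample
            self.running_accuracy += (predicted == d).float().mean().item()
    def summarize(self):
        # report the average accuracy alongside the average loss
        loss = super(ClassificationStatsManager, self).summarize()
        return {'loss': loss,
                'accuracy': self.running_accuracy / self.number_update}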
class Experiment(object):
"""
A class meant to run a neural network learning experiment.
After being instantiated, the experiment can be run using the method
``run``. At each epoch, a checkpoint file will be created in the directory
    ``output_dir``. Two files will be present: ``checkpoint.pth.tar``, a binary
    file containing the state of the experiment, and ``config.txt``, an ASCII
    file describing the setting of the experiment. If ``output_dir`` does not
exist, it will be created. Otherwise, the last checkpoint will be loaded,
except if the setting does not match (in that case an exception will be
raised). The loaded experiment will be continued from where it stopped when
calling the method ``run``. The experiment can be evaluated using the method
``evaluate``.
Attributes/Properties:
epoch (integer): the number of performed epochs.
history (list): a list of statistics for each epoch.
If ``perform_validation_during_training``=False, each element of the
list is a statistic returned by the stats manager on training data.
If ``perform_validation_during_training``=True, each element of the
list is a pair. The first element of the pair is a statistic
returned by the stats manager evaluated on the training set. The
second element of the pair is a statistic returned by the stats
manager evaluated on the validation set.
Arguments:
        net (NeuralNetwork): a neural network.
train_set (Dataset): a training data set.
        val_set (Dataset): a validation data set.
        optimizer (Optimizer): the optimizer used to update the network parameters.
        stats_manager (StatsManager): a stats manager.
output_dir (string, optional): path where to load/save checkpoints. If
None, ``output_dir`` is set to "experiment_TIMESTAMP" where
TIMESTAMP is the current time stamp as returned by ``time.time()``.
(default: None)
batch_size (integer, optional): the size of the mini batches.
(default: 16)
perform_validation_during_training (boolean, optional): if False,
statistics at each epoch are computed on the training set only.
If True, statistics at each epoch are computed on both the training
set and the validation set. (default: False)
"""
def __init__(self, net, train_set, val_set, optimizer, stats_manager,
output_dir=None, batch_size=16, perform_validation_during_training=False):
# Define data loaders
train_loader = td.DataLoader(train_set, batch_size=batch_size, shuffle=True,
drop_last=True, pin_memory=True)
val_loader = td.DataLoader(val_set, batch_size=batch_size, shuffle=False,
drop_last=True, pin_memory=True)
# Initialize history
history = []
# Define checkpoint paths
if output_dir is None:
output_dir = 'experiment_{}'.format(time.time())
os.makedirs(output_dir, exist_ok=True)
checkpoint_path = os.path.join(output_dir, "checkpoint.pth.tar")
config_path = os.path.join(output_dir, "config.txt")
# Transfer all local arguments/variables into attributes
        locs = {k: v for k, v in locals().items() if k != 'self'}
self.__dict__.update(locs)
# Load checkpoint and check compatibility
if os.path.isfile(config_path):
with open(config_path, 'r') as f:
if f.read()[:-1] != repr(self):
raise ValueError(
"Cannot create this experiment: "
"I found a checkpoint conflicting with the current setting.")
self.load()
else:
self.save()
@property
def epoch(self):
"""Returns the number of epochs already performed."""
return len(self.history)
def setting(self):
"""Returns the setting of the experiment."""
return {
'Net': self.net,
'TrainSet': self.train_set,
'ValSet': self.val_set,
'Optimizer': self.optimizer,
'StatsManager': self.stats_manager,
'BatchSize': self.batch_size,
'PerformValidationDuringTraining': self.perform_validation_during_training}
def __repr__(self):
"""Pretty printer showing the setting of the experiment. This is what
is displayed when doing ``print(experiment)``. This is also what is
saved in the ``config.txt`` file.
"""
string = ''
for key, val in self.setting().items():
string += '{}({})\n'.format(key, val)
return string
def state_dict(self):
"""Returns the current state of the experiment."""
return {
'Net': self.net.state_dict(),
'Optimizer': self.optimizer.state_dict(),
'History': self.history}
def load_state_dict(self, checkpoint):
"""Loads the experiment from the input checkpoint."""
self.net.load_state_dict(checkpoint['Net'])
self.optimizer.load_state_dict(checkpoint['Optimizer'])
self.history = checkpoint['History']
# The following loops are used to fix a bug that was
# discussed here: https://github.com/pytorch/pytorch/issues/2830
# (it is supposed to be fixed in recent PyTorch version)
for state in self.optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(self.net.device)
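    # Illustrative usage sketch, not part of the original nntools source.
    # Once the class is complete, a typical construction looks like the
    # following (``net``, ``train_set``, ``val_set``, ``optimizer`` are
    # assumed to be defined by the user):
    #   exp = Experiment(net, train_set, val_set, optimizer,
    #                    ClassificationStatsManager(), output_dir="checkpoints",
    #                    perform_validation_during_training=True)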