Please find below a patch that allows training to be interrupted and resumed. The goal of the patch is as follows: if the parameters are initialized very small, then restricting the number of epochs provides a way to apply regularisation — essentially, one can stop the growth early, since the parameters grow from very small values toward their intended size. After an interruption, the validation-set error can be computed to check whether the error is increasing or decreasing.
Index: neurolab/core.py
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- neurolab/core.py (revision )
+++ neurolab/core.py (revision )
@@ -252,7 +252,7 @@
"""
- def __init__(self, Train, epochs=500, goal=0.01, show=100, **kwargs):
+ def __init__(self, Train, epochs=500, interrupt=None, goal=0.01, show=100, **kwargs):
"""
:Parameters:
Train: Train instance
@@ -274,6 +274,7 @@
self.defaults['goal'] = goal
self.defaults['show'] = show
self.defaults['epochs'] = epochs
+ self.defaults['interrupt'] = interrupt
self.defaults['train'] = kwargs
if Train.__init__.__defaults__:
#cnt = Train.__init__.func_code.co_argcount
@@ -335,16 +336,22 @@
self.error.append(err)
epoch = len(self.error)
show = self.params['show']
+ interrupt = self.params['interrupt']
if show and (epoch % show) == 0:
print("Epoch: {0}; Error: {1};".format(epoch, err))
if err < self.params['goal']:
raise TrainStop('The goal of learning is reached')
if epoch >= self.params['epochs']:
raise TrainStop('The maximum number of train epochs is reached')
+ if interrupt and epoch % interrupt == 0:
+ raise TrainStop('Training is interrupted at {} epochs.'.format(epoch))
-
+
train = self._train_class(net, *args, **self.params['train'])
Train.__init__(train, epochf, self.params['epochs'])
- self.error = []
+
+ #superfluous?
+ #self.error = []
+
try:
train(net, *args)
except TrainStop as msg: