At the moment Neurolab does not support regularization methods, although it may be possible to get regularization through the scipy.optimize-based training algorithms (fmin_bfgs, fmin_cg, fmin_ncg).
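
For illustration, here is a minimal sketch (plain scipy, not Neurolab code) of minimizing an L2-regularized loss with fmin_bfgs; the toy model, data, and reg value are made up for the example:

import numpy as np
from scipy.optimize import fmin_bfgs

# Toy linear model fit with an L2 penalty on the weights
rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X.dot(np.array([1.0, -2.0, 0.5])) + 0.1 * rng.randn(50)
reg = 0.01  # penalty strength, chosen arbitrarily

def loss(w):
    # mean squared error + L2 penalty
    return np.mean((X.dot(w) - y) ** 2) + reg * np.sum(w ** 2)

def grad(w):
    # analytic gradient of loss()
    return 2.0 * X.T.dot(X.dot(w) - y) / len(y) + 2.0 * reg * w

w_opt = fmin_bfgs(loss, np.zeros(3), fprime=grad, disp=False)
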
About your implementation:
(1) I think that 'regularizer' is not a network property; it is a parameter of the training process.
(2) The function ff_grad calculates the gradient only. I think regularization should be applied in the train function.
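
With a standard L2 (weight decay) penalty the error becomes E + (reg/2) * sum(w**2), so the gradient step per weight is w -= lr * (dE/dw + reg * w); the example below applies this update layer by layer.
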
Example: TrainGD with a regularizer:
from neurolab import tool
from neurolab.core import Train

class TrainGDR(Train):
    """
    Gradient descent backpropagation with regularization
    """

    def __init__(self, net, input, target, lr=0.01, adapt=False, regularizer=0):
        self.adapt = adapt
        self.lr = lr
        self.reg = regularizer

    def __call__(self, net, input, target):
        if not self.adapt:
            # Batch mode: one update per epoch over the whole training set
            while True:
                g, output = self.calc(net, input, target)
                e = self.error(net, input, target, output)
                self.epochf(e, net, input, target)  # raises a stop condition when training ends
                self.learn(net, g)
        else:
            # Adaptive (incremental) mode: update after each sample
            while True:
                for i in range(input.shape[0]):
                    g = self.calc(net, [input[i]], [target[i]])[0]
                    self.learn(net, g)
                e = self.error(net, input, target)
                self.epochf(e, net, input, target)
        return None

    def calc(self, net, input, target):
        # tool.ff_grad returns the per-layer gradient, a flat copy, and the net output;
        # only the per-layer gradient and the output are needed here
        g1, g2, output = tool.ff_grad(net, input, target)
        return g1, output

    def learn(self, net, grad):
        for ln, layer in enumerate(net.layers):
            # L2 update: w -= lr * (dE/dw + reg * w)
            # (biases are often left unregularized in practice)
            layer.np['w'] -= self.lr * (grad[ln]['w'] + self.reg * layer.np['w'])
            layer.np['b'] -= self.lr * (grad[ln]['b'] + self.reg * layer.np['b'])
        return None
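
A custom train class like this can probably be attached the same way the built-in trainers are wrapped; a sketch, assuming neurolab.core.Trainer forwards extra net.train() keyword arguments to TrainGDR.__init__ (the data and hyperparameter values are made up):

import numpy as np
import neurolab as nl
from neurolab.core import Trainer

# wrap the class like neurolab wraps its built-in train functions
train_gdr = Trainer(TrainGDR)

# toy regression problem
x = np.linspace(-7, 7, 20).reshape(20, 1)
y = np.sin(x) * 0.5

net = nl.net.newff([[-7, 7]], [5, 1])
net.trainf = train_gdr
err = net.train(x, y, epochs=500, show=100, goal=0.01, lr=0.01, regularizer=1e-4)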