function train_one(c :: LinearClassifier, x :: Array{Float64}, y :: Int64; α :: Float64 = 0.025, input_gradient :: Union(Nothing, Array{Float64}) = nothing)
    predict!(c, x)
    c.outputs[y] -= 1

    if input_gradient != nothing
        # input_gradient = ( c.weights * outputs' )'
        BLAS.gemv!('N', α, c.weights, c.outputs, 1.0, input_gradient)
    end

    # c.weights -= α * x' * outputs;
    BLAS.ger!(-α, vec(x), c.outputs, c.weights)
end
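For readers who don't speak BLAS: gemv! accumulates a matrix–vector product in place, and ger! applies a rank-1 update. A plain allocating sketch of the same two updates, given only for clarity (train_one_ref is a made-up name, not from the code above), would be:

# Allocating reference version of the two BLAS updates; a sketch for
# clarity, not the code being timed.
function train_one_ref(c :: LinearClassifier, x :: Array{Float64}, y :: Int64; α :: Float64 = 0.025)
    predict!(c, x)
    c.outputs[y] -= 1
    g = α * (c.weights * c.outputs)          # what gemv! adds into input_gradient
    c.weights -= α * (vec(x) * c.outputs')   # the rank-1 update ger! performs
    return g
end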
function predict!(c :: LinearClassifier, x :: Array{Float64})
    c.outputs = vec(softmax(x * c.weights))
end
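predict! calls a softmax that isn't shown in this snippet. A minimal definition consistent with how it is used here (row vector in, normalized probabilities out) would be:

# Assumed softmax, not part of the original post: shift by the maximum for
# numerical stability, then normalize the exponentials.
function softmax(a :: Array{Float64})
    e = exp(a .- maximum(a))
    e / sum(e)
end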
type LinearClassifier
    k :: Int64                    # number of outputs
    n :: Int64                    # number of inputs
    weights :: Array{Float64, 2}  # n * k weight matrix (indexed weights[j, i], j ≤ n, i ≤ k)
    outputs :: Vector{Float64}
end

Julia Version 0.3.6-pre+70
Commit 638fa02 (2015-02-12 13:59 UTC)
Platform Info:
System: Darwin (x86_64-apple-darwin14.1.0)
CPU: Intel(R) Core(TM) i7-4650U CPU @ 1.70GHz
WORD_SIZE: 64
BLAS: libopenblas (USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell)
LAPACK: libopenblas
LIBM: libopenlibm
LLVM: libLLVM-3.3
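To make the snippets above concrete, here is one way the BLAS-based trainer might be invoked; the sizes (300 inputs, 5 classes) and values are invented for illustration:

# Hypothetical usage; dimensions are invented for the example.
c = LinearClassifier(5, 300, 0.01 * randn(300, 5), zeros(5))
x = randn(1, 300)    # one training example as a row vector
g = zeros(300)       # preallocated buffer for the input gradient
train_one(c, x, 3, α = 0.025, input_gradient = g)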
using Base.Cartesian   # provides the @nexprs macro used below

function train_one(c :: LinearClassifier, x :: Array{Float64}, y :: Int64, input_gradient :: Array{Float64}, α :: Float64 = 0.025)
predict!(c, x)
c.outputs[y] -= 1
# input_gradient = ( c.weights * outputs' )'
# BLAS.gemv!('N', α, c.weights, c.outputs, 1.0, input_gradient)
    # Manually unrolled version of: input_gradient += α * c.weights * c.outputs
    m = 0.0
    j = 0
    limit = c.n - 3   # the unrolled body reads j:j+3, so require j + 3 <= c.n
for i in 1:c.k
m = α * c.outputs[i]
j = 1
while j <= limit
            # @nexprs 4 expands the body for idx = 1:4, i.e. four unrolled adds
            @nexprs 4 (idx->input_gradient[j+idx-1] += m * c.weights[j+idx-1, i])
j+=4
end
while j <= c.n
input_gradient[j] += m * c.weights[j, i]
j+=1
end
end
# c.weights -= α * x' * outputs;
# BLAS.ger!(-α, vec(x), c.outputs, c.weights)
for i in 1:c.k
m = α * c.outputs[i]
j = 1
while j <= limit
c.weights[j, i] -= m * x[j]
c.weights[j+1, i] -= m * x[j+1]
c.weights[j+2, i] -= m * x[j+2]
c.weights[j+3, i] -= m * x[j+3]
j+=4
end
while j <= c.n
c.weights[j, i] -= m * x[j]
j+=1
end
end
end
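With both definitions loaded, a crude way to compare them is to time repeated calls; this is a sketch with invented sizes and iteration counts, not a measurement from the post:

# Crude micro-benchmark sketch; dimensions and counts are invented.
c = LinearClassifier(5, 300, 0.01 * randn(300, 5), zeros(5))
x = randn(1, 300)
g = zeros(300)
@time for i in 1:100000
    train_one(c, x, 3, g)                     # unrolled version (positional args)
end
@time for i in 1:100000
    train_one(c, x, 3, input_gradient = g)    # BLAS version (keyword arg)
end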
Hello everyone,