using DataFrames
using Optim
using JuMP
using KNITRO
global depvar = [1,1,1,4,4]
global x_mat = [866.0 962.64 859.9 995.76 1135.5 199.69 151.72 553.34 505.6 237.88
727.93 758.89 796.82 894.69 968.9 168.66 168.66 520.24 486.49 199.19
599.48 783.05 719.86 900.11 1048.3 165.58 137.8 439.06 404.74 171.47
835.17 793.06 761.25 831.04 1048.7 180.88 147.14 483.0 425.22 222.95
755.59 846.29 858.86 985.64 883.05 174.91 138.9 404.41 389.52 178.49]
function log_pn{T}(b::Vector{T}, xs::Vector{T}; J=5)
"""
Returns the J=5 vector of log choice probabilities for a given coefficient vector b.
b : 2x1 coefficient vector
xs : data on the J alternatives (two attributes per alternative), 10x1 vector
"""
num = Array(Float64, J)
base_x = [xs[1], xs[J]]
for option in 1:J
num[option] = exp(dot(b, [xs[option], xs[option+J]] - base_x))
end
den = sum(num)
return log(num) - log(den)
end
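For reference, with x̃_j = [xs[j], xs[j+J]] this is the standard conditional-logit log probability; subtracting base_x shifts every alternative's utility by the same constant, so it cancels between numerator and denominator and only acts as a numerical normalisation:

\log P_j = b^\top(\tilde{x}_j - \mathrm{base\_x}) - \log \sum_{k=1}^{J} \exp\big(b^\top(\tilde{x}_k - \mathrm{base\_x})\big)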
function neg_log_lik(beta)
N = size(depvar)[1]
sum_ll = 0.0
for i in 1:N
xs = vec(x_mat[i, :])
sum_ll += log_pn(beta, xs)[depvar[i]]
end
return -sum_ll
end
## Solve with Optim.optimize
function optim_s( ; beta0=[0.01, 0.01])
optimum = optimize(neg_log_lik, beta0, method=:cg, iterations=50)
println(optimum.minimum)
end
optim_s()
using MathProgBase
import MathProgBase.MathProgSolverInterface
type NSOpt <: MathProgSolverInterface.AbstractNLPEvaluator
## Constant model input
sample::Matrix{Float64} # Stores the data set
surv_firms::Vector{Bool} # Which firms in sample survive
surv_dur_lim::Matrix{Float64} # Limits of the survival duration interval
exit_firms::Vector{Bool} # Which firms in sample default
exit_lim::Matrix{Float64} # Limits of the default period (relative to t)
## Computation results storage
params::Vector{Float64} # Temporarily stores the current parameters used by solver
nsfactors::Matrix{Float64} # Temporarily stores the current ns factors for use by solver
nsdecay::Vector{Float64} # Temporarily stores the current ns decay parameter for use by solver
int_stor::Vector{Float64} # Storage vector for intensity integral
int_stor_g::Matrix{Float64} # Storage matrix for intensity integral gradient
int_stor_h::Array{Float64, 3} # Storage array for intensity integral Hessian
int_stor_h_com::Matrix{Float64} # Storage matrix for common factors in intensity integral Hessian
function NSOpt(sample::Matrix{Float64}, surv_firms::Vector{Bool}, surv_dur_lim::Matrix{Float64},
exit_firms::Vector{Bool}, exit_lim::Matrix{Float64}, params::Vector{Float64},
int_stor::Vector{Float64}, int_stor_g::Matrix{Float64}, int_stor_h::Array{Float64, 3},
int_stor_h_com::Matrix{Float64})
## Compute Nelson-Siegel factors and decay parameter
nsfactors, nsdecay = compute_nsfactors(sample, params)
## Initialize NSOpt object
new(sample, surv_firms, surv_dur_lim, exit_firms, exit_lim, params, nsfactors, nsdecay,
int_stor, int_stor_g, int_stor_h, int_stor_h_com)
end
end
function compute_nsfactors(sample::Matrix{Float64}, params::Vector{Float64})
trans = reshape(params, (convert(Int64, length(params) / 4), 4)) # Transformation matrix for sample
nsfactors = sample * trans[:, 1:3] # Update NS factors in model
nsdecay = exp(sample * trans[:, 4]) # Update NS decay parameters in model
return nsfactors, nsdecay
end
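A quick illustration of the shapes involved (made-up dimensions, not part of the model): for K covariates the parameter vector has length 4K; columns 1:3 of the reshaped matrix produce the three Nelson-Siegel factors and column 4 the exponentiated decay parameter.

## Illustration only: K = 2 covariates, so params has length 8
sample_demo = randn(5, 2)
params_demo = randn(8)
f_demo, d_demo = compute_nsfactors(sample_demo, params_demo)
size(f_demo)      # (5, 3) -- one row of NS factors per observation
length(d_demo)    # 5      -- one positive decay parameter per observation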
# TODO (Felix, 2015-02-04): Figure out way to deal with different number of parameters
function update_model!(params::Vector{Float64}, d::NSOpt)
if params != d.params
d.params = params # Update parameters in model
trans = reshape(params, (convert(Int64, length(params) / 4), 4)) # Transformation matrix for sample
d.nsfactors = d.sample * trans[:, 1:3] # Update NS factors in model
d.nsdecay = exp(d.sample * trans[:, 4]) # Update NS decay parameters in model
end
end
# TODO (Felix, 2015-02-03): Continue implementation from here.
# - Try to implement evaluation functions as wrappers for functions in ns_likelihood.jl
# - Put execution code into ns_estimation.jl
function MathProgSolverInterface.initialize(d::NSOpt, requested_features::Vector{Symbol})
for feat in requested_features
if !(feat in [:Grad, :Jac, :Hess])
error("Unsupported feature $feat")
# TODO: implement Jac-vec and Hess-vec products
# for solvers that need them
end
end
end
MathProgSolverInterface.features_available(d::NSOpt) = [:Grad, :Jac, :Hess]
## Compute target function
function MathProgSolverInterface.eval_f(d::NSOpt, x)
#= update_model!(x, d) =#
default_lik!(x, d.sample, d.surv_firms, d.surv_dur_lim, d.exit_firms, d.exit_lim, d.int_stor)
end
## Compute constraints (this problem is unconstrained)
MathProgSolverInterface.eval_g(d::NSOpt, g, x) = nothing
## Compute target function gradient
function MathProgSolverInterface.eval_grad_f(d::NSOpt, grad_f, x)
#= update_model!(x, d) =#
default_lik_g!(x, d.sample, d.surv_firms, d.surv_dur_lim, d.exit_firms, d.exit_lim,
d.int_stor, d.int_stor_g, grad_f)
end
## Compute Hessian of the Lagrangian matrix
function MathProgSolverInterface.eval_hesslag(d::NSOpt, H, x, σ, μ)
# TODO (Felix, 2015-02-04): How to deal with non existing constraints?
#= update_model!(x, d) =#
## Compute likelihood Hessian vector
default_lik_h!(x, d.sample, d.surv_firms, d.surv_dur_lim, d.exit_firms, d.exit_lim, d.int_stor,
d.int_stor_g, d.int_stor_h, d.int_stor_h_com, H)
## Scale by the objective weight σ to obtain the Lagrangian Hessian (no constraints, so μ is unused)
H *= σ
end
### Structure of the Lagrangian Hessian
function MathProgSolverInterface.hesslag_structure(d::NSOpt)
# TODO (Felix, 2015-02-04): figure out how to incorporate models with fewer factors
dim = length(d.params)
return findn(sparse(triu(ones(dim, dim))))
end
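To make the structure explicit (illustration only): for a three-parameter model the call returns the row and column indices of the dense upper triangle, in column order.

findn(sparse(triu(ones(3, 3))))
## -> ([1, 1, 2, 1, 2, 3], [1, 2, 2, 3, 3, 3])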
# Structure of the Jacobian of all constraints (this problem is unconstrained).
MathProgSolverInterface.jac_structure(d::NSOpt) = Int[], Int[]
## Compute jacobian matrix of constraints
MathProgSolverInterface.eval_jac_g(d::NSOpt, J, x) = nothing
## Setup optimization input
startvals = vcat(randn(3 * length(COVAR_SEL)), randn(length(COVAR_SEL)) / 1000)
l = -20 .* ones(length(startvals))
u = 20 .* ones(length(startvals))
lb = Float64[]
ub = Float64[]
## Setup optimization
using NLopt
solver = NLoptSolver(algorithm = :GN_ISRES)
m = MathProgSolverInterface.model(solver)
p = NSOpt(COVARIATES[:, COVAR_SEL], DEF_FIRMS_SURV, DEF_SURV_LIMITS, DEF_FIRMS_EXIT,
DEF_EXIT_LIMITS, startvals, stor, stor_g, stor_h, stor_h_com)
MathProgSolverInterface.loadnonlinearproblem!(m, length(startvals), 0, l, u, lb, ub, :Max, p)
MathProgSolverInterface.setwarmstart!(m, startvals)
## Run optimization
info("Starting optimisation...")
MathProgSolverInterface.optimize!(m)
It would actually be nice to have an interface to MathProgBase that is at the complexity level of the Optim interface. Autodiff could be supported in the same way as in Optim, via Duals. The whole thing would be less fancy than JuMP, but probably still very useful in some cases.
Having worked with MathProgBase a fair bit lately, I think there is actually nothing preventing this from happening, other than a volunteer to try to code it up :)
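For concreteness, here is a rough sketch of what such a thin wrapper could look like. This is hypothetical code, not an existing API: SimpleNLP and mpb_minimize are made-up names, and a central finite-difference gradient stands in for the Dual-based autodiff mentioned above.

## Rough, hypothetical sketch: wrap a plain objective function into a minimal
## AbstractNLPEvaluator so it can be handed to any MathProgBase NLP solver
## with an Optim-style calling convention.
using MathProgBase
import MathProgBase.MathProgSolverInterface

type SimpleNLP <: MathProgSolverInterface.AbstractNLPEvaluator
    f::Function
end

function MathProgSolverInterface.initialize(d::SimpleNLP, requested_features::Vector{Symbol})
    for feat in requested_features
        feat in [:Grad] || error("Unsupported feature $feat")
    end
end
MathProgSolverInterface.features_available(d::SimpleNLP) = [:Grad]
MathProgSolverInterface.eval_f(d::SimpleNLP, x) = d.f(x)
MathProgSolverInterface.eval_g(d::SimpleNLP, g, x) = nothing
MathProgSolverInterface.jac_structure(d::SimpleNLP) = Int[], Int[]
MathProgSolverInterface.eval_jac_g(d::SimpleNLP, J, x) = nothing
function MathProgSolverInterface.eval_grad_f(d::SimpleNLP, grad_f, x)
    ## Central finite differences; a Dual-number autodiff backend could replace this
    h = 1e-6
    for i in 1:length(x)
        xp = copy(x); xm = copy(x)
        xp[i] += h; xm[i] -= h
        grad_f[i] = (d.f(xp) - d.f(xm)) / (2h)
    end
end

## Hypothetical Optim-like front end (note: some solvers require finite bounds)
function mpb_minimize(f::Function, x0::Vector{Float64}, solver)
    m = MathProgSolverInterface.model(solver)
    n = length(x0)
    MathProgSolverInterface.loadnonlinearproblem!(m, n, 0, fill(-Inf, n), fill(Inf, n),
        Float64[], Float64[], :Min, SimpleNLP(f))
    MathProgSolverInterface.setwarmstart!(m, x0)
    MathProgSolverInterface.optimize!(m)
    return MathProgSolverInterface.getsolution(m), MathProgSolverInterface.getobjval(m)
end

Something along these lines would let you write, say, mpb_minimize(neg_log_lik, [0.01, 0.01], NLoptSolver(algorithm = :LD_LBFGS)) without hand-writing a full evaluator type for every model.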