using MathProgBase

type NewtonSolver <: MathProgBase.AbstractMathProgSolver
end

type NewtonData <: MathProgBase.AbstractNonlinearModel
    numVar::Int
    d                    # NLP evaluator
    x::Vector{Float64}
    hess_I::Vector{Int}  # 1st component of Hessian sparsity pattern
    hess_J::Vector{Int}  # 2nd component of Hessian sparsity pattern
    status::Symbol
end

MathProgBase.NonlinearModel(solver::NewtonSolver) = NewtonData(0, nothing, Float64[], Int[], Int[], :Uninitialized);

function MathProgBase.loadproblem!(m::NewtonData, numVar, numConstr, l, u, lb, ub, sense,
                                   d::MathProgBase.AbstractNLPEvaluator)
    @assert numConstr == 0                     # we don't handle constraints
    @assert all(l .== -Inf) && all(u .== Inf)  # or variable bounds
    @assert sense == :Min                      # or maximization
    MathProgBase.initialize(d, [:Grad, :Hess]) # request gradient and hessian evaluations
    m.d = d
    m.numVar = numVar
    m.x = zeros(numVar)
    m.hess_I, m.hess_J = MathProgBase.hesslag_structure(d)
end;

function MathProgBase.optimize!(m::NewtonData)
    iteration = 0
    ∇f = Array(Float64, m.numVar)
    hess_val = Array(Float64, length(m.hess_I)) # array to store nonzeros in Hessian
    MathProgBase.eval_grad_f(m.d, ∇f, m.x)      # writes gradient to ∇f vector
    ∇f_norm = norm(∇f)
    while ∇f_norm > 1e-5
        println("Iteration $iteration. Gradient norm $∇f_norm, x = $(m.x)")
        # Evaluate the Hessian of the Lagrangian.
        # We don't have any constraints, so this is just the Hessian.
        MathProgBase.eval_hesslag(m.d, hess_val, m.x, 1.0, Float64[])
        # The derivative evaluator provides the Hessian matrix
        # in lower-triangular sparse (i,j,value) triplet format,
        # so now we convert this into a Julia symmetric sparse matrix object.
        H = Symmetric(sparse(m.hess_I, m.hess_J, hess_val), :L)
        m.x = m.x - H \ ∇f  # Newton step
        MathProgBase.eval_grad_f(m.d, ∇f, m.x)
        ∇f_norm = norm(∇f)
        iteration += 1
    end
    println("Iteration $iteration. Gradient norm $∇f_norm, x = $(m.x), DONE")
    m.status = :Optimal
end;

MathProgBase.setwarmstart!(m::NewtonData, x) = (m.x = x)
MathProgBase.status(m::NewtonData) = m.status
MathProgBase.getsolution(m::NewtonData) = m.x
MathProgBase.getconstrduals(m::NewtonData) = []                # Lagrange multipliers on constraints (none)
MathProgBase.getreducedcosts(m::NewtonData) = zeros(m.numVar)  # Lagrange multipliers on variable bounds
MathProgBase.getobjval(m::NewtonData) = MathProgBase.eval_f(m.d, m.x);

using JuMP
jumpmodel = Model(solver=NewtonSolver())
@variable(jumpmodel, x)
@variable(jumpmodel, y)
@NLobjective(jumpmodel, Min, (x-5)^2 + (y-3)^2)
status = solve(jumpmodel);

mod = """
var x;
var y;
minimize OBJ: (x-5)^2 + (y-3)^2;
write gquad;
"""
fd = open("quad.mod", "w")
write(fd, mod)
close(fd)

;~/ampl-demo/ampl quad.mod

;cat quad.nl | head -n 3

using AmplNLReader, AMPLMathProgInterface
nlp = AmplNLReader.AmplModel("quad.nl")
m = MathProgBase.model(NewtonSolver())
AMPLMathProgInterface.loadamplproblem!(m, nlp)
MathProgBase.optimize!(m);
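
As a quick sanity check (a minimal sketch, not part of the original example): the objective (x-5)^2 + (y-3)^2 has its unconstrained minimum at (5, 3) with objective value 0, and since it is quadratic a single Newton step should land there exactly. After the AMPL-driven solve above, the accessors we defined for NewtonData let us inspect the result:

# Verify that the Newton solver found the analytic optimum.
println("status    = ", MathProgBase.status(m))       # expect :Optimal
println("solution  = ", MathProgBase.getsolution(m))  # expect ≈ [5.0, 3.0], up to AMPL's variable ordering
println("objective = ", MathProgBase.getobjval(m))    # expect ≈ 0.0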