Use PositiveFactorizations in Newton's method.
pkofod committed Mar 13, 2016
1 parent 93cd7c0 commit cd650cc
Showing 4 changed files with 20 additions and 3 deletions.
REQUIRE (1 addition, 0 deletions)
@@ -1,3 +1,4 @@
 julia 0.4
 Calculus
 DualNumbers 0.2
+PositiveFactorizations
src/Optim.jl (1 addition, 0 deletions)
@@ -2,6 +2,7 @@ VERSION >= v"0.4.0-dev+6521" && __precompile__(true)
 
 module Optim
 using Calculus
+using PositiveFactorizations
 using Compat
 
 import Base.length,
src/newton.jl (7 additions, 3 deletions)
@@ -95,9 +95,13 @@ function optimize{T}(d::TwiceDifferentiableFunction,
         # Increment the number of steps we've had to perform
         iteration += 1
 
-        # Search direction is always the negative gradient divided by H
-        # TODO: Do this calculation in place
-        @inbounds s[:] = -(H \ gr)
+        # Search direction is always the negative gradient divided by
+        # a matrix encoding the absolute values of the curvatures
+        # represented by H. It deviates from the usual "add a scaled
+        # identity matrix" version of the modified Newton method. More
+        # information can be found in the discussion at issue #153.
+        F, Hd = ldltfact!(Positive, H)
+        s[:] = -(F\gr)
 
         # Refresh the line search cache
        dphi0 = vecdot(gr, s)
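The new step replaces the raw solve -(H \ gr) with a solve against a positive definite factorization of H, so the step is a descent direction even when the Hessian is indefinite. A minimal sketch of the difference, mirroring the diff's Julia 0.4-era ldltfact!(Positive, H) call on a made-up indefinite Hessian:

using PositiveFactorizations

# A made-up indefinite Hessian: positive curvature along the first
# axis, negative curvature along the second.
H  = [2.0 0.0; 0.0 -1.0]
gr = [1.0, 1.0]

# Plain Newton solve: the negative eigenvalue flips the step component
# along the second axis, so the step points uphill overall.
s_plain = -(H \ gr)     # [-0.5, 1.0]
dot(gr, s_plain)        # 0.5 > 0: not a descent direction

# Positive factorization: curvatures enter with their absolute values.
# ldltfact! mutates its argument, hence the copy; in the diff H is
# recomputed every iteration, so no copy is needed there.
F, Hd = ldltfact!(Positive, copy(H))
s_pos = -(F \ gr)       # [-0.5, -1.0]
dot(gr, s_pos)          # -1.5 < 0: a descent direction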
test/newton.jl (11 additions, 0 deletions)
@@ -55,3 +55,14 @@ for (name, prob) in Optim.UnconstrainedProblems.examples
         @assert norm(res.minimum - prob.solutions) < 1e-2
     end
 end
+
+using ForwardDiff
+easom(x) = -cos(x[1])*cos(x[2])*exp(-((x[1]-pi)^2 + (x[2]-pi)^2))
+x, y = 1.2:0.1:4, 1.2:0.1:4
+
+g_easom = ForwardDiff.gradient(easom)
+h_easom = ForwardDiff.hessian(easom)
+
+# start where old Newton's method would fail due to concavity
+optimize(easom, (x, y) -> copy!(y, g_easom(x)), (x,y)->copy!(y, h_easom(x)), [2., 2.], Newton())
+@test_approx_eq optimize(easom, (x, y) -> copy!(y, g_easom(x)), (x,y)->copy!(y, h_easom(x)), [2., 2.], Newton()).minimum [float(pi);float(pi)]
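The starting point [2., 2.] matters: the Easom function's Hessian is indefinite there, which is exactly the case where the old -(H \ gr) step could point uphill. A quick check (a sketch reusing the old ForwardDiff API from the diff, where hessian(f) returns a function):

using ForwardDiff

easom(x) = -cos(x[1])*cos(x[2])*exp(-((x[1]-pi)^2 + (x[2]-pi)^2))

# Hessian at the test's starting point.
H = ForwardDiff.hessian(easom)([2.0, 2.0])

# One positive and one negative eigenvalue: H is indefinite, so the
# unmodified Newton step is not guaranteed to be a descent direction.
eigvals(H)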