Commit

An implementation of DIOM
amontoison committed Sep 27, 2018
1 parent 112cfc8 commit ebad93c
Showing 5 changed files with 221 additions and 2 deletions.
1 change: 1 addition & 0 deletions src/Krylov.jl
@@ -79,6 +79,7 @@ include("cg.jl")
include("cg_lanczos.jl")
include("minres.jl")
include("dqgmres.jl")
include("diom.jl")
include("symmlq.jl")
include("cr.jl")

146 changes: 146 additions & 0 deletions src/diom.jl
@@ -0,0 +1,146 @@
# An implementation of DIOM for the solution of the square linear system Ax = b.
#
# This method is described in
#
# Y. Saad, Iterative methods for sparse linear systems.
# PWS Publishing Company, Boston, USA, 1996.
#
# Y. Saad, Practical use of some Krylov subspace methods for solving indefinite and nonsymmetric linear systems.
# SIAM Journal on Scientific and Statistical Computing, Vol. 5, No. 1, March 1984.
#
# Alexis Montoison, <[email protected]>
# Montreal, September 2018.

export diom

"""Solve the consistent linear system Ax = b using direct incomplete orthogonalization method.
DIOM is similar to SYMMLQ for nonsymmetric problems.
It's a more economical algorithm based upon the LU factorization with partial pivoting.
In the particular case where A is symmetric indefinite, DIOM is theorecally equivalent
to SYMMLQ but slightly more economical.
An advantage of DIOM is that nonsymmetric or symmetric indefinite or both nonsymmetric
and indefinite systems of linear equations can be handled by this single algorithm.
This implementation allows a right preconditioning with M.
"""
function diom{T <: Number}(A :: AbstractLinearOperator, b :: AbstractVector{T};
M :: AbstractLinearOperator=opEye(size(A,1)),
atol :: Float64=1.0e-8, rtol :: Float64=1.0e-6,
itmax :: Int=0, memory :: Int=20, verbose :: Bool=false)

m, n = size(A)
m == n || error("System must be square")
size(b, 1) == m || error("Inconsistent problem size")
verbose && @printf("DIOM: system of size %d\n", n)

# Initial solution x₀ and residual r₀.
x = zeros(T, n)
r = copy(b)
# Compute β.
rNorm = @knrm2(n, r) # rNorm = ‖r₀‖
rNorm ≈ 0 && return x, SimpleStats(true, false, [rNorm], [], "x = 0 is a zero-residual solution")

iter = 0
itmax == 0 && (itmax = 2*n)

rNorms = [rNorm;]
ε = atol + rtol * rNorm
verbose && @printf("%5d %7.1e\n", iter, rNorm)

# Set up workspace.
mem = min(memory, itmax) # Memory.
P = zeros(n, mem-1) # Directions for x.
V = zeros(n, mem) # Preconditioned Krylov vectors.
H = zeros(mem+1) # Last column of the banded Hessenberg matrix Hₘ = LₘUₘ.
# Each column has at most mem + 1 nonzero elements. hᵢ.ₘ is stored as H[m-i+2].
# m-i+2 is the index of the diagonal on which hᵢ.ₘ is located.
# In addition, the last column of Uₘ is stored in H.
L = zeros(mem) # Last mem pivots of Lₘ.
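# Illustrative indexing example (hypothetical values, for clarity only):
# with mem = 4 and iter = 6, the entries h₃.₆, h₄.₆, h₅.₆, h₆.₆ of the current column
# are stored in H[5], H[4], H[3], H[2], and hₘ₊₁.ₘ = h₇.₆ is stored in H[1].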

# Initial ξ and V.
ξ = rNorm
V[:,1] = r / rNorm

# Stopping criterion.
solved = rNorm ≤ ε
tired = iter ≥ itmax
status = "unknown"

while !(solved || tired)

# Update iteration index.
iter = iter + 1

# Set position in circular stack where iter-th Krylov vector should go.
pos = mod(iter-1, mem) + 1 # Position corresponding to iter in the circular stack.
next_pos = mod(iter, mem) + 1 # Position corresponding to iter + 1 in the circular stack.

# Incomplete Arnoldi procedure.
z = M * V[:,pos] # Forms pₘ
w = A * z # Forms vₘ₊₁
for i = max(1, iter-mem+1) : iter
ipos = mod(i-1, mem) + 1 # Position of vᵢ in the circular stack
jpos = iter - i + 2
H[jpos] = @kdot(n, w, V[:,ipos]) # hᵢ.ₘ = < A * vₘ , vᵢ >
@kaxpy!(n, -H[jpos], V[:,ipos], w) # w ← w - hᵢ.ₘ * vᵢ
end
# Compute hₘ₊₁.ₘ and vₘ₊₁.
H[1] = @knrm2(n, w) # hₘ₊₁.ₘ = ‖vₘ₊₁‖
if H[1] ≠ 0 # hₘ₊₁.ₘ ≈ 0 ⇒ "lucky breakdown"
V[:,next_pos] = w / H[1] # vₘ₊₁ = w / hₘ₊₁.ₘ
end

if iter == 1
# l₂.₁ ← h₂.₁ / u₁.₁
L[next_pos] = H[1] / H[2]
else
# Update LU factorization of Hₘ by computing the last row of Lₘ and the next pivot lₘ₊₁.ₘ of Lₘ.
# uᵢ.ₘ ← hᵢ.ₘ - lᵢ.ᵢ₋₁ * uᵢ₋₁.ₘ
for i = max(2,iter-mem+2) : iter
lpos = mod(i-1, mem) + 1 # Position of lᵢ.ᵢ₋₁ in the circular stack
jpos = iter - i + 2
H[jpos] = H[jpos] - L[lpos] * H[jpos+1]
end
# lₘ₊₁.ₘ ← hₘ₊₁.ₘ / uₘ.ₘ
L[next_pos] = H[1] / H[2]

# Compute ξₘ, the last component of zₘ = β(Lₘ)⁻¹e₁.
# ξₘ = -lₘ.ₘ₋₁ * ξₘ₋₁
ξ = - L[pos] * ξ
end

# Compute the last column of Pₘ = Vₘ(Uₘ)⁻¹.
for i = max(1,iter-mem+1) : iter - 1
ipos = mod(i-1, mem-1) + 1
jpos = iter - i + 2
# z ← z - uᵢ.ₘ * pᵢ
@kaxpy!(n, -H[jpos], P[:,ipos], z)
end
dpos = mod(iter-1, mem-1) + 1 # Position of pₘ in the circular stack
# pₘ = z / uₘ.ₘ
P[:,dpos] = z / H[2]

# Update solution x.
# xₘ ← xₘ₋₁ + ξₘ * pₘ
@kaxpy!(n, ξ, P[:,dpos], x)

# Update residual norm estimate.
# ‖ b - Axₘ ‖ = hₘ₊₁.ₘ * |ξₘ / uₘ.ₘ|
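# (Following Saad, Iterative Methods for Sparse Linear Systems: rₘ = b - AVₘyₘ = -hₘ₊₁.ₘ (eₘᵀyₘ) vₘ₊₁,
#  and with yₘ = (Uₘ)⁻¹zₘ the last component of yₘ is ξₘ / uₘ.ₘ, hence the update of rNorm below.)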
rNorm = H[1] * abs(ξ / H[2])
push!(rNorms, rNorm)

# Update stopping criterion.
solved = rNorm ≤ ε
tired = iter ≥ itmax
verbose && @printf("%5d %7.1e\n", iter, rNorm)
end
verbose && @printf("\n")

status = tired ? "maximum number of iterations exceeded" : "solution good enough given atol and rtol"
stats = SimpleStats(solved, false, rNorms, T[], status)
return (x, stats)
end
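
For context, here is a minimal usage sketch of the new solver, mirroring the small symmetric positive definite system of test/test_diom.jl (Julia 0.6-era syntax, as used throughout this commit; the memory value chosen here is illustrative and not part of the changeset):

using Krylov
n = 10
A = spdiagm((ones(n-1), 4*ones(n), ones(n-1)), (-1, 0, 1))  # symmetric positive definite tridiagonal matrix
b = A * ones(n)
(x, stats) = diom(A, b, memory=4, verbose=true)             # keep only the 4 most recent Krylov vectors
r = b - A * x
@printf("DIOM: relative residual: %8.1e\n", norm(r) / norm(b))
@assert stats.solved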
2 changes: 1 addition & 1 deletion src/variants.jl
@@ -7,7 +7,7 @@ function preallocated_LinearOperator(A)
end

# Variants where A is a matrix without specific properties
for fn in (:cgls, :cgne, :craig, :craigmr, :crls, :crmr, :lslq, :lsmr, :lsqr, :dqgmres)
for fn in (:cgls, :cgne, :craig, :craigmr, :crls, :crmr, :lslq, :lsmr, :lsqr, :dqgmres, :diom)
@eval begin
# Variant for A given as a dense array and b given as a dense vector
$fn{TA <: Number, Tb <: Number}(A :: Array{TA,2}, b :: Vector{Tb}, args...; kwargs...) =
3 changes: 2 additions & 1 deletion test/runtests.jl
@@ -1,7 +1,8 @@
using Base.Test
using Krylov
using Main.Krylov
using LinearOperators

include("test_diom.jl")
include("test_dqgmres.jl")
include("gen_lsq.jl")
include("check_min_norm.jl")
71 changes: 71 additions & 0 deletions test/test_diom.jl
@@ -0,0 +1,71 @@
include("get_div_grad.jl")

diom_tol = 1.0e-6

# 1. Symmetric and positive definite systems.
#
# Cubic spline matrix.
n = 10;
A = spdiagm((ones(n-1), 4*ones(n), ones(n-1)), (-1, 0, 1)); b = A * [1:n;]

(x, stats) = diom(A, b)
r = b - A * x
resid = norm(r) / norm(b)
@printf("DIOM: Relative residual: %8.1e\n", resid)
@test(resid <= diom_tol)
@test(stats.solved)

# Symmetric indefinite variant.
A = A - 3 * speye(n)
b = A * [1:n;]
(x, stats) = diom(A, b, itmax=10)
r = b - A * x
resid = norm(r) / norm(b)
@printf("DIOM: Relative residual: %8.1e\n", resid)
@test(resid <= diom_tol)
@test(stats.solved)

# Code coverage.
(x, stats) = diom(full(A), b)
show(stats)

# Sparse Laplacian.
A = get_div_grad(16, 16, 16)
b = ones(size(A, 1))
(x, stats) = diom(A, b)
r = b - A * x
resid = norm(r) / norm(b)
@printf("DIOM: Relative residual: %8.1e\n", resid)
@test(resid <= diom_tol)
@test(stats.solved)

# Symmetric indefinite variant, almost singular.
A = A - 5 * speye(size(A, 1))
(x, stats) = diom(A, b)
r = b - A * x
resid = norm(r) / norm(b)
@printf("DIOM: Relative residual: %8.1e\n", resid)
@test(resid <= 100 * diom_tol)
@test(stats.solved)

# Test b == 0
(x, stats) = diom(A, zeros(size(A,1)))
@test x == zeros(size(A,1))
@test stats.status == "x = 0 is a zero-residual solution"

# Test integer values
A = spdiagm((ones(Int, n-1), 4*ones(Int, n), ones(Int, n-1)), (-1, 0, 1)); b = A * [1:n;]
(x, stats) = diom(A, b)
@test stats.solved

# Test with Jacobi (or diagonal) preconditioner
A = ones(10,10) + 9 * eye(10)
b = 10 * [1:10;]
M = 1/10 * opEye(10)
(x, stats) = diom(A, b, M=M)
show(stats)
r = b - A * x
resid = norm(r) / norm(b)
@printf("DIOM: Relative residual: %8.1e\n", resid)
@test(resid <= diom_tol)
@test(stats.solved)
