Skip to content

[Breaking change] Change names to follow JSO conventions #194

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Aug 18, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion lib/MadNLPGPU/src/MadNLPGPU.jl
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import CUDAKernels: CUDADevice

import MadNLP
import MadNLP:
@kwdef, Logger, @debug, @warn, @error,
@kwdef, MadNLPLogger, @debug, @warn, @error,
AbstractOptions, AbstractLinearSolver, AbstractNLPModel, set_options!,
SymbolicException,FactorizationException,SolveException,InertiaException,
introduce, factorize!, solve!, improve!, is_inertia, inertia, tril_to_full!,
Expand All @@ -28,6 +28,8 @@ if has_cuda()
include("lapackgpu.jl")
export LapackGPUSolver
end
export CuMadNLPSolver

include("interface.jl")

end # module
5 changes: 2 additions & 3 deletions lib/MadNLPGPU/src/interface.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@

function CuInteriorPointSolver(nlp::AbstractNLPModel{T}; kwargs...) where T
function CuMadNLPSolver(nlp::AbstractNLPModel{T}; kwargs...) where T
opt_ipm, opt_linear_solver, logger = MadNLP.load_options(; linear_solver=LapackGPUSolver, kwargs...)

@assert is_supported(opt_ipm.linear_solver, T)
Expand All @@ -15,5 +14,5 @@ function CuInteriorPointSolver(nlp::AbstractNLPModel{T}; kwargs...) where T
VT = CuVector{T}
MadNLP.DenseCondensedKKTSystem{T, VT, MT}
end
return MadNLP.InteriorPointSolver{T,KKTSystem}(nlp, opt_ipm, opt_linear_solver; logger=logger)
return MadNLP.MadNLPSolver{T,KKTSystem}(nlp, opt_ipm, opt_linear_solver; logger=logger)
end
4 changes: 2 additions & 2 deletions lib/MadNLPGPU/src/kernels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -109,12 +109,12 @@ function MadNLP.jtprod!(y::AbstractVector, kkt::MadNLP.AbstractDenseKKTSystem{T,
return
end

function MadNLP.set_aug_diagonal!(kkt::MadNLP.AbstractDenseKKTSystem{T, VT, MT}, ips::MadNLP.InteriorPointSolver) where {T, VT<:CuVector{T}, MT<:CuMatrix{T}}
function MadNLP.set_aug_diagonal!(kkt::MadNLP.AbstractDenseKKTSystem{T, VT, MT}, solver::MadNLP.MadNLPSolver) where {T, VT<:CuVector{T}, MT<:CuMatrix{T}}
haskey(kkt.etc, :pr_diag_host) || (kkt.etc[:pr_diag_host] = Vector{T}(undef, length(kkt.pr_diag)))
pr_diag_h = kkt.etc[:pr_diag_host]::Vector{T}
# Broadcast is not working as MadNLP array are allocated on the CPU,
# whereas pr_diag is allocated on the GPU
pr_diag_h .= ips.zl./(ips.x.-ips.xl) .+ ips.zu./(ips.xu.-ips.x)
pr_diag_h .= solver.zl./(solver.x.-solver.xl) .+ solver.zu./(solver.xu.-solver.x)
copyto!(kkt.pr_diag, pr_diag_h)
fill!(kkt.du_diag, 0.0)
end
Expand Down
4 changes: 2 additions & 2 deletions lib/MadNLPGPU/src/lapackgpu.jl
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,14 @@ mutable struct LapackGPUSolver{T} <: AbstractLinearSolver{T}
info::CuVector{Int32}
etc::Dict{Symbol,Any} # throw some algorithm-specific things here
opt::LapackOptions
logger::Logger
logger::MadNLPLogger
end


function LapackGPUSolver(
dense::MT;
option_dict::Dict{Symbol,Any}=Dict{Symbol,Any}(),
opt=LapackOptions(),logger=Logger(),
opt=LapackOptions(),logger=MadNLPLogger(),
kwargs...) where {T,MT <: AbstractMatrix{T}}

set_options!(opt,option_dict,kwargs...)
Expand Down
18 changes: 9 additions & 9 deletions lib/MadNLPGPU/test/densekkt_gpu.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,19 +21,19 @@ function _compare_gpu_with_cpu(KKTSystem, n, m, ind_fixed)
nlp = MadNLPTests.DenseDummyQP{T}(; n=n, m=m, fixed_variables=ind_fixed)

# Solve on CPU
h_ips = MadNLP.InteriorPointSolver(nlp; madnlp_options...)
MadNLP.optimize!(h_ips)
h_solver = MadNLP.MadNLPSolver(nlp; madnlp_options...)
MadNLP.solve!(h_solver)

# Solve on GPU
d_ips = MadNLPGPU.CuInteriorPointSolver(nlp; madnlp_options...)
MadNLP.optimize!(d_ips)
d_solver = MadNLPGPU.CuMadNLPSolver(nlp; madnlp_options...)
MadNLP.solve!(d_solver)

@test isa(d_ips.kkt, KKTSystem{T, CuVector{T}, CuMatrix{T}})
@test isa(d_solver.kkt, KKTSystem{T, CuVector{T}, CuMatrix{T}})
# # Check that both results match exactly
@test h_ips.cnt.k == d_ips.cnt.k
@test h_ips.obj_val ≈ d_ips.obj_val atol=atol
@test h_ips.x ≈ d_ips.x atol=atol
@test h_ips.l ≈ d_ips.l atol=atol
@test h_solver.cnt.k == d_solver.cnt.k
@test h_solver.obj_val ≈ d_solver.obj_val atol=atol
@test h_solver.x ≈ d_solver.x atol=atol
@test h_solver.y ≈ d_solver.y atol=atol
end
end

Expand Down
2 changes: 1 addition & 1 deletion lib/MadNLPHSL/src/MadNLPHSL.jl
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
module MadNLPHSL

import Libdl: dlopen, RTLD_DEEPBIND
import MadNLP: @kwdef, Logger, @debug, @warn, @error,
import MadNLP: @kwdef, MadNLPLogger, @debug, @warn, @error,
AbstractOptions, AbstractLinearSolver, set_options!, SparseMatrixCSC, SubVector,
SymbolicException,FactorizationException,SolveException,InertiaException,
introduce, factorize!, solve!, improve!, is_inertia, inertia, findIJ, nnz,
Expand Down
4 changes: 2 additions & 2 deletions lib/MadNLPHSL/src/ma27.jl
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ mutable struct Ma27Solver{T} <: AbstractLinearSolver{T}
maxfrt::Vector{Int32}

opt::Ma27Options
logger::Logger
logger::MadNLPLogger
end


Expand Down Expand Up @@ -89,7 +89,7 @@ for (fa, fb, fc, typ) in [
end

function Ma27Solver(csc::SparseMatrixCSC{T};
opt=Ma27Options(),logger=Logger(),
opt=Ma27Options(),logger=MadNLPLogger(),
) where T
I,J = findIJ(csc)
nz=Int32(nnz(csc))
Expand Down
4 changes: 2 additions & 2 deletions lib/MadNLPHSL/src/ma57.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ mutable struct Ma57Solver{T} <: AbstractLinearSolver{T}
work::Vector{T}

opt::Ma57Options
logger::Logger
logger::MadNLPLogger
end


Expand Down Expand Up @@ -85,7 +85,7 @@ for (fa,fb,fc,typ) in (
end

function Ma57Solver(csc::SparseMatrixCSC{T};
opt=Ma57Options(),logger=Logger()
opt=Ma57Options(),logger=MadNLPLogger()
) where T
I,J=findIJ(csc)

Expand Down
4 changes: 2 additions & 2 deletions lib/MadNLPHSL/src/ma77.jl
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ mutable struct Ma77Solver{T} <: AbstractLinearSolver{T}
keep::Vector{Ptr{Nothing}}

opt::Ma77Options
logger::Logger
logger::MadNLPLogger
end

for (fdefault, fanalyse, ffactor, fsolve, ffinalise, fopen, finputv, finputr, typ) in [
Expand Down Expand Up @@ -245,7 +245,7 @@ end

function Ma77Solver(
csc::SparseMatrixCSC{T,Int32};
opt=Ma77Options(),logger=Logger(),
opt=Ma77Options(),logger=MadNLPLogger(),
) where T
full,tril_to_full_view = get_tril_to_full(csc)
order = Vector{Int32}(undef,csc.n)
Expand Down
4 changes: 2 additions & 2 deletions lib/MadNLPHSL/src/ma86.jl
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ mutable struct Ma86Solver{T} <: AbstractLinearSolver{T}
keep::Vector{Ptr{Nothing}}

opt::Ma86Options
logger::Logger
logger::MadNLPLogger
end


Expand Down Expand Up @@ -130,7 +130,7 @@ ma86_set_num_threads(n) = ccall((:omp_set_num_threads_,libma86),

function Ma86Solver(
csc::SparseMatrixCSC{T,Int32};
opt=Ma86Options(),logger=Logger(),
opt=Ma86Options(),logger=MadNLPLogger(),
) where T
ma86_set_num_threads(opt.ma86_num_threads)

Expand Down
4 changes: 2 additions & 2 deletions lib/MadNLPHSL/src/ma97.jl
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ mutable struct Ma97Solver{T} <:AbstractLinearSolver{T}
fkeep::Vector{Ptr{Nothing}}

opt::Ma97Options
logger::Logger
logger::MadNLPLogger
end

for (fdefault, fanalyse, ffactor, fsolve, ffinalise, typ) in [
Expand Down Expand Up @@ -135,7 +135,7 @@ ma97_set_num_threads(n) = ccall((:omp_set_num_threads_,libma97),

function Ma97Solver(
csc::SparseMatrixCSC{T,Int32};
opt=Ma97Options(),logger=Logger(),
opt=Ma97Options(),logger=MadNLPLogger(),
) where T

ma97_set_num_threads(opt.ma97_num_threads)
Expand Down
6 changes: 3 additions & 3 deletions lib/MadNLPKrylov/src/MadNLPKrylov.jl
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
module MadNLPKrylov

import MadNLP:
@kwdef, Logger, @debug, @warn, @error,
@kwdef, MadNLPLogger, @debug, @warn, @error,
AbstractOptions, AbstractIterator, set_options!, @sprintf,
solve_refine!, mul!, ldiv!, size, default_options
import IterativeSolvers:
Expand Down Expand Up @@ -34,11 +34,11 @@ mutable struct KrylovIterator{T} <: AbstractIterator{T}
g::Union{Nothing,GMRESIterable}
res::Vector{T}
opt::KrylovOptions
logger::Logger
logger::MadNLPLogger
end

function KrylovIterator(res::Vector{T},_mul!,_ldiv!;
opt=KrylovOptions(),logger=Logger(),
opt=KrylovOptions(),logger=MadNLPLogger(),
) where T
!isempty(kwargs) && (for (key,val) in kwargs; option_dict[key]=val; end)

Expand Down
6 changes: 3 additions & 3 deletions lib/MadNLPMumps/src/MadNLPMumps.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import StaticArrays: SVector, setindex
import MUMPS_seq_jll
import MadNLP:
parsefile, dlopen,
@kwdef, Logger, @debug, @warn, @error,
@kwdef, MadNLPLogger, @debug, @warn, @error,
SparseMatrixCSC, SubVector,
SymbolicException,FactorizationException,SolveException,InertiaException,
AbstractOptions, AbstractLinearSolver, set_options!, input_type, default_options,
Expand Down Expand Up @@ -239,7 +239,7 @@ mutable struct MumpsSolver{T} <: AbstractLinearSolver{T}
mumps_struc::Struc
is_singular::Bool
opt::MumpsOptions
logger::Logger
logger::MadNLPLogger
end

for (lib,fname,typ) in [(MUMPS_seq_jll.libdmumps,:dmumps_c,Float64), (MUMPS_seq_jll.libsmumps, :smumps_c,Float32)]
Expand All @@ -265,7 +265,7 @@ end
# ---------------------------------------------------------------------------------------

function MumpsSolver(csc::SparseMatrixCSC{T,Int32};
opt=MumpsOptions(), logger=Logger(),
opt=MumpsOptions(), logger=MadNLPLogger(),
) where T

I,J = findIJ(csc)
Expand Down
2 changes: 1 addition & 1 deletion lib/MadNLPPardiso/src/MadNLPPardiso.jl
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ include(joinpath("..","deps","deps.jl"))

import Libdl: dlopen, RTLD_DEEPBIND
import MadNLP:
MadNLP, @kwdef, Logger, @debug, @warn, @error,
MadNLP, @kwdef, MadNLPLogger, @debug, @warn, @error,
SubVector, SparseMatrixCSC,
SymbolicException,FactorizationException,SolveException,InertiaException,
AbstractOptions, AbstractLinearSolver, set_options!,
Expand Down
4 changes: 2 additions & 2 deletions lib/MadNLPPardiso/src/pardiso.jl
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ mutable struct PardisoSolver{T} <: AbstractLinearSolver{T}
csc::SparseMatrixCSC{T,Int32}
w::Vector{T}
opt::PardisoOptions
logger::Logger
logger::MadNLPLogger
end

_pardisoinit(
Expand All @@ -42,7 +42,7 @@ _pardiso(
pt,maxfct,mnum,mtype,phase,n,a,ia,ja,perm,nrhs,iparm,msglvl,b,x,err,dparm)

function PardisoSolver(csc::SparseMatrixCSC{T,Int32};
opt=PardisoOptions(),logger=Logger(),
opt=PardisoOptions(),logger=MadNLPLogger(),
option_dict::Dict{Symbol,Any}=Dict{Symbol,Any}(),
kwargs...) where T
!isempty(kwargs) && (for (key,val) in kwargs; option_dict[key]=val; end)
Expand Down
4 changes: 2 additions & 2 deletions lib/MadNLPPardiso/src/pardisomkl.jl
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ mutable struct PardisoMKLSolver{T} <: AbstractLinearSolver{T}
csc::SparseMatrixCSC{T,Int32}
w::Vector{T}
opt::PardisoMKLOptions
logger::Logger
logger::MadNLPLogger
end

pardisomkl_pardisoinit(pt,mtype::Ref{Cint},iparm::Vector{Cint}) where T =
Expand Down Expand Up @@ -51,7 +51,7 @@ end


function PardisoMKLSolver(csc::SparseMatrixCSC{T};
opt=PardisoMKLOptions(),logger=Logger(),
opt=PardisoMKLOptions(),logger=MadNLPLogger(),
) where T

w = Vector{T}(undef,csc.n)
Expand Down
30 changes: 15 additions & 15 deletions src/IPM/IPM.jl
Original file line number Diff line number Diff line change
@@ -1,25 +1,25 @@
# MadNLP.jl
# Created by Sungho Shin (sungho.shin@wisc.edu)

abstract type AbstractInteriorPointSolver{T} end
abstract type AbstractMadNLPSolver{T} end

include("restoration.jl")

mutable struct InteriorPointSolver{T,KKTSystem <: AbstractKKTSystem{T}} <: AbstractInteriorPointSolver{T}
mutable struct MadNLPSolver{T,KKTSystem <: AbstractKKTSystem{T}} <: AbstractMadNLPSolver{T}
nlp::AbstractNLPModel
kkt::KKTSystem

opt::IPMOptions
cnt::Counters
logger::Logger
opt::MadNLPOptions
cnt::MadNLPCounters
logger::MadNLPLogger

n::Int # number of variables (after reformulation)
m::Int # number of cons
nlb::Int
nub::Int

x::Vector{T} # primal (after reformulation)
l::Vector{T} # dual
y::Vector{T} # dual
zl::Vector{T} # dual (after reformulation)
zu::Vector{T} # dual (after reformulation)
xl::Vector{T} # primal lower bound (after reformulation)
Expand Down Expand Up @@ -95,7 +95,7 @@ mutable struct InteriorPointSolver{T,KKTSystem <: AbstractKKTSystem{T}} <: Abstr
output::Dict
end

function InteriorPointSolver(nlp::AbstractNLPModel{T}; kwargs...) where T
function MadNLPSolver(nlp::AbstractNLPModel{T}; kwargs...) where T
opt_ipm, opt_linear_solver, logger = load_options(; kwargs...)
@assert is_supported(opt_ipm.linear_solver, T)

Expand All @@ -113,17 +113,17 @@ function InteriorPointSolver(nlp::AbstractNLPModel{T}; kwargs...) where T
MT = Matrix{T}
DenseCondensedKKTSystem{T, VT, MT}
end
return InteriorPointSolver{T,KKTSystem}(nlp, opt_ipm, opt_linear_solver; logger=logger)
return MadNLPSolver{T,KKTSystem}(nlp, opt_ipm, opt_linear_solver; logger=logger)
end

# Inner constructor
function InteriorPointSolver{T,KKTSystem}(
function MadNLPSolver{T,KKTSystem}(
nlp::AbstractNLPModel,
opt::IPMOptions,
opt::MadNLPOptions,
opt_linear_solver::AbstractOptions;
logger=Logger(),
logger=MadNLPLogger(),
) where {T, KKTSystem<:AbstractKKTSystem{T}}
cnt = Counters(start_time=time())
cnt = MadNLPCounters(start_time=time())

# generic options
opt.disable_garbage_collector &&
Expand All @@ -142,7 +142,7 @@ function InteriorPointSolver{T,KKTSystem}(
xl = [get_lvar(nlp);view(get_lcon(nlp),ind_cons.ind_ineq)]
xu = [get_uvar(nlp);view(get_ucon(nlp),ind_cons.ind_ineq)]
x = [get_x0(nlp);zeros(T,ns)]
l = copy(get_y0(nlp))
y = copy(get_y0(nlp))
zl= zeros(T,get_nvar(nlp)+ns)
zu= zeros(T,get_nvar(nlp)+ns)

Expand Down Expand Up @@ -210,8 +210,8 @@ function InteriorPointSolver{T,KKTSystem}(
end


return InteriorPointSolver{T,KKTSystem}(nlp,kkt,opt,cnt,logger,
n,m,nlb,nub,x,l,zl,zu,xl,xu,0.,f,c,
return MadNLPSolver{T,KKTSystem}(nlp,kkt,opt,cnt,logger,
n,m,nlb,nub,x,y,zl,zu,xl,xu,0.,f,c,
jacl,
d, p,
_w1, _w2, _w3, _w4,
Expand Down
Loading