|
-ReverseDiffAD(nvar, f) = ADNLPModels.ADModelBackend(
-  nvar,
-  f,
-  gradient_backend = ADNLPModels.ReverseDiffADGradient,
-  hprod_backend = ADNLPModels.ReverseDiffADHvprod,
-  jprod_backend = ADNLPModels.ReverseDiffADJprod,
-  jtprod_backend = ADNLPModels.ReverseDiffADJtprod,
-  jacobian_backend = ADNLPModels.ReverseDiffADJacobian,
-  hessian_backend = ADNLPModels.ReverseDiffADHessian,
-)
+function test_allocations(nlp::ADNLPModel)
+  x = nlp.meta.x0
+  y = zeros(eltype(nlp.meta.x0), nlp.meta.ncon)
+  g = zeros(eltype(nlp.meta.x0), nlp.meta.nvar)
+  @test_opt target_modules=(ADNLPModels,) obj(nlp, x)
+  @test_opt target_modules=(ADNLPModels,) cons!(nlp, x, y)
+  @test_opt target_modules=(ADNLPModels,) grad!(nlp, x, g)
+end
 
-function test_getter_setter(nlp)
-  @test get_adbackend(nlp) == nlp.adbackend
-  if typeof(nlp) <: ADNLPModel
-    set_adbackend!(nlp, ReverseDiffAD(nlp.meta.nvar, nlp.f))
-  elseif typeof(nlp) <: ADNLSModel
-    function F(x; nequ = nlp.nls_meta.nequ)
-      Fx = similar(x, nequ)
-      nlp.F!(Fx, x)
-      return Fx
-    end
-    set_adbackend!(nlp, ReverseDiffAD(nlp.meta.nvar, x -> sum(F(x) .^ 2)))
-  end
-  @test typeof(get_adbackend(nlp).gradient_backend) <: ADNLPModels.ReverseDiffADGradient
-  @test typeof(get_adbackend(nlp).hprod_backend) <: ADNLPModels.ReverseDiffADHvprod
-  @test typeof(get_adbackend(nlp).hessian_backend) <: ADNLPModels.ReverseDiffADHessian
-  set_adbackend!(
-    nlp,
-    gradient_backend = ADNLPModels.ForwardDiffADGradient,
-    jtprod_backend = ADNLPModels.GenericForwardDiffADJtprod(),
-  )
-  @test typeof(get_adbackend(nlp).gradient_backend) <: ADNLPModels.ForwardDiffADGradient
-  @test typeof(get_adbackend(nlp).hprod_backend) <: ADNLPModels.ReverseDiffADHvprod
-  @test typeof(get_adbackend(nlp).jtprod_backend) <: ADNLPModels.GenericForwardDiffADJtprod
-  @test typeof(get_adbackend(nlp).hessian_backend) <: ADNLPModels.ReverseDiffADHessian
+function test_allocations(nlp::ADNLSModel)
+  x = nlp.meta.x0
+  y = zeros(eltype(nlp.meta.x0), nlp.meta.ncon)
+  g = zeros(eltype(nlp.meta.x0), nlp.meta.nvar)
+  Fx = zeros(eltype(nlp.meta.x0), nlp.nls_meta.nequ)
+  @test_opt target_modules=(ADNLPModels,) function_filter=(@nospecialize(f) -> f != ForwardDiff.gradient!) obj(nlp, x)
+  @test_opt target_modules=(ADNLPModels,) function_filter=(@nospecialize(f) -> f != ForwardDiff.gradient!) cons!(nlp, x, y)
+  @test_opt target_modules=(ADNLPModels,) function_filter=(@nospecialize(f) -> f != ForwardDiff.gradient!) grad!(nlp, x, g, Fx)
+  @test_opt target_modules=(ADNLPModels,) function_filter=(@nospecialize(f) -> f != ForwardDiff.gradient!) residual!(nlp, x, Fx)
 end
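
For context, @test_opt is JET.jl's static-analysis test macro: it fails the test if the analyzed call contains runtime dispatch inside one of the listed target_modules, so these helpers flag the type instabilities that typically cause allocations rather than counting allocations directly. A minimal usage sketch under that reading follows; the two example problems are hypothetical and not taken from this test suite:

using Test, JET
using ADNLPModels, NLPModels

# Hypothetical constrained problems; any ADNLPModel/ADNLSModel would do.
nlp = ADNLPModel(x -> sum(x .^ 2), ones(2), x -> [x[1] + x[2]], [0.0], [1.0])
nls = ADNLSModel(x -> [x[1] - 1.0; x[2] - 2.0], ones(2), 2, x -> [x[1] + x[2]], [0.0], [1.0])

test_allocations(nlp)  # checks obj, cons!, grad! for runtime dispatch in ADNLPModels
test_allocations(nls)  # NLS variant also checks residual!, filtering out ForwardDiff.gradient!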