Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add allocation tests #145

Merged
merged 1 commit into from
Jul 6, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 69 additions & 0 deletions test/allocs.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
"""
    @wrappedallocs(expr)

Given an expression, this macro wraps that expression inside a new function
which will evaluate that expression and measure the amount of memory allocated
by the expression. Wrapping the expression in a new function allows for more
accurate memory allocation detection when using global variables (e.g. when
at the REPL).

For example, `@wrappedallocs(x + y)` produces:
```julia
function g(x1, x2)
    @allocated x1 + x2
end
g(x, y)
```

You can use this macro in a unit test to verify that a function does not
allocate:
```
@test @wrappedallocs(x + y) == 0
```
"""
macro wrappedallocs(expr)
    # One fresh symbol per sub-expression of `expr` (for a call this includes
    # the callee itself); each becomes a parameter of the generated function,
    # so the measured body references only local arguments, never globals.
    argnames = [gensym() for a in expr.args]
    quote
        # `g` is hygienic (renamed by the macro expander), so it cannot clash
        # with a caller-side `g`.
        function g($(argnames...))
            # Rebuild the original expression shape over the local parameters
            # and measure its allocations inside the compiled function.
            @allocated $(Expr(expr.head, argnames...))
        end
        # Call the generated function with the original sub-expressions,
        # escaped so they are evaluated in the caller's scope.
        $(Expr(:call, :g, [esc(a) for a in expr.args]...))
    end
end

# Allocation tests are gated on Julia >= 1.7, where `@allocated`-based
# measurements in these test helpers are reliable.
if v"1.7" <= VERSION
  @testset "Test 0-allocations of NLPModel API for AugLagModel" begin
    list_of_problems = NLPModelsTest.nlp_problems

    T = Float64
    for problem in list_of_problems
      nlp = eval(Symbol(problem))(T)
      # AugLagModel wraps a constrained problem; skip unconstrained ones.
      if nlp.meta.ncon > 0
        μ = one(T)
        x = nlp.meta.x0
        fx = obj(nlp, x)
        y = nlp.meta.y0
        cx = similar(y)
        model = Percival.AugLagModel(nlp, y, μ, x, fx, cx)

        # NOTE(review): `hess` is excluded from the zero-allocation check —
        # presumably it allocates by design; confirm against NLPModelsTest.
        test_zero_allocations(model, exclude = [hess])
      end
    end
  end

  # Problems excluded below are presumably known to allocate or unsupported
  # by this check — TODO confirm.
  @testset "Allocation tests $(model)" for model in setdiff(NLPModelsTest.nlp_problems, ["HS10", "HS13", "LINCON", "LINSV"])
    nlp = eval(Meta.parse(model))()

    # Only constrained problems are relevant to the solver's allocation test.
    nlp.meta.ncon > 0 || continue

    # Reformulate inequality constraints as equalities via slack variables.
    if !equality_constrained(nlp)
      nlp = SlackModel(nlp)
    end

    solver = PercivalSolver(nlp)
    stats = GenericExecutionStats(nlp)
    # First solve warms up compilation; without it the measured call would
    # report compilation allocations.  (Removed a dead `x = copy(nlp.meta.x0)`
    # local that was never read.)
    SolverCore.solve!(solver, nlp, stats)
    reset!(solver)
    reset!(nlp)
    al = @wrappedallocs SolverCore.solve!(solver, nlp, stats)
    @test al == 0
  end
end
21 changes: 1 addition & 20 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,26 +8,7 @@ mutable struct DummyModel{T, S} <: AbstractNLPModel{T, S}
meta::NLPModelMeta{T, S}
end

if v"1.7" <= VERSION
@testset "Test 0-allocations of NLPModel API for AugLagModel" begin
list_of_problems = NLPModelsTest.nlp_problems

T = Float64
for problem in list_of_problems
nlp = eval(Symbol(problem))(T)
if nlp.meta.ncon > 0
μ = one(T)
x = nlp.meta.x0
fx = obj(nlp, x)
y = nlp.meta.y0
cx = similar(y)
model = Percival.AugLagModel(nlp, y, μ, x, fx, cx)

test_zero_allocations(model, exclude = [hess])
end
end
end
end
include("allocs.jl")

function test()
nlp = DummyModel(NLPModelMeta(1, minimize = false))
Expand Down