From 17079b8f7452f0da351d717370d8ff84a6527161 Mon Sep 17 00:00:00 2001
From: tmigot <tangi.migot@gmail.com>
Date: Wed, 5 Jul 2023 21:24:39 +0200
Subject: [PATCH] Add allocation tests

---
 test/allocs.jl   | 69 ++++++++++++++++++++++++++++++++++++++++++++++++
 test/runtests.jl | 21 +--------------
 2 files changed, 70 insertions(+), 20 deletions(-)
 create mode 100644 test/allocs.jl

diff --git a/test/allocs.jl b/test/allocs.jl
new file mode 100644
index 0000000..0d1ca10
--- /dev/null
+++ b/test/allocs.jl
@@ -0,0 +1,69 @@
+"""
+    @wrappedallocs(expr)
+Given an expression, this macro wraps it in a new function, evaluates it
+there, and returns the number of bytes allocated during the evaluation.
+Wrapping the expression in a new function gives a more accurate allocation
+count when the expression involves global variables (e.g. when working at
+the REPL).
+For example, `@wrappedallocs(x + y)` produces:
+```julia
+function g(x1, x2)
+  @allocated x1 + x2
+end
+g(x, y)
+```
+You can use this macro in a unit test to verify that a function does not
+allocate:
+```julia
+@test @wrappedallocs(x + y) == 0
+```
+"""
+macro wrappedallocs(expr)
+  argnames = [gensym() for a in expr.args]
+  quote
+    function g($(argnames...))
+      @allocated $(Expr(expr.head, argnames...))
+    end
+    $(Expr(:call, :g, [esc(a) for a in expr.args]...))
+  end
+end
+
+if v"1.7" <= VERSION
+  @testset "Test 0-allocations of NLPModel API for AugLagModel" begin
+    list_of_problems = NLPModelsTest.nlp_problems
+
+    T = Float64
+    for problem in list_of_problems
+      nlp = eval(Symbol(problem))(T)
+      if nlp.meta.ncon > 0
+        μ = one(T)
+        x = nlp.meta.x0
+        fx = obj(nlp, x)
+        y = nlp.meta.y0
+        cx = similar(y)
+        model = Percival.AugLagModel(nlp, y, μ, x, fx, cx)
+
+        test_zero_allocations(model, exclude = [hess])
+      end
+    end
+  end
+
+  @testset "Allocation tests $(model)" for model in setdiff(NLPModelsTest.nlp_problems, ["HS10", "HS13", "LINCON", "LINSV"])
+    nlp = eval(Meta.parse(model))()
+
+    nlp.meta.ncon > 0 || continue
+
+    if !equality_constrained(nlp)
+      nlp = SlackModel(nlp)
+    end
+
+    solver = PercivalSolver(nlp)
+    x = copy(nlp.meta.x0)
+    stats = GenericExecutionStats(nlp)
+    SolverCore.solve!(solver, nlp, stats)
+    reset!(solver)
+    reset!(nlp)
+    al = @wrappedallocs SolverCore.solve!(solver, nlp, stats)
+    @test al == 0
+  end
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index 50a97b2..071562d 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -8,26 +8,7 @@ mutable struct DummyModel{T, S} <: AbstractNLPModel{T, S}
   meta::NLPModelMeta{T, S}
 end
 
-if v"1.7" <= VERSION
-  @testset "Test 0-allocations of NLPModel API for AugLagModel" begin
-    list_of_problems = NLPModelsTest.nlp_problems
-
-    T = Float64
-    for problem in list_of_problems
-      nlp = eval(Symbol(problem))(T)
-      if nlp.meta.ncon > 0
-        μ = one(T)
-        x = nlp.meta.x0
-        fx = obj(nlp, x)
-        y = nlp.meta.y0
-        cx = similar(y)
-        model = Percival.AugLagModel(nlp, y, μ, x, fx, cx)
-
-        test_zero_allocations(model, exclude = [hess])
-      end
-    end
-  end
-end
+include("allocs.jl")
 
 function test()
   nlp = DummyModel(NLPModelMeta(1, minimize = false))
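
Note for reviewers: the snippet below is a minimal sketch of how the new per-solver allocation check can be reproduced in a REPL session, outside the test harness. It assumes Julia 1.7 or newer, that the `@wrappedallocs` macro from `test/allocs.jl` above is already defined in the session, and that `HS6` (an equality-constrained problem from NLPModelsTest, not in the excluded list) is a reasonable example problem; the solver calls themselves (`PercivalSolver`, `GenericExecutionStats`, `SolverCore.solve!`, `reset!`) are the ones used in the patch.

```julia
# Sketch under the assumptions above: @wrappedallocs is already defined and
# HS6 is an arbitrarily chosen equality-constrained NLPModelsTest problem.
using Percival, NLPModels, NLPModelsTest, SolverCore, Test

nlp = NLPModelsTest.HS6()              # assumed example problem (ncon > 0, equality-constrained)
solver = PercivalSolver(nlp)
stats = GenericExecutionStats(nlp)

SolverCore.solve!(solver, nlp, stats)  # first solve compiles and is allowed to allocate
reset!(solver)
reset!(nlp)

# A second in-place solve is expected to be allocation-free on Julia >= 1.7.
@test @wrappedallocs(SolverCore.solve!(solver, nlp, stats)) == 0
```

The warm-up solve followed by `reset!` mirrors the pattern in the `"Allocation tests"` testset: only the second, fully compiled in-place call is measured.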