From 0c4a9d878bd167932746aacae3525859108606aa Mon Sep 17 00:00:00 2001 From: Nick Robinson Date: Thu, 31 Aug 2023 12:17:11 +0100 Subject: [PATCH] Add ability to pass an expression to run after every testitem (#108) * WIP: Pass expression to run after every testitem * don't nest expr * Add tests * fixup! Add tests * Add docs * fixup! fixup! Add tests * Update test/integrationtests.jl * Run test_end_expr in softscope, same as testitem * Bump version * fixup! Run test_end_expr in softscope, same as testitem * Add TestEndExpr.jl to TEST_PKGS * Don't import testsetups in test_end_expr --- Project.toml | 2 +- README.md | 37 ++++- src/ReTestItems.jl | 64 +++++--- test/_integration_test_tools.jl | 1 + test/integrationtests.jl | 129 ++++++++++++++- test/packages/README.md | 1 + test/packages/TestEndExpr.jl/Manifest.toml | 154 ++++++++++++++++++ test/packages/TestEndExpr.jl/Project.toml | 13 ++ test/packages/TestEndExpr.jl/README.md | 14 ++ .../TestEndExpr.jl/src/TestEndExpr.jl | 56 +++++++ .../TestEndExpr.jl/test/page_tests.jl | 11 ++ test/packages/TestEndExpr.jl/test/runtests.jl | 21 +++ 12 files changed, 473 insertions(+), 30 deletions(-) create mode 100644 test/packages/TestEndExpr.jl/Manifest.toml create mode 100644 test/packages/TestEndExpr.jl/Project.toml create mode 100644 test/packages/TestEndExpr.jl/README.md create mode 100644 test/packages/TestEndExpr.jl/src/TestEndExpr.jl create mode 100644 test/packages/TestEndExpr.jl/test/page_tests.jl create mode 100644 test/packages/TestEndExpr.jl/test/runtests.jl diff --git a/Project.toml b/Project.toml index 6f8f1fe9..a867495d 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "ReTestItems" uuid = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" -version = "1.16.0" +version = "1.17.0" [deps] Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" diff --git a/README.md b/README.md index 2a774797..6c5c0c46 100644 --- a/README.md +++ b/README.md @@ -107,6 +107,8 @@ By default, `Test` and the package being tested will be 
imported into the `@test Since a `@testitem` is the block of code that will be executed, `@testitem`s cannot be nested. +#### Test setup + If some test-specific code needs to be shared by multiple `@testitem`s, this code can be placed in a `module` and marked as `@testsetup`, and the `@testitem`s can depend on it via the `setup` keyword. @@ -124,7 +126,38 @@ end end ``` -### Summary +The `setup` is run once on each worker process that requires it; +it is not run before every `@testitem` that depends on the setup. + +#### Post-testitem hook + +If there is something that should be checked after every single `@testitem`, then it's possible to pass an expression to `runtests` using the `test_end_expr` keyword. +This expression will be run immediately after each `@testitem`. + +```julia +test_end_expr = quote + @testset "global Foo unchanged" begin + foo = get_global_foo() + @test foo.changes == 0 + end +end +runtests("frozzle_tests.jl"; test_end_expr) +``` + +#### Worker process start-up + +If there is some set-up that should be done on each worker process before it is used to evaluate test-items, then it is possible to pass an expression to `runtests` via the `worker_init_expr` keyword. +This expression will be run on each worker process as soon as it is started. + +```julia +nworkers = 3 +worker_init_expr = quote + set_global_foo_memory_limit!(Sys.total_memory()/nworkers) +end +runtests("frobble_tests.jl"; nworkers, worker_init_expr) +``` + +## Summary 1. Write tests inside of an `@testitem` block. - These are like an `@testset`, except that they must contain all the code they need to run; @@ -157,6 +190,8 @@ end using ReTestItems, MyPackage runtests(MyPackage) ``` + - Pass to `runtests` any configuration you want your tests to run with, such as `retries`, `testitem_timeout`, `nworkers`, `nworker_threads`, `worker_init_expr`, `test_end_expr`. + See the `runtests` docstring for details. 
--- diff --git a/src/ReTestItems.jl b/src/ReTestItems.jl index bc91bf1c..01c240a6 100644 --- a/src/ReTestItems.jl +++ b/src/ReTestItems.jl @@ -42,6 +42,16 @@ function softscope(@nospecialize ex) return ex end +# Call softscope on each top-level body expr +# which has the effect of the body acting like you're at the REPL or +# inside a testset, except imports/using/etc all still work as expected +# more info: https://docs.julialang.org/en/v1.10-dev/manual/variables-and-scoping/#on-soft-scope +function softscope_all!(@nospecialize ex) + for i = 1:length(ex.args) + ex.args[i] = softscope(ex.args[i]) + end +end + include("workers.jl") using .Workers include("macros.jl") @@ -127,6 +137,8 @@ will be run. supported through a string (e.g. "auto,2"). - `worker_init_expr::Expr`: an expression that will be evaluated on each worker process before any tests are run. Can be used to load packages or set up the environment. Must be a `:block` expression. +- `test_end_expr::Expr`: an expression that will be evaluated after each testitem is run. + Can be used to verify that global state is unchanged after running a test. Must be a `:block` expression. - `report::Bool=false`: If `true`, write a JUnit-format XML file summarising the test results. Can also be set using the `RETESTITEMS_REPORT` environment variable. The location at which the XML report is saved can be set using the `RETESTITEMS_REPORT_LOCATION` environment variable. 
@@ -182,7 +194,8 @@ function runtests( tags::Union{Symbol,AbstractVector{Symbol},Nothing}=nothing, report::Bool=parse(Bool, get(ENV, "RETESTITEMS_REPORT", "false")), logs::Symbol=default_log_display_mode(report, nworkers), - verbose_results::Bool=(logs !== :issues && isinteractive()) + verbose_results::Bool=(logs !== :issues && isinteractive()), + test_end_expr::Expr=Expr(:block), ) nworker_threads = _validated_nworker_threads(nworker_threads) paths′ = filter(paths) do p @@ -208,10 +221,10 @@ function runtests( debuglvl = Int(debug) if debuglvl > 0 LoggingExtras.withlevel(LoggingExtras.Debug; verbosity=debuglvl) do - _runtests(shouldrun_combined, paths′, nworkers, nworker_threads, worker_init_expr, testitem_timeout, retries, verbose_results, debuglvl, report, logs) + _runtests(shouldrun_combined, paths′, nworkers, nworker_threads, worker_init_expr, test_end_expr, testitem_timeout, retries, verbose_results, debuglvl, report, logs) end else - return _runtests(shouldrun_combined, paths′, nworkers, nworker_threads, worker_init_expr, testitem_timeout, retries, verbose_results, debuglvl, report, logs) + return _runtests(shouldrun_combined, paths′, nworkers, nworker_threads, worker_init_expr, test_end_expr, testitem_timeout, retries, verbose_results, debuglvl, report, logs) end end @@ -225,7 +238,7 @@ end # By tracking and reusing test environments, we can avoid this issue. const TEST_ENVS = Dict{String, String}() -function _runtests(shouldrun, paths, nworkers::Int, nworker_threads::String, worker_init_expr::Expr, testitem_timeout::Real, retries::Int, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol) +function _runtests(shouldrun, paths, nworkers::Int, nworker_threads::String, worker_init_expr::Expr, test_end_expr::Expr, testitem_timeout::Real, retries::Int, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol) # Don't recursively call `runtests` e.g. if we `include` a file which calls it. 
# So we ignore the `runtests(...)` call in `test/runtests.jl` when `runtests(...)` # was called from the command line. @@ -245,7 +258,7 @@ function _runtests(shouldrun, paths, nworkers::Int, nworker_threads::String, wor if is_running_test_runtests_jl(proj_file) # Assume this is `Pkg.test`, so test env already active. @debugv 2 "Running in current environment `$(Base.active_project())`" - return _runtests_in_current_env(shouldrun, paths, proj_file, nworkers, nworker_threads, worker_init_expr, testitem_timeout, retries, verbose_results, debug, report, logs) + return _runtests_in_current_env(shouldrun, paths, proj_file, nworkers, nworker_threads, worker_init_expr, test_end_expr, testitem_timeout, retries, verbose_results, debug, report, logs) else @debugv 1 "Activating test environment for `$proj_file`" orig_proj = Base.active_project() @@ -258,7 +271,7 @@ function _runtests(shouldrun, paths, nworkers::Int, nworker_threads::String, wor testenv = TestEnv.activate() TEST_ENVS[proj_file] = testenv end - _runtests_in_current_env(shouldrun, paths, proj_file, nworkers, nworker_threads, worker_init_expr, testitem_timeout, retries, verbose_results, debug, report, logs) + _runtests_in_current_env(shouldrun, paths, proj_file, nworkers, nworker_threads, worker_init_expr, test_end_expr, testitem_timeout, retries, verbose_results, debug, report, logs) finally Base.set_active_project(orig_proj) end @@ -267,7 +280,7 @@ function _runtests(shouldrun, paths, nworkers::Int, nworker_threads::String, wor end function _runtests_in_current_env( - shouldrun, paths, projectfile::String, nworkers::Int, nworker_threads, worker_init_expr::Expr, + shouldrun, paths, projectfile::String, nworkers::Int, nworker_threads, worker_init_expr::Expr, test_end_expr::Expr, testitem_timeout::Real, retries::Int, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol, ) start_time = time() @@ -294,7 +307,7 @@ function _runtests_in_current_env( run_number = 1 max_runs = 1 + max(retries, testitem.retries) 
while run_number ≤ max_runs - res = runtestitem(testitem, ctx; verbose_results, logs) + res = runtestitem(testitem, ctx; test_end_expr, verbose_results, logs) ts = res.testset print_errors_and_captured_logs(testitem, run_number; logs) report_empty_testsets(testitem, ts) @@ -333,7 +346,7 @@ function _runtests_in_current_env( ti = starting[i] @spawn begin with_logger(original_logger) do - manage_worker($w, $proj_name, $testitems, $ti, $nworker_threads, $worker_init_expr, $testitem_timeout, $retries, $verbose_results, $debug, $report, $logs) + manage_worker($w, $proj_name, $testitems, $ti, $nworker_threads, $worker_init_expr, $test_end_expr, $testitem_timeout, $retries, $verbose_results, $debug, $report, $logs) end end end @@ -441,7 +454,7 @@ function record_test_error!(testitem, msg, elapsed_seconds::Real=0.0) end function manage_worker( - worker::Worker, proj_name, testitems, testitem, nworker_threads, worker_init_expr, + worker::Worker, proj_name, testitems, testitem, nworker_threads, worker_init_expr::Expr, test_end_expr::Expr, timeout::Real, retries::Int, verbose_results::Bool, debug::Int, report::Bool, logs::Symbol ) ntestitems = length(testitems.testitems) @@ -449,7 +462,7 @@ function manage_worker( while testitem !== nothing ch = Channel{TestItemResult}(1) testitem.workerid[] = worker.pid - fut = remote_eval(worker, :(ReTestItems.runtestitem($testitem, GLOBAL_TEST_CONTEXT; verbose_results=$verbose_results, logs=$(QuoteNode(logs))))) + fut = remote_eval(worker, :(ReTestItems.runtestitem($testitem, GLOBAL_TEST_CONTEXT; test_end_expr=$(QuoteNode(test_end_expr)), verbose_results=$verbose_results, logs=$(QuoteNode(logs))))) max_runs = 1 + max(retries, testitem.retries) try timer = Timer(timeout) do tm @@ -823,19 +836,22 @@ end # when `runtestitem` called directly or `@testitem` called outside of `runtests`. 
function runtestitem( ti::TestItem, ctx::TestContext; - logs::Symbol=:eager, verbose_results::Bool=true, finish_test::Bool=true, + test_end_expr::Expr=Expr(:block), logs::Symbol=:eager, verbose_results::Bool=true, finish_test::Bool=true, ) name = ti.name log_testitem_start(ti, ctx.ntestitems) ts = DefaultTestSet(name; verbose=verbose_results) stats = PerfStats() - # start with empty block expr and build up our @testitem module body + # start with empty block expr and build up our `@testitem` and `test_end_expr` module bodies body = Expr(:block) + test_end_body = Expr(:block) if ti.default_imports push!(body.args, :(using Test)) + push!(test_end_body.args, :(using Test)) if !isempty(ctx.projectname) # this obviously assumes we're in an environment where projectname is reachable push!(body.args, :(using $(Symbol(ctx.projectname)))) + push!(test_end_body.args, :(using $(Symbol(ctx.projectname)))) end end Test.push_testset(ts) @@ -865,27 +881,29 @@ function runtestitem( push!(body.args, :(const $setup = $ts_mod)) end @debugv 1 "Setup for test item $(repr(name)) done$(_on_worker())." 
- # add our @testitem quoted code to module body expr + + # add our `@testitem` quoted code to module body expr append!(body.args, ti.code.args) mod_expr = :(module $(gensym(name)) end) - # replace the module body with our built up expr - # we're being a bit sneaky here by calling softscope on each top-level body expr - # which has the effect of test item body acting like you're at the REPL or - # inside a testset, except imports/using/etc all still work as expected - # more info: https://docs.julialang.org/en/v1.10-dev/manual/variables-and-scoping/#on-soft-scope - for i = 1:length(body.args) - body.args[i] = softscope(body.args[i]) - end + softscope_all!(body) mod_expr.args[3] = body + + # add the `test_end_expr` to a module to be run after the test item + append!(test_end_body.args, test_end_expr.args) + softscope_all!(test_end_body) + test_end_mod_expr = :(module $(gensym(name * " test_end")) end) + test_end_mod_expr.args[3] = test_end_body + # eval the testitem into a temporary module, so that all results can be GC'd # once the test is done and sent over the wire. (However, note that anonymous modules # aren't always GC'd right now: https://github.com/JuliaLang/julia/issues/48711) - @debugv 1 "Evaluating test item $(repr(name))$(_on_worker())." # disabled for now since there were issues when tests tried serialize/deserialize # with things defined in an anonymous module # environment = Module() + @debugv 1 "Evaluating test item $(repr(name))$(_on_worker())." _, stats = @timed_with_compilation _redirect_logs(logs == :eager ? DEFAULT_STDOUT[] : logpath(ti)) do with_source_path(() -> Core.eval(Main, mod_expr), ti.file) + with_source_path(() -> Core.eval(Main, test_end_mod_expr), ti.file) nothing # return nothing as the first return value of @timed_with_compilation end @debugv 1 "Done evaluating test item $(repr(name))$(_on_worker())." 
diff --git a/test/_integration_test_tools.jl b/test/_integration_test_tools.jl index 7530a752..69134929 100644 --- a/test/_integration_test_tools.jl +++ b/test/_integration_test_tools.jl @@ -6,6 +6,7 @@ # (ii) do _not_ want expected fail/errors to cause ReTestItems' tests to fail/error # This is not `@test_throws` etc, because we're not testing that the code fails/errors # we're testing that _the tests themselves_ fail/error. +using Test """ EncasedTestSet(desc, results) <: AbstractTestset diff --git a/test/integrationtests.jl b/test/integrationtests.jl index 2a769dd7..dcfc4853 100644 --- a/test/integrationtests.jl +++ b/test/integrationtests.jl @@ -21,7 +21,7 @@ const TEST_PKG_DIR = joinpath(_TEST_DIR, "packages") # Note "DontPass.jl" is handled specifically below, as it's the package which doesn't have # passing tests. Other packages should pass tests and be added here: -const TEST_PKGS = ("NoDeps.jl", "TestsInSrc.jl", "TestProjectFile.jl") +const TEST_PKGS = ("NoDeps.jl", "TestsInSrc.jl", "TestProjectFile.jl", "TestEndExpr.jl") include(joinpath(_TEST_DIR, "_integration_test_tools.jl")) @@ -182,11 +182,18 @@ end # `runtests` runs in this process, so allows us to actually record the test results, # which means we can tests that runtests ran the tests we expected it to. @testset "runtests() $pkg" for pkg in TEST_PKGS - results = with_test_package(pkg) do - runtests() + if pkg == "TestEndExpr.jl" + # TestEndExpr.jl requires `worker_init_expr` which isn't supported when nworkers=0. 
+ @test_skip with_test_package(pkg) do + runtests() + end + else + results = with_test_package(pkg) do + runtests() + end + @test n_passed(results) > 0 # tests were found and ran + @test all_passed(results) # no tests failed/errored end - @test n_passed(results) > 0 # tests were found and ran - @test all_passed(results) # no tests failed/errored end @testset "runtests() DontPass.jl" begin results = with_test_package("DontPass.jl") do @@ -827,4 +834,116 @@ end end end +@testset "test_end_expr" begin + # `_happy_tests.jl` has 3 testitems with 1 passing test each. + file = joinpath(TEST_FILES_DIR, "_happy_tests.jl") + # Test that running a `test_end_expr` after each testitem works; + # should work exactly the same no matter if we use workers or not. + @testset "nworkers=$nworkers" for nworkers in (0, 1, 2) + @testset "post-testitem checks pass" begin + # Here there should be two extra passing tests per testitem. + test_end1 = quote + @test 1 == 1 + @test 2 == 2 + end + results1 = encased_testset() do + runtests(file; nworkers, test_end_expr=test_end1) + end + @test n_tests(results1) == 9 + @test all_passed(results1) + end + + @testset "post-testitem checks fail" begin + # Here there should be one extra failing test per testitem. + test_end2 = quote + @test 1 == 2 + end + results2 = encased_testset() do + runtests(file; nworkers, test_end_expr=test_end2) + end + @test n_tests(results2) == 6 + @test n_passed(results2) == 3 + @test length(failures(results2)) == 3 + end + end + @testset "report printing" begin + using IOCapture + # Test that for a passing `test_end_expr` we just report the total number of tests + # including the extra tests, which here is an extra 1 test per testitem. 
+ test_end3 = quote @test true end + results3 = encased_testset() do + runtests(file; nworkers=1, test_end_expr=test_end3) + end + c3 = IOCapture.capture() do + Test.print_test_results(results3) + end + @assert n_tests(results3) == 6 + @assert all_passed(results3) + @test contains( + c3.output, + r""" + Test Summary: \| Pass Total Time + ReTestItems \| 6 6 \d.\ds + """ + ) + # Test that for a failing `test_end_expr` we report the failing tests, including + # the `@testset` which we have put inside the `test_end_expr`. + test_end4 = quote + @testset "post-testitem" begin + @test false + end + end + results4 = encased_testset() do + runtests(file; nworkers=1, test_end_expr=test_end4) + end + @test n_tests(results4) == 6 + @test n_passed(results4) == 3 + c4 = IOCapture.capture() do + Test.print_test_results(results4) + end + @test contains( + c4.output, + r""" + Test Summary: \| Pass Fail Total Time + ReTestItems \| 3 3 6 \d.\ds + """ + ) + @test contains( + c4.output, + r""" + \s* happy 1 \| 1 1 2 \d.\ds + \s* post-testitem \| 1 1 \d.\ds + """ + ) + end + @testset "TestEndExpr.jl package" begin + # Test that the TestEndExpr.jl package passes without the `test_end_expr` + # checking for dangling pins. + worker_init_expr = quote + using TestEndExpr: init_pager! + init_pager!() + end + results_no_end = with_test_package("TestEndExpr.jl") do + runtests(; nworkers=1, worker_init_expr) + end + @test all_passed(results_no_end) + # Test that the TestEndExpr.jl package fails when we add the `test_end_expr` + # checking for dangling pins. 
+ test_end_expr = quote + using TestEndExpr: GLOBAL_PAGER, count_pins + p = GLOBAL_PAGER[] + (isnothing(p) || isempty(p.pages)) && return nothing + @testset "no pins left at end of test" begin + @test count_pins(p) == 0 + end + end + results_with_end = with_test_package("TestEndExpr.jl") do + runtests(; nworkers=1, worker_init_expr, test_end_expr) + end + @test !all_passed(results_with_end) + @test n_passed(results_with_end) ≥ 1 + @test length(failures(results_with_end)) ≥ 1 + end +end + end # integrationtests.jl testset diff --git a/test/packages/README.md b/test/packages/README.md index 14a014f7..cc35199f 100644 --- a/test/packages/README.md +++ b/test/packages/README.md @@ -13,3 +13,4 @@ See `test/integrationtests.jl`. - *TestsInSrc.jl* - A package which has all of its `@testitems` in the `src/` directory. - *TestProjectFile.jl* - A package which has test-only dependencies declared in a `test/Project.toml`. - *MonoRepo.jl* - A package which depends on local, unregistered sub-packages. See `MonoRepo.jl/README.md`. +- *TestEndExpr.jl* - A package which requires users to uphold an invariant which we would want to test is being upheld by all code run in the tests. This provides a use-case for the `test_end_expr` functionality. 
diff --git a/test/packages/TestEndExpr.jl/Manifest.toml b/test/packages/TestEndExpr.jl/Manifest.toml new file mode 100644 index 00000000..499c1c7b --- /dev/null +++ b/test/packages/TestEndExpr.jl/Manifest.toml @@ -0,0 +1,154 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.9.2" +manifest_format = "2.0" +project_hash = "31dabfddf1e36ecf2dc5cff56cdddc361b29f16f" + +[[deps.ArgTools]] +uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" +version = "1.1.1" + +[[deps.Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" + +[[deps.Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[deps.Dates]] +deps = ["Printf"] +uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" + +[[deps.Downloads]] +deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] +uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +version = "1.6.0" + +[[deps.FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" + +[[deps.InteractiveUtils]] +deps = ["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[deps.LibCURL]] +deps = ["LibCURL_jll", "MozillaCACerts_jll"] +uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" +version = "0.6.3" + +[[deps.LibCURL_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] +uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" +version = "7.84.0+0" + +[[deps.LibGit2]] +deps = ["Base64", "NetworkOptions", "Printf", "SHA"] +uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" + +[[deps.LibSSH2_jll]] +deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" +version = "1.10.2+0" + +[[deps.Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[[deps.Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[deps.LoggingExtras]] +deps = ["Dates", "Logging"] +git-tree-sha1 = "a03c77519ab45eb9a34d3cfe2ca223d79c064323" +uuid = "e6f89c97-d47a-5376-807f-9c37f3926c36" +version = "1.0.1" + +[[deps.Markdown]] +deps = ["Base64"] +uuid = 
"d6f4376e-aef5-505a-96c1-9c027394607a" + +[[deps.MbedTLS_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" +version = "2.28.2+0" + +[[deps.MozillaCACerts_jll]] +uuid = "14a3606d-f60d-562e-9121-12d972cd8159" +version = "2022.10.11" + +[[deps.NetworkOptions]] +uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" +version = "1.2.0" + +[[deps.Pkg]] +deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] +uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" +version = "1.9.2" + +[[deps.Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" + +[[deps.REPL]] +deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] +uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" + +[[deps.Random]] +deps = ["SHA", "Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[deps.ReTestItems]] +deps = ["Dates", "Logging", "LoggingExtras", "Pkg", "Serialization", "Sockets", "Test", "TestEnv"] +path = "../../.." 
+uuid = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" +version = "1.16.0" + +[[deps.SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" +version = "0.7.0" + +[[deps.Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[deps.Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[deps.TOML]] +deps = ["Dates"] +uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" +version = "1.0.3" + +[[deps.Tar]] +deps = ["ArgTools", "SHA"] +uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" +version = "1.10.0" + +[[deps.Test]] +deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[deps.TestEnv]] +deps = ["Pkg"] +git-tree-sha1 = "dca851f0824deaf6a73ef1308fe7b2c53239c710" +uuid = "1e6cf692-eddd-4d53-88a5-2d735e33781b" +version = "1.100.1" + +[[deps.UUIDs]] +deps = ["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" + +[[deps.Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" + +[[deps.Zlib_jll]] +deps = ["Libdl"] +uuid = "83775a58-1f1d-513f-b197-d71354ab007a" +version = "1.2.13+0" + +[[deps.nghttp2_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" +version = "1.48.0+0" + +[[deps.p7zip_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" +version = "17.4.0+0" diff --git a/test/packages/TestEndExpr.jl/Project.toml b/test/packages/TestEndExpr.jl/Project.toml new file mode 100644 index 00000000..a01aa6b8 --- /dev/null +++ b/test/packages/TestEndExpr.jl/Project.toml @@ -0,0 +1,13 @@ +name = "TestEndExpr" +uuid = "4560ce14-60c1-53a9-8c5b-16f535851c77" +version = "0.0.0" + +[deps] +ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" + +[extras] +ReTestItems = "817f1d60-ba6b-4fd5-9520-3cf149f6a823" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[targets] +test = ["ReTestItems", "Test"] diff --git a/test/packages/TestEndExpr.jl/README.md b/test/packages/TestEndExpr.jl/README.md new file mode 100644 index 00000000..b0010b98 --- /dev/null +++ 
b/test/packages/TestEndExpr.jl/README.md @@ -0,0 +1,14 @@ +# TestEndExpr.jl + +A package for the sake of testing the `test_end_expr` functionality of `ReTestItems.runtests`. + +We want to emulate a situation where correct usage of a package requires users to uphold a certain invariant. +In this very simplified example, `Page` objects must be pinned via `pin!` when in use, and subsequently unpinned via `unpin!` when no longer in use; there must be an equal number of `pin!` and `unpin!` calls in order to avoid a memory leak. + +We want to be able to write potentially very many `@testitem`s which test all sorts of functionality that use `Page`s under the hood. +But we also want to test that every such usage correctly upholds the invariant, i.e. no `Page` is left "pinned" at the end of the `@testitem`. + +We could do that by manually adding something like `@test no_pages_pinned()` as the last line of every `@testitem`, but we might not want to rely on test authors remembering to do this. +So instead, we want to use `test_end_expr` to declare a block of code like `@test no_pages_pinned()` to automatically run at the end of every `@testitem`. + +In the tests of this package, we define test-items that **pass** when run without such a `test_end_expr`, but at least one of the test-items **fails** when run with a `test_end_expr` testing that no pages are left pinned. diff --git a/test/packages/TestEndExpr.jl/src/TestEndExpr.jl b/test/packages/TestEndExpr.jl/src/TestEndExpr.jl new file mode 100644 index 00000000..215588ca --- /dev/null +++ b/test/packages/TestEndExpr.jl/src/TestEndExpr.jl @@ -0,0 +1,56 @@ +module TestEndExpr + +export Pager, Page, good_read, bad_read + +mutable struct Page + content::Any + @atomic pincount::Int + function Page(x) + p = new(x, 0) # 1 would be more realistic, but 0 keeps things simpler for our tests. 
+ @assert !isnothing(GLOBAL_PAGER[]) + @lock GLOBAL_PAGER[].lock push!(GLOBAL_PAGER[].pages, p) + return p + end +end + +pin!(p::Page) = @atomic p.pincount += 1 +unpin!(p::Page) = @atomic p.pincount -= 1 + +function read_content(p::Page) + @assert p.pincount > 0 + return p.content +end + +function good_read(p::Page) + pin!(p) + content = read_content(p) + unpin!(p) + return content +end + +function bad_read(p::Page) + pin!(p) + content = read_content(p) + #= No `unpin!(p)` call! =# + return content +end + +struct Pager + pages::Vector{Page} + lock::ReentrantLock +end +Pager() = Pager(Page[], ReentrantLock()) + +## `Pager` is free to delete pages if they are not pinned. +## Commented out since it is just for example and not used. +# cleanup!(p::Pager) = @lock p.lock deleteat!(p.pages, count_pins.(p.pages) .== 0) + +count_pins(p::Page) = p.pincount +function count_pins(p::Pager) + return @lock p.lock sum(count_pins, p.pages; init=0) +end + +const GLOBAL_PAGER = Ref{Union{Nothing,Pager}}(nothing) +init_pager!() = isnothing(GLOBAL_PAGER[]) && (GLOBAL_PAGER[] = Pager()) + +end # module diff --git a/test/packages/TestEndExpr.jl/test/page_tests.jl b/test/packages/TestEndExpr.jl/test/page_tests.jl new file mode 100644 index 00000000..9cb7701b --- /dev/null +++ b/test/packages/TestEndExpr.jl/test/page_tests.jl @@ -0,0 +1,11 @@ +@testitem "correct usage" begin + p = Page("Once upon a time") + # `good_read` successfully reads the page and leaves it unpinned. 
+ @test good_read(p) == "Once upon a time" +end + +@testitem "incorrect usage" begin + p = Page("A long time ago") + # `bad_read` successfully reads the page *but forgets to unpin it!* + @test bad_read(p) == "A long time ago" +end diff --git a/test/packages/TestEndExpr.jl/test/runtests.jl b/test/packages/TestEndExpr.jl/test/runtests.jl new file mode 100644 index 00000000..309b133f --- /dev/null +++ b/test/packages/TestEndExpr.jl/test/runtests.jl @@ -0,0 +1,21 @@ +using ReTestItems +using TestEndExpr + +const worker_init_expr = quote + using TestEndExpr: init_pager! + init_pager!() +end + +## This is the sort of `test_end_expr` we would want to run +## We don't use this here in the TestEndExpr.jl tests, so that all the tests pass. +## Instead we set in `ReTestItems/test/integrationtests.jl` so we can test +## that such a `test_end_expr` causes the tests to fail. +# const test_end_expr = quote +# p = GLOBAL_PAGER[] +# (isnothing(p) || isempty(p.pages)) && return nothing +# @testset "no pins left at end of test" begin +# @test count_pins(p) == 0 +# end +# end + +runtests(TestEndExpr; nworkers=1, worker_init_expr)