diff --git a/README.md b/README.md
index ac7b6bd..90d33e7 100644
--- a/README.md
+++ b/README.md
@@ -48,7 +48,7 @@ Running the tests are as simple as placing the file in a `tests` folder and then
 You can see more tests in this repo's [tests](https://github.com/FourierTransformer/tested/tree/main/tests) folder!
 
 ## AI Disclosure
-As of versions > 0.1.0, AI has been used to help implement features, research Lua/Teal internals, debug issues, and make more readable output. Before this version, the code was hand-written, but some research was done with the help of AI. The docs will remain hand-written for now. I am personally still a little skeptical of AI and its place in open source, but at the moment am willing to evaluate it.
+As of versions > 0.1.0, AI has been used to help implement _some_ features, research Lua/Teal internals, debug issues, and make more readable output. Before this version, the code was hand-written, but some research was done with the help of AI. The docs will remain hand-written for now. I am personally still a little skeptical of AI and its place in open source, but at the moment am willing to evaluate it.
 
 ## Licenses
 Parts of the following are included in the source code present in this repo:
diff --git a/build/tested.lua b/build/tested.lua
index 280f509..66fba6b 100644
--- a/build/tested.lua
+++ b/build/tested.lua
@@ -3,27 +3,35 @@ local assert_table = require("tested.assert_table")
 
 local tested = { tests = {}, run_only_tests = false }
 
-function tested.test(name, fn)
-   table.insert(tested.tests, { name = name, fn = fn, kind = "test" })
-end
+local function extract_fn_and_options(fn_or_options, fn)
+   local options = {}
+   if type(fn_or_options) == "function" then
+      fn = fn_or_options
+
+   elseif type(fn_or_options) == "table" then
+      options = fn_or_options
+      if not fn then error("a function must be provided to run a unit test") end
+      fn = fn
+   end
 
-function tested.skip(name, fn)
-   table.insert(tested.tests, { name = name, fn = fn, kind = "skip" })
+   return fn, options
 end
 
-function tested.only(name, fn)
-   tested.run_only_tests = true
-   table.insert(tested.tests, { name = name, fn = fn, kind = "only" })
+
+function tested.test(name, fn_or_options, fn)
+   local func, options = extract_fn_and_options(fn_or_options, fn)
+   table.insert(tested.tests, { name = name, fn = func, options = options, kind = "test" })
 end
 
-function tested.conditional_test(name, condition, fn)
-   if condition then
-      table.insert(tested.tests, { name = name, fn = fn, kind = "conditional_test" })
-   else
-      table.insert(tested.tests, { name = name, fn = fn, kind = "conditional_skip" })
-   end
+function tested.skip(name, fn_or_options, fn)
+   local func, options = extract_fn_and_options(fn_or_options, fn)
+   table.insert(tested.tests, { name = name, fn = func, options = options, kind = "skip" })
 end
 
+function tested.only(name, fn_or_options, fn)
+   tested.run_only_tests = true
+   local func, options = extract_fn_and_options(fn_or_options, fn)
+   table.insert(tested.tests, { name = name, fn = func, options = options, kind = "only" })
+end
 
 function tested.assert(assertion)
    local errors = {}
@@ -112,7 +120,7 @@ function tested:run(filename, options)
    end
 
    local test_results = {
-      counts = { passed = 0, failed = 0, skipped = 0, invalid = 0 },
+      counts = { passed = 0, failed = 0, expected = 0, skipped = 0, invalid = 0 },
       tests = {},
       filename = filename,
       fully_tested = false,
@@ -135,7 +143,7 @@ function tested:run(filename, options)
          test_results.tests[i].time = 0
         test_results.counts.skipped = test_results.counts.skipped + 1
 
-      elseif test.kind == "conditional_skip" then
+      elseif test.options.run_when == false then
          test_results.tests[i].result = "CONDITIONAL_SKIP"
-         test_results.tests[i].message = "Condition in `tested.conditional_skip` returned false. Skipping test."
+         test_results.tests[i].message = "The `run_when` option was false. Skipping test."
         test_results.tests[i].time = 0
@@ -181,22 +189,49 @@ function tested:run(filename, options)
          if ok == false then
             test_results.tests[i].result = "EXCEPTION"
             test_results.tests[i].message = err .. "\n" .. debug.traceback()
-            test_results.counts.invalid = test_results.counts.invalid + 1
 
          elseif total_assertions == 0 then
             test_results.tests[i].result = "UNKNOWN"
             test_results.tests[i].message = "No assertions run during test"
-            test_results.counts.invalid = test_results.counts.invalid + 1
 
          elseif assert_failed_count == 0 then
             test_results.tests[i].result = "PASS"
             test_results.tests[i].message = "All assertions have passed"
-            test_results.counts.passed = test_results.counts.passed + 1
 
          else
             test_results.tests[i].result = "FAIL"
             test_results.tests[i].message = assert_failed_count .. " assertions have failed"
+         end
+
+
+         if test.options.expected ~= nil then
+            if test_results.tests[i].result == test.options.expected then
+               if test.options.expected == "EXCEPTION" then
+                  test_results.tests[i].result = "EXPECTED_EXCEPTION"
+               elseif test.options.expected == "UNKNOWN" then
+                  test_results.tests[i].result = "EXPECTED_UNKNOWN"
+               elseif test.options.expected == "FAIL" then
+                  test_results.tests[i].result = "EXPECTED_FAIL"
+               end
+            else
+               test_results.tests[i].message = "Expected test result to be " .. test.options.expected .. ", but came back as " .. test_results.tests[i].result .. "\n" .. test_results.tests[i].message
+               test_results.tests[i].result = "UNEXPECTED"
+            end
+         end
+
+
+         if test_results.tests[i].result == "PASS" then
+            test_results.counts.passed = test_results.counts.passed + 1
+
+         elseif test_results.tests[i].result == "FAIL" then
             test_results.counts.failed = test_results.counts.failed + 1
+
+         elseif test_results.tests[i].result == "EXPECTED_FAIL" or test_results.tests[i].result == "EXPECTED_EXCEPTION" or test_results.tests[i].result == "EXPECTED_UNKNOWN" then
+            test_results.counts.expected = test_results.counts.expected + 1
+
+         elseif test_results.tests[i].result == "EXCEPTION" or test_results.tests[i].result == "UNKNOWN" or test_results.tests[i].result == "UNEXPECTED" then
+            test_results.counts.invalid = test_results.counts.invalid + 1
+         end
       end
    end
diff --git a/build/tested/main.lua b/build/tested/main.lua
index 4e85084..eb25246 100644
--- a/build/tested/main.lua
+++ b/build/tested/main.lua
@@ -24,12 +24,15 @@ local TESTED_VERSION = "tested v0.1.0"
 
 
+
+
 local cli_to_display = {
    ["skip"] = "SKIP",
    ["pass"] = "PASS",
    ["fail"] = "FAIL",
    ["exception"] = "EXCEPTION",
    ["unknown"] = "UNKNOWN",
+   ["unexpected"] = "UNEXPECTED",
 }
 
 
@@ -64,7 +67,7 @@ local function parse_args()
    default(false)
    parser:option("-s --show"):
-   description("What test results to display (default: '-s fail -s exception -s unknown')"):
-   choices({ "all", "valid", "invalid", "skip", "pass", "fail", "exception", "unknown" }):
+   description("What test results to display (default: '-s fail -s exception -s unknown -s unexpected')"):
+   choices({ "all", "valid", "invalid", "skip", "pass", "fail", "exception", "unknown", "expected", "unexpected" }):
    count("*")
    parser:mutex(
    parser:option("-f --display-format"):
@@ -100,7 +103,7 @@ end
 local function set_defaults(args)
    logger:info("Setting Defaults...")
    if #args.show == 0 then
-      args.show = { "fail", "exception", "unknown" }
+      args.show = { "fail", "exception", "unknown", "unexpected" }
       args.specified_show = false
    else
       args.specified_show = true
@@ -111,7 +114,7 @@ local function set_defaults(args)
    local show_all = false
    for _, display_option in ipairs(args.show) do if display_option == "all" then show_all = true; break end end
-   if show_all then args.show = { "skip", "pass", "fail", "exception", "unknown" } end
+   if show_all then args.show = { "skip", "pass", "fail", "exception", "unknown", "expected", "unexpected" } end
 end
 
 local function validate_args(args)
@@ -221,6 +224,7 @@ local function display_types(options)
       if cli_to_display[cli_option] then
          to_display[cli_to_display[cli_option]] = true
         if cli_option == "skip" then
+            to_display["SKIP"] = true
            to_display["CONDITIONAL_SKIP"] = true
         end
      else
@@ -228,11 +232,19 @@ local function display_types(options)
            to_display["EXCEPTION"] = true
            to_display["UNKNOWN"] = true
            to_display["TIMEOUT"] = true
+           to_display["UNEXPECTED"] = true
         elseif cli_option == "valid" then
            to_display["PASS"] = true
            to_display["SKIP"] = true
            to_display["CONDITIONAL_SKIP"] = true
            to_display["FAIL"] = true
+           to_display["EXPECTED_FAIL"] = true
+           to_display["EXPECTED_EXCEPTION"] = true
+           to_display["EXPECTED_UNKNOWN"] = true
+        elseif cli_option == "expected" then
+           to_display["EXPECTED_FAIL"] = true
+           to_display["EXPECTED_EXCEPTION"] = true
+           to_display["EXPECTED_UNKNOWN"] = true
         end
      end
   end
diff --git a/build/tested/results/terminal.lua b/build/tested/results/terminal.lua
index 69eb43f..5e6b78b 100644
--- a/build/tested/results/terminal.lua
+++ b/build/tested/results/terminal.lua
@@ -10,6 +10,10 @@ local symbol_map = {
    EXCEPTION = " !",
    TIMEOUT = " ⏱",
    UNKNOWN = " ?",
+   EXPECTED_FAIL = "(✗)",
+   EXPECTED_EXCEPTION = "(!)",
+   EXPECTED_UNKNOWN = "(?)",
+   UNEXPECTED = " ‽",
 }
 
 local color_map = {
@@ -20,6 +24,10 @@ local color_map = {
    EXCEPTION = " %{cyan}",
    TIMEOUT = " %{blue}",
    UNKNOWN = " %{magenta}",
+   EXPECTED_FAIL = " %{dim red}",
+   EXPECTED_EXCEPTION = " %{dim cyan}",
+   EXPECTED_UNKNOWN = " %{dim magenta}",
+   UNEXPECTED = " %{bright red}",
 }
 
 local terminal = {}
@@ -78,7 +86,7 @@ function terminal.results(tested_result, test_types_to_display)
       if test_types_to_display[test_result.result] then
          tadd.add(color_map[test_result.result], symbol_map[test_result.result], " ", test_result.name, to_ms(test_result.time, false), "%{reset}\n")
          local extra_newline = false
-         if test_result.result == "FAIL" or test_result.result == "PASS" then
+         if test_result.result == "FAIL" or test_result.result == "PASS" or test_result.result == "EXPECTED_FAIL" then
            for _, assertion_result in ipairs(test_result.assertion_results) do
               if (assertion_result.result == "FAIL" and test_types_to_display["FAIL"]) or assertion_result.result == "PASS" and test_types_to_display["PASS"] then
                  format_assertion_result(assertion_result)
@@ -92,7 +100,7 @@ function terminal.results(tested_result, test_types_to_display)
            if extra_newline then tadd.add("\n") end
         end
 
-        if test_result.result == "EXCEPTION" or test_result.result == "UNKNOWN" then
+        if test_result.result == "EXCEPTION" or test_result.result == "UNKNOWN" or test_result.result == "UNEXPECTED" or test_result.result == "EXPECTED_EXCEPTION" or test_result.result == "EXPECTED_UNKNOWN" then
           tadd.add("      ", (test_result.message:gsub("\n", "\n      ")), "\n")
           tadd.add("\n")
        end
@@ -118,7 +126,9 @@ function terminal.summary(output)
       tostring(output.total_counts.passed),
       " passed%{reset}, %{red}",
       tostring(output.total_counts.failed),
-      " failed%{reset}\n")
+      " failed%{reset}, ",
+      tostring(output.total_counts.expected),
+      " expected\n")
 
    tadd.add(
diff --git a/build/tested/test_runner.lua b/build/tested/test_runner.lua
index 534e3b8..1477d40 100644
--- a/build/tested/test_runner.lua
+++ b/build/tested/test_runner.lua
@@ -50,7 +50,7 @@ function test_runner.run_tests(
       total_time = 0,
       total_tests = 0,
       all_fully_tested = true,
-      total_counts = { passed = 0, failed = 0, skipped = 0, invalid = 0 },
+      total_counts = { passed = 0, failed = 0, expected = 0, skipped = 0, invalid = 0 },
       module_results = {},
    }
    local coverage_results = {}
@@ -85,6 +85,7 @@ function test_runner.run_tests(
       if test_output.fully_tested == false then output.all_fully_tested = false end
       output.total_counts.passed = output.total_counts.passed + test_output.counts.passed
       output.total_counts.failed = output.total_counts.failed + test_output.counts.failed
+      output.total_counts.expected = output.total_counts.expected + test_output.counts.expected
       output.total_counts.skipped = output.total_counts.skipped + test_output.counts.skipped
       output.total_counts.invalid = output.total_counts.invalid + test_output.counts.invalid
       output.total_time = output.total_time + test_output.total_time
@@ -114,7 +115,7 @@ local function run_parallel_tests(
       total_time = 0,
       total_tests = 0,
       all_fully_tested = true,
-      total_counts = { passed = 0, failed = 0, skipped = 0, invalid = 0 },
+      total_counts = { passed = 0, failed = 0, expected = 0, skipped = 0, invalid = 0 },
       module_results = {},
    }
    local coverage_results = {}
@@ -158,6 +159,7 @@ local function run_parallel_tests(
       if test_output.fully_tested == false then output.all_fully_tested = false end
       output.total_counts.passed = output.total_counts.passed + test_output.counts.passed
       output.total_counts.failed = output.total_counts.failed + test_output.counts.failed
+      output.total_counts.expected = output.total_counts.expected + test_output.counts.expected
       output.total_counts.skipped = output.total_counts.skipped + test_output.counts.skipped
       output.total_counts.invalid = output.total_counts.invalid + test_output.counts.invalid
       output.total_time = output.total_time + test_output.total_time
diff --git a/build/tested/types.lua b/build/tested/types.lua
index c30ba06..b4fecf9 100644
--- a/build/tested/types.lua
+++ b/build/tested/types.lua
@@ -108,6 +108,25 @@ local types = {}
 
 
 
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
 
 
 
diff --git a/docs/api-reference.md b/docs/api-reference.md
index 9d3681f..4d3e6c5 100644
--- a/docs/api-reference.md
+++ b/docs/api-reference.md
@@ -1,14 +1,18 @@
-# API Reference
+# API Quick Reference
 
-### Tests
+## Tests
 
-- `tested.test(name: string, fn: function())`
-- `tested.skip(name: string, fn: function())` - don't run this test. Useful if something is known-broken and you want to disable a test.
+- `tested.test(name: string, options?: table, fn: function())`
+- `tested.skip(name: string, options?: table, fn: function())` - don't run this test. Useful if something is known-broken and you want to temporarily disable a test.
 - `tested.only(name: string, fn: function())` - will only run these tests. Useful if you want to debug a few tests in a large file.
-- `tested.conditional_test(name: string, condition: boolean, fn: function())` - Will only run the test if the condition passes.
-  - Ex: the condition could be `type(jit) == 'table'` and the test will only run on LuaJIT
-### Asserts
+### Options
+- `expected: "FAIL", "EXCEPTION", or "UNKNOWN"` - Useful if a unit test is going to be broken for an extended period of time and you want to mark it as a specific expected result. It will not be shown by default in the output (but can be enabled by `--show expected`, `--show all`, or `--show valid`). If the result stops being the expected value, the test will display and count as `invalid`.
+  - ex: `tested.test("fails all the time", {expected="FAIL"}, function() ... end)`
+- `run_when: boolean` - a value that can be determined at runtime - useful if the test should only run on LuaJIT, on a certain operating system, or under a certain configuration.
+  - ex: `tested.test("luajit only", {run_when=type(jit) == 'table'}, function() ... end)` - will only run when executing via LuaJIT
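+
+The two options can also be combined - a minimal sketch (reusing the `sum` helper from the docs examples):
+
+```lua
+-- only runs under LuaJIT, and is expected to fail while the underlying bug is open
+tested.test("known-broken on luajit", {run_when = type(jit) == "table", expected = "FAIL"}, function()
+   tested.assert({expected = 8, actual = sum(4, 5)})
+end)
+```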
+
+## Asserts
 All the asserts in `tested` take in a table with a couple of values that should hopefully make debugging your unit tests. The `given` and `should` are [optional] text representations of what your unit test are doing. It can be useful to have text representations so you're not having to rely on the values alone. It's also nice if you're passing in a bunch of test files and use the filename in `given`, so that it appears in the output if something goes wrong.
 
 - `tested.assert({given?: string, should?: string, expected, actual})`
@@ -17,10 +21,10 @@ All the asserts in `tested` take in a table with a couple of values that should
 - `tested.assert_throws_exception({given?: string, should?: string, expected?: any, actual: function()})` - `expected` is also optional here, but if passed in, `tested` will check if it matches the error that comes back from the function. If `expected` is a `string`, it should match the exact string that is thrown in your error command.
 
-### How `tested` works (high level)
-1. Recursively search through the `tests` folder (from where it's called) or the folders specfied [on the commandline](./cli.md#tested-base-command) looking for files with the suffx `_test.lua` (or `_test.tl`) and makes a list of them
+## How `tested` works (high level)
+1. Recursively search through the `tests` folder (from where it's called) or the folders specified [on the commandline](./cli.md#tested-base-command) looking for files with the suffix `_test.lua` (or `_test.tl`) and make a list of them
 2. Before running a test file, it notes which packages have been loaded.
 3. It runs through the test file and creates a list of all the tests that need to be run. Shuffling the list if desired.
 4. It runs each test, tracking the asserts and results
-5. It clears any packages that were loaded during the test from the `package.loaded` table and then runs garabage collection.
+5. It clears any packages that were loaded during the test from the `package.loaded` table and then runs garbage collection.
 6. It gathers up all the results
\ No newline at end of file
diff --git a/docs/cli.md b/docs/cli.md
index 1e98dcd..7d06539 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -18,14 +18,14 @@ The `-r` will randomize the order of tests _within_ a test file.
 
 ## `tested -s/--show`
 
-By default the `tested` output shows the problematic test results, that likely need to be addressed: `fail`, `exception`, and `unknown`, but allows filtering to display other results.
+By default the `tested` output shows the problematic test results that likely need to be addressed: `fail`, `exception`, `unknown`, and `unexpected`, but allows filtering to display other results.
 
-Current Values: `pass fail skip exception unknown valid invalid all`
+Current Values: `pass fail skip exception unknown expected unexpected valid invalid all`
 
 There are three groupings that can also be used:
-- `tested -s valid` - will show `pass`, `fail`, and `skip`
-- `tested -s invalid` - will show `exception` and `unknown`
+- `tested -s valid` - will show `pass`, `fail`, `expected`, and `skip`
+- `tested -s invalid` - will show `exception`, `unknown`, and `unexpected`
 - `tested -s all` - shows all tests
 
 To pass multiple values:
@@ -52,10 +52,9 @@ Specify the number of threads `tested` should use. If set to `0`, will not use a
 
 ## `tested -h` - Reference
 ```
-$ tested -h
-Usage: tested ([-f {terminal,plain}] | [-z ]) [-h]
-       [-c] [-r]
-       [-s {all,valid,invalid,skip,pass,fail,exception,unknown}]
+Usage: tested ([-f {terminal,plain,tap}] | [-z ])
+       [-h] [-c] [-r]
+       [-s {all,valid,invalid,skip,pass,fail,exception,unknown,expected,unexpected}]
        [-n ] [-x ]
        [-d {DEBUG,INFO,WARNING}] [--version] [] ...
@@ -68,11 +67,11 @@ Options:
    -h, --help Show this help message and exit.
    -c, --coverage Enable code coverage - will generate luacov.stats.out (default: not-set)
    -r, --random Randomize the order of the tests (default: not-set)
-   -s {all,valid,invalid,skip,pass,fail,exception,unknown},
-      --show {all,valid,invalid,skip,pass,fail,exception,unknown}
+   -s {all,valid,invalid,skip,pass,fail,exception,unknown,expected,unexpected},
+      --show {all,valid,invalid,skip,pass,fail,exception,unknown,expected,unexpected}
-      What test results to display (default: '-s fail -s exception -s unknown')
+      What test results to display (default: '-s fail -s exception -s unknown -s unexpected')
-   -f {terminal,plain},
-      --display-format {terminal,plain}
+   -f {terminal,plain,tap},
+      --display-format {terminal,plain,tap}
       What format to output the results in (default: 'terminal') (default: terminal)
    -z , --custom-formatter
diff --git a/docs/custom-formatter.md b/docs/custom-formatter.md
index c5156c5..aab4e35 100644
--- a/docs/custom-formatter.md
+++ b/docs/custom-formatter.md
@@ -25,7 +25,7 @@ function custom_formatter.results(
 )
 end
 
---
+-- Outputs a summary at the end
 function custom_formatter.summary(output: types.TestRunnerOutput)
 end
 return custom_formatter
@@ -123,6 +123,7 @@ An example of what `types.TestRunnerOutput` looks like. `types.TestedOutput` is
    total_counts = {
       failed = 2,
       invalid = 0,
+      expected = 0,
       passed = 3,
       skipped = 0
    },
@@ -141,6 +142,10 @@ enum TestResult
    "EXCEPTION"
    "TIMEOUT"
    "UNKNOWN"
+   "EXPECTED_FAIL"
+   "EXPECTED_EXCEPTION"
+   "EXPECTED_UNKNOWN"
+   "UNEXPECTED"
 end
 
 interface AssertionResult
@@ -163,6 +168,7 @@ end
 interface TestCounts
    passed: integer
    failed: integer
+   expected: integer
    skipped: integer
    invalid: integer
 end
diff --git a/docs/index.md b/docs/index.md
index 78a5731..02c779d 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -7,7 +7,7 @@
 luarocks install tested
 ```
 
-After installing, the `tested` module will be available to your Lua code with `require("tested")` and a CLI tool, also called `tested` will be installed wherever your LuaRocks installs executables.
+After installing, the `tested` module will be available to your Lua code with `require("tested")`, and a CLI tool, also called `tested`, will be installed wherever your LuaRocks installs executables (be sure the LuaRocks `bin` folder is on your PATH!).
 
 ## Example Test
@@ -70,34 +70,77 @@ Test files should be placed in a folder called `tests`, and the `tested` command
 Check out the [Unit Testing](./unit-testing.md) page for more information on how `tested` works! and after, if you're using Teal, be sure to check out the [Teal Support](./teal-support.md) page!
 
-## Inspiration
-While researching unit testing frameworks and how people felt about them I found the following helpful:
+## Quickstart
 
-- Eric Elliot's [Rethinking Unit Test Assertions](https://medium.com/javascript-scene/rethinking-unit-test-assertions-55f59358253f)
-- [HN thread](https://news.ycombinator.com/item?id=33479397) from 2022 on _how people feel_ about unit testing
-- ... and many other articles/forums out there!
+### Folder setup
+In the root of your project, you should create a `tests` folder and place all your test files (with the suffix `_test.lua` or `_test.tl`) inside. From there you can run the `tested` command and it will find all the tests and run them.
 
-But yeah, that's where the optional `given` and `should` came from as well as the ability to selectively skip or run tests. A lot of that resonated with me alongside letting people _choose_ if they want to follow those patterns (ie not making `given` and `should`required). The rest of the allowing multiple asserts and not being perscriptive in how people write their tests I think will help make `tested` work in anyone's workflow.
+```
+.
+├─ tests/
+│  ├─ my_library_test.lua
+│  └─ a_different_test.lua
+└─ my_library.lua
+```
+
+### The test file
+
+Let's take a look at a basic test file:
+
+```lua title="tests/my_library_test.lua"
+local tested = require("tested")
+
+tested.test("just a test!", function()
+   tested.assert({
+      given = "4 + 4",
+      should = "return 8",
+      expected = 8,
+      actual = sum(4, 4)
+   })
+end)
+
+print("This will be printed before _any_ tests run!")
+
+tested.test("just works without given and should!", function()
+   tested.assert({
+      expected=true,
+      actual=true
+   })
+end)
+
+return tested
+```
+
+After the `tested` command loads up a test file, it goes through and finds all the various tests defined in the file (in this case there are two) and adds them to a list to be executed. In the example above, that `print` statement will execute before either test.
+
+It does this so tests can be [shuffled](#cli-quick-reference), [skipped](#skipping-tests), or to [only](#only-tests) run a specific test within a test file!
+
+### Asserts
+The basic assert is composed of four parts:
+```lua
+   tested.assert({
+      given = "4 + 4",
+      should = "return 8",
+      expected = 8,
+      actual = sum(4, 4)
+   })
+```
+
+The `given` and `should` are optional strings that get displayed in the output to help you identify which specific assert has failed. The idea behind them is to be able to look at the testing output and know _exactly_ what and how something has failed. If your test references multiple files, placing a filename in `given` can be incredibly useful. Since some tests are more obvious than others (based on a test name), they are not required and can be omitted.
+
+The `expected` and `actual` take in the expected and actual values. There are a couple of other asserts built into `tested`, including one for [exceptions](#testing-exceptions), [truthy, and falsy](#truthyfalsy-tests)!
 
-## Does the Lua ecosystem need _another_ unit testing framework?
-This is honestly a question that went through my mind many times while working on `tested`. [busted](https://github.com/lunarmodules/busted) already handles unit testing for many projects. Major thanks to everyone who's helped maintain it over the years, it's no easy feat keeping things going. I've used it for years and it's always worked for what I needed.
-`tested` mostly grew out of my desire to _build_ a unit testing framework - with as few dependencies as possible, using the Lua 5.1+ style module system, and capable of running tests in parallel. If it ends up being a library that only I use, that's okay with me. I had a lot of fun making it, and the journey was definitely worth it!
+### CLI Quick Reference
+There are a couple of CLI options that are good to know when you get started:
 
-### busted comparison
+- `tested -c` or `--coverage` will enable luacov code coverage and generate a `luacov.stats.out` file
+- `tested -r` or `--random` will randomize the order of tests _within a test file_.
+- `tested -s` or `--show` supports displaying different statuses of tests. By default `tested` shows tests that require follow-up (`fail`, `exception`, `unknown`, and `unexpected`)
+   - Ex: `tested -s pass -s skip` shows all passed and skipped tests
+   - Ex: `tested -s valid`
 
-| Feature | `tested` | busted |
-| ------- | -------- | ------ |
-| Dependencies | 8 | 12 |
-| Time to install | TBD | 31.77 |
-| Time to run ftcsv tests | 82ms | 102ms |
-| Time to run ftcsv tests w/coverage | 423ms | 1.5s |
-| Single-threaded time to run ftcsv tests w/coverage | 600ms | 1.5s |
-| Custom Formatters | ✅ | ✅ |
-| Extendible to Lua-compat Languages | ✅ | ✅ |
-| Mocks | ❌ | ✅ |
-| Spies | ❌ | ✅ |
-| Ecosystem of extensions | ❌ | ✅ |
+To see the entire list of CLI options, check out the [CLI Reference](./cli.md)
 
-- The times/dependency count for busted include installing luacov
-`tested` runs tests multi-threaded, but busted does not
\ No newline at end of file
+### Teal Support
+`tested` has built-in Teal support; be sure to check out the [Teal Support](./teal-support.md) page for some of the considerations around its usage with Teal.
\ No newline at end of file
diff --git a/docs/unit-testing.md b/docs/unit-testing.md
index e41871f..7f6fad8 100644
--- a/docs/unit-testing.md
+++ b/docs/unit-testing.md
@@ -1,81 +1,6 @@
 # Unit Testing
 `tested` as a framework, tries to let you _just write tests_. If you want multiple asserts in one test, go for it. Dynamically generate tests? No Problem! `tested` aims to be flexible enough to work with a wide variety of testing scenarios and philosophies.
 
-## Quickstart
-
-### Folder setup
-In the root of your project, you should create a `tests` folder and place all your test files (with the suffix of `_test.lua` or `_test.tl`). From there you can run the `tested` command from a it will find all the tests and run them.
-
-```
-.
-├─ tests/
-│  ├─ my_library_test.lua
-│  └─ a_different_test.lua
-└─ my_library.lua
-```
-
-### The test file
-
-Let's take a look at a basic test file:
-
-```lua title="tests/my_library_test.lua"
-local tested = require("tested")
-
-tested.test("just a test!", function()
-   tested.assert({
-      given = "4 + 4",
-      should = "return 8",
-      expected = 8,
-      actual = sum(4, 4)
-   })
-end)
-
-print("This will be printed before _any_ tests run!")
-
-tested.test("just works without given and should!", function()
-   tested.assert({
-      expected=true,
-      actual=true
-   })
-end)
-
-return tested
-```
-
-After the `tested` command loads up a test file, it goes through and finds all the various tests defined in the file (in this case there are two) and adds them to a list to be executed. In the example above, that `print` statement will execute before either tests.
-
-It does this so tests can be [shuffled](#cli-quick-reference), [skipped](#skipping-tests), or to [only](#only-tests) run a specific test within a test file!
-
-### Asserts
-The basic assert is composed of a couple of four parts:
-```lua
-   tested.assert({
-      given = "4 + 4",
-      should = "return 8",
-      expected = 8,
-      actual = sum(4, 4)
-   })
-```
-
-The `given` and `should` are optional strings that get displayed in the output to help you identify which specific assert has failed. The idea behind them is to be able to look at the testing output and know _exactly_ what and how something has failed. If your test references multiple files, placing a filename in given can be incredibly useful. Since some tests are more obvious than others (based on a test name), they are not required and can be omitted.
-
-The `expected` and `actual` take in the expected and actual values. There are a couple of other asserts builtin to `tested`, including one for [exceptions](#testing-exceptions), [truthy, and falsy](#truthyfalsy-tests)!
-
-
-### CLI Quick Reference
-There are a couple CLI commands that are good to know when you get started:
-
-- `tested -c` or `--coverage` will enable luacov code coverage and generate a `luacov.stats.out` file
-- `tested -r` or `--random` will randomize the order of tests _within a test file_.
-- `tested -s` or `--show` supports displaying different status of tests. By default `tested` shows tests which require followup (so `fail`, `exception`, and `invalid`)
-   - Ex: `tested -s pass -s skip` see all passed and skipped tests
-   - Ex: `tested -s valid`
-
-To see the entire list of CLI options, check out the [CLI Reference](./cli.md)
-
-### Teal Support
-`tested` has builtin Teal support, be sure to check out the [Teal Support](./teal-support.md) page for some of the considerations around its usage with Teal.
-
 ## Testing tables
 `tested.assert` will also deep compare tables, and will generate a little summary of the differences as well as print out the expected and actual table.
@@ -164,7 +89,7 @@ tested.test("tables with self-cycles, but the same structure should be equal", f
    })
 end)
 ```
- 
+
 ## Truthy/Falsy tests
 Sometimes in Lua you want to check if _anything_ returned (like a `string.match` or that a value exists in a table), we've added in an `assert_truthy` and `assert_falsy` to help out in those cases.
@@ -221,27 +146,20 @@ end)
 ```
 
-## Skipping tests
-If you need to have a test be skipped (for something is known broken) or want to _conditionally_ skip tests based on something that can be determined at runtime (LuaJIT, operating system, dependency present or not), there is `tested.skip` and `tested.conditional_test`:
+## Skipping & Only tests
+
+For quick debugging purposes, there are `tested.skip` and `tested.only`. These allow you to quickly isolate tests when running a particular test file. For things that are going to be broken longer term, you should set the `expected` option.
+
+`tested.skip`:
 ```lua
 tested.skip("skipping because tested.skip", function()
    tested.assert({expected = 8, actual = sum(4, 4)})
 end)
-
--- the second argument to `conditional_test` takes in a boolean
--- true runs the test, false will skip it
-tested.conditional_test("luajit only test", (type("jit") == "table"), function()
-   tested.assert({expected = 8, actual = sum(5, 3) })
-end)
-
 ```
 
-## Only tests
-There is also a `tested.only` which will only cause the tests marked with `tested.only` _in a test file_ to be run. This can be helpful if you need to debug a handful of tests and don't want to see the output of the other tests in the file (they will be marked as skipped).
+There is also a `tested.only` which will only cause the tests marked with `tested.only` _in a test file_ to be run. This can be helpful if you need to debug a handful of tests and don't want to see the output of the other tests in the file (they will be marked as skipped).
 
-Since this only works on a _per-test file_ basis, it may also be useful to pass the specific test file to `tested` as well: `tested ./tests/file_with_only_test.lua`
 ```lua
 -- this will be marked as skipped
 tested.test("skipping because others are tested.only", function()
 ...
 end)
@@ -259,8 +177,55 @@ tested.only("this will also run!", function()
 end)
 ```
 
+Both of these work on a _per-test file_ basis, so it may also be useful to pass the specific test file that you are working with to `tested` as well: `tested ./tests/file_with_only_test.lua`
+
+## Options
+
+### Conditional Skipping
+If you want to _conditionally_ skip tests based on something that can be determined at runtime (LuaJIT, operating system, dependency present or not), there is the `run_when` option:
+
+```lua
+-- the `run_when` option takes in a boolean where true runs the test, false will skip it
+tested.test("luajit only test", {run_when=(type(jit) == "table")}, function()
+   tested.assert({expected = 8, actual = sum(5, 3) })
+end)
+
+```
+
+### Expected Results
+If there are tests that are going to be broken for an extended period of time (ex: dependencies outside of your control, a waaayy-out future feature, a bug fix in a future sprint), you can set the `expected` option:
+
+```lua
+tested.test("expected exception: throws as expected", {expected="EXCEPTION"}, function()
+   error("this exception is expected")
+end)
+```
+
+The values `expected` can be:
+- `FAIL` - for tests that will simply just fail
+- `EXCEPTION` - for tests that raise an error
+- `UNKNOWN` - for tests with no assertions written
+
+This will hide the test result from the default output, _however_, if the result of the test differs from the `expected` value, it **will** show up in the testing output and is considered `UNEXPECTED`:
+
+```lua
+-- this will show up in the tested output with an error message indicating that it passed but was expected to fail.
+tested.test("unexpected: expected fail but test passes", {expected="FAIL"}, function()
+   tested.assert({
+      given = "1 + 1",
+      should = "equal 2",
+      expected = 2,
+      actual = 1 + 1
+   })
+end)
+```
+
 ## Invalid tests
-If a test file has a test that throws an unhandled exception or `tested` finds a test without any asserts, they are considered "invalid", and will display as such in the results and will be listed in the summary as "invalid":
+If a test file has a test that throws an unhandled exception, a test without any asserts, or a test with `expected` set that comes back with a different result, those tests are considered "invalid" and will display as such in the results and be listed in the summary as "invalid".
+
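+For example, a test that runs no assertions comes back as `UNKNOWN` and is counted as invalid (a minimal sketch, reusing the `sum` helper from the examples above):
+
+```lua
+-- no assertions are run inside this test, so it is reported as UNKNOWN
+tested.test("forgot to assert anything", function()
+   sum(4, 4) -- computed, but never passed to tested.assert
+end)
+```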
diff --git a/src/tested.tl b/src/tested.tl
index 5610128..0e6c9ac 100644
--- a/src/tested.tl
+++ b/src/tested.tl
@@ -3,27 +3,35 @@ local assert_table = require("tested.assert_table")
 
 local tested: types.Tested = { tests = {}, run_only_tests = false }
 
-function tested.test(name: string, fn: function())
-   table.insert(tested.tests, {name=name, fn=fn, kind="test"})
-end
+local function extract_fn_and_options(fn_or_options: function() | types.TestedOptions, fn?: function()): function(), types.TestedOptions
+   local options: types.TestedOptions = {}
+   if type(fn_or_options) == "function" then
+      fn = fn_or_options as function()
+
+   elseif type(fn_or_options) == "table" then
+      options = fn_or_options as types.TestedOptions
+      if not fn then error("a function must be provided to run a unit test") end
+      fn = fn
+   end
 
-function tested.skip(name: string, fn: function())
-   table.insert(tested.tests, {name=name, fn=fn, kind="skip"})
+   return fn, options
 end
 
-function tested.only(name: string, fn: function())
-   tested.run_only_tests = true
-   table.insert(tested.tests, {name=name, fn=fn, kind="only"})
+-- teal currently doesn't support polymorphism, so we gotta kinda handle it ourselves
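+-- both call shapes are supported, e.g.:
+--   tested.test("adds", function() ... end)
+--   tested.test("adds", {expected = "FAIL"}, function() ... end)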
+function tested.test(name: string, fn_or_options: function() | types.TestedOptions, fn?: function())
+   local func, options = extract_fn_and_options(fn_or_options, fn)
+   table.insert(tested.tests, {name=name, fn=func, options=options, kind="test"})
 end
 
-function tested.conditional_test(name: string, condition: boolean, fn: function())
-   if condition then
-      table.insert(tested.tests, {name=name, fn=fn, kind="conditional_test"})
-   else
-      table.insert(tested.tests, {name=name, fn=fn, kind="conditional_skip"})
-   end
+function tested.skip(name: string, fn_or_options: function() | types.TestedOptions, fn?: function())
+   local func, options = extract_fn_and_options(fn_or_options, fn)
+   table.insert(tested.tests, {name=name, fn=func, options=options, kind="skip"})
 end
 
+function tested.only(name: string, fn_or_options: function() | types.TestedOptions, fn?: function())
+   tested.run_only_tests = true
+   local func, options = extract_fn_and_options(fn_or_options, fn)
+   table.insert(tested.tests, {name=name, fn=func, options=options, kind="only"})
+end
 
 function tested.assert(assertion: types.Assertion): boolean, string
    local errors = {}
@@ -112,7 +120,7 @@ function tested:run(filename: string, options: types.TestRunnerOptions): types.T
    end
 
    local test_results: types.TestedOutput = {
-      counts = {passed=0, failed=0, skipped=0, invalid=0},
+      counts = {passed=0, failed=0, expected=0, skipped=0, invalid=0},
       tests = {},
       filename = filename,
       fully_tested = false,
@@ -135,7 +143,7 @@ function tested:run(filename: string, options: types.TestRunnerOptions): types.T
          test_results.tests[i].time = 0
          test_results.counts.skipped = test_results.counts.skipped + 1
 
-      elseif test.kind == "conditional_skip" then
+      elseif test.options.run_when == false then
          test_results.tests[i].result = "CONDITIONAL_SKIP"
-         test_results.tests[i].message = "Condition in `tested.conditional_skip` returned false. Skipping test."
+         test_results.tests[i].message = "The `run_when` option was false. Skipping test."
          test_results.tests[i].time = 0
@@ -181,22 +189,49 @@ function tested:run(filename: string, options: types.TestRunnerOptions): types.T
          if ok == false then
             test_results.tests[i].result = "EXCEPTION"
             test_results.tests[i].message = err .. "\n" .. debug.traceback()
-            test_results.counts.invalid = test_results.counts.invalid + 1
 
          elseif total_assertions == 0 then
             test_results.tests[i].result = "UNKNOWN"
             test_results.tests[i].message = "No assertions run during test"
-            test_results.counts.invalid = test_results.counts.invalid + 1
 
          elseif assert_failed_count == 0 then
             test_results.tests[i].result = "PASS"
             test_results.tests[i].message = "All assertions have passed"
-            test_results.counts.passed = test_results.counts.passed + 1
 
          else
             test_results.tests[i].result = "FAIL"
             test_results.tests[i].message = assert_failed_count .. " assertions have failed"
+         end
+
+         -- adjust for expected-ness
+         if test.options.expected ~= nil then
+            if test_results.tests[i].result == test.options.expected as types.TestResult then
+               if test.options.expected == "EXCEPTION" then
+                  test_results.tests[i].result = "EXPECTED_EXCEPTION"
+               elseif test.options.expected == "UNKNOWN" then
+                  test_results.tests[i].result = "EXPECTED_UNKNOWN"
+               elseif test.options.expected == "FAIL" then
+                  test_results.tests[i].result = "EXPECTED_FAIL"
+               end
+            else
+               test_results.tests[i].message = "Expected test result to be " .. test.options.expected .. ", but came back as " .. test_results.tests[i].result .. "\n" .. test_results.tests[i].message
+               test_results.tests[i].result = "UNEXPECTED"
+            end
+         end
+
+         -- count based on final result
+         if test_results.tests[i].result == "PASS" then
+            test_results.counts.passed = test_results.counts.passed + 1
+
+         elseif test_results.tests[i].result == "FAIL" then
             test_results.counts.failed = test_results.counts.failed + 1
+
+         elseif test_results.tests[i].result == "EXPECTED_FAIL" or test_results.tests[i].result == "EXPECTED_EXCEPTION" or test_results.tests[i].result == "EXPECTED_UNKNOWN" then
+            test_results.counts.expected = test_results.counts.expected + 1
+
+         elseif test_results.tests[i].result == "EXCEPTION" or test_results.tests[i].result == "UNKNOWN" or test_results.tests[i].result == "UNEXPECTED" then
+            test_results.counts.invalid = test_results.counts.invalid + 1
+
          end
       end
    end
diff --git a/src/tested/main.tl b/src/tested/main.tl
index a89c294..5c51739 100644
--- a/src/tested/main.tl
+++ b/src/tested/main.tl
@@ -21,6 +21,8 @@ local enum DisplayOptions
    "fail"
    "exception"
    "unknown"
+   "expected"
+   "unexpected"
    -- "timeout" -- NYI
 end
 
@@ -30,6 +32,7 @@ local cli_to_display: {DisplayOptions:types.TestResult} = {
    ["fail"] = "FAIL",
    ["exception"] = "EXCEPTION",
    ["unknown"] = "UNKNOWN",
+   ["unexpected"] = "UNEXPECTED",
    -- ["timeout"] = "TIMEOUT"
 }
 
@@ -64,7 +67,7 @@ local function parse_args(): CLIOptions
       :default(false)
    parser:option("-s --show")
       :description("What test results to display (default: '-s fail -s exception -s unknown')")
-      :choices({"all", "valid", "invalid", "skip", "pass", "fail", "exception", "unknown"})
+      :choices({"all", "valid", "invalid", "skip", "pass", "fail", "exception", "unknown", "expected", "unexpected"})
       :count("*")
    parser:mutex(
       parser:option("-f --display-format")
@@ -100,7 +103,7 @@ end
 local function set_defaults(args: CLIOptions)
    logger:info("Setting Defaults...")
    if #args.show == 0 then 
-      args.show = {"fail", "exception", "unknown"} -- NYI: timeout
+      args.show = {"fail", "exception", "unknown", "unexpected"} -- NYI: timeout
       args.specified_show = false
    else
       args.specified_show = true
@@ -111,7 +114,7 @@ local function set_defaults(args: CLIOptions)
    
    local show_all = false
    for _, display_option in ipairs(args.show) do if display_option == "all" then show_all = true break end end
-   if show_all then args.show = {"skip", "pass", "fail", "exception", "unknown"} end -- NYI: timeout
+   if show_all then args.show = {"skip", "pass", "fail", "exception", "unknown", "expected", "unexpected"} end -- NYI: timeout
 end
 
 local function validate_args(args: CLIOptions)
@@ -221,6 +224,7 @@ local function display_types(options: {DisplayOptions}): {types.TestResult: bool
       if cli_to_display[cli_option] then
          to_display[cli_to_display[cli_option]] = true
          if cli_option == "skip" then
+            to_display["SKIP"] = true
             to_display["CONDITIONAL_SKIP"] = true
          end
       else
@@ -228,11 +232,19 @@ local function display_types(options: {DisplayOptions}): {types.TestResult: bool
             to_display["EXCEPTION"] = true
             to_display["UNKNOWN"] = true
             to_display["TIMEOUT"] = true
+            to_display["UNEXPECTED"] = true
          elseif cli_option == "valid" then
             to_display["PASS"] = true
             to_display["SKIP"] = true
             to_display["CONDITIONAL_SKIP"] = true
             to_display["FAIL"] = true
+            to_display["EXPECTED_FAIL"] = true
+            to_display["EXPECTED_EXCEPTION"] = true
+            to_display["EXPECTED_UNKNOWN"] = true
+         elseif cli_option == "expected" then
+            to_display["EXPECTED_FAIL"] = true
+            to_display["EXPECTED_EXCEPTION"] = true
+            to_display["EXPECTED_UNKNOWN"] = true
          end
       end
    end
diff --git a/src/tested/results/terminal.tl b/src/tested/results/terminal.tl
index 1dcdcc3..256a2f2 100644
--- a/src/tested/results/terminal.tl
+++ b/src/tested/results/terminal.tl
@@ -10,6 +10,10 @@ local symbol_map : {types.TestResult: string} = {
    EXCEPTION = " !",
    TIMEOUT = " ⏱",
    UNKNOWN = " ?",
+   EXPECTED_FAIL = "(✗)",
+   EXPECTED_EXCEPTION = "(!)",
+   EXPECTED_UNKNOWN = "(?)",
+   UNEXPECTED = " ‽"
 }
 
 local color_map : {types.TestResult: string} = {
@@ -20,6 +24,10 @@ local color_map : {types.TestResult: string} = {
    EXCEPTION = " %{cyan}",
    TIMEOUT = " %{blue}",
    UNKNOWN = " %{magenta}",
+   EXPECTED_FAIL = " %{dim red}",
+   EXPECTED_EXCEPTION = " %{dim cyan}",
+   EXPECTED_UNKNOWN = " %{dim magenta}",
+   UNEXPECTED = " %{bright red}"
 }
 
 local record terminal is types.ResultFormatter where self.format == "terminal"
@@ -78,7 +86,7 @@ function terminal.results(tested_result: types.TestedOutput, test_types_to_displ
       if test_types_to_display[test_result.result] then
          tadd.add(color_map[test_result.result], symbol_map[test_result.result], " ", test_result.name, to_ms(test_result.time, false), "%{reset}\n")
          local extra_newline = false
-         if test_result.result == "FAIL" or test_result.result == "PASS" then
+         if test_result.result == "FAIL" or test_result.result == "PASS" or test_result.result == "EXPECTED_FAIL" then
             for _, assertion_result in ipairs(test_result.assertion_results) do
                if (assertion_result.result == "FAIL" and test_types_to_display["FAIL"]) or assertion_result.result == "PASS" and test_types_to_display["PASS"] then
                   format_assertion_result(assertion_result)
@@ -92,7 +100,7 @@ function terminal.results(tested_result: types.TestedOutput, test_types_to_displ
             if extra_newline then tadd.add("\n") end
          end
 
-         if test_result.result == "EXCEPTION" or test_result.result == "UNKNOWN" then
+         if test_result.result == "EXCEPTION" or test_result.result == "UNKNOWN" or test_result.result == "UNEXPECTED" or test_result.result == "EXPECTED_EXCEPTION" or test_result.result == "EXPECTED_UNKNOWN" then
             tadd.add("      ", (test_result.message:gsub("\n", "\n      ")), "\n")
             tadd.add("\n")
          end
@@ -118,7 +126,9 @@ function terminal.summary(output: types.TestRunnerOutput)
       tostring(output.total_counts.passed),
       " passed%{reset}, %{red}",
       tostring(output.total_counts.failed), 
-      " failed%{reset}\n"
+      " failed%{reset}, ",
+      tostring(output.total_counts.expected),
+      " expected\n"
    )
    
    tadd.add(
diff --git a/src/tested/test_runner.tl b/src/tested/test_runner.tl
index 9831ff9..d053ac0 100644
--- a/src/tested/test_runner.tl
+++ b/src/tested/test_runner.tl
@@ -50,7 +50,7 @@ function test_runner.run_tests(
       total_time = 0,
       total_tests = 0,
       all_fully_tested = true,
-      total_counts = {passed=0, failed=0, skipped=0, invalid=0},
+      total_counts = {passed=0, failed=0, expected=0, skipped=0, invalid=0},
       module_results = {},
    }
    local coverage_results = {}
@@ -85,6 +85,7 @@ function test_runner.run_tests(
       if test_output.fully_tested == false then output.all_fully_tested = false end
       output.total_counts.passed = output.total_counts.passed + test_output.counts.passed
       output.total_counts.failed = output.total_counts.failed + test_output.counts.failed
+      output.total_counts.expected = output.total_counts.expected + test_output.counts.expected
       output.total_counts.skipped = output.total_counts.skipped + test_output.counts.skipped
       output.total_counts.invalid = output.total_counts.invalid + test_output.counts.invalid
       output.total_time = output.total_time + test_output.total_time
@@ -114,7 +115,7 @@ local function run_parallel_tests(
       total_time = 0,
       total_tests = 0,
       all_fully_tested = true,
-      total_counts = {passed=0, failed=0, skipped=0, invalid=0},
+      total_counts = {passed=0, failed=0, expected=0, skipped=0, invalid=0},
       module_results = {},
    }
    local coverage_results = {}
@@ -158,6 +159,7 @@ local function run_parallel_tests(
       if test_output.fully_tested == false then output.all_fully_tested = false end
       output.total_counts.passed = output.total_counts.passed + test_output.counts.passed
       output.total_counts.failed = output.total_counts.failed + test_output.counts.failed
+      output.total_counts.expected = output.total_counts.expected + test_output.counts.expected
       output.total_counts.skipped = output.total_counts.skipped + test_output.counts.skipped
       output.total_counts.invalid = output.total_counts.invalid + test_output.counts.invalid
       output.total_time = output.total_time + test_output.total_time
diff --git a/src/tested/types.tl b/src/tested/types.tl
index 519987d..3fb70d3 100644
--- a/src/tested/types.tl
+++ b/src/tested/types.tl
@@ -21,6 +21,16 @@ local record types
       "EXCEPTION"
       "TIMEOUT"
       "UNKNOWN"
+      "EXPECTED_FAIL"
+      "EXPECTED_EXCEPTION"
+      "EXPECTED_UNKNOWN"
+      "UNEXPECTED"
+   end
+
+   enum ExpectedTestResult
+      "FAIL"
+      "EXCEPTION"
+      "UNKNOWN"
    end
 
    interface AssertionResult
@@ -43,6 +53,7 @@ local record types
    interface TestCounts
       passed: integer
       failed: integer
+      expected: integer
       skipped: integer
       invalid: integer
    end
@@ -93,6 +104,7 @@ local record types
       kind: TestType
       name: string
       fn: function()
+      options: TestedOptions
    end
 
    interface BaseAssertion
@@ -113,6 +125,13 @@ local record types
       expected: any
    end
 
+   interface TestedOptions
+      -- retries: integer
+      -- retry_timeout: number
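+      -- the result this test is expected to produce ("FAIL", "EXCEPTION", or "UNKNOWN")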
+      expected: ExpectedTestResult
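+      -- when false, the test is skipped (e.g. run_when = type(jit) == "table")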
+      run_when: boolean
+   end
+
    interface Tested
       tests: {Test}
       run_only_tests: boolean
@@ -120,10 +139,10 @@ local record types
       assert_truthy: function(assertion: TruthyAssertion): boolean, string
       assert_falsy: function(assertion: FalsyAssertion): boolean, string
       assert_throws_exception: function(assertion: ExceptionAssertion): boolean, string
-      test: function(name: string, fn: function())
-      skip: function(name: string, fn: function())
-      only: function(name: string, fn: function())
-      conditional_test: function(name: string, condition: boolean, fn: function())
+      -- teal currently doesn't support polymorphism, so we gotta kinda handle it ourselves
+      test: function(name: string, fn_or_options: function() | TestedOptions, fn?: function())
+      skip: function(name: string, fn_or_options: function() | TestedOptions, fn?: function())
+      only: function(name: string, fn_or_options: function() | TestedOptions, fn?: function())
       run: function(self: types.Tested, filename: string, options: types.TestRunnerOptions): types.TestedOutput
    end
 end
diff --git a/tests/expected_test.tl b/tests/expected_test.tl
new file mode 100644
index 0000000..94e4c4a
--- /dev/null
+++ b/tests/expected_test.tl
@@ -0,0 +1,59 @@
+local tested = require("tested")
+
+-- === EXPECTED_FAIL ===
+-- A test expected to fail that actually fails → EXPECTED_FAIL
+tested.test("expected fail: assertion fails as expected", {expected="FAIL"}, function()
+   tested.assert({
+      given = "1 + 1",
+      should = "equal 3 (intentionally wrong)",
+      expected = 3,
+      actual = 1 + 1
+   })
+end)
+
+-- === EXPECTED_EXCEPTION ===
+-- A test expected to throw that actually throws → EXPECTED_EXCEPTION
+tested.test("expected exception: throws as expected", {expected="EXCEPTION"}, function()
+   error("this exception is expected")
+end)
+
+-- === EXPECTED_UNKNOWN ===
+-- A test expected to be unknown that runs no assertions → EXPECTED_UNKNOWN
+tested.test("expected unknown: no assertions as expected", {expected="UNKNOWN"}, function()
+   -- intentionally no assertions
+end)
+
+-- === UNEXPECTED: expected FAIL but test passes ===
+-- A test expected to fail that actually passes → UNEXPECTED
+tested.test("unexpected: expected fail but test passes", {expected="FAIL"}, function()
+   tested.assert({
+      given = "1 + 1",
+      should = "equal 2",
+      expected = 2,
+      actual = 1 + 1
+   })
+end)
+
+-- === UNEXPECTED: expected EXCEPTION but no exception thrown ===
+-- A test expected to throw that completes normally → UNEXPECTED
+tested.test("unexpected: expected exception but no exception thrown", {expected="EXCEPTION"}, function()
+   tested.assert({
+      given = "1 + 1",
+      should = "equal 2",
+      expected = 2,
+      actual = 1 + 1
+   })
+end)
+
+-- === UNEXPECTED: expected UNKNOWN but assertions were run ===
+-- A test expected to be unknown that does run assertions → UNEXPECTED
+tested.test("unexpected: expected unknown but assertions were run", {expected="UNKNOWN"}, function()
+   tested.assert({
+      given = "1 + 1",
+      should = "equal 2",
+      expected = 2,
+      actual = 1 + 1
+   })
+end)
+
+return tested
diff --git a/tests/tested_test.tl b/tests/tested_test.tl
index b1dd721..ac0a8de 100644
--- a/tests/tested_test.tl
+++ b/tests/tested_test.tl
@@ -31,7 +31,7 @@ tested.test("sum()", function()
     tested.assert(thing)
 end)
 
-tested.conditional_test("conditional guy over here", true, function()
+tested.test("conditional guy over here", {run_when=true}, function()
     tested.assert({
         given = "4 + 4",
         should = "8",
@@ -40,7 +40,7 @@ tested.conditional_test("conditional guy over here", true, function()
     })
 end)
 
-tested.conditional_test("conditional guy should be skipped", false, function()
+tested.test("conditional guy should be skipped", {run_when=false}, function()
     tested.assert({
         given = "4 and 4",
         should = "return 8",