Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ Running the tests are as simple as placing the file in a `tests` folder and then
You can see more tests in this repo's [tests](https://github.com/FourierTransformer/tested/tree/main/tests) folder!

## AI Disclosure
As of versions > 0.1.0, AI has been used to help implement features, research Lua/Teal internals, debug issues, and make more readable output. Before this version, the code was hand-written, but some research was done with the help of AI. The docs will remain hand-written for now. I am personally still a little skeptical of AI and its place in open source, but at the moment am willing to evaluate it.
As of versions > 0.1.0, AI has been used to help implement _some_ features, research Lua/Teal internals, debug issues, and make more readable output. Before this version, the code was hand-written, but some research was done with the help of AI. The docs will remain hand-written for now. I am personally still a little skeptical of AI and its place in open source, but at the moment am willing to evaluate it.

## Licenses
Parts of the following are included in the source code present in this repo:
Expand Down
73 changes: 54 additions & 19 deletions build/tested.lua
Original file line number Diff line number Diff line change
Expand Up @@ -3,27 +3,35 @@ local assert_table = require("tested.assert_table")

local tested = { tests = {}, run_only_tests = false }

function tested.test(name, fn)
table.insert(tested.tests, { name = name, fn = fn, kind = "test" })
end
local function extract_fn_and_options(fn_or_options, fn)
local options = {}
if type(fn_or_options) == "function" then
fn = fn_or_options

elseif type(fn_or_options) == "table" then
options = fn_or_options
if not fn then error("a function must be provided to run a unit test") end
fn = fn
end

function tested.skip(name, fn)
table.insert(tested.tests, { name = name, fn = fn, kind = "skip" })
return fn, options
end

function tested.only(name, fn)
tested.run_only_tests = true
table.insert(tested.tests, { name = name, fn = fn, kind = "only" })

--- Register a test with the runner.
-- Callable as (name, fn) or (name, options, fn); the options table is
-- normalized by extract_fn_and_options.
function tested.test(name, fn_or_options, fn)
   local test_fn, test_options = extract_fn_and_options(fn_or_options, fn)
   local entry = { name = name, fn = test_fn, options = test_options, kind = "test" }
   tested.tests[#tested.tests + 1] = entry
end

function tested.conditional_test(name, condition, fn)
if condition then
table.insert(tested.tests, { name = name, fn = fn, kind = "conditional_test" })
else
table.insert(tested.tests, { name = name, fn = fn, kind = "conditional_skip" })
end
--- Register a test that will be recorded but not executed.
-- Useful for temporarily disabling a known-broken test.
-- Callable as (name, fn) or (name, options, fn).
function tested.skip(name, fn_or_options, fn)
   local test_fn, test_options = extract_fn_and_options(fn_or_options, fn)
   local entry = { name = name, fn = test_fn, options = test_options, kind = "skip" }
   tested.tests[#tested.tests + 1] = entry
end

--- Register a test and switch the runner into "only" mode, so that just
-- the tests registered via tested.only are executed.
-- Callable as (name, fn) or (name, options, fn).
function tested.only(name, fn_or_options, fn)
   tested.run_only_tests = true
   local test_fn, test_options = extract_fn_and_options(fn_or_options, fn)
   local entry = { name = name, fn = test_fn, options = test_options, kind = "only" }
   tested.tests[#tested.tests + 1] = entry
end

function tested.assert(assertion)
local errors = {}
Expand Down Expand Up @@ -112,7 +120,7 @@ function tested:run(filename, options)
end

local test_results = {
counts = { passed = 0, failed = 0, skipped = 0, invalid = 0 },
counts = { passed = 0, failed = 0, expected = 0, skipped = 0, invalid = 0 },
tests = {},
filename = filename,
fully_tested = false,
Expand All @@ -135,7 +143,7 @@ function tested:run(filename, options)
test_results.tests[i].time = 0
test_results.counts.skipped = test_results.counts.skipped + 1

elseif test.kind == "conditional_skip" then
elseif test.options.run_when ~= nil and test.options.run_when == false then
test_results.tests[i].result = "CONDITIONAL_SKIP"
test_results.tests[i].message = "Condition in `tested.conditional_skip` returned false. Skipping test."
test_results.tests[i].time = 0
Expand Down Expand Up @@ -181,22 +189,49 @@ function tested:run(filename, options)
if ok == false then
test_results.tests[i].result = "EXCEPTION"
test_results.tests[i].message = err .. "\n" .. debug.traceback()
test_results.counts.invalid = test_results.counts.invalid + 1

elseif total_assertions == 0 then
test_results.tests[i].result = "UNKNOWN"
test_results.tests[i].message = "No assertions run during test"
test_results.counts.invalid = test_results.counts.invalid + 1

elseif assert_failed_count == 0 then
test_results.tests[i].result = "PASS"
test_results.tests[i].message = "All assertions have passed"
test_results.counts.passed = test_results.counts.passed + 1

else
test_results.tests[i].result = "FAIL"
test_results.tests[i].message = assert_failed_count .. " assertions have failed"
end


if test.options.expected ~= nil then
if test_results.tests[i].result == test.options.expected then
if test.options.expected == "EXCEPTION" then
test_results.tests[i].result = "EXPECTED_EXCEPTION"
elseif test.options.expected == "UNKNOWN" then
test_results.tests[i].result = "EXPECTED_UNKNOWN"
elseif test.options.expected == "FAIL" then
test_results.tests[i].result = "EXPECTED_FAIL"
end
else
test_results.tests[i].message = "Expected test result to be " .. test.options.expected .. ", but came back as " .. test_results.tests[i].result .. "\n" .. test_results.tests[i].message
test_results.tests[i].result = "UNEXPECTED"
end
end


if test_results.tests[i].result == "PASS" then
test_results.counts.passed = test_results.counts.passed + 1

elseif test_results.tests[i].result == "FAIL" then
test_results.counts.failed = test_results.counts.failed + 1

elseif test_results.tests[i].result == "EXPECTED_FAIL" or test_results.tests[i].result == "EXPECTED_EXCEPTION" or test_results.tests[i].result == "EXPECTED_UNKNOWN" then
test_results.counts.expected = test_results.counts.expected + 1

elseif test_results.tests[i].result == "EXCEPTION" or test_results.tests[i].result == "UNKNOWN" or test_results.tests[i].result == "UNEXPECTED" then
test_results.counts.invalid = test_results.counts.invalid + 1

end
end
end
Expand Down
18 changes: 15 additions & 3 deletions build/tested/main.lua
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,15 @@ local TESTED_VERSION = "tested v0.1.0"





local cli_to_display = {
["skip"] = "SKIP",
["pass"] = "PASS",
["fail"] = "FAIL",
["exception"] = "EXCEPTION",
["unknown"] = "UNKNOWN",
["unexpected"] = "UNEXPECTED",

}

Expand Down Expand Up @@ -64,7 +67,7 @@ local function parse_args()
default(false)
parser:option("-s --show"):
description("What test results to display (default: '-s fail -s exception -s unknown')"):
choices({ "all", "valid", "invalid", "skip", "pass", "fail", "exception", "unknown" }):
choices({ "all", "valid", "invalid", "skip", "pass", "fail", "exception", "unknown", "expected", "unexpected" }):
count("*")
parser:mutex(
parser:option("-f --display-format"):
Expand Down Expand Up @@ -100,7 +103,7 @@ end
local function set_defaults(args)
logger:info("Setting Defaults...")
if #args.show == 0 then
args.show = { "fail", "exception", "unknown" }
args.show = { "fail", "exception", "unknown", "unexpected" }
args.specified_show = false
else
args.specified_show = true
Expand All @@ -111,7 +114,7 @@ local function set_defaults(args)

local show_all = false
for _, display_option in ipairs(args.show) do if display_option == "all" then show_all = true; break end end
if show_all then args.show = { "skip", "pass", "fail", "exception", "unknown" } end
if show_all then args.show = { "skip", "pass", "fail", "exception", "unknown", "expected", "unexpected" } end
end

local function validate_args(args)
Expand Down Expand Up @@ -221,18 +224,27 @@ local function display_types(options)
if cli_to_display[cli_option] then
to_display[cli_to_display[cli_option]] = true
if cli_option == "skip" then
to_display["SKIP"] = true
to_display["CONDITIONAL_SKIP"] = true
end
else
if cli_option == "invalid" then
to_display["EXCEPTION"] = true
to_display["UNKNOWN"] = true
to_display["TIMEOUT"] = true
to_display["UNEXPECTED"] = true
elseif cli_option == "valid" then
to_display["PASS"] = true
to_display["SKIP"] = true
to_display["CONDITIONAL_SKIP"] = true
to_display["FAIL"] = true
to_display["EXPECTED_FAIL"] = true
to_display["EXPECTED_EXCEPTION"] = true
to_display["EXPECTED_UNKNOWN"] = true
elseif cli_option == "expected" then
to_display["EXPECTED_FAIL"] = true
to_display["EXPECTED_EXCEPTION"] = true
to_display["EXPECTED_UNKNOWN"] = true
end
end
end
Expand Down
16 changes: 13 additions & 3 deletions build/tested/results/terminal.lua
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@ local symbol_map = {
EXCEPTION = " !",
TIMEOUT = " ⏱",
UNKNOWN = " ?",
EXPECTED_FAIL = "(✗)",
EXPECTED_EXCEPTION = "(!)",
EXPECTED_UNKNOWN = "(?)",
UNEXPECTED = " ‽",
}

local color_map = {
Expand All @@ -20,6 +24,10 @@ local color_map = {
EXCEPTION = " %{cyan}",
TIMEOUT = " %{blue}",
UNKNOWN = " %{magenta}",
EXPECTED_FAIL = " %{dim red}",
EXPECTED_EXCEPTION = " %{dim cyan}",
EXPECTED_UNKNOWN = " %{dim magenta}",
UNEXPECTED = " %{bright red}",
}

local terminal = {}
Expand Down Expand Up @@ -78,7 +86,7 @@ function terminal.results(tested_result, test_types_to_display)
if test_types_to_display[test_result.result] then
tadd.add(color_map[test_result.result], symbol_map[test_result.result], " ", test_result.name, to_ms(test_result.time, false), "%{reset}\n")
local extra_newline = false
if test_result.result == "FAIL" or test_result.result == "PASS" then
if test_result.result == "FAIL" or test_result.result == "PASS" or test_result.result == "EXPECTED_FAIL" then
for _, assertion_result in ipairs(test_result.assertion_results) do
if (assertion_result.result == "FAIL" and test_types_to_display["FAIL"]) or assertion_result.result == "PASS" and test_types_to_display["PASS"] then
format_assertion_result(assertion_result)
Expand All @@ -92,7 +100,7 @@ function terminal.results(tested_result, test_types_to_display)
if extra_newline then tadd.add("\n") end
end

if test_result.result == "EXCEPTION" or test_result.result == "UNKNOWN" then
if test_result.result == "EXCEPTION" or test_result.result == "UNKNOWN" or test_result.result == "UNEXPECTED" or test_result.result == "EXPECTED_EXCEPTION" or test_result.result == "EXPECTED_UNKNOWN" then
tadd.add(" ", (test_result.message:gsub("\n", "\n ")), "\n")
tadd.add("\n")
end
Expand All @@ -118,7 +126,9 @@ function terminal.summary(output)
tostring(output.total_counts.passed),
" passed%{reset}, %{red}",
tostring(output.total_counts.failed),
" failed%{reset}\n")
" failed%{reset}, ",
tostring(output.total_counts.expected),
" expected\n")


tadd.add(
Expand Down
6 changes: 4 additions & 2 deletions build/tested/test_runner.lua
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ function test_runner.run_tests(
total_time = 0,
total_tests = 0,
all_fully_tested = true,
total_counts = { passed = 0, failed = 0, skipped = 0, invalid = 0 },
total_counts = { passed = 0, failed = 0, expected = 0, skipped = 0, invalid = 0 },
module_results = {},
}
local coverage_results = {}
Expand Down Expand Up @@ -85,6 +85,7 @@ function test_runner.run_tests(
if test_output.fully_tested == false then output.all_fully_tested = false end
output.total_counts.passed = output.total_counts.passed + test_output.counts.passed
output.total_counts.failed = output.total_counts.failed + test_output.counts.failed
output.total_counts.expected = output.total_counts.expected + test_output.counts.expected
output.total_counts.skipped = output.total_counts.skipped + test_output.counts.skipped
output.total_counts.invalid = output.total_counts.invalid + test_output.counts.invalid
output.total_time = output.total_time + test_output.total_time
Expand Down Expand Up @@ -114,7 +115,7 @@ local function run_parallel_tests(
total_time = 0,
total_tests = 0,
all_fully_tested = true,
total_counts = { passed = 0, failed = 0, skipped = 0, invalid = 0 },
total_counts = { passed = 0, failed = 0, expected = 0, skipped = 0, invalid = 0 },
module_results = {},
}
local coverage_results = {}
Expand Down Expand Up @@ -158,6 +159,7 @@ local function run_parallel_tests(
if test_output.fully_tested == false then output.all_fully_tested = false end
output.total_counts.passed = output.total_counts.passed + test_output.counts.passed
output.total_counts.failed = output.total_counts.failed + test_output.counts.failed
output.total_counts.expected = output.total_counts.expected + test_output.counts.expected
output.total_counts.skipped = output.total_counts.skipped + test_output.counts.skipped
output.total_counts.invalid = output.total_counts.invalid + test_output.counts.invalid
output.total_time = output.total_time + test_output.total_time
Expand Down
19 changes: 19 additions & 0 deletions build/tested/types.lua
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,25 @@ local types = {}

























Expand Down
24 changes: 14 additions & 10 deletions docs/api-reference.md
Original file line number Diff line number Diff line change
@@ -1,14 +1,18 @@
# API Reference
# API Quick Reference

### Tests
## Tests

- `tested.test(name: string, fn: function())`
- `tested.skip(name: string, fn: function())` - don't run this test. Useful if something is known-broken and you want to disable a test.
- `tested.test(name: string, options?: table, fn: function())`
- `tested.skip(name: string, options?: table, fn: function())` - don't run this test. Useful if something is known-broken and you want to temporarily disable a test.
- `tested.only(name: string, options?: table, fn: function())` - will only run these tests. Useful if you want to debug a few tests in a large file.
- `tested.conditional_test(name: string, condition: boolean, fn: function())` - Will only run the test if the condition passes.
- Ex: the condition could be `type(jit) == 'table'` and the test will only run on LuaJIT

### Asserts
### Options
- `expected: "FAIL", "EXCEPTION", or "UNKNOWN"` - Useful if a unit test is going to be broken for an extended period of time and you want to mark it as a specific expected result. It will not be shown by default in the output (but can be enabled by `--show expected`, `--show all`, or `--show valid`). If the result stops being the expected value, the test will display and count as `invalid`.
- ex: `tested.test("fails all the time", {expected="FAIL"}, function())`
- `run_when: boolean` - a value that can be determined at runtime - useful if the test should only run on LuaJIT, a certain operating system, or even configuration.
- ex: `tested.test("luajit only", {run_when=type(jit) == 'table'}, function())` - will only run when executing via LuaJIT

## Asserts
All the asserts in `tested` take in a table with a couple of values that should hopefully make debugging your unit tests easier. The `given` and `should` are [optional] text representations of what your unit tests are doing. It can be useful to have text representations so you're not having to rely on the values alone. It's also nice if you're passing in a bunch of test files and use the filename in `given`, so that it appears in the output if something goes wrong.

- `tested.assert({given?: string, should?: string, expected, actual})`
Expand All @@ -17,10 +21,10 @@ All the asserts in `tested` take in a table with a couple of values that should
- `tested.assert_throws_exception({given?: string, should?: string, expected?: any, actual: function()})`
- `expected` is also optional here, but if passed in, `tested` will check if it matches the error that comes back from the function. If `expected` is a `string`, it should match the exact string that is thrown in your error command.

### How `tested` works (high level)
1. Recursively search through the `tests` folder (from where it's called) or the folders specfied [on the commandline](./cli.md#tested-base-command) looking for files with the suffx `_test.lua` (or `_test.tl`) and makes a list of them
## How `tested` works (high level)
1. Recursively search through the `tests` folder (from where it's called) or the folders specified [on the commandline](./cli.md#tested-base-command) looking for files with the suffix `_test.lua` (or `_test.tl`) and makes a list of them
2. Before running a test file, it notes which packages have been loaded.
3. It runs through the test file and creates a list of all the tests that need to be run. Shuffling the list if desired.
4. It runs each test, tracking the asserts and results
5. It clears any packages that were loaded during the test from the `package.loaded` table and then runs garabage collection.
5. It clears any packages that were loaded during the test from the `package.loaded` table and then runs garbage collection.
6. It gathers up all the results
Loading
Loading