Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions build/tested.lua
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,12 @@ function tested:run(filename, options)
test_results.tests[i].time = 0
test_results.counts.skipped = test_results.counts.skipped + 1

elseif options and options.filter ~= nil and not string.find(test.name, options.filter) then
test_results.tests[i].result = "CONDITIONAL_SKIP"
test_results.tests[i].message = "Test name does not match filter pattern '" .. options.filter .. "'"
test_results.tests[i].time = 0
test_results.counts.skipped = test_results.counts.skipped + 1

elseif test.options.run_when ~= nil and test.options.run_when == false then
test_results.tests[i].result = "CONDITIONAL_SKIP"
test_results.tests[i].message = "Condition in `tested.conditional_skip` returned false. Skipping test."
Expand Down
20 changes: 18 additions & 2 deletions build/tested/main.lua
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ local TestRunner, run_parallel_tests = test_runner[1], test_runner[2]
local logging = require("tested.libs.logging")
local logger = logging.get_logger("tested.main")

local TESTED_VERSION = "tested v0.1.0"
local TESTED_VERSION = "tested v0.2.0"



Expand Down Expand Up @@ -55,6 +55,7 @@ local cli_to_display = {






local function parse_args()
Expand All @@ -65,6 +66,8 @@ local function parse_args()
parser:flag("-r --random"):
description("Randomize the order of the tests (default: not-set)"):
default(false)
parser:option("-F --filter"):
description("Only run tests whose name matches this Lua pattern (default: not-set)")
parser:option("-s --show"):
description("What test results to display (default: '-s fail -s exception -s unknown')"):
choices({ "all", "valid", "invalid", "skip", "pass", "fail", "exception", "unknown", "expected", "unexpected" }):
Expand Down Expand Up @@ -126,6 +129,12 @@ local function validate_args(args)
if info.mode == "directory" then table.insert(args.test_directories, path) end
if info.mode == "file" then table.insert(args.test_files, path) end
end
if args.filter then
local ok, err = pcall(string.find, "", args.filter)
if not ok then
error("Invalid --filter pattern '" .. args.filter .. "': " .. tostring(err), 0)
end
end
end

local function load_result_formatter(args)
Expand Down Expand Up @@ -255,6 +264,7 @@ local function run_tests(formatter, args, test_files)
local options = {
random = args.random,
coverage = args.coverage,
filter = args.filter,
}

local display_results = function(test_output)
Expand Down Expand Up @@ -295,8 +305,14 @@ local function main()
local test_files = get_all_test_files(args)
if #test_files == 0 then error("Unable to find any tests to run in: " .. table.concat(args.paths, ", "), 0) end

local header_comments = {}
if args.filter ~= nil then
table.insert(header_comments, "Filtering tests with pattern: '" .. args.filter .. "'")
end


formatter.header(TESTED_VERSION, args.paths, header_comments)

formatter.header(TESTED_VERSION, args.paths)
local runner_output = run_tests(formatter, args, test_files)
formatter.summary(runner_output)

Expand Down
2 changes: 1 addition & 1 deletion build/tested/results/tap.lua
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ local tap = {}
tap.allow_filtering = false
tap.format = "tap"

-- Emit the TAP protocol preamble.
-- TAP output has no free-form header, so the version string, file paths,
-- and comments supplied by the runner are deliberately ignored.
function tap.header(_version_info, _filepaths, _comments)
  io.write("TAP version 14\n")
end

Expand Down
5 changes: 4 additions & 1 deletion build/tested/results/terminal.lua
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,11 @@ terminal.allow_filtering = true

terminal.colors = colors

-- Print the terminal report header: a bright line with the version and the
-- test paths, followed by any informational comments (e.g. the active
-- --filter pattern), then a trailing blank line.
-- `comments` is optional: nil is treated as "no comments" so callers still
-- using the older two-argument signature (e.g. custom formatter hosts)
-- do not crash inside ipairs().
function terminal.header(version_info, filepaths, comments)
  print(colors("%{bright}" .. version_info .. " " .. table.concat(filepaths, " ")))
  for _, comment in ipairs(comments or {}) do
    print(comment)
  end
  print()
end

Expand Down
1 change: 1 addition & 0 deletions build/tested/types.lua
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,7 @@ local types = {}






return types
6 changes: 5 additions & 1 deletion docs/cli.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ The `-c` flag will enable [luacov](https://github.com/lunarmodules/luacov), and
## `tested -r/--random`
The `-r` will randomize the order of tests _within_ a test file.

## `tested -F/--filter`
Only runs tests whose **test name** matches the given Lua pattern (checked with `string.find`). Useful for debugging a specific test or subset of tests.

## `tested -s/--show`
By default the `tested` output shows the problematic test results, that likely need to be addressed: `fail`, `exception`, `unknown`, and `unexpected` but allows filtering to display other results.
Expand Down Expand Up @@ -53,7 +55,7 @@ Specify the number of threads `tested` should use. If set to `0`, will not use a

```
Usage: tested ([-f {terminal,plain,tap}] | [-z <custom_formatter>])
[-h] [-c] [-r]
[-h] [-c] [-r] [-F <filter>]
[-s {all,valid,invalid,skip,pass,fail,exception,unknown,expected,unexpected}]
[-n <threads>] [-x <format_handler>] [-d {DEBUG,INFO,WARNING}]
[--version] [<paths>] ...
Expand All @@ -67,6 +69,8 @@ Options:
-h, --help Show this help message and exit.
-c, --coverage Enable code coverage - will generate luacov.stats.out (default: not-set)
-r, --random Randomize the order of the tests (default: not-set)
-F <filter>, Only run tests whose name matches this Lua pattern (default: not-set)
--filter <filter>
-s {all,valid,invalid,skip,pass,fail,exception,unknown,expected,unexpected},
--show {all,valid,invalid,skip,pass,fail,exception,unknown,expected,unexpected}
What test results to display (default: '-s fail -s exception -s unknown')
Expand Down
9 changes: 4 additions & 5 deletions docs/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,6 @@ Test files should be placed in a folder called `tests`, and the `tested` command
Fully Tested!
```

Check out the [Unit Testing](./unit-testing.md) page for more information on how `tested` works! and after, if you're using Teal, be sure to check out the [Teal Support](./teal-support.md) page!

## Quickstart

### Folder setup
Expand Down Expand Up @@ -113,7 +111,7 @@ return tested

After the `tested` command loads up a test file, it goes through and finds all the various tests defined in the file (in this case there are two) and adds them to a list to be executed. In the example above, that `print` statement will execute before either tests.

It does this so tests can be [shuffled](#cli-quick-reference), [skipped](#skipping-tests), or to [only](#only-tests) run a specific test within a test file!
It does this so tests can be [shuffled](#cli-quick-reference), skipped, or filtered down to run only a specific test within a test file!

### Asserts
The basic assert is composed of four parts:
Expand All @@ -128,15 +126,16 @@ The basic assert is composed of four parts:

The `given` and `should` are optional strings that get displayed in the output to help you identify which specific assert has failed. The idea behind them is to be able to look at the testing output and know _exactly_ what and how something has failed. If your test references multiple files, placing a filename in given can be incredibly useful. Since some tests are more obvious than others (based on a test name), they are not required and can be omitted.

The `expected` and `actual` take in the expected and actual values. There are a couple of other asserts builtin to `tested`, including one for [exceptions](#testing-exceptions), [truthy, and falsy](#truthyfalsy-tests)!
The `expected` and `actual` take in the expected and actual values. There are a couple of other asserts builtin to `tested`, including one for [exceptions](./unit-testing.md#testing-exceptions), [truthy, and falsy](./unit-testing.md#truthyfalsy-tests)!


### CLI Quick Reference
There are a couple CLI commands that are good to know when you get started:

- `tested -c` or `--coverage` will enable luacov code coverage and generate a `luacov.stats.out` file
- `tested -r` or `--random` will randomize the order of tests _within a test file_.
- `tested -s` or `--show` supports displaying different status of tests. By default `tested` shows tests which require followup (so `fail`, `exception`, and `invalid`)
- `tested -F <pattern>` or `--filter <pattern>` will filter tests based on a `string.find` pattern. The pattern can be the full test name, a few consecutive words from the test name, or a full-on Lua pattern!
- `tested -s <option>` or `--show <option>` supports displaying different status of tests. By default `tested` shows tests which require followup (so `fail`, `exception`, and `invalid`)
- Ex: `tested -s pass -s skip` see all passed and skipped tests
- Ex: `tested -s valid`

Expand Down
6 changes: 6 additions & 0 deletions src/tested.tl
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,12 @@ function tested:run(filename: string, options: types.TestRunnerOptions): types.T
test_results.tests[i].time = 0
test_results.counts.skipped = test_results.counts.skipped + 1

elseif options and options.filter ~= nil and not string.find(test.name, options.filter) then
test_results.tests[i].result = "CONDITIONAL_SKIP"
test_results.tests[i].message = "Test name does not match filter pattern '" .. options.filter .. "'"
test_results.tests[i].time = 0
test_results.counts.skipped = test_results.counts.skipped + 1

elseif test.options.run_when ~= nil and test.options.run_when == false then
test_results.tests[i].result = "CONDITIONAL_SKIP"
test_results.tests[i].message = "Condition in `tested.conditional_skip` returned false. Skipping test."
Expand Down
22 changes: 19 additions & 3 deletions src/tested/main.tl
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ local TestRunner, run_parallel_tests = test_runner[1], test_runner[2]
local logging = require("tested.libs.logging")
local logger = logging.get_logger("tested.main")

local TESTED_VERSION = "tested v0.1.0"
local TESTED_VERSION = "tested v0.2.0"

local type types = require("tested.types")

Expand Down Expand Up @@ -50,6 +50,7 @@ local record CLIOptions
paths: {string}
debug: logging.level
threads: integer
filter: string

-- derived from above values
test_files: {string}
Expand All @@ -65,6 +66,8 @@ local function parse_args(): CLIOptions
parser:flag("-r --random")
:description("Randomize the order of the tests (default: not-set)")
:default(false)
parser:option("-F --filter")
:description("Only run tests whose name matches this Lua pattern (default: not-set)")
parser:option("-s --show")
:description("What test results to display (default: '-s fail -s exception -s unknown')")
:choices({"all", "valid", "invalid", "skip", "pass", "fail", "exception", "unknown", "expected", "unexpected"})
Expand Down Expand Up @@ -126,6 +129,12 @@ local function validate_args(args: CLIOptions)
if info.mode == "directory" then table.insert(args.test_directories, path) end
if info.mode == "file" then table.insert(args.test_files, path) end
end
if args.filter then
local ok, err = pcall(string.find, "", args.filter)
if not ok then
error("Invalid --filter pattern '" .. args.filter .. "': " .. tostring(err), 0)
end
end
end

local function load_result_formatter(args: CLIOptions): types.ResultFormatter
Expand Down Expand Up @@ -254,7 +263,8 @@ end
local function run_tests(formatter: types.ResultFormatter, args: CLIOptions, test_files: {string}): types.TestRunnerOutput
local options: types.TestRunnerOptions = {
random = args.random,
coverage = args.coverage
coverage = args.coverage,
filter = args.filter
}

local display_results = function(test_output: types.TestedOutput)
Expand Down Expand Up @@ -295,8 +305,14 @@ local function main()
local test_files = get_all_test_files(args)
if #test_files == 0 then error("Unable to find any tests to run in: " .. table.concat(args.paths, ", "), 0) end

local header_comments: {string} = {}
if args.filter ~= nil then
table.insert(header_comments, "Filtering tests with pattern: '" .. args.filter .. "'")
end

-- running the tests
formatter.header(TESTED_VERSION, args.paths)
formatter.header(TESTED_VERSION, args.paths, header_comments)

local runner_output = run_tests(formatter, args, test_files)
formatter.summary(runner_output)

Expand Down
2 changes: 1 addition & 1 deletion src/tested/results/tap.tl
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ local record tap is types.ResultFormatter where self.format == "tap" end
tap.allow_filtering = false
tap.format = "tap"

-- Emit the TAP protocol preamble.
-- TAP output has no free-form header, so the version string, file paths,
-- and comments supplied by the runner are deliberately ignored.
function tap.header(_version_info: string, _filepaths: {string}, _comments: {string})
  io.write("TAP version 14\n")
end

Expand Down
5 changes: 4 additions & 1 deletion src/tested/results/terminal.tl
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,11 @@ terminal.allow_filtering = true
-- used to disable the colors in the "plain" formatter
terminal.colors = colors

-- Print the terminal report header: a bright line with the version and the
-- test paths, followed by any informational comments (e.g. the active
-- --filter pattern), then a trailing blank line.
-- `comments` is optional: nil is treated as "no comments" so callers still
-- using the older two-argument signature (e.g. custom formatter hosts)
-- do not crash inside ipairs().
function terminal.header(version_info: string, filepaths: {string}, comments: {string})
  print(colors("%{bright}" .. version_info .. " " .. table.concat(filepaths, " ")))
  for _, comment in ipairs(comments or {}) do
    print(comment)
  end
  print()
end

Expand Down
3 changes: 2 additions & 1 deletion src/tested/types.tl
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ local record types
interface TestRunnerOptions
random: boolean
coverage: boolean
filter: string
end

enum TestResult
Expand Down Expand Up @@ -80,7 +81,7 @@ local record types
interface ResultFormatter
format: string
allow_filtering: boolean
header: function(version: string, filepaths: {string})
header: function(version: string, filepaths: {string}, comments: {string})
results: function(tested_result: types.TestedOutput, test_types_to_display: {types.TestResult: boolean})
summary: function(runner_output: types.TestRunnerOutput)
end
Expand Down
Loading