From b970a7819062a39045d3cf8b07b57a9c3189676c Mon Sep 17 00:00:00 2001 From: FourierTransformer Date: Mon, 11 May 2026 21:59:56 -0500 Subject: [PATCH 1/3] got some basic test lifecycle going on --- build/tested.lua | 33 ++++++++++ build/tested/types.lua | 10 +++ src/tested.tl | 33 ++++++++++ src/tested/types.tl | 10 +++ tests/coroutine_test.lua | 134 +++++++++++++++++++++++++++++++++++++++ tests/lifecycle_test.lua | 41 ++++++++++++ 6 files changed, 261 insertions(+) create mode 100644 tests/coroutine_test.lua create mode 100644 tests/lifecycle_test.lua diff --git a/build/tested.lua b/build/tested.lua index 787d573..c1a33b6 100644 --- a/build/tested.lua +++ b/build/tested.lua @@ -56,6 +56,22 @@ function tested.only(name, fn_or_options, fn) table.insert(tested.tests, { name = name, fn = func, options = options, kind = "only" }) end +function tested.before(fn) + tested.before_fn = fn +end + +function tested.after(fn) + tested.after_fn = fn +end + +function tested.before_each(fn) + tested.before_each_fn = fn +end + +function tested.after_each(fn) + tested.after_each_fn = fn +end + function tested.assert(assertion) local errors = {} if assertion.expected == nil then table.insert(errors, "'expected'") end @@ -218,6 +234,10 @@ function tested:run(filename, options) total_time = 0, } + if tested.before_fn then + tested.before_fn() + end + for i, test in ipairs(self.tests) do test_results.tests[i] = { assertion_results = {}, name = test.name } @@ -229,6 +249,10 @@ function tested:run(filename, options) test_results.tests[i].time = 0 else + if tested.before_each_fn then + tested.before_each_fn() + end + local assert_failed_count = 0 local total_assertions = 0 @@ -269,6 +293,10 @@ function tested:run(filename, options) adjust_for_expected(test.options.expected, test_results.tests[i]) + + if tested.after_each_fn then + tested.after_each_fn() + end end @@ -277,6 +305,11 @@ function tested:run(filename, options) if test_results.counts.failed == 0 and test_results.counts.invalid 
== 0 then test_results.fully_tested = true end + + if tested.after_fn then + tested.after_fn() + end + return test_results end diff --git a/build/tested/types.lua b/build/tested/types.lua index 0dac245..b60afa8 100644 --- a/build/tested/types.lua +++ b/build/tested/types.lua @@ -137,6 +137,16 @@ local types = {} + + + + + + + + + + diff --git a/src/tested.tl b/src/tested.tl index 783f815..bfd9f5b 100644 --- a/src/tested.tl +++ b/src/tested.tl @@ -56,6 +56,22 @@ function tested.only(name: string, fn_or_options: function() | types.TestedOptio table.insert(tested.tests, {name=name, fn=func, options=options, kind="only"}) end +function tested.before(fn: function()) + tested.before_fn = fn +end + +function tested.after(fn: function()) + tested.after_fn = fn +end + +function tested.before_each(fn: function()) + tested.before_each_fn = fn +end + +function tested.after_each(fn: function()) + tested.after_each_fn = fn +end + function tested.assert(assertion: types.Assertion): boolean, string local errors = {} if assertion.expected == nil then table.insert(errors, "'expected'") end @@ -218,6 +234,10 @@ function tested:run(filename: string, options: types.TestRunnerOptions): types.T total_time = 0 } + if tested.before_fn then + tested.before_fn() + end + for i, test in ipairs(self.tests) do test_results.tests[i] = {assertion_results = {}, name = test.name} @@ -229,6 +249,10 @@ function tested:run(filename: string, options: types.TestRunnerOptions): types.T test_results.tests[i].time = 0 else + if tested.before_each_fn then + tested.before_each_fn() + end + local assert_failed_count = 0 local total_assertions = 0 @@ -269,6 +293,10 @@ function tested:run(filename: string, options: types.TestRunnerOptions): types.T -- only adjust for tests that are run. Otherwise the skips when filtering or tested.skip will trigger adjust_for_expected(test.options.expected, test_results.tests[i]) + + if tested.after_each_fn then + tested.after_each_fn() + end end -- always add up at the end! 
@@ -277,6 +305,11 @@ function tested:run(filename: string, options: types.TestRunnerOptions): types.T if test_results.counts.failed == 0 and test_results.counts.invalid == 0 then test_results.fully_tested = true end + + if tested.after_fn then + tested.after_fn() + end + return test_results end diff --git a/src/tested/types.tl b/src/tested/types.tl index 1fab0fb..d402133 100644 --- a/src/tested/types.tl +++ b/src/tested/types.tl @@ -136,6 +136,12 @@ local record types interface Tested tests: {Test} run_only_tests: boolean + + before_fn: function() + after_fn: function() + before_each_fn: function() + after_each_fn: function() + assert: function(assertion: Assertion): boolean, string assert_truthy: function(assertion: TruthyAssertion): boolean, string assert_falsy: function(assertion: FalsyAssertion): boolean, string @@ -145,6 +151,10 @@ local record types skip: function(name: string, fn_or_options: function() | TestedOptions, fn?: function()) only: function(name: string, fn_or_options: function() | TestedOptions, fn?: function()) run: function(self: types.Tested, filename: string, options: types.TestRunnerOptions): types.TestedOutput + before: function(fn: function()) + after: function(fn: function()) + before_each: function(fn: function()) + after_each: function(fn: function()) end end diff --git a/tests/coroutine_test.lua b/tests/coroutine_test.lua new file mode 100644 index 0000000..a5d0007 --- /dev/null +++ b/tests/coroutine_test.lua @@ -0,0 +1,134 @@ +local tested = require("tested") + +tested.test("basic create and resume runs the function", function() + local result = nil + local co = coroutine.create(function() result = 42 end) + tested.assert({ given = "status before first resume", should = "be suspended", expected = "suspended", actual = coroutine.status(co) }) + coroutine.resume(co) + tested.assert({ given = "result after resume", should = "be 42", expected = 42, actual = result }) + tested.assert({ given = "status after completion", should = "be dead", 
expected = "dead", actual = coroutine.status(co) }) +end) + +tested.test("yield passes values back to the resume caller", function() + local co = coroutine.create(function() + coroutine.yield(10) + coroutine.yield(20) + coroutine.yield(30) + end) + local ok1, v1 = coroutine.resume(co) + local ok2, v2 = coroutine.resume(co) + local ok3, v3 = coroutine.resume(co) + tested.assert({ given = "first resume ok", expected = true, actual = ok1 }) + tested.assert({ given = "first yield value", expected = 10, actual = v1 }) + tested.assert({ given = "second yield value", expected = 20, actual = v2 }) + tested.assert({ given = "third yield value", expected = 30, actual = v3 }) +end) + +tested.test("resume passes values back into the coroutine via yield", function() + local received = {} + local co = coroutine.create(function() + local a = coroutine.yield() + local b = coroutine.yield() + received = { a, b } + end) + coroutine.resume(co) + coroutine.resume(co, "hello") + coroutine.resume(co, "world") + tested.assert({ given = "first received value", expected = "hello", actual = received[1] }) + tested.assert({ given = "second received value", expected = "world", actual = received[2] }) +end) + +tested.test("status transitions: suspended → suspended at yield → dead", function() + local co = coroutine.create(function() coroutine.yield() end) + tested.assert({ given = "before first resume", expected = "suspended", actual = coroutine.status(co) }) + coroutine.resume(co) + tested.assert({ given = "paused at yield", expected = "suspended", actual = coroutine.status(co) }) + coroutine.resume(co) + tested.assert({ given = "after completion", expected = "dead", actual = coroutine.status(co) }) +end) + +tested.test("error inside coroutine does not propagate to caller", function() + local co = coroutine.create(function() error("something went wrong") end) + local ok, err = coroutine.resume(co) + tested.assert({ given = "resume ok flag", expected = false, actual = ok }) + 
tested.assert_truthy({ given = "error message", actual = err }) + tested.assert({ given = "status after error", expected = "dead", actual = coroutine.status(co) }) +end) + +tested.test("resuming a dead coroutine returns false", function() + local co = coroutine.create(function() end) + coroutine.resume(co) + local ok, err = coroutine.resume(co) + tested.assert({ given = "resume ok flag", expected = false, actual = ok }) + tested.assert_truthy({ given = "cannot resume dead coroutine message", actual = err }) +end) + +tested.test("coroutine.wrap creates a simple callable generator", function() + local gen = coroutine.wrap(function() + coroutine.yield("a") + coroutine.yield("b") + coroutine.yield("c") + end) + tested.assert({ given = "first call", expected = "a", actual = gen() }) + tested.assert({ given = "second call", expected = "b", actual = gen() }) + tested.assert({ given = "third call", expected = "c", actual = gen() }) +end) + +tested.test("coroutine as a for-iterator produces a sequence", function() + local function range(n) + return coroutine.wrap(function() + for i = 1, n do coroutine.yield(i) end + end) + end + local result = {} + for v in range(5) do table.insert(result, v) end + tested.assert({ given = "generated sequence", expected = { 1, 2, 3, 4, 5 }, actual = result }) +end) + +tested.test("producer-consumer pattern with coroutines", function() + local produced = {} + local consumed = {} + + local producer = coroutine.create(function() + for i = 1, 3 do + table.insert(produced, i) + coroutine.yield(i) + end + end) + + while coroutine.status(producer) ~= "dead" do + local ok, val = coroutine.resume(producer) + if ok and val ~= nil then + table.insert(consumed, val) + end + end + + tested.assert({ given = "produced values", expected = { 1, 2, 3 }, actual = produced }) + tested.assert({ given = "consumed values", expected = { 1, 2, 3 }, actual = consumed }) +end) + +tested.test("multiple coroutines interleave cooperatively", function() + local log = {} + 
+ local co1 = coroutine.create(function() + table.insert(log, "co1-a") + coroutine.yield() + table.insert(log, "co1-b") + end) + local co2 = coroutine.create(function() + table.insert(log, "co2-a") + coroutine.yield() + table.insert(log, "co2-b") + end) + + coroutine.resume(co1) + coroutine.resume(co2) + coroutine.resume(co1) + coroutine.resume(co2) + + tested.assert({ given = "interleaved execution log", + expected = { "co1-a", "co2-a", "co1-b", "co2-b" }, + actual = log }) +end) + +return tested diff --git a/tests/lifecycle_test.lua b/tests/lifecycle_test.lua new file mode 100644 index 0000000..c037350 --- /dev/null +++ b/tests/lifecycle_test.lua @@ -0,0 +1,41 @@ +local tested = require("tested") + +local counts = { before = 0, after = 0, before_each = 0, after_each = 0 } + +tested.before(function() counts.before = counts.before + 1 end) +tested.after(function() counts.after = counts.after + 1 end) +tested.before_each(function() counts.before_each = counts.before_each + 1 end) +tested.after_each(function() counts.after_each = counts.after_each + 1 end) + +tested.test("before runs once before first test", function() + tested.assert({ given = "before count", should = "be 1", expected = 1, actual = counts.before }) + tested.assert({ given = "after count", should = "be 0", expected = 0, actual = counts.after }) + tested.assert({ given = "before_each count", should = "be 1", expected = 1, actual = counts.before_each }) + tested.assert({ given = "after_each count", should = "be 0", expected = 0, actual = counts.after_each }) +end) + +tested.test("after_each runs after first test, before_each runs again", function() + tested.assert({ given = "before count", should = "still be 1", expected = 1, actual = counts.before }) + tested.assert({ given = "after count", should = "still be 0", expected = 0, actual = counts.after }) + tested.assert({ given = "before_each count", should = "be 2", expected = 2, actual = counts.before_each }) + tested.assert({ given = "after_each 
count", should = "be 1", expected = 1, actual = counts.after_each }) +end) + +tested.test("this test is skipped", { run_when = false }, function() end) + +tested.test("before_each and after_each do not run for skipped tests", function() + tested.assert({ given = "before_each count", should = "be 3 not 4", expected = 3, actual = counts.before_each }) + tested.assert({ given = "after_each count", should = "be 2 not 3", expected = 2, actual = counts.after_each }) +end) + +tested.test("before_each is one ahead of after_each within a running test body", function() + tested.assert({ given = "before_each count", should = "be 4", expected = 4, actual = counts.before_each }) + tested.assert({ given = "after_each count", should = "be 3", expected = 3, actual = counts.after_each }) +end) + +tested.test("after has not run during any test", function() + tested.assert({ given = "after count", should = "be 0", expected = 0, actual = counts.after }) + tested.assert({ given = "before count", should = "be 1", expected = 1, actual = counts.before }) +end) + +return tested From fa0bade03f1beafcc2d62f89a5d0218a8d2bae2c Mon Sep 17 00:00:00 2001 From: FourierTransformer Date: Tue, 12 May 2026 06:14:27 -0500 Subject: [PATCH 2/3] updated docs for lifecycle and re-ordered for legibility --- docs/api-reference.md | 10 ++- docs/unit-testing.md | 161 +++++++++++++++++++++++++----------------- 2 files changed, 106 insertions(+), 65 deletions(-) diff --git a/docs/api-reference.md b/docs/api-reference.md index 4d3e6c5..3750cd4 100644 --- a/docs/api-reference.md +++ b/docs/api-reference.md @@ -13,7 +13,7 @@ - ex: `tested.test("luajit only", {run_when=type(jit) == 'table'}, function())` - will only run when executing via LuaJIT ## Asserts -All the asserts in `tested` take in a table with a couple of values that should hopefully make debugging your unit tests. The `given` and `should` are [optional] text representations of what your unit test are doing. 
It can be useful to have text representations so you're not having to rely on the values alone. It's also nice if you're passing in a bunch of test files and use the filename in `given`, so that it appears in the output if something goes wrong. +All the asserts in `tested` take in a table with a couple of values that should hopefully make debugging your unit tests. The `given` and `should` are _optional_ text representations of what your unit tests are doing. It can be useful to have text representations so you're not having to rely on the values alone. It's also nice if you're passing in a bunch of test files and use the filename in `given`, so that it appears in the output if something goes wrong. - `tested.assert({given?: string, should?: string, expected, actual})` - `tested.assert_truthy({given?: string, should?: string, actual})` @@ -21,6 +21,14 @@ All the asserts in `tested` take in a table with a couple of values that should - `tested.assert_throws_exception({given?: string, should?: string, expected?: any, actual: function()})` - `expected` is also optional here, but if passed in, `tested` will check if it matches the error that comes back from the function. If `expected` is a `string`, it should match the exact string that is thrown in your error command. +## Test Lifecycle +All the lifecycle methods take in a function that will be executed at the corresponding time. For any skipped test, the `before_each` and `after_each` will not run. + +- `tested.before(fn: function())` - executes before any tests in a file run +- `tested.after(fn: function())` - executes after all the tests in a file have run +- `tested.before_each(fn: function())` - executes before each test +- `tested.after_each(fn: function())` - executes after each test + ## How `tested` works (high level) 1. 
Recursively search through the `tests` folder (from where it's called) or the folders specfied [on the commandline](./cli.md#tested-base-command) looking for files with the suffix `_test.lua` (or `_test.tl`) and makes a list of them 2. Before running a test file, it notes which packages have been loaded. diff --git a/docs/unit-testing.md b/docs/unit-testing.md index 7f6fad8..9cc9fcc 100644 --- a/docs/unit-testing.md +++ b/docs/unit-testing.md @@ -1,9 +1,9 @@ # Unit Testing `tested` as a framework, tries to let you _just write tests_. If you want multiple asserts in one test, go for it. Dynamically generate tests? No Problem! `tested` aims to be flexible enough to work with a wide variety of testing scenarios and philosophies. -## Testing tables +## Tests -`tested.assert` will also deep compare tables, and will generate a little summary of the differences as well as print out the expected and actual table. +Below is an example of basic test comparing two tables, `tested.assert` will deep compare the tables, and generate a little summary of the differences as well as print out the expected and actual table. === "Test" @@ -15,7 +15,7 @@ scores = {10, 20, 30}, config = { debug = true, port = 8080, crazy_table = {"hello", "world"} } } - + local t2 = { name = 'Bob', age = 30, @@ -23,7 +23,7 @@ config = { debug = false, port = 8080 }, email = 'bob@example.com' } - + tested.assert({ given = "a basic table", should = "not be the same as the other table", @@ -55,7 +55,7 @@ name = "Bob", scores = { 10, 25, 30 } } - + Expected: { age = 30, @@ -90,63 +90,8 @@ tested.test("tables with self-cycles, but the same structure should be equal", f end) ``` -## Truthy/Falsy tests - -Sometimes in Lua you want to check if _anything_ returned (like a `string.match` or that a value exists in a table), we've added in an `assert_truthy` and `assert_falsy` to help out in those cases. 
- -We would recommend if you're looking for explicitly looking for `true` or `false`, maybe stick with the regular `assert` so your tests are more semantically correct, but if checking "exists" and "not exists", `assert_truthy` and `assert_falsy` are good candidates. - -```lua -tested.test("truthy", function() - tested.assert_truthy({given="empty string", actual=""}) - tested.assert_truthy({given="a number", actual=0}) - tested.assert_truthy({given="a function", actual=function() end}) - tested.assert_truthy({given="a table", actual={}}) - tested.assert_truthy({given="an unpack", actual=table.unpack({"a", "b"})}) - tested.assert_truthy({given="true boolean", actual=true}) - tested.assert_truthy({given="not false", actual=not false}) - tested.assert_truthy({given="not nil", actual=not nil}) - tested.assert_truthy({given="string.find he in hello", actual=string.find("hello", "he")}) -end) - -tested.test("falsy", function() - local b - tested.assert_falsy({given="nil", actual=nil}) - tested.assert_falsy({given="false", actual=false}) - tested.assert_falsy({given="unset variable", actual=b}) -end) -``` - -## Testing exceptions -When writing assertions that check that an exception has been thrown, the `actual` should be a function taking no arguments, that when run raises an exception. `tested` also has the ability to capture an error (using `pcall` under the hood) and check if that returns as expected as well. 
- -```lua --- simple check that exception will be raised -tested.test("assert_throws_exception handles exception in assert", function() - tested.assert_throws_exception({ - given = "an explicit error", - actual = function() error("gets raised, but handled!") end - }) -end) - --- check that a specific exception was thrown -tested.test("example with exceptions and error checking", function() - - -- will throw the specific exception in "expected" below - local function_that_throws = function() - local options = {loadFromString=true, headers=false, fieldsToKeep={1, 2}} - ftcsv.parse("apple>banana>carrot\ndiamond>emerald>pearl", ">", options) - end - tested.assert_throws_exception({ - given="no headers and no renaming takes place", - expected="ftcsv: fieldsToKeep only works with header-less files when using the 'rename' functionality", - actual=function_that_throws - }) -end) -``` - -## Skipping & Only tests +### Skipping & Only tests For quick debugging purposes, there are `tested.skip` and `tested.only`. These allow you to quickly isolate testing when running selective tests a particular file. For things that are going to broken longer term, you should set the `expected` option. @@ -179,7 +124,9 @@ end) Both of these work on a _per-test file_ basis, so it may also be useful to pass the specific test file that you are working with to `tested` as well: `tested ./tests/file_with_only_test.lua` -## Options + + +## Test Options ### Conditional Skipping If you want to _conditionally_ skip tests based on something that can be determined at runtime (LuaJIT, operating system, dependency present or not), there is the `run_when` options @@ -222,10 +169,96 @@ end) -## Invalid tests -If a test file has a test that throws an unhandled exception, `tested` finds a test without any asserts, or a test with `expected` set returns without that result, they are considered "invalid", and will display as such in the results and will be listed in the summary as "invalid". 
+## Assertions + +### Truthy/Falsy tests +Sometimes in Lua you want to check if _anything_ returned (like a `string.match` or that a value exists in a table), we've added in an `assert_truthy` and `assert_falsy` to help out in those cases. +We would recommend if you're looking for explicitly looking for `true` or `false`, maybe stick with the regular `assert` so your tests are more semantically correct, but if checking "exists" and "not exists", `assert_truthy` and `assert_falsy` are good candidates. + +```lua +tested.test("truthy", function() + tested.assert_truthy({given="empty string", actual=""}) + tested.assert_truthy({given="a number", actual=0}) + tested.assert_truthy({given="a function", actual=function() end}) + tested.assert_truthy({given="a table", actual={}}) + tested.assert_truthy({given="an unpack", actual=table.unpack({"a", "b"})}) + tested.assert_truthy({given="true boolean", actual=true}) + tested.assert_truthy({given="not false", actual=not false}) + tested.assert_truthy({given="not nil", actual=not nil}) + tested.assert_truthy({given="string.find he in hello", actual=string.find("hello", "he")}) +end) + +tested.test("falsy", function() + local b + tested.assert_falsy({given="nil", actual=nil}) + tested.assert_falsy({given="false", actual=false}) + tested.assert_falsy({given="unset variable", actual=b}) +end) +``` + +### Testing exceptions +When writing assertions that check that an exception has been thrown, the `actual` should be a function taking no arguments, that when run raises an exception. `tested` also has the ability to capture an error (using `pcall` under the hood) and check if that returns as expected as well. 
+```lua
+-- simple check that exception will be raised
+tested.test("assert_throws_exception handles exception in assert", function()
+  tested.assert_throws_exception({
+    given = "an explicit error",
+    actual = function() error("gets raised, but handled!") end
+  })
+end)
+
+-- check that a specific exception was thrown
+tested.test("example with exceptions and error checking", function()
+
+  -- will throw the specific exception in "expected" below
+  local function_that_throws = function()
+    local options = {loadFromString=true, headers=false, fieldsToKeep={1, 2}}
+    ftcsv.parse("apple>banana>carrot\ndiamond>emerald>pearl", ">", options)
+  end
+  tested.assert_throws_exception({
+    given="no headers and no renaming takes place",
+    expected="ftcsv: fieldsToKeep only works with header-less files when using the 'rename' functionality",
+    actual=function_that_throws
+  })
+end)
+```
+
+## Test Lifecycle
+`tested` has support for a couple of test lifecycle methods. They allow you to register a function to run `before` any tests within the file have run, `after` all tests have run, `before_each` test, and `after_each` test. If a test is skipped for any reason (`test.skip`, `run_when` is `false`, filtering, etc) the `before_each` and `after_each` will **not** be run. Test lifecycle hooks can be useful if you want to setup/teardown connections/services/configs, create or clean up temporary files, or even one day setup stubs and mocks! 
+ +Here's a simple example of what can be done: + +```lua +local counts = { before = 0, after = 0, before_each = 0, after_each = 0 } + +tested.before(function() counts.before = counts.before + 1 end) +tested.after(function() counts.after = counts.after + 1 end) +tested.before_each(function() counts.before_each = counts.before_each + 1 end) +tested.after_each(function() counts.after_each = counts.after_each + 1 end) + +tested.test("before runs once before first test", function() + tested.assert({ given = "before count", should = "be 1", expected = 1, actual = counts.before }) + tested.assert({ given = "after count", should = "be 0", expected = 0, actual = counts.after }) + tested.assert({ given = "before_each count", should = "be 1", expected = 1, actual = counts.before_each }) + tested.assert({ given = "after_each count", should = "be 0", expected = 0, actual = counts.after_each }) +end) + +tested.test("after_each runs after first test, before_each runs again", function() + tested.assert({ given = "before count", should = "still be 1", expected = 1, actual = counts.before }) + tested.assert({ given = "after count", should = "still be 0", expected = 0, actual = counts.after }) + tested.assert({ given = "before_each count", should = "be 2", expected = 2, actual = counts.before_each }) + tested.assert({ given = "after_each count", should = "be 1", expected = 1, actual = counts.after_each }) +end) + +-- before_each and after_each will not run on skipped tests! +tested.test("this test is skipped", { run_when = false }, function() end) +``` + +## Invalid tests +If a test file has a test that throws an unhandled exception, `tested` finds a test without any asserts, or a test with `expected` set returns without that result, they are considered "invalid", and will display as such in the results and will be listed in the summary as "invalid".

From b74c4939e6a9f874fdfad7c444df33dd4c4b60c1 Mon Sep 17 00:00:00 2001
From: FourierTransformer 
Date: Tue, 12 May 2026 06:15:15 -0500
Subject: [PATCH 3/3] updated roadmap

---
 docs/roadmap.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/roadmap.md b/docs/roadmap.md
index 1fda0eb..8d3b998 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -9,7 +9,7 @@ Things that I am one day planning to add (in no particular order):
     - [x] `run_when` for conditional running tests
     - [ ] `retries` and (maybe) `retry_timeout` for automatically retrying failing tests
     - [ ] tags for filtering
-- [ ] Lifecycle management (`before`, `after`, `before_each`, `after_each`)
+- [x] Lifecycle management (`before`, `after`, `before_each`, `after_each`)
 - [ ] Table driven assertion (no more for loops around asserts!)
 - [ ] Stubbing
 - [ ] Mocking