Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
venv
33 changes: 33 additions & 0 deletions Copy_of_Untitled7.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,39 @@
"metadata": {}
}
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# --- UNIT TESTS ---\n",
"import numpy as np\n",
"\n",
"def test_coherence_index():\n",
" # Test with r=0, should be 1.0\n",
" assert np.isclose(coherence_index(0, 1.5), 1.0), \"Failed on r=0\"\n",
" \n",
" # Test with typical values\n",
" r = 10\n",
" rho_info = 1.0\n",
" expected = np.exp(-10 / (10 * 1.0))\n",
" assert np.isclose(coherence_index(r, rho_info), expected), \"Failed on typical values\"\n",
" \n",
" # Test with array input\n",
" r_arr = np.array([0, 10, 20])\n",
" expected_arr = np.exp(-r_arr / (10 * 1.0))\n",
" np.testing.assert_allclose(coherence_index(r_arr, 1.0), expected_arr, err_msg=\"Failed on array input\")\n",
Comment on lines +239 to +248
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

While these tests are a great start, they can be made more robust and maintainable. Currently, the expected values are calculated by re-implementing the logic from the coherence_index function. This makes the tests brittle; if the implementation of coherence_index changes (e.g., the constant 10 is updated), these tests might still pass if the same logic error is made in both places, or they will require updating the same logic in two places.

A better practice is to test against pre-calculated, known-good values. This verifies that the function produces the correct output for a given input, makes the test's intent clearer, and prevents a logic error in the implementation from being silently mirrored in the test.

    # Test with typical values
    r = 10
    rho_info = 1.0
    expected = 0.36787944  # np.exp(-1)
    assert np.isclose(coherence_index(r, rho_info), expected), "Failed on typical values"
    
    # Test with array input
    r_arr = np.array([0, 10, 20])
    expected_arr = np.array([1.0, 0.36787944, 0.13533528])  # [np.exp(0), np.exp(-1), np.exp(-2)]
    np.testing.assert_allclose(coherence_index(r_arr, 1.0), expected_arr, err_msg="Failed on array input")

" \n",
" # Test error condition (e.g. division by zero if rho_info is 0)\n",
" # Although the function doesn't explicitly handle it, it will return inf or raise a warning. \n",
" # We focus on the functional math values.\n",
Comment on lines +250 to +252
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The comment here correctly identifies the division-by-zero edge case when rho_info is 0. While you've noted you're focusing on functional values, a comprehensive test suite should cover such predictable edge cases to prevent future regressions and clarify the function's behavior under these conditions.

I suggest replacing these comments with actual test assertions for rho_info = 0. For r > 0, the function should return 0.0 (and raise a RuntimeWarning). For r = 0, it should return NaN.

    # Test error condition (e.g. division by zero if rho_info is 0)
    # This should produce a RuntimeWarning and result in 0.0 for r > 0.
    assert np.isclose(coherence_index(10, 0), 0.0), "Failed on rho_info=0"

    # Test with r=0 and rho_info=0, which should result in NaN.
    assert np.isnan(coherence_index(0, 0)), "Failed on r=0, rho_info=0"

" print(\"All tests for coherence_index passed!\")\n",
"\n",
"if __name__ == \"__main__\":\n",
" test_coherence_index()\n"
]
}
]
}