diff --git a/.github/actions/setup-ci-tools/action.yml b/.github/actions/setup-ci-tools/action.yml
index b1f545a5b..a1ec988ed 100644
--- a/.github/actions/setup-ci-tools/action.yml
+++ b/.github/actions/setup-ci-tools/action.yml
@@ -88,3 +88,10 @@ runs:
- name: Ignore ci directory
shell: bash
run: echo "ci/" >> .gitignore
+
+ - name: Stub ipywidgets for headless kernel execution
+ shell: bash
+ run: |
+ mkdir -p ~/.ipython/profile_default/startup
+ cp ${{ github.action_path }}/stub_widgets.py ~/.ipython/profile_default/startup/00-stub-widgets.py
+ echo "Installed ipywidgets stub to IPython startup"
diff --git a/.github/actions/setup-ci-tools/stub_widgets.py b/.github/actions/setup-ci-tools/stub_widgets.py
new file mode 100644
index 000000000..df4b4786f
--- /dev/null
+++ b/.github/actions/setup-ci-tools/stub_widgets.py
@@ -0,0 +1,94 @@
+# Stub ipywidgets for headless/CI execution.
+# Replaces blocking widget calls with no-ops so notebooks execute without hanging.
+# In Colab/Jupyter with a real frontend, the real ipywidgets is used instead.
+#
+# Installed into ~/.ipython/profile_default/startup/ by the setup-ci-tools action
+# so it runs automatically before any notebook cell when nbconvert spawns a kernel.
+import sys
+import types
+import inspect
+
+
+class _NoOpWidget:
+    """Inert stand-in accepted wherever an ipywidgets widget is expected."""
+
+    children = []
+
+    def __init__(self, *args, **kwargs):
+        # Keep value/options so _Interact can recover sensible call defaults.
+        object.__setattr__(self, "value", kwargs.get("value", None))
+        object.__setattr__(self, "options", kwargs.get("options", []))
+
+    def __setattr__(self, name, value):
+        object.__setattr__(self, name, value)
+
+    def __getattr__(self, name):
+        # Unknown attributes resolve to a do-nothing callable.
+        return lambda *_args, **_kwargs: None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        pass
+
+
+class _Interact:
+    """Stub for widgets.interact / widgets.interactive.
+
+    Calls the wrapped function once with default values extracted from
+    widget stubs so that matplotlib outputs are captured by nbconvert.
+    """
+
+    def __call__(self, *args, **kwargs):
+        if args and callable(args[0]):
+            # interact(f) / @interact / interact(f, x=widget) — call f once now
+            return self._call_with_defaults(args[0], kwargs)
+        # @widgets.interact(param=slider) — return decorator
+        widget_kwargs = kwargs
+
+        def decorator(f):
+            return self._call_with_defaults(f, widget_kwargs)
+
+        return decorator
+
+    def _call_with_defaults(self, f, widget_kwargs=None):
+        sig = inspect.signature(f)
+        call_kwargs = {}
+        for name, param in sig.parameters.items():
+            widget = (widget_kwargs or {}).get(name)
+            if widget is None and param.default is not inspect.Parameter.empty:
+                widget = param.default
+            if isinstance(widget, _NoOpWidget) and widget.value is not None:
+                call_kwargs[name] = widget.value
+            elif widget is not None and not isinstance(widget, _NoOpWidget):
+                call_kwargs[name] = widget
+        try:
+            f(**call_kwargs)
+        except Exception as e:
+            print(f"[stub] interact call skipped: {e}")
+        return f
+
+
+class _StubModule(types.ModuleType):
+    """Module object masquerading as 'ipywidgets'.
+
+    Every non-dunder attribute lookup yields _NoOpWidget, so statements
+    like 'from ipywidgets import AnythingAtAll' always succeed.
+    """
+
+    interact = _Interact()
+    interactive = _Interact()
+
+    def __getattr__(self, name):
+        if not name.startswith("__"):
+            return _NoOpWidget
+        raise AttributeError(name)
+
+
+stub = _StubModule("ipywidgets")
+stub.widgets = stub # support: from ipywidgets import widgets
+sys.modules["ipywidgets"] = stub
+sys.modules["ipywidgets.widgets"] = stub
+
+print("ipywidgets stubbed for headless CI execution")
diff --git a/tutorials/W2D4_Macrolearning/W2D4_Tutorial3.ipynb b/tutorials/W2D4_Macrolearning/W2D4_Tutorial3.ipynb
index 490c9aaa0..755d4556b 100644
--- a/tutorials/W2D4_Macrolearning/W2D4_Tutorial3.ipynb
+++ b/tutorials/W2D4_Macrolearning/W2D4_Tutorial3.ipynb
@@ -2,13 +2,14 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "b96e93dc",
"metadata": {
"colab_type": "text",
"execution": {},
"id": "view-in-github"
},
"source": [
- "
"
+ "
"
]
},
{
@@ -77,8 +78,7 @@
},
"source": [
"---\n",
- "# Setup\n",
- "\n"
+ "# Setup"
]
},
{
@@ -160,7 +160,7 @@
"logging.getLogger('matplotlib.font_manager').disabled = True\n",
"\n",
"%matplotlib inline\n",
- "%config InlineBackend.figure_format = 'retina' # perfrom high definition rendering for images and plots\n",
+ "%config InlineBackend.figure_format = 'retina' # perform high definition rendering for images and plots\n",
"plt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/main/nma.mplstyle\")"
]
},
@@ -444,7 +444,7 @@
"\n",
" def __len__(self):\n",
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
- " thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
+    "    thus we are forced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
"\n",
" return self.num_epochs * self.num_tasks\n",
"\n",
@@ -758,7 +758,7 @@
"\n",
" def __len__(self):\n",
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
- " thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
+    "    thus we are forced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
"\n",
" return self.num_epochs * self.num_tasks\n",
"\n",
@@ -903,7 +903,7 @@
"source": [
"### Coding Exercise 1 Discussion\n",
"\n",
- "1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?\n"
+ "1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?"
]
},
{
@@ -1416,7 +1416,7 @@
"execution": {}
},
"source": [
- "*Estimated timing to here from start of tutorial: 35 minutes*\n"
+ "*Estimated timing to here from start of tutorial: 35 minutes*"
]
},
{
@@ -1753,7 +1753,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.22"
+ "version": "3.10.19"
}
},
"nbformat": 4,
diff --git a/tutorials/W2D4_Macrolearning/instructor/W2D4_Tutorial3.ipynb b/tutorials/W2D4_Macrolearning/instructor/W2D4_Tutorial3.ipynb
index a2af66c57..3dc2f9b22 100644
--- a/tutorials/W2D4_Macrolearning/instructor/W2D4_Tutorial3.ipynb
+++ b/tutorials/W2D4_Macrolearning/instructor/W2D4_Tutorial3.ipynb
@@ -2,13 +2,14 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "d6cf2c1d",
"metadata": {
"colab_type": "text",
"execution": {},
"id": "view-in-github"
},
"source": [
- "
"
+ "
"
]
},
{
@@ -77,8 +78,7 @@
},
"source": [
"---\n",
- "# Setup\n",
- "\n"
+ "# Setup"
]
},
{
@@ -160,7 +160,7 @@
"logging.getLogger('matplotlib.font_manager').disabled = True\n",
"\n",
"%matplotlib inline\n",
- "%config InlineBackend.figure_format = 'retina' # perfrom high definition rendering for images and plots\n",
+ "%config InlineBackend.figure_format = 'retina' # perform high definition rendering for images and plots\n",
"plt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/main/nma.mplstyle\")"
]
},
@@ -444,7 +444,7 @@
"\n",
" def __len__(self):\n",
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
- " thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
+    "    thus we are forced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
"\n",
" return self.num_epochs * self.num_tasks\n",
"\n",
@@ -758,7 +758,7 @@
"\n",
" def __len__(self):\n",
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
- " thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
+    "    thus we are forced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
"\n",
" return self.num_epochs * self.num_tasks\n",
"\n",
@@ -905,7 +905,7 @@
"source": [
"### Coding Exercise 1 Discussion\n",
"\n",
- "1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?\n"
+ "1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?"
]
},
{
@@ -1420,7 +1420,7 @@
"execution": {}
},
"source": [
- "*Estimated timing to here from start of tutorial: 35 minutes*\n"
+ "*Estimated timing to here from start of tutorial: 35 minutes*"
]
},
{
@@ -1759,7 +1759,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.22"
+ "version": "3.10.19"
}
},
"nbformat": 4,
diff --git a/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb b/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb
index f0a79d6ff..0a0aca320 100644
--- a/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb
+++ b/tutorials/W2D4_Macrolearning/student/W2D4_Tutorial3.ipynb
@@ -2,13 +2,14 @@
"cells": [
{
"cell_type": "markdown",
+ "id": "252fd2fe",
"metadata": {
"colab_type": "text",
"execution": {},
"id": "view-in-github"
},
"source": [
- "
"
+ "
"
]
},
{
@@ -77,8 +78,7 @@
},
"source": [
"---\n",
- "# Setup\n",
- "\n"
+ "# Setup"
]
},
{
@@ -160,7 +160,7 @@
"logging.getLogger('matplotlib.font_manager').disabled = True\n",
"\n",
"%matplotlib inline\n",
- "%config InlineBackend.figure_format = 'retina' # perfrom high definition rendering for images and plots\n",
+ "%config InlineBackend.figure_format = 'retina' # perform high definition rendering for images and plots\n",
"plt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/main/nma.mplstyle\")"
]
},
@@ -444,7 +444,7 @@
"\n",
" def __len__(self):\n",
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
- " thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
+    "    thus we are forced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
"\n",
" return self.num_epochs * self.num_tasks\n",
"\n",
@@ -758,7 +758,7 @@
"\n",
" def __len__(self):\n",
" \"\"\"Calculate the length of the dataset. It is obligatory for PyTorch to know in advance how many samples to expect (before training),\n",
- " thus we enforced to icnlude number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
+    "    thus we are forced to include number of epochs and tasks per epoch in `FruitSupplyDataset` parameters.\"\"\"\n",
"\n",
" return self.num_epochs * self.num_tasks\n",
"\n",
@@ -801,8 +801,7 @@
"execution": {}
},
"source": [
- "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_08b01bcf.py)\n",
- "\n"
+ "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_08b01bcf.py)"
]
},
{
@@ -842,7 +841,7 @@
"source": [
"### Coding Exercise 1 Discussion\n",
"\n",
- "1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?\n"
+ "1. Do you think these particular tasks are similar? Do you expect the model to learn their general nature?"
]
},
{
@@ -852,8 +851,7 @@
"execution": {}
},
"source": [
- "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_576c8d87.py)\n",
- "\n"
+ "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_576c8d87.py)"
]
},
{
@@ -1015,8 +1013,7 @@
"execution": {}
},
"source": [
- "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_593cdcd4.py)\n",
- "\n"
+ "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_593cdcd4.py)"
]
},
{
@@ -1277,7 +1274,7 @@
"execution": {}
},
"source": [
- "*Estimated timing to here from start of tutorial: 35 minutes*\n"
+ "*Estimated timing to here from start of tutorial: 35 minutes*"
]
},
{
@@ -1344,8 +1341,7 @@
"execution": {}
},
"source": [
- "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_2753b5eb.py)\n",
- "\n"
+ "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W2D4_Macrolearning/solutions/W2D4_Tutorial3_Solution_2753b5eb.py)"
]
},
{
@@ -1589,7 +1585,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.22"
+ "version": "3.10.19"
}
},
"nbformat": 4,