diff --git a/.readthedocs.yml b/.readthedocs.yml
index e0e218b..ecbc107 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -10,13 +10,11 @@ build:
   os: ubuntu-24.04
   tools:
     python: "3.12"
-  jobs:
-    post_create_environment:
-      # Install UV
-      # https://docs.astral.sh/uv/getting-started/installation/#__tabbed_1_1
-      - curl -LsSf https://astral.sh/uv/install.sh | sh
-    post_install:
-      - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH uv sync
+
+python:
+  install:
+    - method: uv
+      command: sync
 
 # Build documentation in the "docs/" directory with Sphinx
 sphinx:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3503c80..2543049 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,14 @@
 # Changelog
 
+## v0.2.1
+
+- Fixed a bug where `SequentialBuilder` could not do full batch training.
+- Added Hinge and Huber loss.
+
 ## v0.2.0
 
 - Optimization module has been refactored. This is a breaking change.
 - Added initialization base class, He initialization, and Xavier initialization.
 - Added "full-batch" training by setting `batch_size` model parameter to `None`.
 - Documentation updated to include overview.
 
diff --git a/pyproject.toml b/pyproject.toml
index d54fd51..9dbe76d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "phitodeep"
-version = "0.2.0"
+version = "0.2.1"
 authors = [
     { name = "Ralph Dugue", email = "ralph@phito.dev" }
 ]
diff --git a/src/phitodeep/loss.py b/src/phitodeep/loss.py
index 5a5aec3..5c76d84 100644
--- a/src/phitodeep/loss.py
+++ b/src/phitodeep/loss.py
@@ -54,3 +54,29 @@ def loss_func(self, y_pred, y_true):
 
     def loss_gradient(self, y_pred, y_true):
         return (y_pred - y_true) / (y_pred * (1 - y_pred) + 1e-8)
+
+class Hinge(LossBase):
+    def __init__(self) -> None:
+        super().__init__("Hinge")
+
+    def loss_func(self, y_pred, y_true):
+        return np.maximum(0, 1 - y_pred * y_true)
+
+    def loss_gradient(self, y_pred, y_true):
+        yz = y_pred * y_true
+        return np.where(yz < 1, -y_true, 0)
+
+class Huber(LossBase):
+    def __init__(self, delta=1.0):
+        super().__init__("Huber")
+        self.delta = delta
+
+    def loss_func(self, y_pred, y_true):
+        # Huber loss: quadratic for |error| <= delta, linear beyond,
+        # matching the piecewise derivative in loss_gradient below.
+        error = y_true - y_pred
+        quadratic = error ** 2 / 2
+        linear = self.delta * (np.abs(error) - (self.delta / 2))
+        return np.where(np.abs(error) <= self.delta, quadratic, linear)
+
+    def loss_gradient(self, y_pred, y_true):
+        error = y_pred - y_true
+        return np.where(np.abs(error) > self.delta, self.delta * np.sign(error), error)
diff --git a/src/phitodeep/model.py b/src/phitodeep/model.py
index 5fa1033..7388e5e 100644
--- a/src/phitodeep/model.py
+++ b/src/phitodeep/model.py
@@ -160,7 +160,7 @@ def __init__(self):
         self.layers = []
         self.alpha_value = 1
         self.optimizer_type = o.Adam()
-        self.batch_size = 1
+        self.batch_size = None
         self.epochs_value = 1000
         self.loss_class = ls.MeanSquaredError()