diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2543049..406c338 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,5 @@
 # Changelog
 
-## v0.2.1
+## v0.2.2
 
-- Fixed a bug where `SequentialBuilder`could not do full batch training.
-- Added Hinge and Huber loss.
+- Crash fix for Huber and Hinge loss; corrected Huber's inverted quadratic/linear region selection.
diff --git a/pyproject.toml b/pyproject.toml
index 9dbe76d..29762fe 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "phitodeep"
-version = "0.2.1"
+version = "0.2.2"
 authors = [
     { name = "Ralph Dugue", email = "ralph@phito.dev" }
 ]
diff --git a/src/phitodeep/loss.py b/src/phitodeep/loss.py
index 5c76d84..7394b87 100644
--- a/src/phitodeep/loss.py
+++ b/src/phitodeep/loss.py
@@ -60,7 +60,7 @@ def __init__(self) -> None:
         super().__init__("Hinge")
 
     def loss_func(self, y_pred, y_true):
-        return np.maximum(0, 1 - y_pred * y_true)
+        return np.mean(np.maximum(0, 1 - y_pred * y_true))
 
     def loss_gradient(self, y_pred, y_true):
         yz = y_pred * y_true
@@ -75,7 +75,7 @@ def loss_func(self, y_pred, y_true):
         error = y_true - y_pred
         L1 = error ** 2 / 2
         L2 = self.delta * (np.abs(error) - (self.delta / 2))
-        return np.where(np.abs(error) > self.delta, L1, L2)
+        return np.mean(np.where(np.abs(error) > self.delta, L2, L1))
 
     def loss_gradient(self, y_pred, y_true):
         error = y_pred - y_true