From fa223a9f40b70bde8b8397284f89cb2e2c784a5b Mon Sep 17 00:00:00 2001 From: jrosenfeld13 Date: Sat, 24 Jan 2026 21:30:57 +0000 Subject: [PATCH] fix: use numpy reshape instead of keras.ops.reshape in SequenceEstimator Fixes #5 - torch backend LSTMRegressor fit fails with CUDA tensors. The issue was that `ops.reshape()` converts numpy arrays to backend tensors. With torch backend + CUDA GPU, this creates CUDA tensors that `numpy.asarray()` cannot convert back, causing the error: "can't convert cuda:0 device type tensor to numpy" The fix uses numpy's native `reshape()` method instead, keeping data as numpy arrays until `model.fit()` where Keras handles the conversion internally. This matches the pattern used by other estimators (dense.py, autoencoder.py, tree.py) which don't use keras.ops for preprocessing. --- .../model_estimators/keras_estimators/sequence.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/centimators/model_estimators/keras_estimators/sequence.py b/src/centimators/model_estimators/keras_estimators/sequence.py index 9cd3540..19a832b 100644 --- a/src/centimators/model_estimators/keras_estimators/sequence.py +++ b/src/centimators/model_estimators/keras_estimators/sequence.py @@ -10,7 +10,7 @@ import numpy from .base import BaseKerasEstimator, _ensure_numpy -from keras import ops, layers, models +from keras import layers, models @dataclass(kw_only=True) @@ -25,15 +25,14 @@ def __post_init__(self): def _reshape(self, X: IntoFrame, validation_data: tuple[Any, Any] | None = None): X = _ensure_numpy(X) - X_reshaped = ops.reshape( - X, (X.shape[0], self.seq_length, self.n_features_per_timestep) + X_reshaped = X.reshape( + (X.shape[0], self.seq_length, self.n_features_per_timestep) ) if validation_data: X_val, y_val = validation_data X_val = _ensure_numpy(X_val) - X_val_reshaped = ops.reshape( - X_val, + X_val_reshaped = X_val.reshape( (X_val.shape[0], self.seq_length, self.n_features_per_timestep), ) validation_data = 
(X_val_reshaped, _ensure_numpy(y_val))