diff --git a/pyproject.toml b/pyproject.toml
index 242e22eb..81f6e0ce 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "helical"
-version = "1.4.12"
+version = "1.4.13"
 authors = [
   { name="Helical Team", email="support@helical-ai.com" },
 ]
@@ -27,7 +27,7 @@ dependencies = [
     'scikit-learn>=1.5.0',
     'scipy==1.13.1',
     'gitpython==3.1.43',
-    'torch==2.6.0',
+    'torch==2.7.0',
     'accelerate==1.4.0',
     'transformers==4.49.0',
     'loompy==3.0.7',
@@ -46,8 +46,8 @@ dependencies = [
 
 [project.optional-dependencies]
 mamba-ssm = [
-    'mamba-ssm==2.2.4',
-    'causal-conv1d==1.5.0.post8',
+    'mamba-ssm==2.2.5',
+    'causal-conv1d==1.5.1',
 ]
 
 evo-2 = [
diff --git a/requirements_cuda.txt b/requirements_cuda.txt
new file mode 100644
index 00000000..20038bf5
--- /dev/null
+++ b/requirements_cuda.txt
@@ -0,0 +1,6 @@
+--extra-index-url https://pypi.nvidia.com
+# rapids-singlecell[rapids12]
+https://github.com/state-spaces/mamba/releases/download/v2.2.5/mamba_ssm-2.2.5+cu12torch2.7cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
+https://github.com/Dao-AILab/causal-conv1d/releases/download/v1.5.1/causal_conv1d-1.5.1+cu12torch2.7cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
+https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.7cxx11abiFALSE-cp311-cp311-linux_x86_64.whl
+