Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,9 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/

# WandB
wandb

.DS_Store
./src/.DS_Store

Expand Down
1 change: 1 addition & 0 deletions changelog/815.added.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Add modular experiment logging for finetuning with `experiment_logger` parameter, including `WandbLogger` for W&B tracking and a `FinetuningLogger` protocol for custom integrations.
1 change: 1 addition & 0 deletions changelog/862.added.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Add three-tier authentication flow: browser-based login for graphical environments, headless interactive login with clipboard copy for SSH/cluster sessions, and clear step-by-step instructions for fully non-interactive environments.
1 change: 1 addition & 0 deletions changelog/864.added.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Add telemetry funnel for the license acceptance flow to track user success rates and churn across graphical, headless, and non-interactive environments.
3 changes: 3 additions & 0 deletions examples/finetune_classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,9 @@ def main() -> None:
print("--- 2. Initializing and Fitting Model ---\n")

# Instantiate the wrapper with your desired hyperparameters
# To enable WandB logging, pass an experiment_logger:
# .   from tabpfn.finetuning.logging import WandbLogger
# experiment_logger=WandbLogger(project="my-project", run_name="my-run", entity="my-entity")
finetuned_clf = FinetunedTabPFNClassifier(
device="cuda",
epochs=NUM_EPOCHS,
Expand Down
3 changes: 3 additions & 0 deletions examples/finetune_regressor.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,9 @@ def main() -> None:
print("--- 2. Initializing and Fitting Model ---\n")

# Instantiate the wrapper with your desired hyperparameters
# To enable WandB logging, pass an experiment_logger:
# .   from tabpfn.finetuning.logging import WandbLogger
# experiment_logger=WandbLogger(project="my-project", run_name="my-run", entity="my-entity")
finetuned_reg = FinetunedTabPFNRegressor(
device="cuda",
epochs=NUM_EPOCHS,
Expand Down
3 changes: 3 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,9 @@ classifiers = [
]
license = { file = "LICENSE" }

[project.optional-dependencies]
wandb = ["wandb>=0.25.1"]

[project.urls]
documentation = "https://priorlabs.ai/docs"
source = "https://github.com/priorlabs/tabpfn"
Expand Down
54 changes: 54 additions & 0 deletions src/tabpfn/auth_token.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
"""Token cache I/O for TabPFN authentication.

Pure I/O helpers with no dependencies on other TabPFN modules, so they
can be imported from both ``browser_auth`` and ``telemetry`` without
creating a circular import.
"""

from __future__ import annotations

import logging
import os
from pathlib import Path

logger = logging.getLogger(__name__)

# Primary token cache owned by this package (checked before the fallback below).
_CACHE_DIR = Path.home() / ".cache" / "tabpfn"
_TOKEN_FILE = _CACHE_DIR / "auth_token"

# tabpfn-client stores its token here — we read it as a fallback.
_CLIENT_TOKEN_FILE = Path.home() / ".tabpfn" / "token"


def get_cached_token() -> str | None:
    """Return a cached authentication token, or ``None`` if none is found.

    Checks (in priority order):

    1. ``TABPFN_TOKEN`` environment variable
    2. ``~/.cache/tabpfn/auth_token``
    3. ``~/.tabpfn/token`` (tabpfn-client's cache)

    Whitespace-only values count as missing.  Cache files that vanish
    between lookup and read, or that cannot be read (permissions), are
    skipped silently — this is a best-effort fallback chain.
    """
    # The environment variable takes priority and short-circuits the
    # on-disk lookups entirely.
    env_token = os.environ.get("TABPFN_TOKEN")
    if env_token:
        return env_token.strip() or None

    for path in (_TOKEN_FILE, _CLIENT_TOKEN_FILE):
        # EAFP instead of is_file() + read_text(): the pair is race-prone
        # (file may be deleted in between) and an unreadable file should
        # mean "no token here", not a crash during auth bootstrap.
        try:
            token = path.read_text(encoding="utf-8").strip()
        except OSError:
            continue
        if token:
            return token

    return None


def save_token(token: str) -> None:
    """Persist *token* to ``~/.cache/tabpfn/auth_token``.

    The file holds a credential, so it is created with owner-only
    permissions (``0o600``).
    """
    _CACHE_DIR.mkdir(parents=True, exist_ok=True)
    # Create the file with a restrictive mode BEFORE writing the secret so
    # it is never world-readable, even transiently.  touch()'s mode only
    # applies on creation, so also chmod() to tighten a pre-existing file.
    _TOKEN_FILE.touch(mode=0o600)
    _TOKEN_FILE.chmod(0o600)
    _TOKEN_FILE.write_text(token, encoding="utf-8")
    logger.debug("Token saved to %s", _TOKEN_FILE)


def delete_cached_token() -> None:
    """Delete the token cache file; a missing file is not an error."""
    try:
        _TOKEN_FILE.unlink()
    except FileNotFoundError:
        pass
Loading
Loading