Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,7 @@ Market Data -> Composite Fair Value -> Dynamic Spread -> Inventory Skew -> Multi
| Google Gemini | `gemini-2.0-flash` (default), `gemini-2.5-pro` | `GEMINI_API_KEY` |
| Anthropic Claude | `claude-haiku-4-5-20251001`, `claude-sonnet-4-20250514` | `ANTHROPIC_API_KEY` |
| OpenAI | `gpt-4o`, `gpt-4o-mini`, `o3-mini` | `OPENAI_API_KEY` |
| Venice | `claude-opus-4-6`, `kimi-k2-5`, `openai-gpt-54-pro`, `zai-org-glm-5` | `VENICE_API_KEY` |
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Issue (Medium): Verify model names

openai-gpt-54-pro and zai-org-glm-5 don't appear to be real model identifiers. Are these Venice-specific proxy names? If so, please link to Venice's model catalog so we can verify. We don't want to advertise model names that don't resolve.

Also worth clarifying that claude-opus-4-6 here is Venice-proxied, not direct Anthropic API access.


---

Expand Down Expand Up @@ -524,7 +525,7 @@ One-click deploy of a full OpenClaw agent that uses our CLI as the tool backend.
| Variable | Required | Default | Description |
|----------|----------|---------|-------------|
| `HL_PRIVATE_KEY` | Yes | — | Your Hyperliquid private key |
| `AI_PROVIDER` | Yes | — | `anthropic`, `openai`, `gemini`, or `openrouter` |
| `AI_PROVIDER` | Yes | — | `anthropic`, `openai`, `gemini`, `openrouter`, or `venice` |
| `AI_API_KEY` | Yes | — | API key for the chosen AI provider |
| `TELEGRAM_BOT_TOKEN` | Yes | — | Telegram bot token (from @BotFather) |
| `TELEGRAM_USERNAME` | Yes | — | Your Telegram @username |
Expand Down Expand Up @@ -651,6 +652,7 @@ hl run my_strategies.my_strategy:MyStrategy -i ETH-PERP --tick 10
| `ANTHROPIC_API_KEY` | No | For `claude_agent` with Claude |
| `GEMINI_API_KEY` | No | For `claude_agent` with Gemini |
| `OPENAI_API_KEY` | No | For `claude_agent` with OpenAI |
| `VENICE_API_KEY` | No | For `claude_agent` with Venice |

\* Either `HL_PRIVATE_KEY` or a keystore with `HL_KEYSTORE_PASSWORD` is required.

Expand Down
5 changes: 4 additions & 1 deletion deploy/openclaw-railway/.env.example
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
# Required
HL_PRIVATE_KEY=0x... # Hyperliquid private key
AI_PROVIDER=anthropic # anthropic, openai, gemini, openrouter
AI_PROVIDER=anthropic # anthropic, openai, gemini, openrouter, venice
AI_API_KEY=sk-ant-... # API key for chosen provider
TELEGRAM_BOT_TOKEN=123456789:AA... # From @BotFather
TELEGRAM_USERNAME=your_username # Your Telegram @username

# Optional
HL_TESTNET=true # true (default) or false for mainnet
SETUP_PASSWORD= # Password for control UI (recommended)

# Venice-specific (for claude_agent strategy)
# VENICE_API_KEY=... # Venice API key (if using venice: models directly)
3 changes: 3 additions & 0 deletions deploy/openclaw-railway/src/bootstrap.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ const PROVIDER_MAP = {
gemini: { key: "gemini-api-key", provider: "google" },
google: { key: "gemini-api-key", provider: "google" },
openrouter: { key: "apiKey", provider: "openrouter" },
venice: { key: "openai-api-key", provider: "openai", baseUrl: "https://api.venice.ai/api/v1" },
};

export async function bootstrap() {
Expand Down Expand Up @@ -88,6 +89,8 @@ function buildConfig() {
// AI provider
provider: providerInfo.provider,
[providerInfo.key]: aiKey,
// Venice uses OpenAI-compatible API with custom base URL
...(providerInfo.baseUrl && { "openai-base-url": providerInfo.baseUrl }),

// MCP servers — our trading CLI is the primary tool provider
mcpServers: {
Expand Down
88 changes: 66 additions & 22 deletions strategies/claude_agent.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,26 @@
"""LLM-powered trading agent — supports Claude and Gemini.
"""LLM-powered trading agent — supports Gemini, Claude, OpenAI, and Venice.

Uses structured tool/function calling to make trading decisions each tick.
The LLM receives market data, position state, and risk context, then decides
to place orders or hold.

Usage:
# Gemini (default — fast, free tier available)
hl run claude_agent --mock --max-ticks 5 --tick 15
hl run claude_agent -i ETH-PERP --tick 15
# Gemini (default)
export GEMINI_API_KEY=...
hl run claude_agent -i ETH-PERP --tick 15 --model gemini-2.0-flash

# Claude
hl run claude_agent -i ETH-PERP --tick 15 --model claude-haiku-4-5-20251001
export ANTHROPIC_API_KEY=...
hl run claude_agent -i ETH-PERP --tick 15 --model claude-sonnet-4-20250514

# Gemini Flash
hl run claude_agent -i ETH-PERP --tick 15 --model gemini-2.0-flash
# OpenAI
export OPENAI_API_KEY=...
hl run claude_agent -i ETH-PERP --tick 15 --model gpt-4o

# Venice
export VENICE_API_KEY=...
hl run claude_agent -i ETH-PERP --tick 15 --model claude-opus-4-6
hl run claude_agent -i ETH-PERP --tick 15 --model kimi-k2-5
"""
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: The Venice examples should use an explicit prefix (e.g. venice:claude-opus-4-6) to match the detection fix above. Bare claude-opus-4-6 would route to Anthropic without the prefix.

from __future__ import annotations

Expand Down Expand Up @@ -102,15 +109,24 @@


def _detect_provider(model: str) -> str:
"""Detect LLM provider from model name."""
"""Detect LLM provider from model name.

Venice takes priority if VENICE_API_KEY is set — it's OpenAI-compatible
and routes any model name to the appropriate backend.
"""
# Venice: if API key is set, use it (handles all models)
if os.environ.get("VENICE_API_KEY"):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Issue (High): Venice hijacks all providers when VENICE_API_KEY is set

This makes Venice take priority over every other provider regardless of the user's intent. If someone has both ANTHROPIC_API_KEY and VENICE_API_KEY set (common in dev environments), running --model claude-sonnet-4-20250514 silently routes to Venice instead of Anthropic directly.

Venice should only activate when explicitly chosen — not by env var sniffing.

Suggested fix:

def _detect_provider(model: str) -> str:
    if model.startswith("venice:"):
        return "venice"
    if model.startswith("gemini"):
        return "gemini"
    if model.startswith("claude"):
        return "claude"
    if model.startswith("gpt") or model.startswith("o1") or model.startswith("o3") or model.startswith("o4"):
        return "openai"
    # Venice as fallback for unknown models when key is available
    if os.environ.get("VENICE_API_KEY"):
        return "venice"
    return "gemini"

return "venice"

# Direct provider access (requires their own API keys)
if model.startswith("gemini"):
return "gemini"
if model.startswith("claude"):
return "claude"
if model.startswith("gpt") or model.startswith("o1") or model.startswith("o3") or model.startswith("o4"):
return "openai"
# Default to gemini
return "gemini"

return "gemini" # default


# ---------------------------------------------------------------------------
Expand Down Expand Up @@ -148,10 +164,10 @@ def __init__(
self._total_output_tokens = 0
self._api_calls = 0

# Lazy-init clients
# Lazy-init clients (keyed by base_url for OpenAI-compatible APIs)
self._anthropic_client = None
self._gemini_client = None
self._openai_client = None
self._openai_clients = {} # base_url -> client

# ------------------------------------------------------------------
# Client initialization
Expand Down Expand Up @@ -187,19 +203,36 @@ def _get_gemini_client(self):
self._gemini_client = genai.Client(api_key=api_key)
return self._gemini_client

def _get_openai_client(self):
if self._openai_client is None:
def _get_openai_compatible_client(self, base_url: str = None, api_key_env: str = "OPENAI_API_KEY"):
"""Get OpenAI-compatible client. Caches by base_url."""
cache_key = base_url or "openai"
if cache_key not in self._openai_clients:
try:
import openai
except ImportError:
raise ImportError(
"openai package required. Install: pip3 install openai"
)
api_key = os.environ.get("OPENAI_API_KEY")
api_key = os.environ.get(api_key_env)
if not api_key:
raise ValueError("OPENAI_API_KEY environment variable required")
self._openai_client = openai.OpenAI(api_key=api_key)
return self._openai_client
raise ValueError(f"{api_key_env} environment variable required")
self._openai_clients[cache_key] = openai.OpenAI(
api_key=api_key,
base_url=base_url
)
return self._openai_clients[cache_key]

def _get_venice_client(self):
    """Return an OpenAI-compatible client pointed at the Venice API endpoint."""
    venice_base = "https://api.venice.ai/api/v1"
    return self._get_openai_compatible_client(
        base_url=venice_base,
        api_key_env="VENICE_API_KEY",
    )

def _get_openai_client(self):
    """Return the OpenAI client, honoring an OPENAI_BASE_URL override if set."""
    override = os.environ.get("OPENAI_BASE_URL")
    return self._get_openai_compatible_client(
        base_url=override,
        api_key_env="OPENAI_API_KEY",
    )

# ------------------------------------------------------------------
# Build prompt
Expand Down Expand Up @@ -398,10 +431,13 @@ def _build_openai_tools(self) -> List[Dict]:
for t in TOOLS
]

def _call_openai(self, user_msg: str, snapshot: MarketSnapshot) -> List[StrategyDecision]:
def _call_openai_compatible(
self, user_msg: str, snapshot: MarketSnapshot, provider: str = "OpenAI"
) -> List[StrategyDecision]:
"""Unified handler for OpenAI-compatible APIs (OpenAI, Venice, etc.)."""
import json as _json

client = self._get_openai_client()
client = self._get_venice_client() if provider == "Venice" else self._get_openai_client()
t0 = time.time()

response = client.chat.completions.create(
Expand All @@ -422,8 +458,8 @@ def _call_openai(self, user_msg: str, snapshot: MarketSnapshot) -> List[Strategy
self._total_input_tokens += usage.prompt_tokens or 0
self._total_output_tokens += usage.completion_tokens or 0
log.info(
"OpenAI: %dms, %d/%d tokens (total: %d calls, %d/%d tokens)",
elapsed_ms, usage.prompt_tokens or 0, usage.completion_tokens or 0,
"%s: %dms, %d/%d tokens (total: %d calls, %d/%d tokens)",
provider, elapsed_ms, usage.prompt_tokens or 0, usage.completion_tokens or 0,
self._api_calls, self._total_input_tokens, self._total_output_tokens,
)

Expand All @@ -435,6 +471,12 @@ def _call_openai(self, user_msg: str, snapshot: MarketSnapshot) -> List[Strategy
decisions.extend(self._parse_tool_call(tc.function.name, args, snapshot))
return decisions

def _call_openai(self, user_msg: str, snapshot: MarketSnapshot) -> List[StrategyDecision]:
    # Thin wrapper: routes OpenAI requests through the shared
    # OpenAI-compatible handler with the "OpenAI" provider label.
    return self._call_openai_compatible(user_msg, snapshot, provider="OpenAI")

def _call_venice(self, user_msg: str, snapshot: MarketSnapshot) -> List[StrategyDecision]:
    # Thin wrapper: Venice is OpenAI-compatible, so reuse the shared
    # handler; the "Venice" label selects the Venice client and log tag.
    return self._call_openai_compatible(user_msg, snapshot, provider="Venice")

# ------------------------------------------------------------------
# Shared tool call parsing
# ------------------------------------------------------------------
Expand Down Expand Up @@ -506,6 +548,8 @@ def on_tick(
decisions = self._call_claude(user_msg, snapshot)
elif provider == "openai":
decisions = self._call_openai(user_msg, snapshot)
elif provider == "venice":
decisions = self._call_venice(user_msg, snapshot)
else:
decisions = self._call_gemini(user_msg, snapshot)

Expand Down