30 changes: 30 additions & 0 deletions .github/workflows/ci.yml
@@ -0,0 +1,30 @@
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  ci:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        node-version: [18.x, 20.x]
    steps:
      - uses: actions/checkout@v4

      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v4
        with:
          node-version: ${{ matrix.node-version }}
          cache: npm

      - run: npm ci

      - run: npm run lint

      - run: npm test

      - run: npm run build
136 changes: 102 additions & 34 deletions README.md
@@ -1,55 +1,123 @@
# OpenGradient TypeScript SDK

A TypeScript/JavaScript SDK for performing LLM chat and completion via OpenGradient's TEE (Trusted Execution Environment) with [x402](https://x402.org) payment protocol support.

## Installation

```bash
npm install opengradient-sdk
```

## Requirements

- Node.js 18+ (for global `fetch`)
- A funded EVM wallet on Base (settlement happens in OPG on the Base network via [x402](https://x402.org))
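If you need the address to fund, you can derive it from the same private key. A minimal sketch, assuming [viem](https://viem.sh) is installed (viem is not a dependency of this SDK; the SDK itself only needs the raw key):

```typescript
import { privateKeyToAccount } from "viem/accounts";

// viem expects a 0x-prefixed hex key; add the prefix if yours lacks it.
const key = process.env.PRIVATE_KEY!;
const account = privateKeyToAccount(
  (key.startsWith("0x") ? key : `0x${key}`) as `0x${string}`,
);
console.log("Fund this address on Base:", account.address);
```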

## Quick Start

```typescript
import { Client, TEE_LLM } from "opengradient-sdk";

// Initialize the client
const client = new Client({
  privateKey: process.env.PRIVATE_KEY!, // EVM private key (with or without 0x prefix)
});

// Non-streaming chat
const result = await client.llm.chat({
  model: TEE_LLM.CLAUDE_3_5_HAIKU,
  messages: [{ role: "user", content: "Hello!" }],
  maxTokens: 100,
});
console.log(result.chatOutput?.content);
console.log("payment hash:", result.paymentHash);
```

### Streaming chat

```typescript
import { Client, TEE_LLM } from "opengradient-sdk";

const client = new Client({ privateKey: process.env.PRIVATE_KEY! });

const stream = client.llm.chat({
  model: TEE_LLM.CLAUDE_3_5_HAIKU,
  messages: [{ role: "user", content: "Stream me a haiku." }],
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta.content ?? "");
}
```

### Tool / function calling

```typescript
const result = await client.llm.chat({
  model: TEE_LLM.GPT_4O,
  messages: [{ role: "user", content: "What's the weather in Paris?" }],
  tools: [
    {
      type: "function",
      function: {
        name: "get_weather",
        description: "Get current weather for a city",
        parameters: {
          type: "object",
          properties: { city: { type: "string" } },
          required: ["city"],
        },
      },
    },
  ],
});
console.log(result.chatOutput?.tool_calls);
```
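When the model returns a tool call, you execute the function yourself and send the result back in a follow-up turn. A sketch of that round trip, assuming the OpenAI-style `role: "tool"` message shape that the SDK's chat format mirrors (check the SDK's exported types for the exact fields; `getWeather` is a hypothetical stand-in for your own implementation):

```typescript
const call = result.chatOutput?.tool_calls?.[0];
if (call) {
  // Arguments arrive as a JSON string, e.g. '{"city":"Paris"}'.
  const args = JSON.parse(call.function.arguments);
  const weather = await getWeather(args.city); // hypothetical, user-supplied

  const followUp = await client.llm.chat({
    model: TEE_LLM.GPT_4O,
    messages: [
      { role: "user", content: "What's the weather in Paris?" },
      { role: "assistant", content: "", tool_calls: [call] },
      { role: "tool", tool_call_id: call.id, content: JSON.stringify(weather) },
    ],
  });
  console.log(followUp.chatOutput?.content);
}
```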

### Completion

```typescript
const result = await client.llm.completion({
  model: TEE_LLM.CLAUDE_3_5_HAIKU,
  prompt: "The capital of France is",
  maxTokens: 20,
});
console.log(result.completionOutput);
```

## x402 Settlement Modes

```typescript
import { X402SettlementMode } from "opengradient-sdk";

await client.llm.chat({
  model: TEE_LLM.GPT_4O,
  messages: [{ role: "user", content: "Hi" }],
  x402SettlementMode: X402SettlementMode.SETTLE_BATCH, // default
});
```

- `SETTLE` — records input/output hashes only (most privacy-preserving).
- `SETTLE_METADATA` — records full model info, complete input/output, and metadata.
- `SETTLE_BATCH` — aggregates multiple inferences into a single on-chain settlement (most cost-efficient, default).
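For example, a privacy-sensitive call can opt into `SETTLE` so that only hashes reach the chain:

```typescript
// Only input/output hashes are recorded on-chain in SETTLE mode.
const result = await client.llm.chat({
  model: TEE_LLM.CLAUDE_3_5_HAIKU,
  messages: [{ role: "user", content: "Summarize my notes privately." }],
  x402SettlementMode: X402SettlementMode.SETTLE,
});
```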

## Development

```bash
npm install # install deps
npm run lint # ESLint over src/
npm test # Jest unit tests
npm run build # tsc → dist/
npm run format # prettier --write
```

CI runs `lint`, `test`, and `build` on Node 18 and 20 — see `.github/workflows/ci.yml`.

## Available models

See `TEE_LLM` for the supported models, including:

- `TEE_LLM.GPT_4O`, `TEE_LLM.GPT_4_1_2025_04_14`, `TEE_LLM.O4_MINI`
- `TEE_LLM.CLAUDE_3_5_HAIKU`, `TEE_LLM.CLAUDE_3_7_SONNET`, `TEE_LLM.CLAUDE_4_0_SONNET`
- `TEE_LLM.GEMINI_2_0_FLASH`, `TEE_LLM.GEMINI_2_5_FLASH`, `TEE_LLM.GEMINI_2_5_FLASH_LITE`, `TEE_LLM.GEMINI_2_5_PRO`
- `TEE_LLM.GROK_2_1212`, `TEE_LLM.GROK_2_VISION_LATEST`, `TEE_LLM.GROK_3_BETA`, `TEE_LLM.GROK_3_MINI_BETA`, `TEE_LLM.GROK_4_1_FAST`, `TEE_LLM.GROK_4_1_FAST_NON_REASONING`
29 changes: 29 additions & 0 deletions eslint.config.js
@@ -0,0 +1,29 @@
const tseslint = require("@typescript-eslint/eslint-plugin");
const tsparser = require("@typescript-eslint/parser");

module.exports = [
  {
    ignores: ["dist/**", "node_modules/**", "examples/**"],
  },
  {
    files: ["src/**/*.ts"],
    languageOptions: {
      parser: tsparser,
      parserOptions: {
        ecmaVersion: 2020,
        sourceType: "module",
      },
    },
    plugins: {
      "@typescript-eslint": tseslint,
    },
    rules: {
      ...tseslint.configs.recommended.rules,
      "@typescript-eslint/no-unused-vars": [
        "error",
        { argsIgnorePattern: "^_", varsIgnorePattern: "^_" },
      ],
      "@typescript-eslint/no-explicit-any": "off",
    },
  },
];
35 changes: 35 additions & 0 deletions examples/llm_chat.ts
@@ -0,0 +1,35 @@
// Run a non-streaming chat completion against a TEE-hosted LLM through
// OpenGradient with x402 payments.
//
// Run with: OG_PRIVATE_KEY=0x... npx ts-node examples/llm_chat.ts

import { Client, TEE_LLM, X402SettlementMode } from "../src";

async function main() {
  const privateKey = process.env.OG_PRIVATE_KEY;
  if (!privateKey) {
    throw new Error("OG_PRIVATE_KEY environment variable is not set");
  }

  const client = new Client({ privateKey });

  const messages = [
    { role: "user", content: "What is Python?" },
    { role: "assistant", content: "Python is a high-level programming language." },
    { role: "user", content: "What makes it good for beginners?" },
  ];

  const result = await client.llm.chat({
    model: TEE_LLM.GPT_4_1_2025_04_14,
    messages,
    x402SettlementMode: X402SettlementMode.SETTLE_METADATA,
  });

  console.log(`Response: ${result.chatOutput?.content}`);
  console.log(`Payment hash: ${result.paymentHash}`);
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
38 changes: 38 additions & 0 deletions examples/llm_chat_stream.ts
@@ -0,0 +1,38 @@
// Stream a chat completion from a TEE-hosted LLM through OpenGradient
// with x402 payments.
//
// Run with: OG_PRIVATE_KEY=0x... npx ts-node examples/llm_chat_stream.ts

import { Client, TEE_LLM, X402SettlementMode } from "../src";

async function main() {
  const privateKey = process.env.OG_PRIVATE_KEY;
  if (!privateKey) {
    throw new Error("OG_PRIVATE_KEY environment variable is not set");
  }

  const client = new Client({ privateKey });

  const messages = [
    { role: "user", content: "Describe the seven OSI network layers." },
  ];

  const stream = client.llm.chat({
    model: TEE_LLM.GPT_4_1_2025_04_14,
    messages,
    x402SettlementMode: X402SettlementMode.SETTLE_METADATA,
    stream: true,
    maxTokens: 1000,
  });

  for await (const chunk of stream) {
    const content = chunk.choices[0]?.delta.content;
    if (content) process.stdout.write(content);
  }
  process.stdout.write("\n");
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});