diff --git a/examples/nextjs-realtime/README.md b/examples/nextjs-realtime/README.md
index 34c28119..c3a2d718 100644
--- a/examples/nextjs-realtime/README.md
+++ b/examples/nextjs-realtime/README.md
@@ -47,4 +47,4 @@ This example uses `lucy-restyle-2` for style transformation. You can also use:
- `lucy-restyle` - MirageLSD video restyling model (older)
- `lucy` - Lucy for video editing (add objects, change elements)
-- `lucy-2` - Lucy 2 for video editing with reference image support (better quality)
+- `lucy-2.1` - Lucy 2.1 for video editing with reference image support
diff --git a/examples/react-vite/README.md b/examples/react-vite/README.md
index a5c438fb..40e0224c 100644
--- a/examples/react-vite/README.md
+++ b/examples/react-vite/README.md
@@ -45,4 +45,4 @@ This example uses `lucy-restyle-2` for style transformation. You can also use:
- `lucy-restyle` - MirageLSD video restyling model (older)
- `lucy` - Lucy for video editing (add objects, change elements)
-- `lucy-2` - Lucy 2 for video editing with reference image support (better quality)
+- `lucy-2.1` - Lucy 2.1 for video editing with reference image support
diff --git a/examples/sdk-core/README.md b/examples/sdk-core/README.md
index 6d749b8b..30a3664a 100644
--- a/examples/sdk-core/README.md
+++ b/examples/sdk-core/README.md
@@ -38,7 +38,7 @@ Image models use the synchronous Process API - they return immediately with a Bl
Video models use the asynchronous Queue API - jobs are submitted and polled for completion.
- `video/video-to-video.ts` - Transform existing video with a prompt (`lucy-clip`)
-- `video/video-editing.ts` - Edit video with prompt, reference image, or both (`lucy-2`)
+- `video/video-editing.ts` - Edit video with prompt, reference image, or both (`lucy-2.1`)
- `video/long-form-video-restyle.ts` - Transform existing video with `lucy-restyle-2`
- `video/manual-polling.ts` - Manual job status polling
@@ -50,7 +50,7 @@ See `examples/nextjs-realtime` or `examples/react-vite` for runnable demos.
- `realtime/mirage-basic.ts` - Basic Mirage connection (style transformation)
- `realtime/mirage-v2-basic.ts` - Mirage v2 connection (improved style transformation)
- `realtime/lucy-v2v-720p.ts` - Lucy v2v realtime (video editing - add objects, change elements)
-- `realtime/lucy-2.ts` - Lucy 2 realtime (better quality video editing with reference image support)
+- `realtime/lucy-2.1.ts` - Lucy 2.1 realtime video editing with reference image support
- `realtime/live-avatar.ts` - Live avatar (audio-driven avatar with playAudio or mic input)
- `realtime/connection-events.ts` - Handling connection state and errors
- `realtime/prompt-update.ts` - Updating prompt dynamically
diff --git a/examples/sdk-core/realtime/lucy-2.ts b/examples/sdk-core/realtime/lucy-2.1.ts
similarity index 95%
rename from examples/sdk-core/realtime/lucy-2.ts
rename to examples/sdk-core/realtime/lucy-2.1.ts
--- a/examples/sdk-core/realtime/lucy-2.ts
+++ b/examples/sdk-core/realtime/lucy-2.1.ts
@@ -1,10 +1,10 @@
 /**
  * Browser-only example - requires WebRTC APIs
- * Lucy 2 for realtime video editing with reference image + prompt support
+ * Lucy 2.1 for realtime video editing with reference image + prompt support
  * See examples/nextjs-realtime or examples/react-vite for runnable demos
  */
 
 import { createDecartClient, models } from "@decartai/sdk";
 
 async function main() {
-  const model = models.realtime("lucy-2");
+  const model = models.realtime("lucy-2.1");
diff --git a/examples/sdk-core/video/video-editing.ts b/examples/sdk-core/video/video-editing.ts
index c07242b5..c0f16197 100644
--- a/examples/sdk-core/video/video-editing.ts
+++ b/examples/sdk-core/video/video-editing.ts
@@ -12,13 +12,13 @@ run(async () => {
apiKey,
});
- console.log("Editing video with lucy-2...");
+ console.log("Editing video with lucy-2.1...");
const inputVideo = fs.readFileSync("input.mp4");
// Option 1: Use a text prompt
const result = await client.queue.submitAndPoll({
- model: models.video("lucy-2"),
+ model: models.video("lucy-2.1"),
prompt: "Transform to watercolor painting style with soft brushstrokes",
data: new Blob([inputVideo]),
onStatusChange: (job) => {
@@ -29,7 +29,7 @@ run(async () => {
// Option 2: Use a reference image to guide the edit (with empty prompt)
// const referenceImage = fs.readFileSync("reference.png");
// const result = await client.queue.submitAndPoll({
- // model: models.video("lucy-2"),
+ // model: models.video("lucy-2.1"),
// prompt: "",
// reference_image: new Blob([referenceImage]),
// data: new Blob([inputVideo]),
@@ -41,7 +41,7 @@ run(async () => {
// Option 3: Use both a prompt and a reference image together
// const referenceImage = fs.readFileSync("reference.png");
// const result = await client.queue.submitAndPoll({
- // model: models.video("lucy-2"),
+ // model: models.video("lucy-2.1"),
// prompt: "Apply the style from the reference image",
// reference_image: new Blob([referenceImage]),
// data: new Blob([inputVideo]),
diff --git a/examples/tanstack-streamer/README.md b/examples/tanstack-streamer/README.md
index 08b5b58b..78cf78df 100644
--- a/examples/tanstack-streamer/README.md
+++ b/examples/tanstack-streamer/README.md
@@ -27,7 +27,7 @@ pnpm dev
## Features
-- Real-time webcam video transformation using `lucy-2`
+- Real-time webcam video transformation using `lucy-2.1`
- Producer + subscriber streaming pattern
- Shareable viewer link via subscribe token
- Dynamic style prompt updates
@@ -38,7 +38,7 @@ pnpm dev
| Route | Description |
|-------|-------------|
-| `/` | **Producer** — streams your camera through `lucy-2`, shows styled output, and generates a shareable viewer link |
+| `/` | **Producer** — streams your camera through `lucy-2.1`, shows styled output, and generates a shareable viewer link |
| `/watch?token=...` | **Subscriber** — watches the producer's styled stream (receive-only, no camera needed) |
## How it works
@@ -51,7 +51,7 @@ pnpm dev
## Models
-This example uses `lucy-2` for video editing with reference image support. You can also use:
+This example uses `lucy-2.1` for video editing with reference image support. You can also use:
- `lucy-restyle` - MirageLSD video restyling model (older)
- `lucy-restyle-2` - MirageLSD v2 for style transformation
diff --git a/examples/tanstack-streamer/src/routes/index.tsx b/examples/tanstack-streamer/src/routes/index.tsx
index 7b5882e0..1a2ed0eb 100644
--- a/examples/tanstack-streamer/src/routes/index.tsx
+++ b/examples/tanstack-streamer/src/routes/index.tsx
@@ -19,7 +19,7 @@ function ProducerPage() {
const start = useCallback(async () => {
try {
- const model = models.realtime("lucy-2");
+ const model = models.realtime("lucy-2.1");
setStatus("requesting-camera");
const stream = await navigator.mediaDevices.getUserMedia({
@@ -94,7 +94,7 @@ function ProducerPage() {
Producer
- Streams your camera through lucy-2 and generates a subscribe link for viewers.
+ Streams your camera through lucy-2.1 and generates a subscribe link for viewers.
{status === "idle" && (
diff --git a/examples/ws-signaling-proxy/README.md b/examples/ws-signaling-proxy/README.md
index e786586c..bf13467b 100644
--- a/examples/ws-signaling-proxy/README.md
+++ b/examples/ws-signaling-proxy/README.md
@@ -23,7 +23,7 @@ pnpm dev # starts proxy on ws://localhost:8080
Clients connect to:
```
-ws://localhost:8080/v1/stream?model=lucy_2_rt
+ws://localhost:8080/v1/stream?model=lucy-2.1
```
## Environment variables
diff --git a/examples/ws-signaling-proxy/src/index.ts b/examples/ws-signaling-proxy/src/index.ts
index e1253123..4117ea94 100644
--- a/examples/ws-signaling-proxy/src/index.ts
+++ b/examples/ws-signaling-proxy/src/index.ts
@@ -19,10 +19,10 @@ const server = createServer((_req, res) => {
const wss = new WebSocketServer({ server });
wss.on("connection", (clientWs: WebSocket, req) => {
- // Accept Decart-style URLs: /v1/stream?api_key=...&model=lucy_2_rt
+ // Accept Decart-style URLs: /v1/stream?api_key=...&model=lucy-2.1
// The proxy ignores api_key from the client and uses its own.
const url = new URL(req.url ?? "/", `http://${req.headers.host}`);
- const model = url.searchParams.get("model") ?? "lucy_2_rt";
+ const model = url.searchParams.get("model") ?? "lucy-2.1";
console.log(`[proxy] client connected from ${req.url} (model=${model})`);
@@ -37,7 +37,7 @@ wss.on("connection", (clientWs: WebSocket, req) => {
server.listen(PORT, () => {
console.log(`[proxy] listening on ws://localhost:${PORT}`);
- console.log(`[proxy] connect with: ws://localhost:${PORT}/?model=lucy_2_rt`);
+ console.log(`[proxy] connect with: ws://localhost:${PORT}/?model=lucy-2.1`);
});
const shutdown = () => {
diff --git a/examples/ws-signaling-proxy/test/e2e.ts b/examples/ws-signaling-proxy/test/e2e.ts
index 610e6fc0..f08a4f8e 100644
--- a/examples/ws-signaling-proxy/test/e2e.ts
+++ b/examples/ws-signaling-proxy/test/e2e.ts
@@ -37,7 +37,7 @@ test("e2e: signaling flow through proxy", { timeout: 30_000 }, async (t) => {
});
// Connect client
- const ws = new WebSocket(`ws://localhost:${port}/v1/stream?model=lucy_2_rt`);
+ const ws = new WebSocket(`ws://localhost:${port}/v1/stream?model=lucy-2.1`);
await new Promise((r, e) => {
ws.on("open", r);
ws.on("error", e);
diff --git a/packages/sdk/AGENTS.md b/packages/sdk/AGENTS.md
index 26b40a1d..d4f5bb53 100644
--- a/packages/sdk/AGENTS.md
+++ b/packages/sdk/AGENTS.md
@@ -66,13 +66,13 @@
- `mirage` - Real-time video restyling model
- `mirage_v2` - Real-time video restyling model (v2)
- `lucy_v2v_720p_rt` - Real-time video editing model
-- `lucy_2_rt` - Real-time video editing model (supports reference image)
+- `lucy-2.1` - Real-time video editing model (supports reference image)
### Video Models (Queue API)
- `lucy-pro-v2v` - video-to-video (Pro quality)
- `lucy-motion` - motion-based image-to-video (trajectory-guided animation)
- `lucy-restyle-v2v` - video restyling (video-to-video)
-- `lucy-2-v2v` - video-to-video editing (long-form, 720p)
+- `lucy-2.1` - video-to-video editing (long-form, 720p)
### Image Models (Process API)
- `lucy-pro-i2i` - image-to-image (Pro quality)
diff --git a/packages/sdk/index.html b/packages/sdk/index.html
index 2d5f9488..4791bef4 100644
--- a/packages/sdk/index.html
+++ b/packages/sdk/index.html
@@ -238,7 +238,7 @@