/**
 * Example: Anthropic Prompt Caching - Multi-Message Conversation (Effect AI)
 *
 * This example demonstrates Anthropic prompt caching in a multi-message conversation
 * via OpenRouter using Effect AI.
 *
 * Pattern: User message cache in multi-turn conversation using Effect patterns
 */

import * as OpenRouterClient from '@effect/ai-openrouter/OpenRouterClient';
import * as OpenRouterLanguageModel from '@effect/ai-openrouter/OpenRouterLanguageModel';
import * as LanguageModel from '@effect/ai/LanguageModel';
import * as Prompt from '@effect/ai/Prompt';
import { FetchHttpClient } from '@effect/platform';
import * as BunContext from '@effect/platform-bun/BunContext';
import { LARGE_SYSTEM_PROMPT } from '@openrouter-examples/shared/constants';
import { Console, Effect, Layer, Redacted } from 'effect';

const program = Effect.gen(function* () {
  const testId = Date.now();
  const largeContext = `Test ${testId}: Context:\n\n${LARGE_SYSTEM_PROMPT}`;

  yield* Console.log(
    '╔════════════════════════════════════════════════════════════════════════════╗',
  );
  yield* Console.log(
    '║            Anthropic Prompt Caching - Multi-Message (Effect AI)            ║',
  );
  yield* Console.log(
    '╚════════════════════════════════════════════════════════════════════════════╝',
  );
  yield* Console.log('');
  yield* Console.log('Testing cache_control in multi-turn conversation');
  yield* Console.log('');

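  // How the cache breakpoint works (per OpenRouter's Anthropic prompt-caching
  // passthrough): the `options.openrouter.cacheControl` flag below marks the
  // large text part as a breakpoint, so the prefix up to and including that
  // block is cached; the short trailing part and the later turns stay uncached.
  // Anthropic only caches prefixes above a minimum size (roughly 1024+ tokens,
  // model-dependent), so LARGE_SYSTEM_PROMPT must be large enough to qualify.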
  const makePrompt = () =>
    Prompt.make([
      {
        role: 'user' as const,
        content: [
          {
            type: 'text' as const,
            text: largeContext,
            options: {
              openrouter: {
                cacheControl: { type: 'ephemeral' as const },
              },
            },
          },
          {
            type: 'text' as const,
            text: "Hello, what's your purpose?",
          },
        ],
      },
      {
        role: 'assistant' as const,
        content: "I'm an AI assistant designed to help with various tasks.",
      },
      {
        role: 'user' as const,
        content: 'What programming languages do you know?',
      },
    ]);

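  // For reference, a sketch of the content block the cacheControl option above
  // becomes on the wire (following OpenRouter's documented Anthropic format;
  // the JSON shape is assumed from those docs, not from this codebase):
  //   { "type": "text", "text": "...", "cache_control": { "type": "ephemeral" } }
  //
  // Because `testId` makes the cached prefix unique per run, the first call is
  // a guaranteed cache miss (a cache write) and only the second call can hit.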
  yield* Console.log('First Call (Cache Miss Expected)');
  const response1 = yield* LanguageModel.generateText({
    prompt: makePrompt(),
  });
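  // `usage.cachedInputTokens` is Effect AI's normalized cached-token count; it
  // is undefined when the provider omits usage details, hence the `?? 0`.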
  const cached1 = response1.usage.cachedInputTokens ?? 0;
  yield* Console.log(`  Response: ${response1.text.substring(0, 80)}...`);
  yield* Console.log(`  cached_tokens=${cached1}`);

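  // Short pause before reusing the prefix, presumably to give the cache write
  // time to land. Anthropic's ephemeral cache entries live for about 5 minutes
  // (refreshed on each hit), so one second is well within the window.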
  yield* Effect.sleep('1 second');

  yield* Console.log('\nSecond Call (Cache Hit Expected)');
  const response2 = yield* LanguageModel.generateText({
    prompt: makePrompt(),
  });
  const cached2 = response2.usage.cachedInputTokens ?? 0;
  yield* Console.log(`  Response: ${response2.text.substring(0, 80)}...`);
  yield* Console.log(`  cached_tokens=${cached2}`);

  // Analysis
  yield* Console.log('\n' + '='.repeat(80));
  yield* Console.log('ANALYSIS');
  yield* Console.log('='.repeat(80));
  yield* Console.log(`First call:  cached_tokens=${cached1} (expected: 0)`);
  yield* Console.log(`Second call: cached_tokens=${cached2} (expected: >0)`);

  const success = cached1 === 0 && cached2 > 0;

  if (success) {
    yield* Console.log('\n✓ SUCCESS - Multi-message caching is working correctly');
  } else {
    yield* Console.log('\n✗ FAILURE - Multi-message caching is not working as expected');
  }

  yield* Console.log('='.repeat(80));
});

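// Wire the OpenRouter client over a Fetch-backed HttpClient. OPENROUTER_API_KEY
// must be set in the environment; the non-null assertion means a missing key
// only fails at request time rather than at startup.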
const OpenRouterClientLayer = OpenRouterClient.layer({
  apiKey: Redacted.make(process.env.OPENROUTER_API_KEY!),
}).pipe(Layer.provide(FetchHttpClient.layer));

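// Provide Claude 3.5 Sonnet through OpenRouter. `stream_options.include_usage`
// only affects streamed responses (non-streaming calls report usage anyway);
// it is presumably kept so the same layer surfaces usage when streaming too.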
const OpenRouterModelLayer = OpenRouterLanguageModel.layer({
  model: 'anthropic/claude-3.5-sonnet',
  config: {
    stream_options: { include_usage: true },
  },
}).pipe(Layer.provide(OpenRouterClientLayer));

await program.pipe(
  Effect.provide(OpenRouterModelLayer),
  Effect.provide(BunContext.layer),
  Effect.runPromise,
);

console.log('\n✓ Program completed successfully');
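
// To run (file name assumed for illustration; adjust to the actual path):
//   OPENROUTER_API_KEY=sk-or-... bun run prompt-caching-multi-message.ts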