@@ -60,7 +60,7 @@ const client = new SVECTOR({
 });
 
 const result = await client.conversations.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a helpful AI assistant that explains complex topics clearly.',
   input: 'What is artificial intelligence?',
 });
@@ -81,7 +81,7 @@ const client = new SVECTOR({
 });
 
 const result = await client.conversations.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a helpful AI assistant that explains complex topics clearly.',
   input: 'What is artificial intelligence?',
 });
@@ -176,7 +176,7 @@ const client = new SVECTOR({
 });
 
 const result = await client.conversations.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a helpful assistant that explains things clearly.',
   input: 'What is machine learning?',
   temperature: 0.7,
@@ -191,7 +191,7 @@ console.log(result.output);
 
 ```typescript
 const result = await client.conversations.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a programming tutor that helps students learn coding.',
   input: 'Can you show me an example?',
   context: [
@@ -206,7 +206,7 @@ const result = await client.conversations.create({
 
 ```typescript
 const stream = await client.conversations.createStream({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a creative storyteller.',
   input: 'Tell me a short story about robots and humans.',
   stream: true,
@@ -238,7 +238,7 @@ async function analyzeDocument(filePath, question = "Analyze this document and p
   );
 
   const result = await client.conversations.create({
-    model: 'spec-3-turbo:latest',
+    model: 'spec-3-turbo',
     instructions: 'You are a document analyst. Provide clear, concise analysis.',
     input: `${question}\n\nDocument content:\n${fileResponse.data.content}`,
     temperature: 0.3,
@@ -259,7 +259,7 @@ For full control over the conversation structure, use the Chat Completions API w
 
 ```typescript
 const completion = await client.chat.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   messages: [
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: 'Hello, how are you?' }
@@ -282,7 +282,7 @@ const conversation = [
 ];
 
 const response = await client.chat.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   messages: conversation,
   temperature: 0.5,
 });
@@ -292,7 +292,7 @@ const response = await client.chat.create({
 
 ```typescript
 const response = await client.chat.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   messages: [
     { role: 'developer', content: 'You are an expert code reviewer. Provide detailed feedback.' },
     { role: 'user', content: 'Please review this Python code: def add(a, b): return a + b' }
@@ -308,7 +308,7 @@ Both Conversations and Chat APIs support real-time streaming:
 
 ```typescript
 const stream = await client.conversations.createStream({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a creative writer.',
   input: 'Write a poem about technology.',
   stream: true,
@@ -327,7 +327,7 @@ for await (const event of stream) {
 
 ```typescript
 const stream = await client.chat.createStream({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   messages: [
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: 'Explain quantum computing' }
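Both streaming hunks change only the request side of the call. For context, a minimal consumption loop is sketched below; it assumes each streamed event exposes its incremental text as `event.content`, which is inferred from the surrounding docs' `for await (const event of stream)` loop rather than stated in this diff:

```typescript
// Sketch: consuming a conversations stream.
// Assumption: each event carries incremental text on `event.content`
// (field name not confirmed by this diff).
const stream = await client.conversations.createStream({
  model: 'spec-3-turbo',
  instructions: 'You are a helpful assistant.',
  input: 'Explain quantum computing',
  stream: true,
});

for await (const event of stream) {
  if (event.content) {
    process.stdout.write(event.content);
  }
}
```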
@@ -413,7 +413,7 @@ const doc1 = await client.files.create(fs.readFileSync('manual.pdf'), 'default',
 const doc2 = await client.files.create(fs.readFileSync('faq.docx'), 'default', 'faq.docx');
 
 const answer = await client.conversations.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a helpful assistant that answers questions based on provided documents.',
   input: `What are the key features mentioned in the manual?\n\nDocument 1: ${doc1.data.content}\nDocument 2: ${doc2.data.content}`,
 });
@@ -430,7 +430,7 @@ const result2 = await client.knowledge.addFile('collection-123', 'file-789');
 
 // Use the entire collection in conversations
 const response = await client.conversations.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a research assistant with access to our knowledge base.',
   input: 'Summarize all the information about our products.',
   files: [{ type: 'collection', id: 'collection-123' }],
@@ -451,32 +451,32 @@ console.log(models.models);
 
 **SVECTOR's Foundational Models:**
 
-- **`spec-3-turbo:latest`** - Fast, efficient model for most use cases
-- **`spec-3:latest`** - Standard model with balanced performance
-- **`theta-35-mini:latest`** - Lightweight model for simple tasks
-- **`theta-35:latest`** - Advanced model for complex reasoning
+- **`spec-3-turbo`** - Fast, efficient model for most use cases
+- **`spec-3`** - Standard model with balanced performance
+- **`theta-35-mini`** - Lightweight model for simple tasks
+- **`theta-35`** - Advanced model for complex reasoning
 
 ### Model Selection Guide
 
 ```typescript
 // For quick responses and general tasks
 const quickResponse = await client.conversations.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a helpful assistant.',
   input: 'What time is it?',
 });
 
 // For complex reasoning and analysis
 const complexAnalysis = await client.conversations.create({
-  model: 'theta-35:latest',
+  model: 'theta-35',
   instructions: 'You are an expert data analyst.',
   input: 'Analyze the trends in this quarterly report.',
   files: [{ type: 'file', id: 'report-file-id' }],
 });
 
 // For lightweight tasks
 const simpleTask = await client.conversations.create({
-  model: 'theta-35-mini:latest',
+  model: 'theta-35-mini',
   instructions: 'You help with simple questions.',
   input: 'What is 2 + 2?',
 });
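A side note on this rename: every hunk in the commit edits a hard-coded model string. One way calling code can avoid that is sketched below; the `MODELS` map and its keys are illustrative only, not part of the SVECTOR SDK:

```typescript
// Illustrative sketch: a single source of truth for model ids, so a
// rename like this `:latest` removal touches one place.
// The MODELS map is hypothetical, not an SDK export.
const MODELS = {
  fast: 'spec-3-turbo',    // quick responses, general tasks
  balanced: 'spec-3',      // balanced performance
  light: 'theta-35-mini',  // simple, lightweight tasks
  reasoning: 'theta-35',   // complex reasoning and analysis
} as const;

const result = await client.conversations.create({
  model: MODELS.fast,
  instructions: 'You are a helpful assistant.',
  input: 'What time is it?',
});
```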
@@ -499,7 +499,7 @@ const client = new SVECTOR();
 
 try {
   const response = await client.conversations.create({
-    model: 'spec-3-turbo:latest',
+    model: 'spec-3-turbo',
     instructions: 'You are a helpful assistant.',
     input: 'Hello world',
   });
@@ -552,7 +552,7 @@ const client = new SVECTOR({
 ```typescript
 const response = await client.conversations.create(
   {
-    model: 'spec-3-turbo:latest',
+    model: 'spec-3-turbo',
     instructions: 'You are a helpful assistant.',
     input: 'Hello',
   },
@@ -572,7 +572,7 @@ const response = await client.conversations.create(
 ```typescript
 // Get both response data and raw HTTP response
 const { data, response } = await client.conversations.createWithResponse({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a helpful assistant.',
   input: 'Hello',
 });
@@ -634,7 +634,7 @@ export default {
     });
 
     const result = await client.conversations.create({
-      model: 'spec-3-turbo:latest',
+      model: 'spec-3-turbo',
       instructions: 'You are a helpful assistant.',
       input: 'Hello from Cloudflare Workers!',
     });
@@ -664,7 +664,7 @@ class IntelligentChat {
     this.conversationHistory.push(userMessage);
 
     const result = await this.client.conversations.create({
-      model: 'spec-3-turbo:latest',
+      model: 'spec-3-turbo',
      instructions: systemInstructions || 'You are a helpful and friendly AI assistant.',
      input: userMessage,
      context: this.conversationHistory.slice(-10), // Keep last 10 messages
@@ -681,7 +681,7 @@ class IntelligentChat {
     console.log('Assistant: ');
 
     const stream = await this.client.conversations.createStream({
-      model: 'spec-3-turbo:latest',
+      model: 'spec-3-turbo',
      instructions: 'You are a helpful AI assistant. Be conversational and engaging.',
      input: userMessage,
      context: this.conversationHistory.slice(-6),
@@ -760,7 +760,7 @@ class DocumentAnalyzer {
     };
 
     const result = await this.client.conversations.create({
-      model: 'spec-3-turbo:latest',
+      model: 'spec-3-turbo',
      instructions: instructions[analysisType],
      input: query,
      files: this.uploadedFiles.map(id => ({ type: 'file', id })),
@@ -814,14 +814,14 @@ console.log('💡 Insights:', insights);
 ```typescript
 // Recommended: Clean and simple
 const result = await client.conversations.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   instructions: 'You are a helpful assistant.',
   input: userMessage,
 });
 
 // More complex: Manual role management
 const result = await client.chat.create({
-  model: 'spec-3-turbo:latest',
+  model: 'spec-3-turbo',
   messages: [
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: userMessage }
@@ -843,13 +843,13 @@ try {
 ### 3. Use Appropriate Models
 ```typescript
 // For quick responses
-model: 'spec-3-turbo:latest'
+model: 'spec-3-turbo'
 
 // For complex reasoning
-model: 'theta-35:latest'
+model: 'theta-35'
 
 // For simple tasks
-model: 'theta-35-mini:latest'
+model: 'theta-35-mini'
 ```
 
 ### 4. Optimize File Usage