From 7f40769d130fbbcf9df681e1419f9051352dd6f6 Mon Sep 17 00:00:00 2001 From: OpenRouter Team Date: Fri, 20 Feb 2026 19:17:24 +0000 Subject: [PATCH] Commits included in this export: - bb413e979a46ef67c2f266743f7ebd798af42afb - b60e92d5dcec83f31986164f51c010d1728b2b17 - 45687f423707f5838461585c568a024a19eb2f06 - 3103d3ca68b6e42ccdb6136588ec2b6edc8dcab0 - fd9f991606027cd86116317c71f8eb7b4e2aa3eb - 0f312019161edd76c1e5923d142c788d22f096d5 - b94b8ad48e2dc7c2379391093712e3c81364fec2 - 5dbab2094bc744db4a90b3097520d897e5d0d634 - 4c47d1af1735db18a1c1a7f463e801c7f71ad4e5 - 6ddaaa6989d725bbe966e45f7aed0c1bd72f9153 - 4779741b6ba710b1299612b82e8544516bd9dd6a - 22083fcc8f58884d8212d6f3314b231c4ddd21cd - a925a620f97196d509e073a163caac053bd82316 - 5c05d8829784601ffd151acef1a6978c266f913d - f89c632902372adc214d3591e1fbcaf41729c0c3 - 6f4fff535fbf4274255c255ae23620b8e00e614f - ef358e96cb01e469b77d44683603a602b0813116 - e6299b4ebfe5c2eed6c598771f0fd99f84555401 - 24d3f0ee1c2aa19a62f7902836034080b9246813 - 68922847c2969b773887310eb7aa9a814e199c5a - 729ee2338c39df22461526153e7f4ba868fd498e - e1074ff86fccf5e48406b7595298873106d3cc5a - 8288fa51fbc499fc8689d79853d00cadc3b7d823 - 123bbdaf841d299162a82ee2573adfbb70a45b8f - 70dbc74eeb11775c0cb59981b350251c8212da38 - 4d48e4f6bb936e785787314a965298da6b78fea0 - 718729a573ee98893a4408d5a52ab4686661fe54 - 12cba68b6d64b8974a2a6a81c518a789489017b7 - b1edfb513a2f1f1056f7a0cf9d39d9c45272acc0 - b867adcde6b64f3f2a11c4626254b39098e4b0b4 - 21912c097b4066d109766c6b4492826595cd57c5 - bc1ed85adc976a4634ca8a41b82e4fc2c39a864f - 8d10b121356a215a589092326b8f604293147de1 - 7828708558cfa789b51d6d3ee35fc0e893dea32d - d145c89c941937a312ed1357590d2bfda9a3603d - 12f05b5dee397ead47cfcd545a7a2e3c7ff57524 - dd28f75a19a015809797c7abf3f8758f7c37262e - b8b107ca04e2e4dc39dd8c97c285dff665de6f05 - c96e569781e07ab44172d9c69940ec17bafd81f4 - c9d5a90e3c2b5077a2bdd49773f1a46d1a5a79c7 - cdf5dfb908ef3afe57e29b9c4e1bedcdd31c7c49 - 58b08f87116ceab9076de2fdc0a4c74c919b44c4 - 
2592bdc8b4b7c695a3d689390aedc9a42914acc8 - 95dc994c2fcb701f3a8d8ffe410aa2e91ea8120b - a552a86da33c602a8f0eb8e40724d9154b63cac9 - 7b49f98f8599508b9aca6921a48d446ca04d41ce - 2841134445f6a925c9072b6eccb1be9adcf3797c - 5d59d49f32086b2f4d38961e4d11744bdbda72c1 - edeedb0ad02de9951408298c66a6a6ccbd683f7f - 037329dc548fbdefbfcc1d84d85c67d227afce0d - 67ac9ff744a5c802380876ebf56b939625a44e13 - 74c003f4ecf72520cb9097d38495f1f9839379a5 - 5140ef0b1fdbb58e8aa1d20ca08bdd948f4dadaf - a3d789c36cdaf821e400a6fe9eefbe1a82bc5819 - 2a87802bcec26e7e3cea535c680f4b71dbb10b15 - 16e9a45c7816adf1e33efce1e384eb9879dc2b28 - 7358fd8e8c7476dc6b75b300084e225ece79bceb - 8199c8abea6e6acefa8c37a1336a56c7fcdd3a07 - d17e8505a8d46baf3192a92a530986bbce7014b2 - 7ab5200031221f71aaff2503c9af1c706ccb7634 - 0f4cd4840b46f21e843d0d40bdaf587ba0e5b2cd - b19e35320304f4313b4fa27c5dc8d348e4a5a5c2 - 9994ceca26eab71d3b22a5de2f7fd530f21a071d - ee2da3156a55471cf3d3c66d4c00944eebdd898d - 0a4e2198ab076e9b8530bdf1cf481367b1fed971 - a5804a88b7417d2c18a741de8683fa3f59f29028 - bf787ed35487961a465347bde6d952f5b985ae53 - 6ece2ee3b6782c672d487f857d4b9a5255e97d27 - 57ff7bba84d38a88e31eeef75fa3d3de34f55af3 - afa7f5af2d4c9cbdf18bbafdb7ce70b882c02d10 - aae3911dcc666a818f9b58f3ca47a25de252c6da - 449925fbabb1b1c3eac565f0ee99176402e5762c - 491e5ac99a7a4b3f76e2a5e2dff3b833e3a1fbfb - e9f17b7f0df89e2159f37631fd0a890c0510401a - a28de4933b3212eb4c31601d07b0e9120f33aaa1 - 5657366b127cb27f2ae49dedf4cd776bb4c0bb5c - 18216342b2bd775c35b0b6619a24801224445de2 - 6590c4d866f9ff23fff051574e609c09ab8ddb12 - f0f552336ce1215d2ec768c0bdca5028a968680c - 70b651e298470af96f40625c6094cd4aae008483 - b395cc4176ff5decc52636ec18b6db8fe89d4ed1 - d0b664a88597f79c5b0d5f19a2179b6d8fabddbc - d3862269e3fa6e044cf7f3a307f5d8f07bff863f - 6c895dcb22935a2a164767c7d09840123b6fae8e - 86f3da290f484de3b6620c65c408d2a8437c0e67 - 21a4f580479b3b0fa2b4381802dbaf90eb005d2b - fb941b1ca6ce407fd344d1ba1efe0d443f0dddd0 - 20c2d10632637680294895ac242bca1805b52b7a - 
20425ada8aa09714f5d46e151d4df55e950a34a1 - 716848405ea7296b735f8ed026d4da07b49a651f - 638eec62dcc7c000010a40962e61ec35e7e5d3a0 - 8ef06dcfce6842466412935d935a57ff57b10d76 - 72c52708b8f989005ca8837e5491462798a51e46 - ebaac50f2143ae5a6f3025e997aa1ad70d110a1f - ace3723b9883c46d2d7c33c21146a59d403093ef - 79cf17a40fed709c93b3f7e35d7065e76a355996 - 66fa5870420336037921914fd69e3a01e14223e4 - 95d711fd9ac03bd2831208b12c3841640464c093 - 779c34de0724d6b72150b39cd6193dd84625ddb8 - d91887e755fbb466326ed27b74a1dcd66f64639d (And 51 more changes) GitOrigin-RevId: bb413e979a46ef67c2f266743f7ebd798af42afb --- .speakeasy/gen.lock | 1111 ++-- .speakeasy/gen.yaml | 2 +- .speakeasy/in.openapi.yaml | 4463 +++++++++-------- .speakeasy/out.openapi.yaml | 4438 ++++++++-------- .speakeasy/workflow.lock | 8 +- docs/models/assistantmessage.md | 22 +- docs/models/assistantmessagecontent.md | 14 +- docs/models/assistantmessageimages.md | 19 + docs/models/assistantmessageimagesimageurl.md | 17 + docs/models/assistantmessageimageurl.md | 17 - docs/models/assistantmessagerole.md | 15 + docs/models/{sortenum.md => by.md} | 8 +- docs/models/chatcompletionfinishreason.md | 2 +- docs/models/chaterrorerror.md | 21 - docs/models/chatgenerationparams.md | 72 +- docs/models/chatgenerationparamsby.md | 19 + docs/models/chatgenerationparamsignore.md | 17 + docs/models/chatgenerationparamsmaxprice.md | 21 + ...schema5.md => chatgenerationparamsonly.md} | 6 +- docs/models/chatgenerationparamsorder.md | 17 + docs/models/chatgenerationparamspartition.md | 19 + ...> chatgenerationparamspluginautorouter.md} | 6 +- ...> chatgenerationparamspluginfileparser.md} | 6 +- ...> chatgenerationparamspluginmoderation.md} | 6 +- ...tgenerationparamspluginresponsehealing.md} | 6 +- .../models/chatgenerationparamspluginunion.md | 45 + ...eb.md => chatgenerationparamspluginweb.md} | 6 +- docs/models/chatgenerationparamsprovider.md | 29 + .../chatgenerationparamsprovidersort.md | 19 + .../chatgenerationparamsprovidersortconfig.md 
| 16 + ...tgenerationparamsprovidersortconfigenum.md | 15 + ...generationparamsprovidersortconfigunion.md | 17 + docs/models/chatgenerationparamssortenum.md | 17 + docs/models/chatgenerationparamssortunion.md | 25 + docs/models/chatgenerationparamstrace.md | 22 + docs/models/chatgenerationtokenusage.md | 18 +- docs/models/chatmessagecontentitem.md | 23 +- docs/models/chatmessagecontentitem1.md | 27 + docs/models/chatmessagecontentitemaudio.md | 4 +- .../chatmessagecontentitemaudioinputaudio.md | 8 +- .../models/chatmessagecontentitemaudiotype.md | 15 + .../chatmessagecontentitemcachecontrol.md | 10 +- .../chatmessagecontentitemcachecontroltype.md | 15 + docs/models/chatmessagecontentitemimage.md | 6 +- .../chatmessagecontentitemimagedetail.md | 2 + .../chatmessagecontentitemimageimageurl.md | 4 +- .../models/chatmessagecontentitemimagetype.md | 15 + docs/models/chatmessagecontentitemtext.md | 14 +- docs/models/chatmessagecontentitemtexttype.md | 15 + docs/models/chatmessagecontentitemvideo.md | 26 +- .../chatmessagecontentitemvideoinputvideo.md | 21 - .../chatmessagecontentitemvideolegacy.md | 25 + .../chatmessagecontentitemvideovideourl.md | 21 - docs/models/chatmessagetokenlogprob.md | 24 +- docs/models/chatmessagetokenlogprobs.md | 32 +- docs/models/chatmessagetoolcall.md | 12 +- docs/models/chatmessagetoolcallfunction.md | 8 +- docs/models/chatmessagetoolcalltype.md | 15 + docs/models/chatresponse.md | 36 +- docs/models/chatresponsechoice.md | 16 +- docs/models/chatresponseobject.md | 15 + docs/models/chatstreamingchoice.md | 18 +- docs/models/chatstreamingmessagechunk.md | 18 +- docs/models/chatstreamingmessagechunkrole.md | 2 + docs/models/chatstreamingmessagetoolcall.md | 16 +- .../chatstreamingmessagetoolcallfunction.md | 10 +- .../chatstreamingmessagetoolcalltype.md | 17 + docs/models/chatstreamingresponsechunk.md | 39 +- docs/models/chatstreamingresponsechunkdata.md | 28 - .../models/chatstreamingresponsechunkerror.md | 19 - 
.../chatstreamingresponsechunkobject.md | 15 + docs/models/chatstreamoptions.md | 8 +- docs/models/code.md | 18 +- docs/models/codeenum.md | 17 - docs/models/completion.md | 23 - docs/models/completiontokensdetails.md | 14 +- .../{modelscountresponsedata.md => data.md} | 6 +- docs/models/debug.md | 15 - docs/models/debugoptions.md | 17 + docs/models/developermessage.md | 14 +- docs/models/developermessagecontent.md | 4 +- docs/models/effort.md | 4 +- docs/models/engine.md | 17 - docs/models/errors/chaterror.md | 15 - docs/models/errort.md | 21 + docs/models/image.md | 19 - docs/models/jsonschemaconfig.md | 16 +- docs/models/message.md | 12 +- docs/models/modelscountresponse.md | 6 +- docs/models/namedtoolchoice.md | 6 +- docs/models/namedtoolchoicefunction.md | 8 +- docs/models/namedtoolchoicetype.md | 15 + docs/models/objectt.md | 15 - docs/models/openairesponsesreasoningconfig.md | 8 +- .../openresponsesnonstreamingresponse.md | 2 +- ...openresponsesnonstreamingresponseobject.md | 15 + docs/models/openresponsesreasoningconfig.md | 12 +- docs/models/openresponsesrequest.md | 7 +- .../openresponsesrequestpluginautorouter.md | 19 + .../openresponsesrequestpluginfileparser.md | 19 + ...> openresponsesrequestpluginmoderation.md} | 6 +- ...enresponsesrequestpluginresponsehealing.md | 18 + .../models/openresponsesrequestpluginunion.md | 45 + docs/models/openresponsesrequestpluginweb.md | 21 + ...der.md => openresponsesrequestprovider.md} | 6 +- docs/models/openresponsesrequesttrace.md | 22 + .../createauthkeyscoderequestbody.md | 4 +- docs/models/operations/providername.md | 2 +- .../sendchatcompletionrequestrequest.md | 17 +- .../sendchatcompletionrequestresponse.md | 18 +- .../sendchatcompletionrequestresponsebody.md | 31 + docs/models/operations/usagelimittype.md | 19 + docs/models/partition.md | 4 +- docs/models/pdf.md | 15 - docs/models/pdfengine.md | 17 - docs/models/plugin.md | 45 - docs/models/prompt.md | 23 - docs/models/prompttokensdetails.md | 14 +- 
docs/models/providername.md | 2 +- docs/models/providerpreferences.md | 2 +- docs/models/providerpreferencesby.md | 19 + docs/models/providerpreferencespartition.md | 2 + .../models/providerpreferencesprovidersort.md | 4 +- .../providerpreferencesprovidersortconfig.md | 8 +- ...oviderpreferencesprovidersortconfigenum.md | 15 + ...iderpreferencesprovidersortconfigunion.md} | 6 +- docs/models/providerpreferencessortenum.md | 17 + docs/models/providerpreferencessortunion.md | 10 +- docs/models/providersort.md | 2 + docs/models/providersortconfig.md | 10 +- docs/models/providersortconfigenum.md | 15 - docs/models/providersortunion.md | 17 - docs/models/reasoning.md | 10 +- docs/models/reasoningdetailencrypted.md | 24 + docs/models/reasoningdetailencryptedformat.md | 17 + docs/models/reasoningdetailsummary.md | 25 + ...a21.md => reasoningdetailsummaryformat.md} | 6 +- docs/models/reasoningdetailtext.md | 24 + docs/models/reasoningdetailtextformat.md | 17 + docs/models/reasoningdetailunion.md | 34 + docs/models/reasoningsummaryverbosity.md | 2 +- docs/models/responseformat.md | 10 +- docs/models/responseformatjsonobject.md | 2 + docs/models/responseformatjsonschema.md | 12 +- docs/models/responseformattext.md | 2 + docs/models/responseformattextconfig.md | 4 +- docs/models/responseformattextgrammar.md | 12 +- ...tpython.md => responseformattextpython.md} | 8 +- docs/models/responseserrorfield.md | 8 +- docs/models/responsesformatjsonobject.md | 19 - docs/models/route.md | 17 - docs/models/schema0.md | 27 - docs/models/schema10.md | 19 - docs/models/schema14.md | 23 - docs/models/schema15.md | 18 - docs/models/schema15union.md | 17 - docs/models/schema17.md | 45 - docs/models/schema17autorouter.md | 19 - docs/models/schema17fileparser.md | 19 - docs/models/schema17responsehealing.md | 18 - docs/models/schema17web.md | 21 - docs/models/schema19.md | 31 - docs/models/schema19reasoningencrypted.md | 22 - docs/models/schema19reasoningsummary.md | 22 - 
docs/models/schema19reasoningtext.md | 22 - docs/models/schema3.md | 17 - docs/models/schema5enum.md | 17 - docs/models/schema8.md | 17 - docs/models/stop.md | 15 +- docs/models/systemmessage.md | 14 +- docs/models/systemmessagecontent.md | 4 +- docs/models/toolchoiceoption.md | 36 + docs/models/toolchoiceoptionauto.md | 15 + docs/models/toolchoiceoptionnone.md | 15 + docs/models/toolchoiceoptionrequired.md | 15 + docs/models/tooldefinitionjson.md | 13 +- docs/models/tooldefinitionjsonfunction.md | 16 +- docs/models/tooldefinitionjsontype.md | 15 + docs/models/toolresponsemessage.md | 16 +- docs/models/toolresponsemessagecontent.md | 11 +- docs/models/usermessage.md | 14 +- docs/models/usermessagecontent.md | 11 +- docs/models/videoinput.md | 19 + docs/models/videourl1.md | 17 - docs/models/videourl2.md | 17 - docs/sdks/chat/README.md | 45 +- examples/nextjs-example/package.json | 4 +- jsr.json | 2 +- package.json | 2 +- src/funcs/chatSend.ts | 119 +- src/lib/config.ts | 4 +- src/lib/security.ts | 2 +- src/models/assistantmessage.ts | 205 +- src/models/assistantmessageimages.ts | 105 + src/models/chaterror.ts | 53 - src/models/chatgenerationparams.ts | 1137 ++++- src/models/chatgenerationtokenusage.ts | 48 + src/models/chatmessagecontentitem.ts | 80 +- src/models/chatmessagecontentitemaudio.ts | 34 +- .../chatmessagecontentitemcachecontrol.ts | 29 +- src/models/chatmessagecontentitemimage.ts | 41 +- src/models/chatmessagecontentitemtext.ts | 31 +- src/models/chatmessagecontentitemvideo.ts | 206 +- .../chatmessagecontentitemvideolegacy.ts | 79 + src/models/chatmessagetokenlogprob.ts | 15 + src/models/chatmessagetokenlogprobs.ts | 9 + src/models/chatmessagetoolcall.ts | 37 +- src/models/chatresponse.ts | 36 +- src/models/chatresponsechoice.ts | 20 +- src/models/chatstreamingchoice.ts | 20 +- src/models/chatstreamingmessagechunk.ts | 36 +- src/models/chatstreamingmessagetoolcall.ts | 47 +- src/models/chatstreamingresponsechunk.ts | 106 +- 
src/models/chatstreamoptions.ts | 8 + src/models/debugoptions.ts | 38 + src/models/developermessage.ts | 12 + src/models/errors/chaterror.ts | 47 - src/models/errors/index.ts | 1 - src/models/index.ts | 20 +- src/models/jsonschemaconfig.ts | 21 +- src/models/message.ts | 11 +- src/models/modelscountresponse.ts | 19 +- src/models/namedtoolchoice.ts | 23 +- src/models/openairesponsesreasoningconfig.ts | 4 +- .../openresponsesnonstreamingresponse.ts | 14 +- src/models/openresponsesreasoningconfig.ts | 6 +- src/models/openresponsesrequest.ts | 347 +- src/models/operations/createauthkeyscode.ts | 31 + src/models/operations/getgeneration.ts | 1 + .../operations/sendchatcompletionrequest.ts | 48 +- src/models/providername.ts | 1 + src/models/providerpreferences.ts | 113 +- src/models/providersort.ts | 6 + src/models/providersortconfig.ts | 37 +- src/models/providersortunion.ts | 31 - src/models/reasoningdetailencrypted.ts | 94 + src/models/reasoningdetailsummary.ts | 94 + src/models/reasoningdetailtext.ts | 98 + src/models/reasoningdetailunion.ts | 77 + src/models/responseformatjsonobject.ts | 53 + src/models/responseformatjsonschema.ts | 6 + src/models/responseformattext.ts | 34 + src/models/responseformattextconfig.ts | 18 +- src/models/responseformattextgrammar.ts | 6 + src/models/responseformattextpython.ts | 34 + src/models/responseserrorfield.ts | 12 +- src/models/responsesformatjsonobject.ts | 53 - src/models/schema10.ts | 70 - src/models/schema14.ts | 19 - src/models/schema17.ts | 234 - src/models/schema19.ts | 241 - src/models/schema5.ts | 101 - src/models/systemmessage.ts | 12 + src/models/toolchoiceoption.ts | 79 + src/models/tooldefinitionjson.ts | 60 +- src/models/toolresponsemessage.ts | 12 + src/models/usermessage.ts | 12 + src/models/videoinput.ts | 50 + src/sdk/chat.ts | 2 +- src/sdk/sdk.ts | 10 +- 260 files changed, 10260 insertions(+), 7502 deletions(-) create mode 100644 docs/models/assistantmessageimages.md create mode 100644 
docs/models/assistantmessageimagesimageurl.md delete mode 100644 docs/models/assistantmessageimageurl.md create mode 100644 docs/models/assistantmessagerole.md rename docs/models/{sortenum.md => by.md} (62%) delete mode 100644 docs/models/chaterrorerror.md create mode 100644 docs/models/chatgenerationparamsby.md create mode 100644 docs/models/chatgenerationparamsignore.md create mode 100644 docs/models/chatgenerationparamsmaxprice.md rename docs/models/{schema5.md => chatgenerationparamsonly.md} (52%) create mode 100644 docs/models/chatgenerationparamsorder.md create mode 100644 docs/models/chatgenerationparamspartition.md rename docs/models/{pluginautorouter.md => chatgenerationparamspluginautorouter.md} (96%) rename docs/models/{pluginfileparser.md => chatgenerationparamspluginfileparser.md} (91%) rename docs/models/{schema17moderation.md => chatgenerationparamspluginmoderation.md} (66%) rename docs/models/{pluginresponsehealing.md => chatgenerationparamspluginresponsehealing.md} (89%) create mode 100644 docs/models/chatgenerationparamspluginunion.md rename docs/models/{pluginweb.md => chatgenerationparamspluginweb.md} (94%) create mode 100644 docs/models/chatgenerationparamsprovider.md create mode 100644 docs/models/chatgenerationparamsprovidersort.md create mode 100644 docs/models/chatgenerationparamsprovidersortconfig.md create mode 100644 docs/models/chatgenerationparamsprovidersortconfigenum.md create mode 100644 docs/models/chatgenerationparamsprovidersortconfigunion.md create mode 100644 docs/models/chatgenerationparamssortenum.md create mode 100644 docs/models/chatgenerationparamssortunion.md create mode 100644 docs/models/chatgenerationparamstrace.md create mode 100644 docs/models/chatmessagecontentitem1.md create mode 100644 docs/models/chatmessagecontentitemaudiotype.md create mode 100644 docs/models/chatmessagecontentitemcachecontroltype.md create mode 100644 docs/models/chatmessagecontentitemimagetype.md create mode 100644 
docs/models/chatmessagecontentitemtexttype.md delete mode 100644 docs/models/chatmessagecontentitemvideoinputvideo.md create mode 100644 docs/models/chatmessagecontentitemvideolegacy.md delete mode 100644 docs/models/chatmessagecontentitemvideovideourl.md create mode 100644 docs/models/chatmessagetoolcalltype.md create mode 100644 docs/models/chatresponseobject.md create mode 100644 docs/models/chatstreamingmessagetoolcalltype.md delete mode 100644 docs/models/chatstreamingresponsechunkdata.md delete mode 100644 docs/models/chatstreamingresponsechunkerror.md create mode 100644 docs/models/chatstreamingresponsechunkobject.md delete mode 100644 docs/models/codeenum.md delete mode 100644 docs/models/completion.md rename docs/models/{modelscountresponsedata.md => data.md} (82%) delete mode 100644 docs/models/debug.md create mode 100644 docs/models/debugoptions.md delete mode 100644 docs/models/engine.md delete mode 100644 docs/models/errors/chaterror.md create mode 100644 docs/models/errort.md delete mode 100644 docs/models/image.md create mode 100644 docs/models/namedtoolchoicetype.md delete mode 100644 docs/models/objectt.md create mode 100644 docs/models/openresponsesnonstreamingresponseobject.md create mode 100644 docs/models/openresponsesrequestpluginautorouter.md create mode 100644 docs/models/openresponsesrequestpluginfileparser.md rename docs/models/{pluginmoderation.md => openresponsesrequestpluginmoderation.md} (66%) create mode 100644 docs/models/openresponsesrequestpluginresponsehealing.md create mode 100644 docs/models/openresponsesrequestpluginunion.md create mode 100644 docs/models/openresponsesrequestpluginweb.md rename docs/models/{provider.md => openresponsesrequestprovider.md} (99%) create mode 100644 docs/models/openresponsesrequesttrace.md create mode 100644 docs/models/operations/sendchatcompletionrequestresponsebody.md create mode 100644 docs/models/operations/usagelimittype.md delete mode 100644 docs/models/pdf.md delete mode 100644 
docs/models/pdfengine.md delete mode 100644 docs/models/plugin.md delete mode 100644 docs/models/prompt.md create mode 100644 docs/models/providerpreferencesby.md create mode 100644 docs/models/providerpreferencesprovidersortconfigenum.md rename docs/models/{providersortconfigunion.md => providerpreferencesprovidersortconfigunion.md} (50%) create mode 100644 docs/models/providerpreferencessortenum.md delete mode 100644 docs/models/providersortconfigenum.md delete mode 100644 docs/models/providersortunion.md create mode 100644 docs/models/reasoningdetailencrypted.md create mode 100644 docs/models/reasoningdetailencryptedformat.md create mode 100644 docs/models/reasoningdetailsummary.md rename docs/models/{schema21.md => reasoningdetailsummaryformat.md} (67%) create mode 100644 docs/models/reasoningdetailtext.md create mode 100644 docs/models/reasoningdetailtextformat.md create mode 100644 docs/models/reasoningdetailunion.md rename docs/models/{responseformatpython.md => responseformattextpython.md} (66%) delete mode 100644 docs/models/responsesformatjsonobject.md delete mode 100644 docs/models/route.md delete mode 100644 docs/models/schema0.md delete mode 100644 docs/models/schema10.md delete mode 100644 docs/models/schema14.md delete mode 100644 docs/models/schema15.md delete mode 100644 docs/models/schema15union.md delete mode 100644 docs/models/schema17.md delete mode 100644 docs/models/schema17autorouter.md delete mode 100644 docs/models/schema17fileparser.md delete mode 100644 docs/models/schema17responsehealing.md delete mode 100644 docs/models/schema17web.md delete mode 100644 docs/models/schema19.md delete mode 100644 docs/models/schema19reasoningencrypted.md delete mode 100644 docs/models/schema19reasoningsummary.md delete mode 100644 docs/models/schema19reasoningtext.md delete mode 100644 docs/models/schema3.md delete mode 100644 docs/models/schema5enum.md delete mode 100644 docs/models/schema8.md create mode 100644 docs/models/toolchoiceoption.md create 
mode 100644 docs/models/toolchoiceoptionauto.md create mode 100644 docs/models/toolchoiceoptionnone.md create mode 100644 docs/models/toolchoiceoptionrequired.md create mode 100644 docs/models/tooldefinitionjsontype.md create mode 100644 docs/models/videoinput.md delete mode 100644 docs/models/videourl1.md delete mode 100644 docs/models/videourl2.md create mode 100644 src/models/assistantmessageimages.ts delete mode 100644 src/models/chaterror.ts create mode 100644 src/models/chatmessagecontentitemvideolegacy.ts create mode 100644 src/models/debugoptions.ts delete mode 100644 src/models/errors/chaterror.ts delete mode 100644 src/models/providersortunion.ts create mode 100644 src/models/reasoningdetailencrypted.ts create mode 100644 src/models/reasoningdetailsummary.ts create mode 100644 src/models/reasoningdetailtext.ts create mode 100644 src/models/reasoningdetailunion.ts create mode 100644 src/models/responseformatjsonobject.ts create mode 100644 src/models/responseformattext.ts create mode 100644 src/models/responseformattextpython.ts delete mode 100644 src/models/responsesformatjsonobject.ts delete mode 100644 src/models/schema10.ts delete mode 100644 src/models/schema14.ts delete mode 100644 src/models/schema17.ts delete mode 100644 src/models/schema19.ts delete mode 100644 src/models/schema5.ts create mode 100644 src/models/toolchoiceoption.ts create mode 100644 src/models/videoinput.ts diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6d78967e..810f7136 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,23 +1,24 @@ lockVersion: 2.0.0 id: 8b6cd71c-ea04-44da-af45-e43968b5928d management: - docChecksum: 0dda1c9b912f3b0986c559a9ae99ea1e + docChecksum: 07d09d7593f4a95024bb4b3dfef853df docVersion: 1.0.0 speakeasyVersion: 1.680.0 generationVersion: 2.788.4 - releaseVersion: 0.8.0 - configChecksum: 51650e178782499be5772cddc3911720 + releaseVersion: 0.9.4 + configChecksum: e7390359b394f24b61a13300c873d8f2 repoURL: 
https://github.com/OpenRouterTeam/typescript-sdk.git installationURL: https://github.com/OpenRouterTeam/typescript-sdk published: true persistentEdits: - generation_id: a2c7ea46-9d3a-4bbd-8d6c-5e16d0c2ec64 - pristine_commit_hash: 451c1e75861ae1ad385622e9a902052fbb5ec0ba - pristine_tree_hash: bd7d7f5dedbcdc8555d1bec59229aea147b56cc6 + generation_id: fe25cfdb-a760-4b37-b279-7dcb42e58928 + pristine_commit_hash: 02d4c4ba617f9c035e806de79aa7c1042447927e + pristine_tree_hash: 7fb4ab57eb4142c6e15752fb814eff4d98f71a1e features: typescript: acceptHeaders: 2.81.2 additionalDependencies: 0.1.0 + additionalProperties: 0.1.2 constsAndDefaults: 0.1.12 core: 3.26.16 customCodeRegions: 0.1.0 @@ -88,16 +89,24 @@ trackedFiles: pristine_git_object: 342542484892f40ebac8a4113f6016114e16f3ee docs/models/assistantmessage.md: id: 7e0218023943 - last_write_checksum: sha1:52f189d2543df2212359cf830829f03afe2fbd66 - pristine_git_object: 76711bea4c838e5c7dbeb8621a0623143f30decc + last_write_checksum: sha1:79480b70e560f69bbf160f5d73854f14b9de1a80 + pristine_git_object: fefb42063808dd14e8937a53685b13de12903f27 docs/models/assistantmessagecontent.md: id: 9f1795bbe642 - last_write_checksum: sha1:dd290dd2754fd452f1ed8c026a00e310bfd7bd03 - pristine_git_object: 30a5052e885de59078f12eb3138aaf1a84e5489f - docs/models/assistantmessageimageurl.md: - id: 89b928e0a518 - last_write_checksum: sha1:68c6814ef6cbdb154d8273a74ef4eb24b6500de8 - pristine_git_object: 9d5905789df53eb9c10546de59f7c7fbea379869 + last_write_checksum: sha1:4239958c4e27af3c7e5007f1215f811a962e36dd + pristine_git_object: b3a4c9eb2a1ae59e7673d88563484b920068d41d + docs/models/assistantmessageimages.md: + id: 8fc46dca9711 + last_write_checksum: sha1:5a0ef7f22809ab84b8184aa31f038997db3a4272 + pristine_git_object: 3d865524956603f9c1157c04acb8b41aa8ba732d + docs/models/assistantmessageimagesimageurl.md: + id: eb84e2891721 + last_write_checksum: sha1:c62dd4d7f8522068bfae7d70b343b936b22c4cab + pristine_git_object: 
a58509e95acb0426c9a3a72bbfa7d0e659140958 + docs/models/assistantmessagerole.md: + id: bb5d2a4bc72f + last_write_checksum: sha1:3848d540d6b9b85f6ea05e1698dd7b661eac93fc + pristine_git_object: 7f02f827cf3e2368e90b4bba7aafa3719dccf4c2 docs/models/badgatewayresponseerrordata.md: id: 15451fc8cf63 last_write_checksum: sha1:7d1bd0317b5d98eaf83c5d46b9feca3f7f0b0d59 @@ -106,154 +115,250 @@ trackedFiles: id: fe209eb1ee34 last_write_checksum: sha1:c2101780fede667a7a2e49ba9955792fb0df34f9 pristine_git_object: 84693b01441744a1f1ff1f663d03bed57d5d3c49 + docs/models/by.md: + id: 197cbd9579c6 + last_write_checksum: sha1:f76a32fe44b8bad9178da6f7c7d60cc55804b97d + pristine_git_object: 8e95225b33f27c5c4b0f2cb8f80bcb41c1f0501b docs/models/chainid.md: id: 9136f718f4f8 last_write_checksum: sha1:795deccf3a2947bd47f6ae9efbfbbebeea6eb01c pristine_git_object: d7ceb6611e9cdb38186a504fa4f6b01226de1822 docs/models/chatcompletionfinishreason.md: id: 586041c9ec02 - last_write_checksum: sha1:f686d21331cb8006536ba853651187f9d4c98d20 - pristine_git_object: b130130896f42075686407ac0c98e815eac87864 - docs/models/chaterrorerror.md: - id: 7af074899bbb - last_write_checksum: sha1:e6bee676c6bf4a56a5293714809698abca7e81b9 - pristine_git_object: f0436c70d31701f55c4b4668c93c59ac65f8be37 + last_write_checksum: sha1:44e626b5da7ca77d5771fb6a5558931265ba8580 + pristine_git_object: 8c39f973adcc92ce38497cd05bc2b7e4d246eccc docs/models/chatgenerationparams.md: id: bb3446bdc077 - last_write_checksum: sha1:039f4493f147bb0764a05a09002ca4c89df9d49b - pristine_git_object: c5802561a7e21f233133c8a021f68b3ec9a8dc9c + last_write_checksum: sha1:fe3ffbbdf176de4168f9486299f6b8e2c2736ef2 + pristine_git_object: c79297696743c4581780e9bafe5bca0309985dde + docs/models/chatgenerationparamsby.md: + id: e6ee58ce066c + last_write_checksum: sha1:bde6c752c109deb4c3b4e1e620e659cfc076ddde + pristine_git_object: 775431756fc354007b0152cfbbf83fd97cef7198 + docs/models/chatgenerationparamsignore.md: + id: 8e90bca6b12a + last_write_checksum: 
sha1:3e46d99acc0ff9d7de8d7bba2824d6b2b8e3a6eb + pristine_git_object: beb05ef9356259c27979473b273016cd57e0bb86 docs/models/chatgenerationparamsimageconfig.md: id: 84ed14ec0766 last_write_checksum: sha1:d1a564dc909fc6a87a45bc37e3fb2cae1ee29021 pristine_git_object: e4521110960a1d15788210ee7f2f61cbbe935367 + docs/models/chatgenerationparamsmaxprice.md: + id: 6cc065ac30f3 + last_write_checksum: sha1:50797ea15a3ae2921b83e1d78d142702f2c15dc4 + pristine_git_object: e35aa9d978dedfe05bdca6d5f5f2c767b4d06156 + docs/models/chatgenerationparamsonly.md: + id: 59b08927e61e + last_write_checksum: sha1:66e7909b71ade0cd255cf2e1d6e4e86087434367 + pristine_git_object: 10fe5deff70fbff9801bc31cf6bd346cc6e8c98f + docs/models/chatgenerationparamsorder.md: + id: 6b82837a7273 + last_write_checksum: sha1:01b17a6e61686302a0f88214a1857462d790ec20 + pristine_git_object: b01d92ed5dc20fee2ed1c3762c52717527df89c6 + docs/models/chatgenerationparamspartition.md: + id: 189358db8f67 + last_write_checksum: sha1:e8281ada2943226e0b665996278f6fe483559469 + pristine_git_object: 0245cc5c21877ac2b301b85ca2eaa832c34432c6 + docs/models/chatgenerationparamspluginautorouter.md: + id: 83e3a7365cc3 + last_write_checksum: sha1:41ac18ab91d2d547ec428f72c93cb406a0a1c1fa + pristine_git_object: bc272191873753c1d3369a4f3ef41306c0c8a544 + docs/models/chatgenerationparamspluginfileparser.md: + id: 600ec63a57f7 + last_write_checksum: sha1:09829a43e23c51c19250f38550ec447ee4f4b2c0 + pristine_git_object: 4ac46f9cf29a94e25feae88f004392ebc2aa088f + docs/models/chatgenerationparamspluginmoderation.md: + id: fab1a3eee253 + last_write_checksum: sha1:93577cebe7bcc90ebbef7128c92cc08dba2d8579 + pristine_git_object: 651179b69cd47a08f54123075543b982f20cc8c8 + docs/models/chatgenerationparamspluginresponsehealing.md: + id: 0b7e09c7d969 + last_write_checksum: sha1:9df862a3fadf46c1f5835e7c91a253ba5f2c700c + pristine_git_object: 7688b175bea6007ec0343537c984458cdce814d1 + docs/models/chatgenerationparamspluginunion.md: + id: e0e521e5432e + 
last_write_checksum: sha1:68ceb4389dc4d58eeaaaff1bc7ca77aabdc1e294 + pristine_git_object: fcecb0aba42cabb6aa43d1f251397e3f77309dbb + docs/models/chatgenerationparamspluginweb.md: + id: d0d73d469fd5 + last_write_checksum: sha1:1d9b501d38f404c0a4d2bc71f3fbd6af9cf9fe66 + pristine_git_object: cd592c9ec6e98fba491172160b84a7f8cfc722f1 + docs/models/chatgenerationparamsprovider.md: + id: 53100e96f9b3 + last_write_checksum: sha1:b74cfbb32881d7d8daf477016b1ac3aa9177d74f + pristine_git_object: 5e19af77045dd4b6e4c37d26b9737fd578d5bbf7 + docs/models/chatgenerationparamsprovidersort.md: + id: 4a9d5832e067 + last_write_checksum: sha1:e59f556279035757372c8ce5f901ca589dbf7bd0 + pristine_git_object: 49d01bd7be211abde1e1c0983d03465ef1bef861 + docs/models/chatgenerationparamsprovidersortconfig.md: + id: b3cf60c0750d + last_write_checksum: sha1:317bf9fbe1722530e4296fe2f370474ba6560c34 + pristine_git_object: 6fabc8e68ccebf055c9f4b129081c4f273775169 + docs/models/chatgenerationparamsprovidersortconfigenum.md: + id: fe34c7066332 + last_write_checksum: sha1:cd0a294bcf8620af28b9d240da90e19685cfaa06 + pristine_git_object: a194eedc46e79a127ef3127f2a582241d4925554 + docs/models/chatgenerationparamsprovidersortconfigunion.md: + id: 7ed8535c0167 + last_write_checksum: sha1:d6857aa4d6a3db20b6f49bd1ff11961e95f2e623 + pristine_git_object: 9c9e2bdbd1d0a44d411857ac40efc3e55f8de167 + docs/models/chatgenerationparamssortenum.md: + id: 90d490370327 + last_write_checksum: sha1:c76c151e30c91e1ac9871951ca3be450857894f1 + pristine_git_object: 586baae6e7f15c34aa2a8fd6aa056c624f38ff19 + docs/models/chatgenerationparamssortunion.md: + id: 9fad910ac1d9 + last_write_checksum: sha1:013faf67ad4629b78434266a2eba1be5da699814 + pristine_git_object: bc8cf7937760f55a8594a210d2a191541781cf91 + docs/models/chatgenerationparamstrace.md: + id: ba491d2073bb + last_write_checksum: sha1:7f9af19cadeb77c2d319157de92ee9c4d2fcb14c + pristine_git_object: 62191c84b075653224bc35315f1f028c7f8b7f25 
docs/models/chatgenerationtokenusage.md: id: e19063b1bf4d - last_write_checksum: sha1:7bba63fadbb3db3536fa07a176c45665b85bb2ad - pristine_git_object: e5ba65824e38314d1a8ba1e49b52a7e8d78f12de + last_write_checksum: sha1:972fcae6f5f46ac450ddaf10cdf098ef17e156ff + pristine_git_object: 8b5bd765193cd052ba5798dda01da7fc0bb7127c docs/models/chatmessagecontentitem.md: id: 4e59fab9efd9 - last_write_checksum: sha1:55a1273c472b0e861293d75f6b40a63295a36fc6 - pristine_git_object: c66c9f77ced62621d5c199a25127fda38134fb91 + last_write_checksum: sha1:fc44afbda59b46caf1f201c563c7f6b6ffd36510 + pristine_git_object: 60b3ec3fe06be1f7d72863188439b49572d031b8 + docs/models/chatmessagecontentitem1.md: + id: f2881fb1f290 + last_write_checksum: sha1:d9a46a8c31f9fa362f6b41fef52882c58ad8e1c6 + pristine_git_object: f975ea8ebefea3ee2b28d136875467cb93ffbdf2 docs/models/chatmessagecontentitemaudio.md: id: b939043fc5e5 - last_write_checksum: sha1:88a58907d467a44abad2901d31a1476ad77467b6 - pristine_git_object: 5553e7f36fb0043f79f1d1ae60423483b34cbf11 + last_write_checksum: sha1:dae6a99beebf2f1ad434602216b7a007138bcf1c + pristine_git_object: 8952ff6e239b2a61213aaaaa4f0c78219139d098 docs/models/chatmessagecontentitemaudioinputaudio.md: id: 17c7abcf6cf3 - last_write_checksum: sha1:01651cda7ab1fc3f792b39a202a45308278972f7 - pristine_git_object: 6bc501cdad419bd391621092c775028572eb4ddb + last_write_checksum: sha1:07394bc05c505b0154ee72d0862157ed2745adfa + pristine_git_object: 67f13de3d7f9de49a61eebefb0d5f05042ef5452 + docs/models/chatmessagecontentitemaudiotype.md: + id: 797b9f501f55 + last_write_checksum: sha1:2f8248cb01d4aef1fa579b6043f8ea45ae38280c + pristine_git_object: 8b322e992781ec9ac3cae0179093cfe960cd6dd8 docs/models/chatmessagecontentitemcachecontrol.md: id: c39918266a37 - last_write_checksum: sha1:3bf16757551bf5d8e6bd7b0494aad8f802292e6f - pristine_git_object: 1ed1fa42ecf4d8eae94409b6b9854bbb6550abdc + last_write_checksum: sha1:5bc0a8e326f09695cd18a78ca363aee509d7be64 + pristine_git_object: 
7af306bc63bdc12425deb60b4ce291020d5c49e6 + docs/models/chatmessagecontentitemcachecontroltype.md: + id: 284323c24fb7 + last_write_checksum: sha1:1ada70dcd1b95d01eae4a15eb15a7f824f607221 + pristine_git_object: 9fa295c5718f4e3ebd74790fda55e362253bd05e docs/models/chatmessagecontentitemimage.md: id: 1667af650eed - last_write_checksum: sha1:57bb5db3f39e53cdfd5fd9d1f1fb58e0e30c1847 - pristine_git_object: 662a771cd4a8a01da0cdbf498d9c9e378148e448 + last_write_checksum: sha1:fbfb3b0141e9691f1b46d72c0363d7a0d33b1fbc + pristine_git_object: 7e24ee550d87e6cfa9213f75436f5fc1f488b674 docs/models/chatmessagecontentitemimagedetail.md: id: 272727de90f6 - last_write_checksum: sha1:326bb87cf34234ec6cff3b11315fcccf1989480c - pristine_git_object: 20db7846d9843b673c46cd0818b981c7c84d6920 + last_write_checksum: sha1:cc6540d43a12a1c54c450a5cbc95c426b6810773 + pristine_git_object: 109e3c156c6fdeda7b89314790e32ba06b98c8e5 docs/models/chatmessagecontentitemimageimageurl.md: id: c964e80bffd8 - last_write_checksum: sha1:5314ed41308fb1c30878ed11adbfbcc1d5a5aa99 - pristine_git_object: a3891c144a1b56bae45dd6b2f88d6a0197b24692 + last_write_checksum: sha1:fa31f3bd34ef12b3b2adcfe9f8a34d677d50167b + pristine_git_object: 697ca9f400ce79a2ba13c35340d1c35532773505 + docs/models/chatmessagecontentitemimagetype.md: + id: e16e87bad566 + last_write_checksum: sha1:b477e049e1e0e00fd8beb914de9a53b14928e728 + pristine_git_object: 3c1a6e9f6888020c6bc60570723b61484e286743 docs/models/chatmessagecontentitemtext.md: id: bf54b5b4ee77 - last_write_checksum: sha1:eeeba8f13a0abf0c29c7cdcb7aba2a6f1f62e04f - pristine_git_object: 1aa324ddfcb969d93d0a7607e772142b41e76de8 + last_write_checksum: sha1:47e32c9d2d33d9185cc2eadfc1d802c819270bbe + pristine_git_object: c401763a9938e418903f9856df56263cce2b3d11 + docs/models/chatmessagecontentitemtexttype.md: + id: be5adbc9c6ea + last_write_checksum: sha1:aac75f8bd735f6e39a1b2c5ec84068a790d21a49 + pristine_git_object: 78d047036ee17fd89c643a11aada753527b2102f 
docs/models/chatmessagecontentitemvideo.md: id: 7db278ad338b - last_write_checksum: sha1:630d589ed8db06315ca196034192eeb59a884d44 - pristine_git_object: 8bcb8beb35f2bc1fceddbc73f30df485e61f0381 - docs/models/chatmessagecontentitemvideoinputvideo.md: - id: 0d7fa71f4d7e - last_write_checksum: sha1:6dbef776235b8eb05816af357771950b00703f5e - pristine_git_object: 44fd37392a91ea3e9dcd3a09bfb7561ea9169674 - docs/models/chatmessagecontentitemvideovideourl.md: - id: 314ae7edc2d5 - last_write_checksum: sha1:9661e73a322af81c3f5c4bcd87785f2ae3d3440d - pristine_git_object: 6853983e06def8a1a1928eaea953469e502981bb + last_write_checksum: sha1:6fe89749ac3274f02b82dd017a8d6d873f8b235c + pristine_git_object: 75c62e557a5c832513e925431b8a40347fa475d0 + docs/models/chatmessagecontentitemvideolegacy.md: + id: 8f7d0d9c94b0 + last_write_checksum: sha1:9f6fb87cbf472babe0a2499edb44103df64d9590 + pristine_git_object: 8b6d626677d43ed9567429b2103088b789c2150f docs/models/chatmessagetokenlogprob.md: id: d4b24af16d32 - last_write_checksum: sha1:449d0392438ae9002fbe114bb70c6c17ff6972b5 - pristine_git_object: 886381e6209c6293d4c4d70633b6f4b0d1507a91 + last_write_checksum: sha1:5da4b36e30bcf22d2988b79aeca689686e32096d + pristine_git_object: 9062deeab754e8e7ea12dfb5ed1eb5abb84f7fa7 docs/models/chatmessagetokenlogprobs.md: id: e987211f8936 - last_write_checksum: sha1:00220d98932f5ae1e4c24d4e9e47344175e8d68c - pristine_git_object: 01cd792f73eb9609c3b65281cae43858f90e2945 + last_write_checksum: sha1:44c3ae9877d0fd848fc7c6fbc407e39a6686bf93 + pristine_git_object: e125f27d0ec978467f87c157f80f08e349dfc4a7 docs/models/chatmessagetokenlogprobtoplogprob.md: id: e0875b3ca7c0 last_write_checksum: sha1:687f38dfac49d7f7487cb96c33f6a05e96db665e pristine_git_object: 766a215302c909def13c477abe085b43e444caed docs/models/chatmessagetoolcall.md: id: 8f50e7f37c2e - last_write_checksum: sha1:f1dea21b85173ba18d87fd0bde883313d3cf964a - pristine_git_object: a56e6bc8795597ac8a43a0dedcdec90b52e6744c + last_write_checksum: 
sha1:6f7db5afb620e1ce93e804356d3a6bd7ba8febd1 + pristine_git_object: 98c9ec0fc8bb693a91c3a7b4f989b1a6f89983e1 docs/models/chatmessagetoolcallfunction.md: id: b47a9728d421 - last_write_checksum: sha1:ac7582828480df3ca53af5ed4d1ec7f079bbf5db - pristine_git_object: 124477b6f65e09a16bf26ed09c72dda1a41aad35 + last_write_checksum: sha1:9942bbf2f0217d9b6bd9d8db72273adbd2cf4bb7 + pristine_git_object: 02aada4f1f7bcf441f03d803a2118125e724b826 + docs/models/chatmessagetoolcalltype.md: + id: 139cbb37fa01 + last_write_checksum: sha1:2d6aa4e9511d32a38721157b564f82d930da8d9d + pristine_git_object: 527e75da63ec5798c87246bfb7888a0ff5d78c1a docs/models/chatresponse.md: id: f59beb8f6c98 - last_write_checksum: sha1:dd0cd493235d52b2d12709bb31d9f3022a828bfe - pristine_git_object: 2cab9bbcc7c800c6ea33f8310009f34dbffa3abc + last_write_checksum: sha1:718447999bb52bbe93236d5540bdd3f2f4baa942 + pristine_git_object: 89d69ec1f1fd49bf0bf0b38eadfbc13e0a06199a docs/models/chatresponsechoice.md: id: 7248e25b3cf8 - last_write_checksum: sha1:5f9fb35a59a0551011befe8c2260a5be6d5c9bcb - pristine_git_object: 58451f44fcb9fc051be685c8bdfde611b37cd0c1 + last_write_checksum: sha1:670b4402e0f31c29be3dafa681d5e8d9689f391a + pristine_git_object: bccdba823d903b91b0484dea6b4498301eebda1f + docs/models/chatresponseobject.md: + id: 51730f04be6c + last_write_checksum: sha1:8b79e90052d8a16ea7409f157526b04dec53e0a0 + pristine_git_object: cefb86e3479b68f9af3274ffe740ce612be00d71 docs/models/chatstreamingchoice.md: id: 0a39074d9acc - last_write_checksum: sha1:ff3df385fb1fd9bae1b7060aabcf6c07148eda69 - pristine_git_object: 8751bd5a0caaac21f3726acb067e08acd1432776 + last_write_checksum: sha1:054054209003c5288a129f7ad6899f5b8b702b78 + pristine_git_object: c915f1ae7119ed5685c13000212253e03b6ecdb3 docs/models/chatstreamingmessagechunk.md: id: f904722b1ed6 - last_write_checksum: sha1:70b26ba6365d38c7781af427355981908ef5a87b - pristine_git_object: 09d63c5b45f59d48540d38e532a980d7ad479511 + last_write_checksum: 
sha1:d5d8d2cb8039d976305286e8bd9ee7ef5bd3cc56 + pristine_git_object: 7cb663d9c5f93a7f1f366f2040a14cf58ce2dbe2 docs/models/chatstreamingmessagechunkrole.md: id: 570be00c9576 - last_write_checksum: sha1:43c1a234e149f4d104ea3403ff0eada9c25a04a9 - pristine_git_object: db525ff513c4a72209d0acceba815e494f4c60d3 + last_write_checksum: sha1:8459b6f6c158102abd1204d73ccb1b0eed4717e8 + pristine_git_object: 3c82f4ed3cdc978fd47c8d95654573a9ea140b41 docs/models/chatstreamingmessagetoolcall.md: id: fa1efad0ae99 - last_write_checksum: sha1:d1bd4cdd7c8321cb88b54ac259137234a199ffd1 - pristine_git_object: d090a9e694c5ab665b3f9472330c6f7d0311190c + last_write_checksum: sha1:59c88297baad74046886ebccd91e99bb266ff6d7 + pristine_git_object: 791022be2ac318e3d3b97aabb3b0c64a2c25aa9e docs/models/chatstreamingmessagetoolcallfunction.md: id: 39c48c3fc2e4 - last_write_checksum: sha1:53c8d278ab3c508129ba9b672eb3e4187bbae2e8 - pristine_git_object: 8c28ab9f3df3fd797552155edd4f495bd8d3922e + last_write_checksum: sha1:37c466ceabf1d939d2ed15d40a61d3d4508190ef + pristine_git_object: 253733e38e04d6ab7fd0ff888f5207298a83f1b8 + docs/models/chatstreamingmessagetoolcalltype.md: + id: f8ced7983b9a + last_write_checksum: sha1:cb96336bac7de3ae85d6347bcf2563ad9e803762 + pristine_git_object: 231785a38beef92c3501c989b1e6e36c8c575790 docs/models/chatstreamingresponsechunk.md: id: 3da86b48fe2b - last_write_checksum: sha1:42eecf920f2e68cea617a26c19a9de798f33aeaa - pristine_git_object: 21f49bbb8dd3ef67d288f6194c28f014b8f7648f - docs/models/chatstreamingresponsechunkdata.md: - id: 5cca13ea8417 - last_write_checksum: sha1:4b710cf7474cf50ddfaecf74d5952d88b8d44f4a - pristine_git_object: 65dbdc797df3df3ebe84455345c1b5130ccfe928 - docs/models/chatstreamingresponsechunkerror.md: - id: 69340a60017e - last_write_checksum: sha1:4115f45c77846ecd6eb1c7e2d84c89c9f851315d - pristine_git_object: fe862efc32f8ec834179530165895cac291dbe3e + last_write_checksum: sha1:e81c20186bf89a1d098b7b2ea1ec010211ee8ce7 + pristine_git_object: 
ac04d77f3e390978a49e51cd384c80d8e93e8e7a + docs/models/chatstreamingresponsechunkobject.md: + id: 5065667fa438 + last_write_checksum: sha1:a62488848bb648b3d8b8129c38facf98b17f2d81 + pristine_git_object: f812dd47a8b3a2ac10f91fc52d91c89a47f37c6f docs/models/chatstreamoptions.md: id: e423b323384a - last_write_checksum: sha1:b8d91753b213b125a02a9a014e969a4ca0e92e8e - pristine_git_object: 09880475ab74b01ce66ed7bc23cecc41c2d89445 + last_write_checksum: sha1:1da22c11da9de199208268493327242c36963aca + pristine_git_object: 0c3c5436e35955fc7e2d946c2a4b409a894c2063 docs/models/code.md: id: 2fcb3964c9c0 - last_write_checksum: sha1:9e619c2d9e0d8a2e3d4dfbcf90341369ebf2d278 - pristine_git_object: bf5f59b67a19259441eac5c2035c27a8f57b9494 - docs/models/codeenum.md: - id: 1a6fce72ce62 - last_write_checksum: sha1:51b8ebae2819c92bdb3591265f766c462fd404d2 - pristine_git_object: ad2c971be486a333de81497b42d708af52da0729 - docs/models/completion.md: - id: 2332ba16cc90 - last_write_checksum: sha1:97418f0f8a9002bf01865be25bb9e6f4bb31b352 - pristine_git_object: 2abe861d2bc516379c13976e37c90adad9aa91ca + last_write_checksum: sha1:510831346c635088accbbb7886c2132bb74f8c91 + pristine_git_object: 7a9e7e3c11a971f963598cb1a64db54551995064 docs/models/completiontokensdetails.md: id: 72beafa9a4c8 - last_write_checksum: sha1:9fdd32ee9ac0bfeff3a4757c7f570a1d3a7d5eda - pristine_git_object: 3c89a927cbc9c5b5a9c840678971e8ce9190165f + last_write_checksum: sha1:ab7fd2fb62512d53b92921c238fa94460eb4934d + pristine_git_object: f5a794e0d74f3ac046e339d3f12ac56e364b8c20 docs/models/costdetails.md: id: 35e3d2efb6b1 last_write_checksum: sha1:8e70b5ef0b5ff86b63dd487ea4cbabe9e4488c71 @@ -262,42 +367,42 @@ trackedFiles: id: 31dc6bdffc5e last_write_checksum: sha1:3a19c2cc0781ae2b694a7fb2adf84f88011c4ad6 pristine_git_object: 096fdbbc5d7d27e79223f32847a83553b83fb75b + docs/models/data.md: + id: 9a31987caf78 + last_write_checksum: sha1:4fa53da7aeae62c5764776d770c6673e4d9536d1 + pristine_git_object: 
fa243aa95737a1124bf591f364d18d49c40446c2 docs/models/datacollection.md: id: e9b57ccc9d48 last_write_checksum: sha1:445fff3f319baebba4dedc12725cd146cdc1bc19 pristine_git_object: 0fd355ae0d8d2d65c4463d5dad5aac537cb5828e - docs/models/debug.md: - id: 27952725ecc7 - last_write_checksum: sha1:a4e715351177389907d837da0388f7455bcfc17a - pristine_git_object: 5e67ef89456a08f1faaf2764359efb5fff9875be + docs/models/debugoptions.md: + id: ebab350e870a + last_write_checksum: sha1:a1f77a84d743b9b3f4135a178df9c9998b4224fb + pristine_git_object: a32e2f497a9e49c4171d83b6aa7c6ae80b386c25 docs/models/defaultparameters.md: id: 91207995478e last_write_checksum: sha1:d8d874b2c673e3c994fc31914592baeeba373602 pristine_git_object: 9a80e9d9bf49674f4c490852fb37761d32c90497 docs/models/developermessage.md: id: 2a6952605f1d - last_write_checksum: sha1:7a32552e4a16eb647659f8852003d18b5510f031 - pristine_git_object: 977250d1ebe9d3b79ee64f879c1f049c0abf9bff + last_write_checksum: sha1:abc5d7c2036ed534067a88e2ad200d2caed18c9f + pristine_git_object: b835eb71fd84a6e5d036fcc8d64690debf430aec docs/models/developermessagecontent.md: id: 835c3e263870 - last_write_checksum: sha1:8c6bd0b3c2a112e2cfcbb0e5ceac2863d6826e57 - pristine_git_object: ed6f7e592fa4c1bfa040472c9646aaba596a2358 + last_write_checksum: sha1:63240fc1c72933d19ab80833e48f1be3237a6d40 + pristine_git_object: 50dfe3f49dfcd664e6229b92e03f4ec97e459605 docs/models/edgenetworktimeoutresponseerrordata.md: id: fe26057fc099 last_write_checksum: sha1:f7704a9bacd44a03bccc6e2da0d9e3882cf5da16 pristine_git_object: 3e244ae203c0669d96602dc268001b601f40efe6 docs/models/effort.md: id: b0ec2947740c - last_write_checksum: sha1:a05b0140535a5f84ab10eac9f53633be25333291 - pristine_git_object: adcb3b7a0f8d47c6728f8834c97c27e0097e9333 + last_write_checksum: sha1:6dbaf5d262637b662cfe397ec89c39144cd5e572 + pristine_git_object: 64797c3eead583923c59d83e339cfa6289b0aee1 docs/models/endpointstatus.md: id: fcbb5455c001 last_write_checksum: 
sha1:e9daf88d4ef1b012a12b901aa68aeafd081b700b pristine_git_object: 9ecd2701ba2938052faa99415a3e52e19aac2ba7 - docs/models/engine.md: - id: 85f9124a575a - last_write_checksum: sha1:8c67190f45c1579e71730a5a589821f4900d865d - pristine_git_object: 0c80b2fb505b2805b808201ed32d2eae8873a375 docs/models/errors/badgatewayresponseerror.md: id: 0abbce1892eb last_write_checksum: sha1:eee3402a9874f0715c05cc9d7e05fb12f3abe21b @@ -306,10 +411,6 @@ trackedFiles: id: a4d74f0d0bb5 last_write_checksum: sha1:5af65d54698b0c919d4543605c48a51c87cf3c23 pristine_git_object: 344e10a91bc613a9692f15491788304a9c2d1b7d - docs/models/errors/chaterror.md: - id: ce5f3cdd0d7c - last_write_checksum: sha1:639c9dc6d9f515bb929cc29e87866b27ba406446 - pristine_git_object: 192cf484874dce73761024f89314c0a11a470411 docs/models/errors/edgenetworktimeoutresponseerror.md: id: c59578401a13 last_write_checksum: sha1:b3e22fe213270e8001d99fa00f55f42f61fedea4 @@ -358,6 +459,10 @@ trackedFiles: id: ba0b8363bf72 last_write_checksum: sha1:9130e36cddb031649f3959b40c774483e89439d7 pristine_git_object: 605d25deff53f7a3e3291a7f8973b8bae4ddf4ef + docs/models/errort.md: + id: 30c48d365f28 + last_write_checksum: sha1:bafe558ae11fb613584cd4ea64f473127fac5dbe + pristine_git_object: 60ef38cc99e56bc0c64ef5d48b45852af98f6328 docs/models/filecitation.md: id: 8bdcd41d649f last_write_checksum: sha1:26035a10a5ac9195ad1ad36980c6c2f0e9187976 @@ -370,10 +475,6 @@ trackedFiles: id: dcfaa46a8072 last_write_checksum: sha1:cbc76d78110922926f151f33e467c2b9821fb505 pristine_git_object: 35682871fd89355f201cd3a4fee14d6e86c39405 - docs/models/image.md: - id: 6f8629b533b3 - last_write_checksum: sha1:ea892b9c2a64f521cf80a4be3a54cf55498b1eac - pristine_git_object: 54823863c70d87ea6386213e81363864c1961cde docs/models/imagegenerationstatus.md: id: 3d5a75816868 last_write_checksum: sha1:0c95ad71f4207dfb87bf19129d84b06e4d3cc7e1 @@ -396,8 +497,8 @@ trackedFiles: pristine_git_object: 241d4b4d64511a03e3c4020ee846404471276f5a 
docs/models/jsonschemaconfig.md: id: c39d4e31386a - last_write_checksum: sha1:f8e7c5c55414212a6babc03ceb365af00fa1f51b - pristine_git_object: aa34788e2713691540b458a21142c537991c67ad + last_write_checksum: sha1:31d5cdc0b34801f4c457c9c90829f95bbe4add4c + pristine_git_object: 04d513044cf154a24378a05e4f66946d26e536f4 docs/models/listendpointsresponse.md: id: 538591337ba9 last_write_checksum: sha1:555c569b345924d6219465cca4593fd210b6a832 @@ -408,8 +509,8 @@ trackedFiles: pristine_git_object: 3ba44ac20d009c1d6249925ed2d328f615d4b882 docs/models/message.md: id: a9614076792b - last_write_checksum: sha1:e7c67c3d6a4ba6a56187dad75cb4ccb3ab176470 - pristine_git_object: 32d643638a2f336b596813b2ef3851bfcb89030c + last_write_checksum: sha1:6f2e98d851ee1b90a1316b6db411d5aa0a80afed + pristine_git_object: 351b6c0eaac29d79341b3b658fa8ee816ef78271 docs/models/modality.md: id: bf9d60290081 last_write_checksum: sha1:7aedfc6c59f0ecc8bb5ee91c0d0a14f4270d01cb @@ -432,32 +533,28 @@ trackedFiles: pristine_git_object: 1f32237967dce071639ba5d259d63a2250ec6f3e docs/models/modelscountresponse.md: id: 1b997e3738fa - last_write_checksum: sha1:c4468e4d1a850fbfa366fbc4ea9df58abd0de4cd - pristine_git_object: fb30c8523854571d327cfcefbaae398f74424d61 - docs/models/modelscountresponsedata.md: - id: cf2d39e28b1a - last_write_checksum: sha1:94c0b300aae7a54636a430a0e67518c7cf9ee93f - pristine_git_object: ac261a108261e5efc2a52208c45048444cc201c6 + last_write_checksum: sha1:77332e0a1d5fc3dcaa2ce64133e0bc97bfd62a10 + pristine_git_object: caf9b9a3836f505f002c7dd3779708165a5c2d7d docs/models/modelslistresponse.md: id: 62180dd98256 last_write_checksum: sha1:b647f2ce2c4b1f38914220821183e83e61df2a2d pristine_git_object: 30173e6226f406ac4599db10fb486904167e3805 docs/models/namedtoolchoice.md: id: a0833124ac01 - last_write_checksum: sha1:9ee7e0ad2711fd928ce8e60b4c187e0c6caba8de - pristine_git_object: a1fb110241bcf06fafb618670068d4559d86ee34 + last_write_checksum: sha1:347ba19079d1cd1bcb5e5f49fb13cb141dce6f20 + 
pristine_git_object: 44c711f45ac48d5e064bdd7c5eaa08546da7c08b docs/models/namedtoolchoicefunction.md: id: 5ddd8312d079 - last_write_checksum: sha1:1762f11785a8b1f7afd22708dd66c7032363b382 - pristine_git_object: bfa5de0d860dee21b285c50f8628d0e588c558b2 + last_write_checksum: sha1:927afe48b6774e576c83c08dcf6df7b1863ae409 + pristine_git_object: 46e429c0f07cfc5768ade573f970f8218a4ccc79 + docs/models/namedtoolchoicetype.md: + id: 21363922f4dd + last_write_checksum: sha1:bb2b88b41dc3a3cc2291f269358da0808033ff62 + pristine_git_object: 933269bdc278c78670b8a9d0ef9664261c546bf3 docs/models/notfoundresponseerrordata.md: id: 757633dd768c last_write_checksum: sha1:c11ecb97bcefe57923f48599d0eeaa6ed34d750a pristine_git_object: 4221a4ffa9583b10c4acd0c2f67d1d8372ba1f95 - docs/models/objectt.md: - id: 676fec8291de - last_write_checksum: sha1:71b669ee2989310dbf3c1fb075f8697f1f2bc1e0 - pristine_git_object: d614af0c015c2970708b15cfd9d7b90c98a86cb4 docs/models/openairesponsesannotation.md: id: e0fb0de332e3 last_write_checksum: sha1:b655ca2bfbf1d8444405715767a97dac35d44bcf @@ -564,8 +661,8 @@ trackedFiles: pristine_git_object: 810fe39685c8b8fc99865831f6f2ea16d95ce165 docs/models/openairesponsesreasoningconfig.md: id: 94b7db89a2fb - last_write_checksum: sha1:0fa760ce6523328b79ed2b68d103cc874f21d2df - pristine_git_object: d16880b5232d5ef54c8330e08a83eaca4c07a668 + last_write_checksum: sha1:0c27f5b3f0882f35b418aa24af5429e2cc4161fd + pristine_git_object: 1f1555f4c8722802c4cceea8387d79c4461245b1 docs/models/openairesponsesreasoningeffort.md: id: db682b74f402 last_write_checksum: sha1:b2743d2541acf3187f19b1dd11d623f38ba13571 @@ -752,8 +849,12 @@ trackedFiles: pristine_git_object: 378b69f703e65c59ff8b639ead80a32b3f7e4b4e docs/models/openresponsesnonstreamingresponse.md: id: d0ac728b4904 - last_write_checksum: sha1:cbdd12304978f23446743d381fc73584d5ff3769 - pristine_git_object: 3d8dd191ac763fc1ed70398711655ce18043ccb9 + last_write_checksum: sha1:1c37fae828ada0f763160afd9045d61fd7d72408 + 
pristine_git_object: 7a9ff6c578ddc49d7d944ced74ebe21f91914bb1 + docs/models/openresponsesnonstreamingresponseobject.md: + id: c11e5ad506a3 + last_write_checksum: sha1:3a7405d64aa7b5c9822fd317837a3e813e3e0ad8 + pristine_git_object: c92ea5fc88c98fda2c4982b8306256989121387c docs/models/openresponsesnonstreamingresponsetoolfunction.md: id: d4d0e54d3003 last_write_checksum: sha1:bb878cf3f28281a0a24c71a701385db8edbee373 @@ -768,8 +869,8 @@ trackedFiles: pristine_git_object: 927c73ccc3a5366c4d983d50e817985724f00d4d docs/models/openresponsesreasoningconfig.md: id: 75c3150a301f - last_write_checksum: sha1:a3dcd1718ca0792ef5bb65c17e48569250252900 - pristine_git_object: 04d610fe52b91c95646f861f9ae65fa4f0301a13 + last_write_checksum: sha1:93c125e16de98cd78739f7c1ef8bf1184468fd82 + pristine_git_object: 3af97c46d8f167c29bdd0b0110b379407d4da296 docs/models/openresponsesreasoningdeltaevent.md: id: e9cb41f3ca0a last_write_checksum: sha1:d3f49c63055fddcadc4910e494c787cea55ba2d9 @@ -816,8 +917,8 @@ trackedFiles: pristine_git_object: 58b79f7952dde1e36fc3696b72a2e7d5fff2d06b docs/models/openresponsesrequest.md: id: 31fbfb05398d - last_write_checksum: sha1:923e82298f26d9bc9214d3769047cab7d3104d73 - pristine_git_object: 15c300127dd9304088b3bca633c5d647ef026ee9 + last_write_checksum: sha1:5406f753b27db1d51d4869e0e291d9d6bfbb6f90 + pristine_git_object: 3134fc5600dd106c7deeb7dbd151500aaad87533 docs/models/openresponsesrequestignore.md: id: 1a7632be7d10 last_write_checksum: sha1:c0e3072d51d1b5f00239a5ae9c2c79bf46da8e19 @@ -838,6 +939,34 @@ trackedFiles: id: 5fadbc9bf2ea last_write_checksum: sha1:7a7fd54280c4363b0b1f980123db48c18972d791 pristine_git_object: e8180b1bb0aa84f2ae2304d6612927d07e5c6498 + docs/models/openresponsesrequestpluginautorouter.md: + id: 06093724cf28 + last_write_checksum: sha1:21792c84844bbf81c371f10ac3b9ceaed67ee88a + pristine_git_object: 9d4f2c148dbdab7f92d7b8229064a97dd94bede4 + docs/models/openresponsesrequestpluginfileparser.md: + id: f5035f920ff2 + 
last_write_checksum: sha1:47a5304db29ef1ca8267a5d74b5c20e080c9d7d2 + pristine_git_object: 759a754bf055119a46d2c0410c89d65b4ec44292 + docs/models/openresponsesrequestpluginmoderation.md: + id: e15390150618 + last_write_checksum: sha1:67605d3d91e13a351f0c10a61f5c82c2e142a79e + pristine_git_object: f0117c5f7eebae5490c41ce67afb57501bc94600 + docs/models/openresponsesrequestpluginresponsehealing.md: + id: ce3aa9cf38cf + last_write_checksum: sha1:2befb19739299e01b7c4cc5dd565d6bf6c0987ca + pristine_git_object: 915847518314c82da5c93e2076c3b6db4791324d + docs/models/openresponsesrequestpluginunion.md: + id: 43ff38d22485 + last_write_checksum: sha1:9e32216e48d28d62b2a3491e3112f166d4427ee3 + pristine_git_object: ebf50062aae9875480fd776600184800679e0898 + docs/models/openresponsesrequestpluginweb.md: + id: 38282c1e86f2 + last_write_checksum: sha1:d8ae45bfb7704c835e01394807827a22b9a09c40 + pristine_git_object: 333dc3c779b7a07246980f0edb554299a3273a1a + docs/models/openresponsesrequestprovider.md: + id: 73c771e9ab67 + last_write_checksum: sha1:662c3b7ba7e1aa1ab34ab6e29e437e540556808b + pristine_git_object: b66f084e406b087cee85b91974637947d64c79de docs/models/openresponsesrequestsort.md: id: 4a8a54014fb6 last_write_checksum: sha1:cf2bc7f9581a5adc8a6c14b0593c5753ea30e2d3 @@ -850,6 +979,10 @@ trackedFiles: id: d372a31e11b6 last_write_checksum: sha1:bec88093100fdc93195e221498f91d4bc82cce23 pristine_git_object: bcdfa06a6e4afbefcf808ccbc7d96b2424047f8d + docs/models/openresponsesrequesttrace.md: + id: 7195fc4fa929 + last_write_checksum: sha1:22a92eb871989a3390ccb7ac830d45a40dc28ad0 + pristine_git_object: 35fe5c2cd7844c640ca8e20dad9d110288730819 docs/models/openresponsesresponsetext.md: id: 2469186a7079 last_write_checksum: sha1:4c6070571056fcbc855dde49ba77ee8d011be6b7 @@ -1068,8 +1201,8 @@ trackedFiles: pristine_git_object: 9502d9d59e917351bec93c3c1c736ddb35d5bfc3 docs/models/operations/createauthkeyscoderequestbody.md: id: 41569d05ee8b - last_write_checksum: 
sha1:0600c727f5c22fdce81760b96956cfb835c5121a - pristine_git_object: dbb3cde4930120175d9a7d99bffb3272aa8a76ec + last_write_checksum: sha1:5d7996ddad24270c2c0e8f11498068d07be6f397 + pristine_git_object: 9e51f134d7964144989224d9ce193840877be666 docs/models/operations/createauthkeyscoderesponse.md: id: 94a1e2c0f03a last_write_checksum: sha1:8f4eaa9a5649fce1940c6d145138916b9076d5e3 @@ -1536,8 +1669,8 @@ trackedFiles: pristine_git_object: 6319d7beaa2d9709e126f07a3207533fcf24c1b8 docs/models/operations/providername.md: id: 616da1534f02 - last_write_checksum: sha1:a3f25ec3d8f6eb5543f367015ade7c805b8ee252 - pristine_git_object: 0bf0432a9d6cdb36ddc7c42e532809118f901a1e + last_write_checksum: sha1:b07571b39ad2771160d9c290e3d1f5b8069bd330 + pristine_git_object: b2bfaaf5c20d0d6ebbef2827a1d3d63e25dd1c52 docs/models/operations/providerresponse.md: id: 8af09d586b03 last_write_checksum: sha1:3969076345480f6e5738cc0299685a56331137f0 @@ -1552,12 +1685,16 @@ trackedFiles: pristine_git_object: 421b621958c2264cd41d7a13285cfbabaf7570ad docs/models/operations/sendchatcompletionrequestrequest.md: id: 20682772581d - last_write_checksum: sha1:1fb2260e8eb5c5798b7a622b0a9459cacdfd8068 - pristine_git_object: 095d1e16015da0cb5b0a5c98dd21ecf4b9eb9469 + last_write_checksum: sha1:f5c2b4798fee336c7931d2c0974996555969ddf6 + pristine_git_object: 2f37d1c203702d232527d45f4bb3c63c27630a7e docs/models/operations/sendchatcompletionrequestresponse.md: id: 3ca0009c143d - last_write_checksum: sha1:d980d9f239a33291c75cc23670685caee888f768 - pristine_git_object: 0f0bc60ac6266dee949e144880d2b394ab62d419 + last_write_checksum: sha1:61487861042d4397569eddb9e8e0acb36c740267 + pristine_git_object: 4a5ce2eff56b9fa36489af4015cf49f28b34cd7a + docs/models/operations/sendchatcompletionrequestresponsebody.md: + id: fb7605216c3b + last_write_checksum: sha1:f02e3992bb92521492cd139708fdc85b8a3f8342 + pristine_git_object: 2143a66a0575f24ee4580851b8f507c2e62db470 docs/models/operations/transferintent.md: id: 4585adc299cd 
last_write_checksum: sha1:20096e1501c818b6229900c48845a340e10301fc @@ -1618,6 +1755,10 @@ trackedFiles: id: 735448cf5655 last_write_checksum: sha1:4b96e81e4d478f4f489cc9b24728e5997641200f pristine_git_object: 9fd024851b0da0e68bd923c5ade17fffb9dc8410 + docs/models/operations/usagelimittype.md: + id: 22ad2742a599 + last_write_checksum: sha1:a335e4d7dd1a524b56249fb7e18c1bc3842036ff + pristine_git_object: d408cad7efc99651ffb7f2aee4d44ae1f13cab0c docs/models/operations/web3data.md: id: c7e71a30706c last_write_checksum: sha1:f20566610f7231c6e4b8616b55612cb645aec2b4 @@ -1684,8 +1825,8 @@ trackedFiles: pristine_git_object: bc698cccf3f4328954be675b16f50f46e9c71b1c docs/models/partition.md: id: c7e0d61fe8aa - last_write_checksum: sha1:e12a8eb2454d8ac2bffe10c1b0b091250dfeb146 - pristine_git_object: d95933ac46e3053b7daa46d24435bbfa708c4698 + last_write_checksum: sha1:0f7ceb9bc270534e259a3c3a152f81f9ba9d6c44 + pristine_git_object: f2b9277df563626f2d9f337c6dd6385b12fda069 docs/models/payloadtoolargeresponseerrordata.md: id: e61064d0e0cd last_write_checksum: sha1:ef0b16bc3bc6ab750c2ce627ce886b8633f8469a @@ -1694,14 +1835,6 @@ trackedFiles: id: fe17c49fda9f last_write_checksum: sha1:4e41cd560a0c30e3a63ccddafc1eec5358f26a5b pristine_git_object: 25760477441c4b246c48d1ac7fdbbe0cad64eb16 - docs/models/pdf.md: - id: f5096cc70ca2 - last_write_checksum: sha1:65ad16221809b3f0422777b3b02ad52ab877a5c3 - pristine_git_object: 678509133e60fc7e3896d587b9b68de0fcaae2af - docs/models/pdfengine.md: - id: 08d9b727d635 - last_write_checksum: sha1:b58665fed3ae860bad4a7638fe4755ba3f0373d6 - pristine_git_object: 5781ce26d6713b6393cf7e84741d7f9054a2ca7d docs/models/pdfparserengine.md: id: c7ee8484b003 last_write_checksum: sha1:c9b7d69d02bfd5fa3da43a1638aec46a2fec279e @@ -1726,30 +1859,6 @@ trackedFiles: id: 34d264d24d04 last_write_checksum: sha1:06a6fd664c3dfa29a160352bc4db448475e21b9c pristine_git_object: df2c4efd50c0e3e28237422a51d26d55019bf24b - docs/models/plugin.md: - id: 29c88e26ec0c - 
last_write_checksum: sha1:4473c4abd190da50782c1c1c52b56f740f6124e4 - pristine_git_object: 632bac239650906084286ff1066a2ac483a44ddb - docs/models/pluginautorouter.md: - id: 08654c1ae20a - last_write_checksum: sha1:c5e8fb7688146433442503a1d81404d47fdd00cc - pristine_git_object: fa36cf5460993ad0c7f2fde47765579fc30fd205 - docs/models/pluginfileparser.md: - id: 2983fd38a1bd - last_write_checksum: sha1:df5bacc25c339ed7cb6df75ec5bce59be37e2dd1 - pristine_git_object: 0fe4dc1be791b94d04d29d0564591a5580ff469c - docs/models/pluginmoderation.md: - id: ad7ca29a533d - last_write_checksum: sha1:b8658c5f8a417253a526fbdb2c518589c4b002b1 - pristine_git_object: 69a9dba0bdc6fe201281ae67b9657fdb7029f82a - docs/models/pluginresponsehealing.md: - id: f40032ab887b - last_write_checksum: sha1:97cd29c29a26fb5a042c63cbffc80eada766b648 - pristine_git_object: 440949a00b436f0747d1e89d9c868a03fd12d011 - docs/models/pluginweb.md: - id: b7b54ce39a0f - last_write_checksum: sha1:20fae126c08e0b46c2c192146f4948fadbf52914 - pristine_git_object: 9f1c1ed0efeda7e58e2a2f72be4e857515045964 docs/models/preferredmaxlatency.md: id: 6c99f91a353b last_write_checksum: sha1:e53440e449330e6feed91ca6a5646354c9f4b002 @@ -1762,30 +1871,26 @@ trackedFiles: id: 3f8579be3813 last_write_checksum: sha1:4523c496c667b90955f9e841ab9ccf3e3aa93c0a pristine_git_object: 7f9e3da97e12666c51ac69d9db1a8e1de46502f8 - docs/models/prompt.md: - id: faf3fe7a5a2e - last_write_checksum: sha1:8e5b767c29e36516dd637ba00ff2b5f055059861 - pristine_git_object: 9b0ca48ad12a98c366b4c7292f3f23a43e7c469f docs/models/prompttokensdetails.md: id: 1391892fce0a - last_write_checksum: sha1:1842c29f80f4a4d90e888316cf277943dd888dfa - pristine_git_object: 6b96206e454e8dc021b1d853c77a817094f8854c - docs/models/provider.md: - id: cedb2a98f8e3 - last_write_checksum: sha1:2325db3920584d1137c7ffc3bda16c6225c8f831 - pristine_git_object: 999451dd3bba7ed9c6ea46afd253b08f7e3f52e5 + last_write_checksum: sha1:3357d744d9a0c49c5a5b1eb839e95ebb687c5699 + 
pristine_git_object: ccc55e39bb505328a748b34c3978bf69e2e31fb5 docs/models/providername.md: id: 661ee6ccfa8a - last_write_checksum: sha1:d5d492a523f8f864ed0fb1f9c9392323fbfefb3b - pristine_git_object: 0483a53e988b0ddfef832dcdc69a99bb2d55aecf + last_write_checksum: sha1:152f83cae6c6392b06ca65f0e35025eb8f56532d + pristine_git_object: 0e34e26bcec148923dd00327722473a0f5394bd7 docs/models/provideroverloadedresponseerrordata.md: id: 23d66996874e last_write_checksum: sha1:93e27b1eabea198409d833c54370b889a138f6b0 pristine_git_object: da229099b7c8cdff965c40da5b86e910c47a29d2 docs/models/providerpreferences.md: id: f9bf4d106c4e - last_write_checksum: sha1:1fbbb5b8daa492ab61503eb623776c4ddb105689 - pristine_git_object: 029880ce92994516af5573a21b0194feeb90b038 + last_write_checksum: sha1:260d41833535a617691fdcada2469b8a7a557fe7 + pristine_git_object: 306c9db4c74770bac0d3ccb22c1ca343c1826829 + docs/models/providerpreferencesby.md: + id: c158490e5169 + last_write_checksum: sha1:48212c3bb5ba36e862691c2b33b5114b75316526 + pristine_git_object: 3fe7a639ad05aaf1003483e3c161b7a75defb1a8 docs/models/providerpreferencesignore.md: id: a4edb667e4e3 last_write_checksum: sha1:168239130d7819aaa9dc83613dc8a75564927122 @@ -1804,40 +1909,40 @@ trackedFiles: pristine_git_object: 0937f38f8fac992647d866acab87aece85f02472 docs/models/providerpreferencespartition.md: id: 8e25d1553b96 - last_write_checksum: sha1:acca7e3b058a11bdb62ef3262e3e5cef331045f8 - pristine_git_object: b47e3e072a07e807b1aa1eac7a9e71d88dec6d0b + last_write_checksum: sha1:5371e61ae3a86ee3063dba3908ae2de2aa85b2e3 + pristine_git_object: 20b636f3178aeeb6c2fedab5908d67b99f7a8897 docs/models/providerpreferencesprovidersort.md: id: 07b60010ff38 - last_write_checksum: sha1:08d3b2fbb5347ee736068e675b3cd793da0c1228 - pristine_git_object: cc1cd8a305afbf4bd619672f282c924544395198 + last_write_checksum: sha1:705b0e95024c0bf76f73a809bc8ff458422f0df5 + pristine_git_object: 9b947c3efe39792b3b46f683c6cffc41886da268 
docs/models/providerpreferencesprovidersortconfig.md: id: 166996ff16db - last_write_checksum: sha1:ac6e04720f328c36681a2e6366448198d96557f2 - pristine_git_object: 8387bc4108ba17e2bbb95308d4edb031d34d3ef6 + last_write_checksum: sha1:080ad6b029f43acda782720deebc77e2a1a71322 + pristine_git_object: 5fe0b816124b2af1cf20e73210c65aaa4dd36f0b + docs/models/providerpreferencesprovidersortconfigenum.md: + id: 94d3da0f14bb + last_write_checksum: sha1:bb2d03c4af4fd34527528b47b70ef51db7c19d18 + pristine_git_object: 1fefe429109b5c52dc54d9ec3d2c1d2be8914f15 + docs/models/providerpreferencesprovidersortconfigunion.md: + id: 136affb233c5 + last_write_checksum: sha1:96df4339f16abe736c1a952c0b9a10e0f43fb921 + pristine_git_object: 1acd3e027a167965e58739bcf5bf9a9502acafe6 + docs/models/providerpreferencessortenum.md: + id: b6f1768bdd66 + last_write_checksum: sha1:4da6aa1714aca689c3a48b628fec81680c225e24 + pristine_git_object: 3feec34022c116e1afe1ff06a1ab744aad14e8be docs/models/providerpreferencessortunion.md: id: 92cd060f2cd4 - last_write_checksum: sha1:3a87c15e12ce65583115ca759a9ce79e61a0e1e7 - pristine_git_object: e75e3fd7e27f27cb3b1489d24b5c29ed9706e2c2 + last_write_checksum: sha1:3612d004faf30e803da3f3acd81a1d5c438c68a3 + pristine_git_object: 0532bfa028156557936144997048a511dcc9039e docs/models/providersort.md: id: dedd9969d635 - last_write_checksum: sha1:7da8a4b8a2314da1e810ba3b89fda31d27e21dcb - pristine_git_object: c4f9aa4a28251351ba7227cd1fa5da1f82d0280b + last_write_checksum: sha1:f8f0fd4a52d1b3c40168ffb4703cb3b6a5fd54e1 + pristine_git_object: 33c90282a34706f36bc89f14eda4563a59263db5 docs/models/providersortconfig.md: id: 4e7360c62f6f - last_write_checksum: sha1:f9c1ed9ada7483580a00a20105119a5638d748ed - pristine_git_object: 217c157d36aff7c42f5f6216cc7698e1170d4f5f - docs/models/providersortconfigenum.md: - id: 65e8867d54cb - last_write_checksum: sha1:15eff199df31065025d2d6f216c51bf4d3edb9f1 - pristine_git_object: 61ad06a2404b1836d486beefbee47f0c38098481 - 
docs/models/providersortconfigunion.md: - id: b3ef9cfac840 - last_write_checksum: sha1:c2226f09375d20ecb144fecd0d685000f48e8aac - pristine_git_object: bcd482db97c3ea19548007b3f6086e6ea536bee9 - docs/models/providersortunion.md: - id: c02b3985a8b5 - last_write_checksum: sha1:fec492a2cc11131a3810a6cf7284572cd661afe4 - pristine_git_object: 1a6cae19b495ff3fa4436053764bd031accad350 + last_write_checksum: sha1:1ec8d04ce90ab3a9558d3c73373998922f1ede59 + pristine_git_object: 4b6763492150066e02a4c24ae39d996dcd2679f0 docs/models/publicendpoint.md: id: 981d88d3c6db last_write_checksum: sha1:fe984708689687dc2cc975bb6e1182e47c58932c @@ -1860,8 +1965,36 @@ trackedFiles: pristine_git_object: 4f0c2c0d9ac52d5e12475bacecf3d72190603395 docs/models/reasoning.md: id: fb409fba3ea1 - last_write_checksum: sha1:3d5fe678a8559e77e1d8313a9c60fb209c5d70b5 - pristine_git_object: f62c63cc083e365ada25a2bea43b137364df3865 + last_write_checksum: sha1:0779de67c944ca3adace35f2381b0f18a6292d2e + pristine_git_object: f595b98f563af1ba5d58448796169d82da4bb0fe + docs/models/reasoningdetailencrypted.md: + id: 4b89489f8b93 + last_write_checksum: sha1:fb3157cf93459a9e52af9cd513fb251a5dcb183f + pristine_git_object: 42f226cbf68cad5341fa1999df7b61163239683e + docs/models/reasoningdetailencryptedformat.md: + id: 427fe9e4605c + last_write_checksum: sha1:67efb93da14c8d56ce2d16321287de67ba317605 + pristine_git_object: dee604b000a9b681564f0449e11c8da091d1092e + docs/models/reasoningdetailsummary.md: + id: 1aedc148a57f + last_write_checksum: sha1:336004273d0151d3885e6f80fab67e448b42a57b + pristine_git_object: 85c377e28e9d925f950fc861f05d3e3d3b47fe4c + docs/models/reasoningdetailsummaryformat.md: + id: df380badde4a + last_write_checksum: sha1:1474962ef77b44e0937b94000b19f351b9d44a55 + pristine_git_object: 74b3a21c2a6aa344f1cfec4dff02ce652963aead + docs/models/reasoningdetailtext.md: + id: 736fc4cb08d8 + last_write_checksum: sha1:521d0a0c0bdbbb2044fa42a0e2cbb5b603238755 + pristine_git_object: 
0a428c24ffa94e2f97e7ed62fdc37198ca154adb + docs/models/reasoningdetailtextformat.md: + id: bfc492580515 + last_write_checksum: sha1:9dd0b2f6bfa769b28115e8e366ce868e5301abf9 + pristine_git_object: fc27098c820a0bf16b58e5b327260862cb2dbc8d + docs/models/reasoningdetailunion.md: + id: 7425c6e6d4ac + last_write_checksum: sha1:1dfcc9fa59cd35fc1c2c2876c255d0b78b9f9518 + pristine_git_object: 88328d825878c6109695e18e62819c722bd48036 docs/models/reasoningsummarytext.md: id: 18b35794819f last_write_checksum: sha1:002f812610352dbdbd727d53b3a8bca2426b1511 @@ -1872,8 +2005,8 @@ trackedFiles: pristine_git_object: 63ce1eb4b9607dfb736d4027049f1338205e0351 docs/models/reasoningsummaryverbosity.md: id: d925ace91d73 - last_write_checksum: sha1:e37af82bbccdf732cfeb4d820e21a8876ed88d77 - pristine_git_object: bcb3d98ecd8f7c0ac2a8233b1dacb91a9eac5711 + last_write_checksum: sha1:a29662ff0ea6f960f22bb08de165a449c452b182 + pristine_git_object: a7281603255b854dec06f351cba2f72f11b0e0ab docs/models/reasoningtextcontent.md: id: 720b21c43aad last_write_checksum: sha1:a5fb7d585418f1ae608305508e64d71ac8cbecee @@ -1888,32 +2021,32 @@ trackedFiles: pristine_git_object: 035e06a4d3553d4dd50a7f39bb788eae7e5ea48b docs/models/responseformat.md: id: 50a1e4140614 - last_write_checksum: sha1:aa503bb517e5a9019559ff84fcfc799fa91a0637 - pristine_git_object: ce8782c7b64295e7c592422c8bcde12e87894d04 + last_write_checksum: sha1:7d0a4f567ce641a065e6f9421ca151880453fc7f + pristine_git_object: 9e0c802702183bafa90362ee33fcf883c4ebb71c docs/models/responseformatjsonobject.md: id: 83559bf262cc - last_write_checksum: sha1:2525bcfbd258efb37268edf2e575d7ccbde478ed - pristine_git_object: bdc5e7fc6a74c9f4e8df0eb1bd73cefb258db52f + last_write_checksum: sha1:c7bed67f033659187659ca1091a79bf4a44589a1 + pristine_git_object: cfdb215fe9e425d5ea384538bbba877675e1a9bf docs/models/responseformatjsonschema.md: id: 3a7c34c63fa5 - last_write_checksum: sha1:fec048788701417a1563b1d829ecb6439842b242 - pristine_git_object: 
176cbbe311016828995e38b4547c4d0474940b00 - docs/models/responseformatpython.md: - id: 3b543f48def4 - last_write_checksum: sha1:ddc5d1d05124564a2bd9b03bb84b9e0937725179 - pristine_git_object: b9dfc226d670a5d588fec89417c02d3e9975ca65 + last_write_checksum: sha1:ddaf8d697f572e87857007a042a38e49c720a3a1 + pristine_git_object: 30c38b525fcde4ef2de34a1403883fa72dd08124 docs/models/responseformattext.md: id: e80abf091260 - last_write_checksum: sha1:6ffb74bc6a12785ded33dd7c908fbd4297426baa - pristine_git_object: 31d567a0d1673e6a5bed98e7493026dcb4d1cc7e + last_write_checksum: sha1:3627497362612dc90054829c73a1879c8c54d007 + pristine_git_object: 974957dad3ea827b37d28313a20f17eb7e8edd88 docs/models/responseformattextconfig.md: id: 67d2a56b3b19 - last_write_checksum: sha1:dfd14ad714f58a8d13e33ce4f578bd806accfd07 - pristine_git_object: 4f72b4d30bcb3742c1b0845622f98f38d08948da + last_write_checksum: sha1:a3e54c8c3ea8b56667efef5238e6679dbd338ef5 + pristine_git_object: ffbadc4a31d939926db6a0628f310e08425db8ad docs/models/responseformattextgrammar.md: id: ceb6a8193168 - last_write_checksum: sha1:039f5eb3af9032c70f2c8183a445552241b75041 - pristine_git_object: b3176d02fd37b63f4f6ad6b3d06c13022f67b9ec + last_write_checksum: sha1:bc726c539c0cd25cca11e56623ce594da2cdb704 + pristine_git_object: e4fe16117ff16c53c91e7eb15dbd8effb5343cf7 + docs/models/responseformattextpython.md: + id: 1ae6af3f7e18 + last_write_checksum: sha1:9fec32005d67f48b78f717578338524756e7cc08 + pristine_git_object: 4bda61a2b89207e7292efeb4e43197c1c6771388 docs/models/responseinputaudio.md: id: 996212d960b6 last_write_checksum: sha1:b0cbad9582e6064cf100b38c84a0cffbcb29498f @@ -1960,12 +2093,8 @@ trackedFiles: pristine_git_object: 9e5fada59213aea3dacee5009176b80cfb637d03 docs/models/responseserrorfield.md: id: 03dd50404dff - last_write_checksum: sha1:a1ed09b0e9a1091293d8503e1e59b30cc5e9d416 - pristine_git_object: 2596ef9c70dffe376273c086c7d01596f7e01ebb - docs/models/responsesformatjsonobject.md: - id: 35e2bac2ceca - 
last_write_checksum: sha1:7a6d0f2a6dea81fd60b5d62d735c340303510d09 - pristine_git_object: 8cd57234ee744f91899caa066f1bae14a7a047c6 + last_write_checksum: sha1:84b4728077d3f577b2ef7f0c41d0f96cecb3bf10 + pristine_git_object: b29a01e17ddc4a8537b7fce887b467ed248709ca docs/models/responsesformattext.md: id: 1318d21e1e22 last_write_checksum: sha1:d37dd85788ce406e41df25d4edc40d648be1aece @@ -2110,90 +2239,6 @@ trackedFiles: id: 4e295cb39615 last_write_checksum: sha1:2505f82e0a95187be9b84f1d59ac35e7b1222075 pristine_git_object: 3f5a6cdccb1a9ff1f38a28db58d25f00fb02b28c - docs/models/route.md: - id: 3bb3b11c076d - last_write_checksum: sha1:aa81fc0043550c2d40aaaac6f140782d90927f78 - pristine_git_object: 5f434fffab42e23c118bf4f6841a3249d4408c1e - docs/models/schema0.md: - id: c1139eedbfe5 - last_write_checksum: sha1:e993163d7c6674ba140348ec206e71ebe84a40e3 - pristine_git_object: 8e622e20bb5c22b717ef6d3dbd36463431448901 - docs/models/schema10.md: - id: 693f29f69b66 - last_write_checksum: sha1:43f26e8e13bb25af312c894a869935480ad5012c - pristine_git_object: 5d5cc36712948de4c9de6c64bf7f5bb97b568074 - docs/models/schema14.md: - id: f94235323bee - last_write_checksum: sha1:66115a670371c791e172e4002ad867386955665f - pristine_git_object: f999374327a944c9df80a46a895e686a4599cc06 - docs/models/schema15.md: - id: 456f1fb5f9f2 - last_write_checksum: sha1:de8f3f2a8756c1d453c7089ec20057731dabfd15 - pristine_git_object: a9bf2ca06d7febc60df48cf51ee3e08e03cfe6c4 - docs/models/schema15union.md: - id: 93bbc11b55bc - last_write_checksum: sha1:feb494ede629f161991e6bd77d4b3e62ab56eafe - pristine_git_object: cb5077a8ac2e0dbfc3d47384dc7de5886c9e6135 - docs/models/schema17.md: - id: bfe40849bca4 - last_write_checksum: sha1:f1410f69e12bbf26f08df8c93e4db824179f5e59 - pristine_git_object: 5534020d4d25e9e645286e2f5b8912ed12b6a5eb - docs/models/schema17autorouter.md: - id: 2cc904c46712 - last_write_checksum: sha1:297070e455fc03f1ef1694204cc76563a9b16fee - pristine_git_object: 
b84df676094ca5526c5ec5b979665d84058bbbd1 - docs/models/schema17fileparser.md: - id: 5311f7deb34f - last_write_checksum: sha1:f81faa7ef29577feb437fa1e2a2cb520aee073cb - pristine_git_object: 1115ee3bfd5a02b438ea4f72c764f6eb9fd79c75 - docs/models/schema17moderation.md: - id: bd81786158fa - last_write_checksum: sha1:b1dc65c0f5df2e4c3d980bd3b2f9df52823c8235 - pristine_git_object: 0e5427bdce3ed6c6aa068e7e5ff7b58e0118bc35 - docs/models/schema17responsehealing.md: - id: 0ca4d926c135 - last_write_checksum: sha1:bbfefcc8aaefa21912dc604ec75ddcae1b8927fc - pristine_git_object: ac17a1f805ad13434971942c86d1a787c6c83635 - docs/models/schema17web.md: - id: db60038a4a11 - last_write_checksum: sha1:20a3217a72c41504a11b8de6f040e4104b432384 - pristine_git_object: 39f9a71b8276493ffea88de4ae61a47f27277d03 - docs/models/schema19.md: - id: a1918c602c97 - last_write_checksum: sha1:e840979f2ff09151616b46c9c2bbc8f0c419fbd9 - pristine_git_object: aa2d2931b1b3b60faa3d703d9ff0848f1c5f4f48 - docs/models/schema19reasoningencrypted.md: - id: bef8249cf600 - last_write_checksum: sha1:d2d3e8a3f12f36636de2c4c3f72c2637b6160756 - pristine_git_object: 2ef1668e819ebe96b736a5dad819f7785e919375 - docs/models/schema19reasoningsummary.md: - id: 2884b6e8d663 - last_write_checksum: sha1:d4638f1ea9b5c92744384fa55560a41fd0fe2e98 - pristine_git_object: 9595a9de68d56e7012f00d8bdf72c4217f96021b - docs/models/schema19reasoningtext.md: - id: 94d85a504b7a - last_write_checksum: sha1:980d6961d6438eaa091d8f82cb27b3dac5ed1c63 - pristine_git_object: d9fbcced669fc610456c275a0b0540cc7edf4d18 - docs/models/schema21.md: - id: 3ae2a63feaa4 - last_write_checksum: sha1:7aa4056a76f38ba3fe953bc38aa17adba8acba67 - pristine_git_object: 7cddb12216cee68755fc5122c49b91c5c39de830 - docs/models/schema3.md: - id: 082bedbd3961 - last_write_checksum: sha1:ccc46a9e8a10a7f5449e0032a922e0f688c676d8 - pristine_git_object: 891a40d0050039406355215207ab9713139c394e - docs/models/schema5.md: - id: 83e60f793ba8 - last_write_checksum: 
sha1:a20efd26efd22405882e8fc29225cafdcc81a2a3 - pristine_git_object: c8bd7fbcb6b0864537817fde15b672cf7240a3c1 - docs/models/schema5enum.md: - id: 8cc4e5e6f57f - last_write_checksum: sha1:46fec4f027c1c99af32ef4c2ecbf3c4d76d5e339 - pristine_git_object: cd07a0e9145207e04b30d442789cb9581a830958 - docs/models/schema8.md: - id: b7fc1c31b395 - last_write_checksum: sha1:65caddd51adeb138c4d2697f9b08dd4cfab57d4c - pristine_git_object: bbf20ad70d07f175526cc943d0f2b44235714659 docs/models/security.md: id: 452e4d4eb67a last_write_checksum: sha1:1978f84a4c782f5ed85eb03074a6fa5e19f103c7 @@ -2206,22 +2251,18 @@ trackedFiles: id: b50775a0d86d last_write_checksum: sha1:24fb8c872debc75aab710d0fb0ff6ef3619f7859 pristine_git_object: acbb103131a68a6cb0eb141ca860390174b3b7d0 - docs/models/sortenum.md: - id: 7b3e5f905bcb - last_write_checksum: sha1:0f8df7822af7ece19810b63feeca1852b0abb058 - pristine_git_object: ecee27db0b505879dd4f067cad530fe079068603 docs/models/stop.md: id: f231cc9f5041 - last_write_checksum: sha1:b21bc93eb473bffadb17ca1f1e4a859be980453e - pristine_git_object: 53db1523da509a136ace20523e92cee6df3edc8d + last_write_checksum: sha1:95c2ed4b5980bcf9f0bb22b368e78934a8ec8bef + pristine_git_object: 7e11196842bcbfed098627675ab33ebb7fd22c01 docs/models/systemmessage.md: id: fdb7963e1cdf - last_write_checksum: sha1:1d46885d3930d81968e826be68afaa93aa956357 - pristine_git_object: 5f7c34b4dfe962acd506f2e0f31c7c9dc31ac32a + last_write_checksum: sha1:18b4b56bd13ce3b0ca6a6fe7bfe13dd19ad17845 + pristine_git_object: 274288daefd3467d9731c910f7bacf482f8c1d3b docs/models/systemmessagecontent.md: id: 94a56febaeda - last_write_checksum: sha1:3cce46c2fa9b368fd3599fff127c7b794702e19e - pristine_git_object: 47e563a3d96d16390e8a751f97ccc6b55b771649 + last_write_checksum: sha1:4800f1b37092f6c580478262b22726510e18a22c + pristine_git_object: 31db158c8ed1bfe1e53f3f0d25c2b0961008ec92 docs/models/tokenizer.md: id: 8f0a31dac373 last_write_checksum: sha1:e56b70900d5dd25dd5b68962a3816509dd2dcca1 @@ 
-2230,22 +2271,42 @@ trackedFiles: id: 0eab420fb379 last_write_checksum: sha1:73bed1e4575796eb8db37bea4ddcfe242c2a6f31 pristine_git_object: 91c2d0a7ed73db9e7156634849a512da44564816 + docs/models/toolchoiceoption.md: + id: 3ec5b5751284 + last_write_checksum: sha1:bab3b7f7689954301e0aed26838c09597b32a651 + pristine_git_object: 865edb69f0a761a47c1b337c49e057bfea5e56ce + docs/models/toolchoiceoptionauto.md: + id: e7b70919fbd3 + last_write_checksum: sha1:6dc61e423b812b6eb41bcebd23a77b06caf4f0db + pristine_git_object: 29e8d3893565b37d80e9447df06d2c22e6eca38a + docs/models/toolchoiceoptionnone.md: + id: 3b46fc87ee9c + last_write_checksum: sha1:1c45cad3cabe3af4fe87471bcc99e50c3eb91fc7 + pristine_git_object: 171706d77a718ddc1d48a1a23c9259c161e13cd8 + docs/models/toolchoiceoptionrequired.md: + id: 042583fc101d + last_write_checksum: sha1:b1326a18295376ced44012e4085bdf88d386572d + pristine_git_object: e2db43c12b5deebc781e88c1b177c85d24f3749f docs/models/tooldefinitionjson.md: id: 30e8e6d8a7d2 - last_write_checksum: sha1:09b8133ac650550d8d9db33bb8a330bc434d9dd9 - pristine_git_object: d8993e62121c8ae0733faa7946b72569e4d5d974 + last_write_checksum: sha1:78fdf2cb0129c7f2ec1288ecc4be2a283ffef7a2 + pristine_git_object: ab5f6870e8ebc7b8dbdebbf8fcf3b951f2b8b40d docs/models/tooldefinitionjsonfunction.md: id: 75761f9c1950 - last_write_checksum: sha1:82a4f3e911d54c86f538b3f0f87c7ea70fcd5983 - pristine_git_object: 645a829d2fa85fbbfc4d5a14ac1ececd3f5e1ef4 + last_write_checksum: sha1:88063ac26c377baaa329a036d1f7ea597cbb3080 + pristine_git_object: 0da6b3ca53c2c0b33751f5433ab49c5d2ebac9fb + docs/models/tooldefinitionjsontype.md: + id: 4dc3dae1ea45 + last_write_checksum: sha1:365a1506e89492ed19d843e6b97c8dae1250ed69 + pristine_git_object: 5a02c200e082071af2a24137f4fcb4a953f4ca73 docs/models/toolresponsemessage.md: id: 74f6bc03e6c7 - last_write_checksum: sha1:6c576db57d84488ef7a23513181070e88ea658ad - pristine_git_object: 0d66edc9901614f45e3b9b18da77d446887a4b8b + last_write_checksum: 
sha1:a9f26c1ce2f7e312cc0ee99b9e634d87506298f8 + pristine_git_object: 50404c94776b628ba3c2b7ab1d038d86f0ee23b7 docs/models/toolresponsemessagecontent.md: id: d3ace04b6d28 - last_write_checksum: sha1:d0338a186863386458a7e7f4e7545055f7978232 - pristine_git_object: 7926f5c353caad3558cbe59aec41ac6c54f60310 + last_write_checksum: sha1:6d1cb311171c9ef7d62162d73f2a47eb92d35dc0 + pristine_git_object: 2d46ed5593578dc046134a5aad453005217eb363 docs/models/toomanyrequestsresponseerrordata.md: id: b4929af623aa last_write_checksum: sha1:6135140c8a32957c3007ab7a00bc13970d083cca @@ -2280,24 +2341,20 @@ trackedFiles: pristine_git_object: 284cac107e23a55fcf60f87c6ae3a23bd351365b docs/models/usermessage.md: id: ed66d7a0f80b - last_write_checksum: sha1:366766cff589479a6d5a88cecadef2c935d4a469 - pristine_git_object: 792d86b7ad16f3d1c2ac17d508fa9859853ab7c3 + last_write_checksum: sha1:5d6dc2fcb436c6c93e3871af79ada34f629adb2f + pristine_git_object: a9730290644f3a68b9eb637ae0784c976e4f6f43 docs/models/usermessagecontent.md: id: 52c072c851e8 - last_write_checksum: sha1:28f52afa08f4e78bcdf9da266dfd9d46c3a620b4 - pristine_git_object: 69eeecaf3c800d0a56145515ed4718a71c0f4aee + last_write_checksum: sha1:c90522b9a9519781fec6ad746725aee98d64fc4c + pristine_git_object: 70ea3af8a9d0cae66da7c537b2910487d597c583 docs/models/variables.md: id: 332583bb3e65 last_write_checksum: sha1:c2cd3fe1d6b00556a5032759a4b4862edeb51137 pristine_git_object: 263e9f1205792bb5a2486e84c99cda6b94ab4011 - docs/models/videourl1.md: - id: 116e215fe8cc - last_write_checksum: sha1:1346e407a8044d253791244b0a57da0a56babc04 - pristine_git_object: aa61751a00985fd83666d3bd152fefa2ed8a146b - docs/models/videourl2.md: - id: 0983e3c2ee19 - last_write_checksum: sha1:28d6b05f73465ee945d4c6a18ddfe009a3acf1b5 - pristine_git_object: 0ec1780e37920ea436d8fa714c500690c0622eb3 + docs/models/videoinput.md: + id: 7bc2ee336143 + last_write_checksum: sha1:5d6fb5e69ce162e73fc78e0de5096b8b8a76699e + pristine_git_object: 
776af9320964cdf98ff10ed57bc3972069655cee docs/models/websearchengine.md: id: 2a21d29e41be last_write_checksum: sha1:880ff8f1183b1b88d85ae88c5e83ee21f1d3535b @@ -2324,8 +2381,8 @@ trackedFiles: pristine_git_object: a21f23847e5222b0d2b19cf12194e7ba09f381f8 docs/sdks/chat/README.md: id: 393193527c2c - last_write_checksum: sha1:1610fcbc645fa0a091eb461baf018fcaa701e1af - pristine_git_object: fade8858fe3b101c8fb53727ffdba8f1d98d2e56 + last_write_checksum: sha1:3a7c3e3c56d82621d5c5df04902a91cfe9f31117 + pristine_git_object: e53280e2c55d52f1ebbac57f933302d74fbad9a8 docs/sdks/credits/README.md: id: 81608135c0ff last_write_checksum: sha1:7ebf2a794e61eba8ecbf33d2426b56e44d805913 @@ -2384,12 +2441,12 @@ trackedFiles: pristine_git_object: 410efafd6a7f50d91ccb87131fedbe0c3d47e15a jsr.json: id: 7f6ab7767282 - last_write_checksum: sha1:cb80f2c7d507bdca84d0fa1d3d6b891ce607ae47 - pristine_git_object: 2d1f7055f8a6294771f7ba50692e8497246e1d19 + last_write_checksum: sha1:61a6fdfc3bfd2b2518a73498fa94328d847381c9 + pristine_git_object: 168f07e6e4d67ce797d83ef05dab3250d4a9a5e3 package.json: id: 7030d0b2f71b - last_write_checksum: sha1:40346950444d9374e141c5d2e7d9ea32c1852856 - pristine_git_object: 56389bb7325171c710bf27068daa83190d57989d + last_write_checksum: sha1:d9e30289113196bbeee3052b0af8e5da87a8b31a + pristine_git_object: 82e748644dcc7d62329be506dd0f3eaa760eddeb src/core.ts: id: f431fdbcd144 last_write_checksum: sha1:5aa66b0b6a5964f3eea7f3098c2eb3c0ee9c0131 @@ -2428,8 +2485,8 @@ trackedFiles: pristine_git_object: ab899db5c6da144933af7b58fb2ac877050f047f src/funcs/chatSend.ts: id: 8c3aa3c963bf - last_write_checksum: sha1:7061ef91e085f7f72b81cddef733f7d25937a80a - pristine_git_object: 4333f5755810d33967130adfada42f33933e5e65 + last_write_checksum: sha1:f4ed539490a4d3bc976e72b885891718c3b28e73 + pristine_git_object: b72b43c76d14fc161a77a07e0e5bb9e75cd93a77 src/funcs/creditsCreateCoinbaseCharge.ts: id: e07ee6831da3 last_write_checksum: sha1:0426e98e9045393f635b5af07b969dd85658f230 @@ 
-2556,8 +2613,8 @@ trackedFiles: pristine_git_object: a187e58707bdb726ca2aff74941efe7493422d4e src/lib/config.ts: id: 320761608fb3 - last_write_checksum: sha1:23d7aec033b120bf2ec84920d89183a71e556659 - pristine_git_object: 4dcf5869566f3b7242a49487782bd8a196c9cdf9 + last_write_checksum: sha1:5621165bcef29b3c555e1b11f545a34d08724305 + pristine_git_object: a197ea8e68671882df72e7be77c0bdfe90f1d32f src/lib/dlv.ts: id: b1988214835a last_write_checksum: sha1:eaac763b22717206a6199104e0403ed17a4e2711 @@ -2612,8 +2669,8 @@ trackedFiles: pristine_git_object: 89074c11bcb352d27dbbe12e7dac3febe0f932fc src/lib/security.ts: id: 0502afa7922e - last_write_checksum: sha1:0662ea29ed0ba80e36ad54564d1c8ec5551ad304 - pristine_git_object: 93be85a25dc4e9bbc014e4db8bf6f1fbc2d4fa10 + last_write_checksum: sha1:969925531e400bbdbc2a119a706b36e78f8aa334 + pristine_git_object: ac10c99cdd0fd5b43b9f78d866ad23bb6037cc57 src/lib/url.ts: id: b0057e24ed76 last_write_checksum: sha1:d20760633113236809e01329fa51899bc286721a @@ -2624,8 +2681,12 @@ trackedFiles: pristine_git_object: c13ce72558d6d7e328189266bca17c00b433ab2a src/models/assistantmessage.ts: id: 2147a953e992 - last_write_checksum: sha1:1dbbc90af00e688f22ff31eddaf94eaa533b15d7 - pristine_git_object: 81ba1846a6453b24689cb4f561dd6eba20f413cd + last_write_checksum: sha1:5e4c907af2b18e38c74684a9f20fbbc8601769f0 + pristine_git_object: 40c885cecb091997eb183d18eb664fdb3d545748 + src/models/assistantmessageimages.ts: + id: ca83bcee5e88 + last_write_checksum: sha1:038cccb79c9cf80702845fce69cfda8ef2e93e8f + pristine_git_object: 296c506829f18047d26818efc93e914de73e68cc src/models/badgatewayresponseerrordata.ts: id: de317517c298 last_write_checksum: sha1:0c142f4aa9e808430a0caffe3c454146875e7c56 @@ -2638,82 +2699,82 @@ trackedFiles: id: 6b06a4d1562d last_write_checksum: sha1:b8825306d92f2cb42eaf572e25e47c91b6216cda pristine_git_object: 83a32911001440f84720f01833289f1de62171fd - src/models/chaterror.ts: - id: b107ec938dc1 - last_write_checksum: 
sha1:a5b9d65842b3b45bd2f57ffb551aa56a331e167b - pristine_git_object: 9f509fc94bcb4b1c24b5046b3cb6e2b7b637c118 src/models/chatgenerationparams.ts: id: f98a2a558f3f - last_write_checksum: sha1:d0eb1a7bcf8f8ea6da811f6d001c46017d8ad755 - pristine_git_object: 5de6d6588189a5c5da6264d0d2d3d2fbd174add7 + last_write_checksum: sha1:22b149f4a05c9a8a4a2ecb408086dec4e8ef61d3 + pristine_git_object: 01d4e66171559fb56421f0e9b365d36662ca96de src/models/chatgenerationtokenusage.ts: id: 1d57b0d238b8 - last_write_checksum: sha1:c07f2e7a811b665994f9ae1d42df31aa701388ff - pristine_git_object: a2c30cf8b4e4db296d146277961e5760fec2b6ad + last_write_checksum: sha1:68fbbb7b6843c87d8a3eceac28d9531d225fd6f1 + pristine_git_object: 4509571bf0f10b40153a483ad9eaf22f245c57e7 src/models/chatmessagecontentitem.ts: id: bb7f6b29b93f - last_write_checksum: sha1:97181a656928af6a5685c7f979bcba9a208e65f2 - pristine_git_object: f1548c67c0983a5bee7ba4e48a5210b981762450 + last_write_checksum: sha1:ce5c91ce0f8340fc0578c5d780e623546b542d4e + pristine_git_object: 096ae1632fd9bbedac92c721766f846e1a1fdd53 src/models/chatmessagecontentitemaudio.ts: id: 49b1cafcb338 - last_write_checksum: sha1:f14db38d815c5200cf67b430d4774df98f338e86 - pristine_git_object: afcdc4c15d5feca07853e6bf7859ad45192d080f + last_write_checksum: sha1:cbd4bd9b2d967cd172d9cd80e5debd8b8a627774 + pristine_git_object: a469476879545087c5653b72e07a50a732e94558 src/models/chatmessagecontentitemcachecontrol.ts: id: b5c18e04e19c - last_write_checksum: sha1:5b5b82f909ca6806bb474c6b2d4da5273c28bab5 - pristine_git_object: c368f9535a4557e2e2b00c847ebce3a97ff7b225 + last_write_checksum: sha1:5978d47f3c53bb2f169f56c53147c05742cfa7fc + pristine_git_object: e92d5063c9c4e1949eef7adf786fa238207ee097 src/models/chatmessagecontentitemimage.ts: id: 3989cdc41817 - last_write_checksum: sha1:5108f45e6d55502ef909557e10835f2ddb5029ec - pristine_git_object: f5d965e2a0253d9b5a7fb8d01991c235834894cc + last_write_checksum: sha1:4b19e02a5c49a393e843a5c87f627163cc20abb8 + 
pristine_git_object: e9f3cc51d4082728004ba1fcc69ccc8df59bb378 src/models/chatmessagecontentitemtext.ts: id: 27089860fd98 - last_write_checksum: sha1:e5bb74db5c23adf8c3dcc7cee12337dcaa67290b - pristine_git_object: aa527d53b91638159c3f6da7070dca6856f8a8d2 + last_write_checksum: sha1:3f5931a2bd5354717e63010050b27910421e89f5 + pristine_git_object: c5dd160c24410417cafa74730f2b1ac3a86a5f67 src/models/chatmessagecontentitemvideo.ts: id: 5a81301cb4a7 - last_write_checksum: sha1:033ca3a33e579359fb717f3fabd6ce666df21f99 - pristine_git_object: 9847a91701eb1b038dccd549a763f68e69bf8a4c + last_write_checksum: sha1:e1d7a852b06f292e0632efac2f2a53452122b014 + pristine_git_object: e3f56e047f4a20f38efa51662f3b3bdf91088199 + src/models/chatmessagecontentitemvideolegacy.ts: + id: 00ea865fc04f + last_write_checksum: sha1:de2a038d4ad1795d3f516a13d04eb55c5f782d43 + pristine_git_object: 59253ba54397258375e4264470157acbe5366f43 src/models/chatmessagetokenlogprob.ts: id: eed686a58eb1 - last_write_checksum: sha1:3060b72cc4b8eb330c6e5a740ce29d0a82666a8d - pristine_git_object: f1d1922a8c3977d88ca9db4dc5c45ddc47d80ad4 + last_write_checksum: sha1:435a5de455de1603a2dd1124797589dbd4068a91 + pristine_git_object: a07ea564a25ab8f8629ca6143afaa42ed229de69 src/models/chatmessagetokenlogprobs.ts: id: 2dbd9fc61ed8 - last_write_checksum: sha1:7ee84adbffe9c6abc3387568b19091201a14bab3 - pristine_git_object: d898f22a2064ece1393f0554c57c21d84571b1df + last_write_checksum: sha1:2fa0c0d84c843ea22c77a0d0c6ab817316927b54 + pristine_git_object: 96f62bae8536276840a740dd84a57dcd97d501dd src/models/chatmessagetoolcall.ts: id: b294b44052da - last_write_checksum: sha1:ebbd3dde77665a760f24c94518a51f60795cfc47 - pristine_git_object: c6678412deeed6ccd753c5a21b488ffb57ca1f1e + last_write_checksum: sha1:5f4ccc6c8ae6cd7fc8d60b5bd3976887d8c46b79 + pristine_git_object: baf5c348832b6668ced7e3e83e989c0e07ffa72d src/models/chatresponse.ts: id: 76c3703fa3d5 - last_write_checksum: sha1:539b61213bc60d718988e22076065d2ec606b913 - 
pristine_git_object: e1e9ede103eab06b70a8e4516485761e20e19640 + last_write_checksum: sha1:0d34426427d9bcdb3c27ebceb51983e2250ac800 + pristine_git_object: e53973ba6a1c1c5eed7d86cd26e4057db3c04273 src/models/chatresponsechoice.ts: id: 7db13c3cc866 - last_write_checksum: sha1:185c84859c36e961bbe960b6ef3840051fc5d78a - pristine_git_object: 1d46bed8b3eafabe7c03fec2af45959853c6826c + last_write_checksum: sha1:bef0f54b4d3e1dcd865839b326daf54b2e0ec15a + pristine_git_object: 67d1a4193f5754888870f27cb5b30ee0e0ca09e2 src/models/chatstreamingchoice.ts: id: 15346f0b1bc4 - last_write_checksum: sha1:253079ffed3d4e533d1ea98f7b752ebe1d0b7bbd - pristine_git_object: 43fa1d4cf1de1de8bd7fdd33c8a8260a9f6b3602 + last_write_checksum: sha1:2bc9da8ad0915a168e6233606d8d3be4c79a7b79 + pristine_git_object: 376c66635fa1530ea7b1d28aeafd0652c473bc55 src/models/chatstreamingmessagechunk.ts: id: 339ee7e4b920 - last_write_checksum: sha1:3b626c19b00b500680e7e4bd4cba5a7bc31b617d - pristine_git_object: fb1171fa67fe08b432131d9f2c13fc846d35f953 + last_write_checksum: sha1:6188c464f262bcd6bcba3ed275d0c2123c57090d + pristine_git_object: 343bf68cc4bc8242491cb6329b7e82cdee9be32d src/models/chatstreamingmessagetoolcall.ts: id: 74ba154581ba - last_write_checksum: sha1:bff3523e97b1d522d79810caaf849509d1d9d280 - pristine_git_object: fc2c3662370bbd065a0f28a9054063c61f28055c + last_write_checksum: sha1:42cb94988a5b990ad5ac35ff0e616f0045473e91 + pristine_git_object: bd4187bbd62cd7392584549cebbab42853b828fe src/models/chatstreamingresponsechunk.ts: id: 00bf2ea52439 - last_write_checksum: sha1:c0439824ebf5e916c7ca007b517b6467477de9af - pristine_git_object: b3e9a660ec8dbe9d562bc895bb64ff10f8bd84ff + last_write_checksum: sha1:81bd01e8cb7f01c5a7bed47f2f6d9be952433516 + pristine_git_object: 9ed8b45e0f4975096d733fc1c41b8be8c3289251 src/models/chatstreamoptions.ts: id: 83ba1c3db4bb - last_write_checksum: sha1:52843e897225ca36fe772600a59a39b500c13c27 - pristine_git_object: aee57eed1dd9b80028f71f082c5db3a2ebf6ee56 + 
last_write_checksum: sha1:140bb05ede4b812704e15368a9ace4ccf3fc0a82 + pristine_git_object: 41edb3f3d3b0b53241720d885cf78ece2613e3d5 src/models/createchargerequest.ts: id: c342dc72d1f4 last_write_checksum: sha1:c57248f3591179c079ee972d6e5fed7ae0cfa50f @@ -2722,14 +2783,18 @@ trackedFiles: id: fa417e9ad79a last_write_checksum: sha1:2245458d063c9185e8a0be3cc77fff435750853b pristine_git_object: e075ba95073bbb9e5e43b32d75dcc1c48a1cf9ce + src/models/debugoptions.ts: + id: 9045b3cff3b4 + last_write_checksum: sha1:4ab9b560597871feaec34fb4494959c7424b8bdd + pristine_git_object: 3047d39cb709eeebd3fe0b8c9341d48d532963d8 src/models/defaultparameters.ts: id: 8da6fac53cb5 last_write_checksum: sha1:f80ad4685bda90bbf12e6e56c20991e89839c05e pristine_git_object: 25444216c0d85142b45910c918e47e8cd5808e42 src/models/developermessage.ts: id: 358908e80669 - last_write_checksum: sha1:920432ef98b737bf523fa4f64c45fc69749fe81d - pristine_git_object: d7ccdd5d838edf26363029659218fce6ceb5d0b0 + last_write_checksum: sha1:95b5480fc0df7c12bef98197c02b891208b85019 + pristine_git_object: 2324f40e794a5b8b3ce470a262d5a761f5226523 src/models/edgenetworktimeoutresponseerrordata.ts: id: d86e84250105 last_write_checksum: sha1:97f1a32869159e48b331c2bcd5fcb220e6895dc8 @@ -2746,10 +2811,6 @@ trackedFiles: id: beddfd22a313 last_write_checksum: sha1:03d18a4e841ab897dc6c4a207c008def161679f8 pristine_git_object: 7d2b7ba7cc3294067cebc9cd82784971f6336183 - src/models/errors/chaterror.ts: - id: a3b18e48f494 - last_write_checksum: sha1:8502eef27ec1004acd59bd2e2cd8b6806ee8ac02 - pristine_git_object: 33cb7e5abe27c4ef70e9be5c8f86c1f398364912 src/models/errors/edgenetworktimeoutresponseerror.ts: id: da53f308c771 last_write_checksum: sha1:746eafb997c4d7ebcbd22327bd72f935a4fd3798 @@ -2764,8 +2825,8 @@ trackedFiles: pristine_git_object: 53bb3cfae157b89fda8e4a5b0f1dd709da82c1c9 src/models/errors/index.ts: id: c4e22507cb83 - last_write_checksum: sha1:7d4d84edaf25b9b9b70b3f56d00188ac30c75e2e - pristine_git_object: 
c5ddaff0616208d105296f039ef1131e8e9c3f76 + last_write_checksum: sha1:f6d0f2c568e5fde0916684dbedc78edef3efbdf5 + pristine_git_object: 173a02ae3e0d283f0df2c1550d3590dae918600c src/models/errors/internalserverresponseerror.ts: id: 02b76cec85f0 last_write_checksum: sha1:f1064e995c554473167d4b55284ad1e55283e4e4 @@ -2840,8 +2901,8 @@ trackedFiles: pristine_git_object: db61647cd182502b2b3b5333ee3a0bba66542645 src/models/index.ts: id: f93644b0f37e - last_write_checksum: sha1:8107e23b16666b9aa3f5d4393fa7f10cfd8852fe - pristine_git_object: 1330b6a98e31a963061e279bf8a69c6434a80efe + last_write_checksum: sha1:06020aee0915574b89b7201e735694dfefc695cc + pristine_git_object: 7f102ce5537968a491b418722fd1d67595fe7d7c src/models/inputmodality.ts: id: 771d5d4c91ec last_write_checksum: sha1:7f742bf5a1e6b7953ea96df4c4cef29c602accad @@ -2856,16 +2917,16 @@ trackedFiles: pristine_git_object: 1676c40a5b6dc5646acad69089007f2e43a1bd61 src/models/jsonschemaconfig.ts: id: a5dbdd0305ec - last_write_checksum: sha1:045b19a579875de278fd4ad5b9868c593368bfeb - pristine_git_object: 4ea4757a1a0a9e26f469bbaa98935a2ef72574e9 + last_write_checksum: sha1:0a19a48739adfe8ed30b0794cce5b81cb986c8a7 + pristine_git_object: 6ea4e14b575822bccc19fc518c2fa7dc0b8c5269 src/models/listendpointsresponse.ts: id: 5178e92a44ba last_write_checksum: sha1:f14d4f1bb0115f6de83613d6d946bd75ec47322a pristine_git_object: b6d6af839a15e92f869817c5cb03cab07a8461bd src/models/message.ts: id: 34645e53d993 - last_write_checksum: sha1:bda4a74ad2a2f5e77af6fe943519e93f4797aca5 - pristine_git_object: ab7c5ef589c3fd2fdc63136083a7d554cafa6375 + last_write_checksum: sha1:b16c4b16526e6910d2305ec3f175de15942d7b20 + pristine_git_object: 70fe60e3fa43a7e7f81027d6ccf301c1722d0a1c src/models/model.ts: id: b99d4c14e794 last_write_checksum: sha1:da45cdb16ea5dff001077711e43ad96482927b8c @@ -2880,16 +2941,16 @@ trackedFiles: pristine_git_object: 9c40d281d50644248ff88cc05e7f63514b519504 src/models/modelscountresponse.ts: id: cf41f6279453 - 
last_write_checksum: sha1:fd465602592fa0ddf0b22bdf05a97866c31e5503 - pristine_git_object: cb6c47ea1a24e1f6e63221ddf7ce3e46183b34b9 + last_write_checksum: sha1:128db8cacfb273fe25d15a99356a5fa40f2e8180 + pristine_git_object: 950a3f9216780e6a00895a6dcd743d411377fb52 src/models/modelslistresponse.ts: id: ea0e62a13d8d last_write_checksum: sha1:0beae3a59e783ff5e4a4b444b16e6227a5024f1a pristine_git_object: f3fae481c3a9a4c1461521e18e9693de9c291846 src/models/namedtoolchoice.ts: id: db68e36b6a77 - last_write_checksum: sha1:9e527a0fd2b2f0deb5a8eb82a5d4d1cd8cf5f10b - pristine_git_object: 1603f2c885891df5f3ed6c4e5e9ba57eac02e573 + last_write_checksum: sha1:a026cdf8bdd5bb3df4ae695da8e0f46caeea43d4 + pristine_git_object: a65a4e558a7ae8771861905aeb7e621cccea61f9 src/models/notfoundresponseerrordata.ts: id: 6c2943dd4f02 last_write_checksum: sha1:cd0ce3ea1fc6a309f88347c58a6d62cf9814f354 @@ -2916,8 +2977,8 @@ trackedFiles: pristine_git_object: faf39e855f59fde332184d397e47fbb32502eeb5 src/models/openairesponsesreasoningconfig.ts: id: 60a66fbc6068 - last_write_checksum: sha1:00ef6efa11d19d9c294b889025ee06ce4f890a27 - pristine_git_object: eeea2c05c4e1d2d9e9b80e8c335f0f64f40621d0 + last_write_checksum: sha1:cb6b34c5538ab4d58027162e42d354fc8789a3be + pristine_git_object: df560cf5660bca663456c3560545d0909f3abe66 src/models/openairesponsesreasoningeffort.ts: id: af69873eff63 last_write_checksum: sha1:293ef33861d72420cdef63e867b3dee972e69a1c @@ -2988,16 +3049,16 @@ trackedFiles: pristine_git_object: ea0b9a92edcb6803a0f5a691859f11ec5a62561c src/models/openresponsesnonstreamingresponse.ts: id: ffd6f7198dd7 - last_write_checksum: sha1:6045084704290bf2e9e2ee52a5f5fde31ae0bffa - pristine_git_object: b6c8c858d7b3ee85438351f22696a44cf6b96721 + last_write_checksum: sha1:9c5819febe88b78c289477099f1ca57178d12147 + pristine_git_object: 9843d593ae125d2ccd2afb4ded47a41e4fe83fa3 src/models/openresponsesreasoning.ts: id: 296442f2d24a last_write_checksum: sha1:208af0f7ff336615af9942b82932e1fa01a46e94 
pristine_git_object: 416b878637b5cfe8d50cbc714aeb6b49acfe326b src/models/openresponsesreasoningconfig.ts: id: c2f9d074fbd4 - last_write_checksum: sha1:fa4a78f97745f137635f0cfc25d6ea083d620906 - pristine_git_object: 6c2046fad179a3bd3a0ea49ce879007250b63235 + last_write_checksum: sha1:a235518d6f3fc216069a559e169d7765f4ee25a1 + pristine_git_object: a6fe99f759ef53f24c8ac7e82738f72119b565c3 src/models/openresponsesreasoningdeltaevent.ts: id: 32b751d1d3d7 last_write_checksum: sha1:db1e33644b6b4d57c087be3e43e1c93fed5ded39 @@ -3020,8 +3081,8 @@ trackedFiles: pristine_git_object: 438e3268f5050c6e3e4899d019e9671f0ef9a556 src/models/openresponsesrequest.ts: id: ff8a31d46b1c - last_write_checksum: sha1:0d0ed188e0a8133eed1e5c5e0546ce21839c10dd - pristine_git_object: dae7dabb9dab203fe7ac5b0fef400fee5e4fe33a + last_write_checksum: sha1:e96bec9f97c70c448d58d02ff947960779628398 + pristine_git_object: 2c89e3fb65270a464994f304d9a7551c4d54e1e3 src/models/openresponsesresponsetext.ts: id: aded1ce23b04 last_write_checksum: sha1:5046f904e0b5db52fbae46d9bbed94d651b7b0ef @@ -3072,8 +3133,8 @@ trackedFiles: pristine_git_object: b1cee1916f9fca2cd03b30a6723df85d89f0f028 src/models/operations/createauthkeyscode.ts: id: 1443f6afbf40 - last_write_checksum: sha1:765f931d589b433b0ee597425212c62725fc1988 - pristine_git_object: d12a0b230da2428ee7ebb7c2c56e3da5513c0d36 + last_write_checksum: sha1:00a755f7d44b65af50fc56cf92a2bfca618f0737 + pristine_git_object: 8979e7afe267d5449a312b065ab9b5c268d7ea2c src/models/operations/createcoinbasecharge.ts: id: 1e75ac2debf2 last_write_checksum: sha1:9da654d157dfcad5897b2d2d606827abf626fdd0 @@ -3116,8 +3177,8 @@ trackedFiles: pristine_git_object: 2677e460511944b9c01e063c03956af071e7f933 src/models/operations/getgeneration.ts: id: 5cdb2959d2a5 - last_write_checksum: sha1:5340d088c5810bec1d8833cccd1c9a972def5864 - pristine_git_object: f32739a9593a193fdcd8231317f1b643410bbfc6 + last_write_checksum: sha1:2c071bcdff10facacd81b9b8090a3aa1aa02f54b + pristine_git_object: 
98798dd9343d8a44677141ec0d6382a238e69f72 src/models/operations/getguardrail.ts: id: 11c366ebdade last_write_checksum: sha1:13348e3498ff474d192662d5e437d438d42877c2 @@ -3188,8 +3249,8 @@ trackedFiles: pristine_git_object: 92a8d661a49ef497b5fcbf7bfafa7592bdc3d644 src/models/operations/sendchatcompletionrequest.ts: id: 52a55a780d2d - last_write_checksum: sha1:1fa22228444a5a84257a0bb83f33190de27033ec - pristine_git_object: ada1e88578c0c468eb958ece29d1259db8ff6c98 + last_write_checksum: sha1:21816e5aec4a1af4237443333059a94877680a00 + pristine_git_object: 957b070c8a8bfeae36224fe10ee343cc6905fdc8 src/models/operations/updateguardrail.ts: id: f9780f5d66dd last_write_checksum: sha1:04dd4fbdaad754148b4915a5448aae87ff872569 @@ -3256,28 +3317,24 @@ trackedFiles: pristine_git_object: d2180d165594e1e5a5cd373ad9b0e03b13acce61 src/models/providername.ts: id: 89e536fb023a - last_write_checksum: sha1:2a476e8d383764d6455af2f5158d94cb56656679 - pristine_git_object: 41aa063bd1f0e46f86a8c74820e5f80426936a92 + last_write_checksum: sha1:631f393306798a6837f84ab517559e27c313930d + pristine_git_object: eca31616292f8b3884fb409d0249ba989aff788d src/models/provideroverloadedresponseerrordata.ts: id: 379f1256314f last_write_checksum: sha1:0458b6a8454adfa7c415e3bd7f49f2879cc6093e pristine_git_object: a42994698e17df9b87b470212766b7d9a5c0ddb8 src/models/providerpreferences.ts: id: 3a47295d7b91 - last_write_checksum: sha1:33fcbc4b1098c4ec696a3211ba389f1ae7817839 - pristine_git_object: fab25d321dc259aae7b01b6ffbfc0375da8cc83e + last_write_checksum: sha1:7c2d7001eef6045961f8fde2802030cb19e8f371 + pristine_git_object: 92c734108de97afc636791081c048d56712be1c4 src/models/providersort.ts: id: 7d1e919d1ec3 - last_write_checksum: sha1:79757871ce1ce3fea1a1e0457286dbf45a32b35e - pristine_git_object: 2d28a6168824150ab179230f6656f310c7615b5f + last_write_checksum: sha1:de57e6a17b835c7201fd023a739ec5eb54606716 + pristine_git_object: b0cd31cce4a9287106552a4a7f09d00004526fb7 src/models/providersortconfig.ts: id: 
1491329aae16 - last_write_checksum: sha1:cc9f54498fbe219de77cc6744ea1817a1b9db1dd - pristine_git_object: 56f70b245b5a23bfcf20d25c43389ad53b680203 - src/models/providersortunion.ts: - id: 7a2d23baab80 - last_write_checksum: sha1:ec0fe732ada2fae9665df9a25b3522a728b33e94 - pristine_git_object: ab212a4c43bd6e099ce33c01c8152e54d47ce0bd + last_write_checksum: sha1:8055ea12b8902a8e10a7e1c38c43caafb3aa2813 + pristine_git_object: 473ed35ef4bf2b5ca589eb9af90c188b522a7c6e src/models/publicendpoint.ts: id: 396ce3186017 last_write_checksum: sha1:b209b95d49d745546712cd8b42a5c34f9945d6c2 @@ -3290,6 +3347,22 @@ trackedFiles: id: b28328f1822c last_write_checksum: sha1:4d10d34237fddb881f2c49c3e27342487e5fca47 pristine_git_object: 55082b7e886b01408398431244ebcf4ecef0f605 + src/models/reasoningdetailencrypted.ts: + id: 6172b093f9ae + last_write_checksum: sha1:0f219cd7ba17f128a79037deee0d277bc35337e7 + pristine_git_object: 8f8217ed91e379bc1740b6ee7704bb2ff200e8bb + src/models/reasoningdetailsummary.ts: + id: ecc7ceb880a1 + last_write_checksum: sha1:df1a40e39c171b2410303f817256c083267ba1af + pristine_git_object: 4370c07ced3247792c34c7d25c67e0d5f01400a7 + src/models/reasoningdetailtext.ts: + id: 0f126d8c4950 + last_write_checksum: sha1:48ba33c3de6b350714d8e070864562b5a89c5a9e + pristine_git_object: 4735b8824cf4f59bc1f4ffdf11f4d69d378142ef + src/models/reasoningdetailunion.ts: + id: 688738e07ecb + last_write_checksum: sha1:1bfd2cf95a930e0670fb3cc1ba1fa39226dcf467 + pristine_git_object: 08cb14ea2d94a964554164b751fa8fd248a552b8 src/models/reasoningsummarytext.ts: id: 9c06c18de6d6 last_write_checksum: sha1:a4cad2def2f1c17142fb524927190f0d3e571037 @@ -3306,18 +3379,30 @@ trackedFiles: id: a08eeff8786c last_write_checksum: sha1:5429dbc4ea062c63e6dc613d74c3ece63f218903 pristine_git_object: c9eac87a0c41f81c8ab40f4634f943a0a896ae59 + src/models/responseformatjsonobject.ts: + id: ae5419a9e494 + last_write_checksum: sha1:0b87af6d5f1244b92b2351bfc8ad1b0f2ec86e8e + pristine_git_object: 
57c3bfa2342b728128436ca3af30f042b457c5c5 src/models/responseformatjsonschema.ts: id: ff0b42f802dc - last_write_checksum: sha1:a02e6171bd3cd6f4935c784145b41db9263275fe - pristine_git_object: cb2913fd93c6afe6de471e4a3da5c6583691638f + last_write_checksum: sha1:dcc4b25adf808e43ee4b9ac09964ce50347679b3 + pristine_git_object: 1caafd9f7ab0d47e58ee87d1a3e8d7ee5b427a5f + src/models/responseformattext.ts: + id: 872eaee007ec + last_write_checksum: sha1:25e5714bea5aacb8698b3d11e9a5903739adda89 + pristine_git_object: 671ca989e29968d185d5d765302f73425ff5009c src/models/responseformattextconfig.ts: id: 4408142ee314 - last_write_checksum: sha1:ca323a03f8d9bf2407fd1e084f6b30a05525df8e - pristine_git_object: 09480e22ce53625c7845c9ed783684ecdc21cf64 + last_write_checksum: sha1:12b745af97c4fc169ef6adab1a62d9d8fb3fc02d + pristine_git_object: 645f81b573a9322c60c097ff8cc8300bcb83b39f src/models/responseformattextgrammar.ts: id: 5bc0a32fad4c - last_write_checksum: sha1:411c19a592ad720bda0b1795bcd305a9f8109eef - pristine_git_object: 407bdc0120f425b9b121791fe8f174e4cf749ef3 + last_write_checksum: sha1:d505765f20fcc4b37cc11e0558c76cac3656cb1c + pristine_git_object: 6e16981f578142d3c445252ff00b9f84ebe41f57 + src/models/responseformattextpython.ts: + id: 1310afb1b7d1 + last_write_checksum: sha1:384c2d1026e39ad7c0cf1709ec38bc770e48b83c + pristine_git_object: a274cd1bcad05805568d85f47b8372f981a1e851 src/models/responseinputaudio.ts: id: 259dfb8718b4 last_write_checksum: sha1:46b603165b97e287d6787590c4793ac395843aa0 @@ -3344,12 +3429,8 @@ trackedFiles: pristine_git_object: 1d68d6e594449824c640bb546e697a2244b85727 src/models/responseserrorfield.ts: id: 830ea3d3b590 - last_write_checksum: sha1:776be0716ffd7975f6e36f67179d31c49013f2af - pristine_git_object: 9672f19f64566b65b24322f523f25b1629554e9f - src/models/responsesformatjsonobject.ts: - id: cd67052e598c - last_write_checksum: sha1:08c8bf0e6d7fef5be95181a910ee18677a52a817 - pristine_git_object: 8da5d102020c8276b116b2ed43f96e02b387fb51 + 
last_write_checksum: sha1:c297c3d3db7622fc157822291aeba52308394941 + pristine_git_object: 584f66cd23f2e112b14f8d0b23aea1cd4532c412 src/models/responsesformattext.ts: id: 30c4a4421b1d last_write_checksum: sha1:c8c68573d40c346e3baa49b258093d77434a8fca @@ -3402,26 +3483,6 @@ trackedFiles: id: 0f516033ff9c last_write_checksum: sha1:c0290bc80b219e511eae7abeb33514d08cd57d40 pristine_git_object: ad0047e3ee3baf0dea29abdfda2f3b7681c03519 - src/models/schema10.ts: - id: 29b219f570e7 - last_write_checksum: sha1:7760744a917b1aeea1f5446833db1d189fb92daf - pristine_git_object: 50f1a6bf15bbe34a23cce3e689ce333f5ff45503 - src/models/schema14.ts: - id: 0ad907e0bae0 - last_write_checksum: sha1:6e6e26a41844c12bdb7d73dc7fdf72166ed8cf32 - pristine_git_object: f75275f231e2e778372bfd84031388d219477e6d - src/models/schema17.ts: - id: 9c9a2fdb4582 - last_write_checksum: sha1:dc04f7ab06a108b87f10e0202f9f7776ee09b558 - pristine_git_object: d410ec3d6ae94f8cb4391f82343968b4e2e89030 - src/models/schema19.ts: - id: 6aa8d65adcb3 - last_write_checksum: sha1:7e99f2092ff0f3ede005d43d36e36daaafdadf94 - pristine_git_object: 15e01a96c9bb013343c768eb2cd2f2da3cecb9f4 - src/models/schema5.ts: - id: 6013b8ad4236 - last_write_checksum: sha1:df2f0193c20ae44d7e64e4679702dac1fc16df6b - pristine_git_object: 6b140f2a155a28ec1abf3ffff165e4519984fe4c src/models/security.ts: id: d90c6c784ca5 last_write_checksum: sha1:90d7a8903f46cd2ea5428527343a003511fc78c9 @@ -3432,20 +3493,24 @@ trackedFiles: pristine_git_object: f6242052cf481213a36c0389e3e3483f23d3bca5 src/models/systemmessage.ts: id: 2a179ca48627 - last_write_checksum: sha1:64861bd0196bf5eed94b1273658105140502476d - pristine_git_object: 68f3666fb3d96cb8a9076f86793dc25213d13906 + last_write_checksum: sha1:4c2991a97fd8bb1ec26e407e38eea26dc39aa4f9 + pristine_git_object: de89ccd3afe2b4974709f205944a342eb19b0289 src/models/toolcallstatus.ts: id: 54eb9c06bedb last_write_checksum: sha1:28d8c73463fd784616838c285a0cf5a6b83b7b4c pristine_git_object: 
1585e3f90f3f774a0b955cb671e7745c5b95473b + src/models/toolchoiceoption.ts: + id: 4095df8e5f44 + last_write_checksum: sha1:cade0c854e180e096285220adf7c395ba68b0d7b + pristine_git_object: 521db1d52eaacae27ee6497ccd9b4541fe6a7f6b src/models/tooldefinitionjson.ts: id: 546c340f3013 - last_write_checksum: sha1:554556b413cd0b845ddcf77a614ea71375de2c24 - pristine_git_object: 8456579f999ff223573519e0595978297d10c8e6 + last_write_checksum: sha1:2552b1e3ce60c5b02b5cd31d085d8572f1222aee + pristine_git_object: 9393ec424c0743062607dce3a77ac2b6692e4606 src/models/toolresponsemessage.ts: id: 1122bbfb530b - last_write_checksum: sha1:81695fbc02de3945d966a1f7de981c58bee89773 - pristine_git_object: 1cadd6837ee9183ec6168c386d9dfeb886f52f8b + last_write_checksum: sha1:636d20f2da253a303d17abca8e064695dc10ed6b + pristine_git_object: 319df667e196e4b9c2f9e4a42f693782ba964fa5 src/models/toomanyrequestsresponseerrordata.ts: id: ff9492357bd9 last_write_checksum: sha1:0ec65f87b4b129d107d23eab65206c26f7f80435 @@ -3468,8 +3533,12 @@ trackedFiles: pristine_git_object: 4d03e2fd769dbe3f2c6bde3ac3f642a1343ebafd src/models/usermessage.ts: id: e34f73b285e1 - last_write_checksum: sha1:4c49b3a864caca0de3b3c6b5289d13820cbdedb6 - pristine_git_object: ec761e7f2d614cf5d588f7f372dd0452f5beb949 + last_write_checksum: sha1:cb5a4e8f86be37c18befc23a3ce782fa82c0099e + pristine_git_object: 8234718ab21607c1df5b0fdde54cd776adc5c730 + src/models/videoinput.ts: + id: d06ebb3e2672 + last_write_checksum: sha1:f93a76214aea223982767f18fe369c4bb5e5042b + pristine_git_object: 4489c8aebc5f654b5f389d61fa3d3e628e144de1 src/models/websearchengine.ts: id: 49b0152fd59a last_write_checksum: sha1:78c4e85f1e3e73741af8b3ed45ce7450d1cb802f @@ -3496,8 +3565,8 @@ trackedFiles: pristine_git_object: 0fe9a46a9e7b3bef6618cb60ac2eb7003531ccf8 src/sdk/chat.ts: id: c56babc22a20 - last_write_checksum: sha1:c7312ca438389d24b9511957ed19c1c78e6b8da7 - pristine_git_object: 9020f613bc836a25e350e6e25ee2c4098e3a9d7c + last_write_checksum: 
sha1:5047439c28d29c12740c76f53acf60f49e7d8488 + pristine_git_object: e330dfa1da2ba5f17a6d92559aa20ea1bedf1881 src/sdk/credits.ts: id: c72074b5f165 last_write_checksum: sha1:3b225301fa5aa4892bab943123f1c3a4d9db2adf @@ -3540,8 +3609,8 @@ trackedFiles: pristine_git_object: 12be54b6eb5df3755eb943fab7fe0e6f5c0a9204 src/sdk/sdk.ts: id: 784571af2f69 - last_write_checksum: sha1:5a2f09c703516cfcfb8f051c59bada07c9e447fc - pristine_git_object: 6769c2df5dc3400b4bb1c2c6b35ba709c1816d01 + last_write_checksum: sha1:f5f22ce1632d901cedbcff864d72ac738c58b962 + pristine_git_object: 1d5e118350c42ad92a48233f06050ba123bfeda1 src/types/async.ts: id: fac8da972f86 last_write_checksum: sha1:e2a7c53b428567587741f38e6da489f596de6227 @@ -4229,14 +4298,36 @@ examples: sendChatCompletionRequest: speakeasy-default-send-chat-completion-request: requestBody: - application/json: {"messages": [], "stream": false} + application/json: {"messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is the capital of France?"}], "stream": false, "temperature": 0.7, "top_p": 1} responses: "200": - application/json: {"id": "", "choices": [], "created": 2736.96, "model": "Impala", "object": "chat.completion"} + application/json: {"id": "chatcmpl-123", "choices": [{"finish_reason": "stop", "index": 0, "message": {"role": "assistant"}}], "created": 1677652288, "model": "openai/gpt-4", "object": "chat.completion"} "400": - application/json: {"error": {"code": "", "message": ""}} + application/json: {"error": {"code": 400, "message": "Invalid request parameters"}} "500": - application/json: {"error": {"code": "", "message": ""}} + application/json: {"error": {"code": 500, "message": "Internal Server Error"}} + "401": + application/json: {"error": {"code": 401, "message": "Missing Authentication header"}} + "402": + application/json: {"error": {"code": 402, "message": "Insufficient credits. 
Add more using https://openrouter.ai/credits"}} + "404": + application/json: {"error": {"code": 404, "message": "Resource not found"}} + "408": + application/json: {"error": {"code": 408, "message": "Operation timed out. Please try again later."}} + "413": + application/json: {"error": {"code": 413, "message": "Request payload too large"}} + "422": + application/json: {"error": {"code": 422, "message": "Invalid argument"}} + "429": + application/json: {"error": {"code": 429, "message": "Rate limit exceeded"}} + "502": + application/json: {"error": {"code": 502, "message": "Provider returned error"}} + "503": + application/json: {"error": {"code": 503, "message": "Service temporarily unavailable"}} + "524": + application/json: {"error": {"code": 524, "message": "Request timed out. Please try again later."}} + "529": + application/json: {"error": {"code": 529, "message": "Provider returned error"}} createApiAlphaResponses: speakeasy-default-create-api-alpha-responses: requestBody: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index f27b66b8..13936db1 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -35,7 +35,7 @@ generation: preApplyUnionDiscriminators: true versioningStrategy: automatic typescript: - version: 0.8.0 + version: 0.9.4 acceptHeaderEnum: false additionalDependencies: dependencies: diff --git a/.speakeasy/in.openapi.yaml b/.speakeasy/in.openapi.yaml index cc8e6171..c3c38fcd 100644 --- a/.speakeasy/in.openapi.yaml +++ b/.speakeasy/in.openapi.yaml @@ -10,6 +10,15 @@ info: license: name: MIT url: https://opensource.org/licenses/MIT +servers: + - url: https://openrouter.ai/api/v1 + description: Production server + x-speakeasy-server-id: production +security: + - apiKey: [] +externalDocs: + description: OpenRouter Documentation + url: https://openrouter.ai/docs components: schemas: OpenAIResponsesResponseStatus: @@ -1117,10 +1126,12 @@ components: - none ReasoningSummaryVerbosity: type: string + nullable: true enum: - auto - concise - 
detailed + example: auto OpenAIResponsesReasoningConfig: type: object nullable: true @@ -1156,18 +1167,6 @@ components: description: Plain text response format example: type: text - ResponsesFormatJSONObject: - type: object - properties: - type: - type: string - enum: - - json_object - required: - - type - description: JSON object response format - example: - type: json_object ResponsesFormatTextJSONSchemaConfig: type: object properties: @@ -1207,7 +1206,7 @@ components: ResponseFormatTextConfig: anyOf: - $ref: '#/components/schemas/ResponsesFormatText' - - $ref: '#/components/schemas/ResponsesFormatJSONObject' + - $ref: '#/components/schemas/ResponseFormatJSONObject' - $ref: '#/components/schemas/ResponsesFormatTextJSONSchemaConfig' description: Text response format configuration example: @@ -3574,6 +3573,7 @@ components: - Inceptron - InferenceNet - Infermatic + - Io Net - Inflection - Liquid - Mara @@ -3631,20 +3631,34 @@ components: - price - throughput - latency + description: The provider sorting strategy (price, throughput, latency) + example: price ProviderSortConfig: type: object properties: by: - anyOf: - - $ref: '#/components/schemas/ProviderSort' - - type: 'null' + type: string + nullable: true + enum: + - price + - throughput + - latency + description: The provider sorting strategy (price, throughput, latency) + example: price partition: - anyOf: - - type: string - enum: - - model - - none - - type: 'null' + type: string + nullable: true + enum: + - model + - none + description: >- + Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback + models remain fallbacks), "none" sorts all endpoints together regardless of model. 
+ example: model + description: The provider sorting strategy (price, throughput, latency) + example: + by: price + partition: model BigNumberUnion: type: string description: Price per million prompt tokens @@ -4094,6 +4108,25 @@ components: A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. + trace: + type: object + properties: + trace_id: + type: string + trace_name: + type: string + span_name: + type: string + generation_name: + type: string + parent_span_id: + type: string + additionalProperties: + nullable: true + description: >- + Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, + parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured + broadcast destinations. description: Request schema for Responses endpoint example: model: anthropic/claude-4.5-sonnet-20250929 @@ -7191,6 +7224,25 @@ components: A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. + trace: + type: object + properties: + trace_id: + type: string + trace_name: + type: string + span_name: + type: string + generation_name: + type: string + parent_span_id: + type: string + additionalProperties: + nullable: true + description: >- + Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, + parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured + broadcast destinations. 
models: type: array items: @@ -7314,2164 +7366,2497 @@ components: error: code: 403 message: Only management keys can perform this operation - CreateChargeRequest: + ChatCompletionFinishReason: + type: string + enum: + - tool_calls + - stop + - length + - content_filter + - error + example: stop + ChatMessageContentItemCacheControl: type: object properties: - amount: - type: number - sender: + type: type: string - chain_id: - type: integer enum: - - 1 - - 137 - - 8453 + - ephemeral + ttl: + type: string + enum: + - 5m + - 1h required: - - amount - - sender - - chain_id - description: Create a Coinbase charge for crypto payment + - type + description: Cache control for the content part example: - amount: 100 - sender: '0x1234567890123456789012345678901234567890' - chain_id: 1 - ProviderPreferences: + type: ephemeral + ttl: 5m + ChatMessageContentItemText: type: object properties: - allow_fallbacks: - type: boolean - nullable: true - description: > - Whether to allow backup providers to serve requests - - - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use the - next best provider. - - - false: use only the primary/custom provider, and return the upstream error if it's unavailable. - require_parameters: - type: boolean - nullable: true - description: >- - Whether to filter providers to only those that support the parameters you've provided. If this setting is - omitted or set to false, then providers will receive only the parameters they support, and ignore the rest. - data_collection: - $ref: '#/components/schemas/DataCollection' - zdr: - type: boolean - nullable: true - description: >- - Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that do - not retain prompts will be used. - example: true - enforce_distillable_text: - type: boolean - nullable: true - description: >- - Whether to restrict routing to only models that allow text distillation. 
When true, only models where the - author has allowed distillation will be used. - example: true - order: - type: array - nullable: true - items: - anyOf: - - $ref: '#/components/schemas/ProviderName' - - type: string - description: >- - An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this - list that supports your requested model, and fall back to the next if it is unavailable. If no providers are - available, the request will fail with an error message. - only: - type: array - nullable: true - items: - anyOf: - - $ref: '#/components/schemas/ProviderName' - - type: string - description: >- - List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider - settings for this request. - ignore: - type: array - nullable: true - items: - anyOf: - - $ref: '#/components/schemas/ProviderName' - - type: string - description: >- - List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider - settings for this request. - quantizations: - type: array - nullable: true - items: - $ref: '#/components/schemas/Quantization' - description: A list of quantization levels to filter the provider by. - sort: - allOf: - - $ref: '#/components/schemas/ProviderSort' - - anyOf: - - $ref: '#/components/schemas/ProviderSort' - - $ref: '#/components/schemas/ProviderSortConfig' - - nullable: true - description: >- - The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing - is performed. - max_price: + type: + type: string + enum: + - text + text: + type: string + cache_control: + $ref: '#/components/schemas/ChatMessageContentItemCacheControl' + required: + - type + - text + description: Text content part + example: + type: text + text: Hello, world! 
+ ChatMessageContentItemImage: + type: object + properties: + type: + type: string + enum: + - image_url + image_url: type: object properties: - prompt: - $ref: '#/components/schemas/BigNumberUnion' - completion: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: Price per million completion tokens - image: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: Price per image - audio: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: Price per audio unit - request: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: Price per request - description: >- - The object specifying the maximum price you want to pay for this request. USD price per million tokens, for - prompt and completion. - preferred_min_throughput: - $ref: '#/components/schemas/PreferredMinThroughput' - preferred_max_latency: - $ref: '#/components/schemas/PreferredMaxLatency' - description: Provider routing preferences for the request. - PublicPricing: + url: + type: string + description: 'URL of the image (data: URLs supported)' + detail: + type: string + enum: + - auto + - low + - high + description: Image detail level for vision models + required: + - url + required: + - type + - image_url + description: Image content part for vision models + example: + type: image_url + image_url: + url: https://example.com/image.jpg + detail: auto + ChatMessageContentItemAudio: type: object properties: - prompt: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - completion: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - request: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value 
representing a large number - image_token: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image_output: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - audio: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - audio_output: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_audio_cache: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - web_search: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - internal_reasoning: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_cache_read: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_cache_write: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - discount: - type: number + type: + type: string + enum: + - input_audio + input_audio: + type: object + properties: + data: + type: string + description: Base64 encoded audio data + format: + type: string + description: >- + Audio format (e.g., wav, mp3, flac, m4a, ogg, aiff, aac, pcm16, pcm24). Supported formats vary by + provider. + required: + - data + - format required: - - prompt - - completion - additionalProperties: false - description: Pricing information for the model + - type + - input_audio + description: Audio input content part. Supported audio formats vary by provider. 
example: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - ModelGroup: - type: string - enum: - - Router - - Media - - Other - - GPT - - Claude - - Gemini - - Grok - - Cohere - - Nova - - Qwen - - Yi - - DeepSeek - - Mistral - - Llama2 - - Llama3 - - Llama4 - - PaLM - - RWKV - - Qwen3 - example: GPT - description: Tokenizer type used by the model - InputModality: - type: string - enum: - - text - - image - - file - - audio - - video - example: text - OutputModality: - type: string - enum: - - text - - image - - embeddings - - audio - example: text - ModelArchitecture: + type: input_audio + input_audio: + data: SGVsbG8gV29ybGQ= + format: wav + VideoInput: type: object properties: - tokenizer: - $ref: '#/components/schemas/ModelGroup' - instruct_type: + url: type: string - nullable: true - enum: - - none - - airoboros - - alpaca - - alpaca-modif - - chatml - - claude - - code-llama - - gemma - - llama2 - - llama3 - - mistral - - nemotron - - neural - - openchat - - phi3 - - rwkv - - vicuna - - zephyr - - deepseek-r1 - - deepseek-v3.1 - - qwq - - qwen3 - example: chatml - description: Instruction format type - modality: + description: 'URL of the video (data: URLs supported)' + required: + - url + description: Video input object + ChatMessageContentItemVideoLegacy: + type: object + properties: + type: type: string - nullable: true - description: Primary modality of the model - example: text->text - input_modalities: - type: array - items: - $ref: '#/components/schemas/InputModality' - description: Supported input modalities - output_modalities: - type: array - items: - $ref: '#/components/schemas/OutputModality' - description: Supported output modalities + enum: + - input_video + video_url: + $ref: '#/components/schemas/VideoInput' required: - - modality - - input_modalities - - output_modalities - description: Model architecture information + - type + - video_url + description: Video input content part (legacy format - deprecated) + 
deprecated: true example: - tokenizer: GPT - instruct_type: chatml - modality: text->text - input_modalities: - - text - output_modalities: - - text - TopProviderInfo: + type: input_video + video_url: + url: https://example.com/video.mp4 + ChatMessageContentItemVideo: type: object properties: - context_length: - type: number - nullable: true - description: Context length from the top provider - example: 8192 - max_completion_tokens: - type: number - nullable: true - description: Maximum completion tokens from the top provider - example: 4096 - is_moderated: - type: boolean - description: Whether the top provider moderates content - example: true + type: + type: string + enum: + - video_url + video_url: + $ref: '#/components/schemas/VideoInput' required: - - is_moderated - description: Information about the top provider for this model + - type + - video_url + description: Video input content part example: - context_length: 8192 - max_completion_tokens: 4096 - is_moderated: true - PerRequestLimits: + type: video_url + video_url: + url: https://example.com/video.mp4 + ChatMessageContentItem: + oneOf: + - $ref: '#/components/schemas/ChatMessageContentItemText' + - $ref: '#/components/schemas/ChatMessageContentItemImage' + - $ref: '#/components/schemas/ChatMessageContentItemAudio' + - oneOf: + - $ref: '#/components/schemas/ChatMessageContentItemVideoLegacy' + - $ref: '#/components/schemas/ChatMessageContentItemVideo' + discriminator: + propertyName: type + mapping: + input_video: '#/components/schemas/ChatMessageContentItemVideoLegacy' + video_url: '#/components/schemas/ChatMessageContentItemVideo' + discriminator: + propertyName: type + description: Content part for chat completion messages + example: + type: text + text: Hello, world! 
+ ChatMessageToolCall: type: object - nullable: true properties: - prompt_tokens: - type: number - example: 1000 - description: Maximum prompt tokens per request - completion_tokens: - type: number - example: 1000 - description: Maximum completion tokens per request + id: + type: string + description: Tool call identifier + type: + type: string + enum: + - function + function: + type: object + properties: + name: + type: string + description: Function name to call + arguments: + type: string + description: Function arguments as JSON string + required: + - name + - arguments required: - - prompt_tokens - - completion_tokens - description: Per-request token limits + - id + - type + - function + description: Tool call made by the assistant example: - prompt_tokens: 1000 - completion_tokens: 1000 - Parameter: - type: string - enum: - - temperature - - top_p - - top_k - - min_p - - top_a - - frequency_penalty - - presence_penalty - - repetition_penalty - - max_tokens - - logit_bias - - logprobs - - top_logprobs - - seed - - response_format - - structured_outputs - - stop - - tools - - tool_choice - - parallel_tool_calls - - include_reasoning - - reasoning - - reasoning_effort - - web_search_options - - verbosity - example: temperature - DefaultParameters: + id: call_abc123 + type: function + function: + name: get_current_weather + arguments: '{"location": "Boston, MA"}' + ReasoningDetailSummary: type: object - nullable: true properties: - temperature: - type: number + type: + type: string + enum: + - reasoning.summary + summary: + type: string + id: + type: string nullable: true - minimum: 0 - maximum: 2 - top_p: - type: number + format: + type: string nullable: true - minimum: 0 - maximum: 1 - frequency_penalty: + enum: + - unknown + - openai-responses-v1 + - azure-openai-responses-v1 + - xai-responses-v1 + - anthropic-claude-v1 + - google-gemini-v1 + index: type: number - nullable: true - minimum: -2 - maximum: 2 - additionalProperties: false - description: Default 
parameters for this model + required: + - type + - summary + description: Reasoning detail summary schema example: - temperature: 0.7 - top_p: 0.9 - frequency_penalty: 0 - Model: + type: reasoning.summary + summary: The model analyzed the problem by first identifying key constraints, then evaluating possible solutions... + ReasoningDetailEncrypted: type: object properties: - id: + type: type: string - description: Unique identifier for the model - example: openai/gpt-4 - canonical_slug: + enum: + - reasoning.encrypted + data: type: string - description: Canonical slug for the model - example: openai/gpt-4 - hugging_face_id: + id: type: string nullable: true - description: Hugging Face model identifier, if applicable - example: microsoft/DialoGPT-medium - name: + format: type: string - description: Display name of the model - example: GPT-4 - created: + nullable: true + enum: + - unknown + - openai-responses-v1 + - azure-openai-responses-v1 + - xai-responses-v1 + - anthropic-claude-v1 + - google-gemini-v1 + index: type: number - description: Unix timestamp of when the model was created - example: 1692901234 - description: + required: + - type + - data + description: Reasoning detail encrypted schema + example: + type: reasoning.encrypted + data: encrypted data + ReasoningDetailText: + type: object + properties: + type: + type: string + enum: + - reasoning.text + text: type: string - description: Description of the model - example: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. 
- pricing: - $ref: '#/components/schemas/PublicPricing' - context_length: - type: number nullable: true - description: Maximum context length in tokens - example: 8192 - architecture: - $ref: '#/components/schemas/ModelArchitecture' - top_provider: - $ref: '#/components/schemas/TopProviderInfo' - per_request_limits: - $ref: '#/components/schemas/PerRequestLimits' - supported_parameters: - type: array - items: - $ref: '#/components/schemas/Parameter' - description: List of supported parameters for this model - default_parameters: - $ref: '#/components/schemas/DefaultParameters' - expiration_date: + signature: type: string nullable: true - description: The date after which the model may be removed. ISO 8601 date string (YYYY-MM-DD) or null if no expiration. - example: '2025-06-01' + id: + type: string + nullable: true + format: + type: string + nullable: true + enum: + - unknown + - openai-responses-v1 + - azure-openai-responses-v1 + - xai-responses-v1 + - anthropic-claude-v1 + - google-gemini-v1 + index: + type: number required: - - id - - canonical_slug - - name - - created - - pricing - - context_length - - architecture - - top_provider - - per_request_limits - - supported_parameters - - default_parameters - description: Information about an AI model available on OpenRouter + - type + description: Reasoning detail text schema example: - id: openai/gpt-4 - canonical_slug: openai/gpt-4 - name: GPT-4 - created: 1692901234 - description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. 
- pricing: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - context_length: 8192 - architecture: - tokenizer: GPT - instruct_type: chatml - modality: text->text - input_modalities: - - text - output_modalities: - - text - top_provider: - context_length: 8192 - max_completion_tokens: 4096 - is_moderated: true - per_request_limits: null - supported_parameters: - - temperature - - top_p - - max_tokens - default_parameters: null - expiration_date: null - ModelsListResponseData: + type: reasoning.text + text: The model analyzed the problem by first identifying key constraints, then evaluating possible solutions... + signature: signature + ReasoningDetailUnion: + oneOf: + - $ref: '#/components/schemas/ReasoningDetailSummary' + - $ref: '#/components/schemas/ReasoningDetailEncrypted' + - $ref: '#/components/schemas/ReasoningDetailText' + discriminator: + propertyName: type + mapping: + reasoning.summary: '#/components/schemas/ReasoningDetailSummary' + reasoning.encrypted: '#/components/schemas/ReasoningDetailEncrypted' + reasoning.text: '#/components/schemas/ReasoningDetailText' + description: Reasoning detail union schema + example: + type: reasoning.summary + summary: The model analyzed the problem by first identifying key constraints, then evaluating possible solutions... + AssistantMessageReasoningDetails: type: array items: - $ref: '#/components/schemas/Model' - description: List of available models - ModelsListResponse: + $ref: '#/components/schemas/ReasoningDetailUnion' + description: Reasoning details for extended thinking models + AssistantMessageImages: + type: array + items: + type: object + properties: + image_url: + type: object + properties: + url: + type: string + description: URL or base64-encoded data of the generated image + required: + - url + required: + - image_url + description: Generated images from image generation models + example: + - image_url: + url: data:image/png;base64,iVBORw0KGgo... 
+ AssistantMessage: type: object properties: - data: - $ref: '#/components/schemas/ModelsListResponseData' + role: + type: string + enum: + - assistant + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItem' + - nullable: true + description: Assistant message content + name: + type: string + description: Optional name for the assistant + tool_calls: + type: array + items: + $ref: '#/components/schemas/ChatMessageToolCall' + description: Tool calls made by the assistant + refusal: + type: string + nullable: true + description: Refusal message if content was refused + reasoning: + type: string + nullable: true + description: Reasoning output + reasoning_details: + $ref: '#/components/schemas/AssistantMessageReasoningDetails' + images: + $ref: '#/components/schemas/AssistantMessageImages' required: - - data - description: List of available models + - role + description: Assistant message for requests and responses example: - data: - - id: openai/gpt-4 - canonical_slug: openai/gpt-4 - name: GPT-4 - created: 1692901234 - description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. - pricing: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - context_length: 8192 - architecture: - tokenizer: GPT - instruct_type: chatml - modality: text->text - input_modalities: - - text - output_modalities: - - text - top_provider: - context_length: 8192 - max_completion_tokens: 4096 - is_moderated: true - per_request_limits: null - supported_parameters: - - temperature - - top_p - - max_tokens - - frequency_penalty - - presence_penalty - default_parameters: null - expiration_date: null - ModelsCountResponse: + role: assistant + content: The capital of France is Paris. 
+ ChatMessageTokenLogprob: type: object properties: - data: - type: object - properties: - count: - type: number - description: Total number of available models - example: 150 - required: - - count - description: Model count data - example: - count: 150 + token: + type: string + description: The token + logprob: + type: number + description: Log probability of the token + bytes: + type: array + nullable: true + items: + type: number + description: UTF-8 bytes of the token + top_logprobs: + type: array + items: + type: object + properties: + token: + type: string + logprob: + type: number + bytes: + type: array + nullable: true + items: + type: number + required: + - token + - logprob + - bytes + description: Top alternative tokens with probabilities required: - - data - description: Model count data + - token + - logprob + - bytes + - top_logprobs + description: Token log probability information example: - data: - count: 150 - InstructType: - type: string - nullable: true - enum: - - none - - airoboros - - alpaca - - alpaca-modif - - chatml - - claude - - code-llama - - gemma - - llama2 - - llama3 - - mistral - - nemotron - - neural - - openchat - - phi3 - - rwkv - - vicuna - - zephyr - - deepseek-r1 - - deepseek-v3.1 - - qwq - - qwen3 - description: Instruction format type - EndpointStatus: - type: integer - enum: - - 0 - - -1 - - -2 - - -3 - - -5 - - -10 - example: 0 - PercentileStats: + token: ' Hello' + logprob: -0.612345 + bytes: null + top_logprobs: + - token: ' Hello' + logprob: -0.612345 + bytes: null + ChatMessageTokenLogprobs: type: object nullable: true properties: - p50: + content: + type: array + nullable: true + items: + $ref: '#/components/schemas/ChatMessageTokenLogprob' + description: Log probabilities for content tokens + refusal: + type: array + nullable: true + items: + $ref: '#/components/schemas/ChatMessageTokenLogprob' + description: Log probabilities for refusal tokens + required: + - content + - refusal + description: Log probabilities for 
the completion + example: + content: + - token: ' Hello' + logprob: -0.612345 + bytes: null + top_logprobs: [] + refusal: null + ChatResponseChoice: + type: object + properties: + finish_reason: + anyOf: + - $ref: '#/components/schemas/ChatCompletionFinishReason' + - nullable: true + - nullable: true + index: type: number - description: Median (50th percentile) - example: 25.5 - p75: + description: Choice index + example: 0 + message: + $ref: '#/components/schemas/AssistantMessage' + logprobs: + $ref: '#/components/schemas/ChatMessageTokenLogprobs' + required: + - finish_reason + - index + - message + description: Chat completion choice + example: + finish_reason: stop + index: 0 + message: + role: assistant + content: The capital of France is Paris. + logprobs: null + ChatGenerationTokenUsage: + type: object + properties: + completion_tokens: type: number - description: 75th percentile - example: 35.2 - p90: + description: Number of tokens in the completion + prompt_tokens: type: number - description: 90th percentile - example: 48.7 - p99: + description: Number of tokens in the prompt + total_tokens: type: number - description: 99th percentile - example: 85.3 + description: Total number of tokens + completion_tokens_details: + type: object + nullable: true + properties: + reasoning_tokens: + type: number + nullable: true + description: Tokens used for reasoning + audio_tokens: + type: number + nullable: true + description: Tokens used for audio output + accepted_prediction_tokens: + type: number + nullable: true + description: Accepted prediction tokens + rejected_prediction_tokens: + type: number + nullable: true + description: Rejected prediction tokens + description: Detailed completion token usage + prompt_tokens_details: + type: object + nullable: true + properties: + cached_tokens: + type: number + description: Cached prompt tokens + cache_write_tokens: + type: number + description: Tokens written to cache. 
Only returned for models with explicit caching and cache write pricing. + audio_tokens: + type: number + description: Audio input tokens + video_tokens: + type: number + description: Video input tokens + description: Detailed prompt token usage required: - - p50 - - p75 - - p90 - - p99 - description: >- - Latency percentiles in milliseconds over the last 30 minutes. Latency measures time to first token. Only visible - when authenticated with an API key or cookie; returns null for unauthenticated requests. - PublicEndpoint: + - completion_tokens + - prompt_tokens + - total_tokens + description: Token usage statistics + example: + completion_tokens: 15 + prompt_tokens: 10 + total_tokens: 25 + completion_tokens_details: + reasoning_tokens: 5 + prompt_tokens_details: + cached_tokens: 2 + ChatResponse: type: object properties: - name: + id: type: string - model_id: + description: Unique completion identifier + example: chatcmpl-123 + choices: + type: array + items: + $ref: '#/components/schemas/ChatResponseChoice' + description: List of completion choices + created: + type: number + description: Unix timestamp of creation + example: 1677652288 + model: type: string - description: The unique identifier for the model (permaslug) + description: Model used for completion example: openai/gpt-4 - model_name: + object: type: string - context_length: + enum: + - chat.completion + system_fingerprint: + type: string + nullable: true + description: System fingerprint + example: fp_44709d6fcb + usage: + $ref: '#/components/schemas/ChatGenerationTokenUsage' + required: + - id + - choices + - created + - model + - object + description: Chat completion response + example: + id: chatcmpl-123 + object: chat.completion + created: 1677652288 + model: openai/gpt-4 + choices: + - index: 0 + message: + role: assistant + content: The capital of France is Paris. 
+ finish_reason: stop + usage: + prompt_tokens: 10 + completion_tokens: 15 + total_tokens: 25 + ChatStreamingMessageToolCall: + type: object + properties: + index: type: number - pricing: + description: Tool call index in the array + example: 0 + id: + type: string + description: Tool call identifier + example: call_abc123 + type: + type: string + enum: + - function + description: Tool call type + example: function + function: type: object properties: - prompt: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - completion: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - request: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image_token: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image_output: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - audio: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - audio_output: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_audio_cache: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - web_search: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - internal_reasoning: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a 
large number - input_cache_read: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_cache_write: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - discount: - type: number - required: - - prompt - - completion - additionalProperties: false - provider_name: - $ref: '#/components/schemas/ProviderName' - tag: + name: + type: string + description: Function name + example: get_weather + arguments: + type: string + description: Function arguments as JSON string + example: '{"location":' + description: Function call details + required: + - index + description: Tool call delta for streaming responses + example: + index: 0 + id: call_abc123 + type: function + function: + name: get_weather + arguments: '{"location":' + ChatStreamingMessageChunk: + type: object + properties: + role: + type: string + enum: + - assistant + description: The role of the message author + example: assistant + content: type: string - quantization: - allOf: - - $ref: '#/components/schemas/Quantization' - - nullable: true - max_completion_tokens: - type: number nullable: true - max_prompt_tokens: - type: number + description: Message content delta + example: Hello + reasoning: + type: string nullable: true - supported_parameters: + description: Reasoning content delta + example: I need to + refusal: + type: string + nullable: true + description: Refusal message delta + example: null + tool_calls: type: array items: - $ref: '#/components/schemas/Parameter' - status: - $ref: '#/components/schemas/EndpointStatus' - uptime_last_30m: + $ref: '#/components/schemas/ChatStreamingMessageToolCall' + description: Tool calls delta + reasoning_details: + $ref: '#/components/schemas/AssistantMessageReasoningDetails' + description: Delta changes in streaming response + example: + role: assistant + content: Hello + ChatStreamingChoice: + type: object + 
properties: + delta: + $ref: '#/components/schemas/ChatStreamingMessageChunk' + finish_reason: + anyOf: + - $ref: '#/components/schemas/ChatCompletionFinishReason' + - nullable: true + - nullable: true + index: type: number - nullable: true - supports_implicit_caching: - type: boolean - latency_last_30m: - $ref: '#/components/schemas/PercentileStats' - throughput_last_30m: - allOf: - - $ref: '#/components/schemas/PercentileStats' - - description: >- - Throughput percentiles in tokens per second over the last 30 minutes. Throughput measures output token - generation speed. Only visible when authenticated with an API key or cookie; returns null for - unauthenticated requests. + description: Choice index + example: 0 + logprobs: + $ref: '#/components/schemas/ChatMessageTokenLogprobs' required: - - name - - model_id - - model_name - - context_length - - pricing - - provider_name - - tag - - quantization - - max_completion_tokens - - max_prompt_tokens - - supported_parameters - - uptime_last_30m - - supports_implicit_caching - - latency_last_30m - - throughput_last_30m - description: Information about a specific model endpoint + - delta + - finish_reason + - index + description: Streaming completion choice chunk example: - name: 'OpenAI: GPT-4' - model_id: openai/gpt-4 - model_name: GPT-4 - context_length: 8192 - pricing: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - provider_name: OpenAI - tag: openai - quantization: fp16 - max_completion_tokens: 4096 - max_prompt_tokens: 8192 - supported_parameters: - - temperature - - top_p - - max_tokens - status: 0 - uptime_last_30m: 99.5 - supports_implicit_caching: true - latency_last_30m: - p50: 0.25 - p75: 0.35 - p90: 0.48 - p99: 0.85 - throughput_last_30m: - p50: 45.2 - p75: 38.5 - p90: 28.3 - p99: 15.1 - ListEndpointsResponse: + index: 0 + delta: + role: assistant + content: Hello + finish_reason: null + ChatStreamingResponseChunk: type: object properties: id: type: string - description: Unique 
identifier for the model - example: openai/gpt-4 - name: - type: string - description: Display name of the model - example: GPT-4 + description: Unique chunk identifier + example: chatcmpl-123 + choices: + type: array + items: + $ref: '#/components/schemas/ChatStreamingChoice' + description: List of streaming chunk choices created: type: number - description: Unix timestamp of when the model was created - example: 1692901234 - description: + description: Unix timestamp of creation + example: 1677652288 + model: type: string - description: Description of the model - example: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. - architecture: - allOf: - - $ref: '#/components/schemas/ModelArchitecture' - - properties: - tokenizer: - allOf: - - $ref: '#/components/schemas/ModelGroup' - - nullable: true - instruct_type: - $ref: '#/components/schemas/InstructType' - modality: - type: string - nullable: true - description: Primary modality of the model - example: text - input_modalities: - type: array - items: - $ref: '#/components/schemas/InputModality' - description: Supported input modalities - output_modalities: - type: array - items: - $ref: '#/components/schemas/OutputModality' - description: Supported output modalities - required: - - tokenizer - - instruct_type - - modality - - input_modalities - - output_modalities - example: - tokenizer: GPT - instruct_type: chatml - modality: text - endpoints: - type: array - items: - $ref: '#/components/schemas/PublicEndpoint' - description: List of available endpoints for this model + description: Model used for completion + example: openai/gpt-4 + object: + type: string + enum: + - chat.completion.chunk + system_fingerprint: + type: string + nullable: true + description: System fingerprint + example: fp_44709d6fcb + error: + type: object + properties: + message: + type: string + description: Error message + example: Rate limit exceeded + code: + type: number + description: Error code + 
example: 429 + required: + - message + - code + description: Error information + example: + message: Rate limit exceeded + code: 429 + usage: + $ref: '#/components/schemas/ChatGenerationTokenUsage' required: - id - - name + - choices - created - - description - - architecture - - endpoints - description: List of available endpoints for a model + - model + - object + description: Streaming chat completion chunk + x-speakeasy-entity: ChatStreamChunk example: - id: openai/gpt-4 - name: GPT-4 - created: 1692901234 - description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. - architecture: - tokenizer: GPT - instruct_type: chatml - modality: text->text - input_modalities: - - text - output_modalities: - - text - endpoints: - - name: 'OpenAI: GPT-4' - model_name: GPT-4 - context_length: 8192 - pricing: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - provider_name: OpenAI - tag: openai - quantization: fp16 - max_completion_tokens: 4096 - max_prompt_tokens: 8192 - supported_parameters: - - temperature - - top_p - - max_tokens - - frequency_penalty - - presence_penalty - status: default - uptime_last_30m: 99.5 - supports_implicit_caching: true - latency_last_30m: - p50: 0.25 - p75: 0.35 - p90: 0.48 - p99: 0.85 - throughput_last_30m: - p50: 45.2 - p75: 38.5 - p90: 28.3 - p99: 15.1 - __schema0: - anyOf: - - type: object - properties: - allow_fallbacks: - description: > - Whether to allow backup providers to serve requests - - - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use - the next best provider. - - - false: use only the primary/custom provider, and return the upstream error if it's unavailable. - $ref: '#/components/schemas/__schema1' - require_parameters: - description: >- - Whether to filter providers to only those that support the parameters you've provided. 
If this setting - is omitted or set to false, then providers will receive only the parameters they support, and ignore the - rest. - $ref: '#/components/schemas/__schema1' - data_collection: - description: >- - Data collection setting. If no available model provider meets the requirement, your request will return - an error. - - - allow: (default) allow providers which store user data non-transiently and may train on it - - - - deny: use only providers which do not collect user data. - $ref: '#/components/schemas/__schema3' - zdr: - anyOf: - - type: boolean - - type: 'null' - enforce_distillable_text: - anyOf: - - type: boolean - - type: 'null' - order: - description: >- - An ordered list of provider slugs. The router will attempt to use the first provider in the subset of - this list that supports your requested model, and fall back to the next if it is unavailable. If no - providers are available, the request will fail with an error message. - $ref: '#/components/schemas/__schema4' - only: - description: >- - List of provider slugs to allow. If provided, this list is merged with your account-wide allowed - provider settings for this request. - $ref: '#/components/schemas/__schema4' - ignore: - description: >- - List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored - provider settings for this request. - $ref: '#/components/schemas/__schema4' - quantizations: - description: A list of quantization levels to filter the provider by. - $ref: '#/components/schemas/__schema8' - sort: - description: >- - The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing - is performed. - $ref: '#/components/schemas/__schema9' - max_price: - description: >- - The object specifying the maximum price you want to pay for this request. USD price per million tokens, - for prompt and completion. 
- $ref: '#/components/schemas/__schema10' - preferred_min_throughput: - description: >- - Preferred minimum throughput (in tokens per second). Can be a number (applies to p50) or an object with - percentile-specific cutoffs. Endpoints below the threshold(s) may still be used, but are deprioritized - in routing. When using fallback models, this may cause a fallback model to be used instead of the - primary model if it meets the threshold. - $ref: '#/components/schemas/__schema15' - preferred_max_latency: - description: >- - Preferred maximum latency (in seconds). Can be a number (applies to p50) or an object with - percentile-specific cutoffs. Endpoints above the threshold(s) may still be used, but are deprioritized - in routing. When using fallback models, this may cause a fallback model to be used instead of the - primary model if it meets the threshold. - $ref: '#/components/schemas/__schema15' - additionalProperties: false - - type: 'null' - __schema1: - anyOf: - - type: boolean - - type: 'null' - __schema3: - anyOf: - - type: string - enum: - - deny - - allow - - type: 'null' - __schema4: - anyOf: - - $ref: '#/components/schemas/__schema5' - - type: 'null' - __schema5: - type: array - items: - anyOf: - - type: string - enum: - - AI21 - - AionLabs - - Alibaba - - Ambient - - Amazon Bedrock - - Amazon Nova - - Anthropic - - Arcee AI - - AtlasCloud - - Avian - - Azure - - BaseTen - - BytePlus - - Black Forest Labs - - Cerebras - - Chutes - - Cirrascale - - Clarifai - - Cloudflare - - Cohere - - Crusoe - - DeepInfra - - DeepSeek - - Featherless - - Fireworks - - Friendli - - GMICloud - - Google - - Google AI Studio - - Groq - - Hyperbolic - - Inception - - Inceptron - - InferenceNet - - Infermatic - - Inflection - - Liquid - - Mara - - Mancer 2 - - Minimax - - ModelRun - - Mistral - - Modular - - Moonshot AI - - Morph - - NCompass - - Nebius - - NextBit - - Novita - - Nvidia - - OpenAI - - OpenInference - - Parasail - - Perplexity - - Phala - - Relace - - SambaNova 
- - Seed - - SiliconFlow - - Sourceful - - StepFun - - Stealth - - StreamLake - - Switchpoint - - Together - - Upstage - - Venice - - WandB - - Xiaomi - - xAI - - Z.AI - - FakeProvider - - type: string - __schema8: - anyOf: - - type: array - items: - type: string - enum: - - int4 - - int8 - - fp4 - - fp6 - - fp8 - - fp16 - - bf16 - - fp32 - - unknown - - type: 'null' - __schema9: - anyOf: - - $ref: '#/components/schemas/ProviderSortUnion' - - type: 'null' - __schema10: + id: chatcmpl-123 + object: chat.completion.chunk + created: 1677652288 + model: openai/gpt-4 + choices: + - index: 0 + delta: + role: assistant + content: Hello + finish_reason: null + SystemMessage: type: object properties: - prompt: - anyOf: - - $ref: '#/components/schemas/__schema11' - - $ref: '#/components/schemas/ModelName' - - $ref: '#/components/schemas/__schema13' - completion: - anyOf: - - $ref: '#/components/schemas/__schema11' - - $ref: '#/components/schemas/ModelName' - - $ref: '#/components/schemas/__schema13' - image: - $ref: '#/components/schemas/__schema14' - audio: - $ref: '#/components/schemas/__schema14' - request: - $ref: '#/components/schemas/__schema14' - __schema11: - type: number - __schema13: {} - __schema14: - anyOf: - - $ref: '#/components/schemas/__schema11' - - $ref: '#/components/schemas/ModelName' - - $ref: '#/components/schemas/__schema13' - __schema15: - anyOf: - - anyOf: - - type: number - - type: object - properties: - p50: - anyOf: - - type: number - - type: 'null' - p75: - anyOf: - - type: number - - type: 'null' - p90: - anyOf: - - type: number - - type: 'null' - p99: - anyOf: - - type: number - - type: 'null' - - type: 'null' - __schema17: - type: array - items: - oneOf: - - type: object - properties: - id: - type: string - const: auto-router - enabled: - type: boolean - allowed_models: - type: array - items: - type: string - required: - - id - - type: object - properties: - id: - type: string - const: moderation - required: - - id - - type: object - 
properties: - id: - type: string - const: web - enabled: - type: boolean - max_results: - type: number - search_prompt: - type: string - engine: - type: string - enum: - - native - - exa - required: - - id - - type: object - properties: - id: - type: string - const: file-parser - enabled: - type: boolean - pdf: - type: object - properties: - engine: - type: string - enum: - - mistral-ocr - - pdf-text - - native - required: - - id - - type: object - properties: - id: - type: string - const: response-healing - enabled: - type: boolean - required: - - id - type: object - __schema18: - type: string - maxLength: 128 - __schema19: - oneOf: - - type: object - properties: - type: - type: string - const: reasoning.summary - summary: - type: string - id: - $ref: '#/components/schemas/__schema20' - format: - $ref: '#/components/schemas/__schema21' - index: - $ref: '#/components/schemas/__schema11' - required: - - type - - summary - - type: object - properties: - type: - type: string - const: reasoning.encrypted - data: - type: string - id: - $ref: '#/components/schemas/__schema20' - format: - $ref: '#/components/schemas/__schema21' - index: - $ref: '#/components/schemas/__schema11' - required: - - type - - data - - type: object - properties: - type: - type: string - const: reasoning.text - text: - anyOf: - - type: string - - type: 'null' - signature: - anyOf: - - type: string - - type: 'null' - id: - $ref: '#/components/schemas/__schema20' - format: - $ref: '#/components/schemas/__schema21' - index: - $ref: '#/components/schemas/__schema11' - required: - - type - type: object - __schema20: - anyOf: - - type: string - - type: 'null' - __schema21: - anyOf: - - type: string + role: + type: string enum: - - unknown - - openai-responses-v1 - - azure-openai-responses-v1 - - xai-responses-v1 - - anthropic-claude-v1 - - google-gemini-v1 - - type: 'null' - __schema25: - anyOf: - - $ref: '#/components/schemas/ChatCompletionFinishReason' - - type: 'null' - ModelName: - type: string - 
ChatMessageContentItemText: + - system + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItemText' + description: System message content + example: You are a helpful assistant. + name: + type: string + description: Optional name for the system message + example: Assistant Config + required: + - role + - content + description: System message for setting behavior + example: + role: system + content: You are a helpful assistant. + name: Assistant Config + UserMessage: type: object properties: - type: + role: type: string - const: text - text: + enum: + - user + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItem' + description: User message content + example: What is the capital of France? + name: type: string - cache_control: - $ref: '#/components/schemas/ChatMessageContentItemCacheControl' + description: Optional name for the user + example: User required: - - type - - text - ChatMessageContentItemImage: + - role + - content + description: User message + example: + role: user + content: What is the capital of France? + DeveloperMessage: type: object properties: - type: + role: type: string - const: image_url - image_url: - type: object - properties: - url: - type: string - detail: - type: string - enum: - - auto - - low - - high - required: - - url + enum: + - developer + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItemText' + description: Developer message content + example: This is a message from the developer. + name: + type: string + description: Optional name for the developer message + example: Developer required: - - type - - image_url - ChatMessageContentItemAudio: + - role + - content + description: Developer message + example: + role: developer + content: This is a message from the developer. 
+ ToolResponseMessage: type: object properties: - type: + role: type: string - const: input_audio - input_audio: - type: object - properties: - data: - type: string - format: - type: string - required: - - data - - format + enum: + - tool + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItem' + description: Tool response content + example: The weather in San Francisco is 72°F and sunny. + tool_call_id: + type: string + description: ID of the assistant message tool call this message responds to + example: call_abc123 required: - - type - - input_audio - ChatMessageContentItemVideo: - oneOf: - - type: object - properties: - type: - type: string - const: input_video - video_url: - type: object - properties: - url: - type: string - required: - - url - required: - - type - - video_url - - type: object - properties: - type: - type: string - const: video_url - video_url: - type: object - properties: - url: - type: string - required: - - url - required: - - type - - video_url - type: object - ChatMessageContentItem: + - role + - content + - tool_call_id + description: Tool response message + example: + role: tool + content: The weather in San Francisco is 72°F and sunny. 
+ tool_call_id: call_abc123 + Message: oneOf: - - $ref: '#/components/schemas/ChatMessageContentItemText' - - $ref: '#/components/schemas/ChatMessageContentItemImage' - - $ref: '#/components/schemas/ChatMessageContentItemAudio' - - $ref: '#/components/schemas/ChatMessageContentItemVideo' - type: object + - $ref: '#/components/schemas/SystemMessage' + - $ref: '#/components/schemas/UserMessage' + - $ref: '#/components/schemas/DeveloperMessage' + - $ref: '#/components/schemas/AssistantMessage' + - $ref: '#/components/schemas/ToolResponseMessage' discriminator: - propertyName: type - mapping: - text: '#/components/schemas/ChatMessageContentItemText' - image_url: '#/components/schemas/ChatMessageContentItemImage' - input_audio: '#/components/schemas/ChatMessageContentItemAudio' - input_video: '#/components/schemas/ChatMessageContentItemVideo' - video_url: '#/components/schemas/ChatMessageContentItemVideo' - ChatMessageToolCall: + propertyName: role + description: Chat completion message with role-based discrimination + example: + role: user + content: What is the capital of France? 
+ ModelName: + type: string + description: Model to use for completion + example: openai/gpt-4 + ModelNames: + type: array + items: + allOf: + - $ref: '#/components/schemas/ModelName' + - description: Available OpenRouter chat completion models + description: Models to use for completion + example: + - openai/gpt-4 + - openai/gpt-4o + ResponseFormatText: type: object properties: - id: - type: string type: type: string - const: function - function: - type: object - properties: - name: - type: string - arguments: - type: string - required: - - name - - arguments + enum: + - text required: - - id - type - - function - ChatMessageTokenLogprob: + description: Default text response format + example: + type: text + ResponseFormatJSONObject: type: object properties: - token: + type: type: string - logprob: - type: number - bytes: - anyOf: - - type: array - items: - type: number - - type: 'null' - top_logprobs: - type: array - items: - type: object - properties: - token: - type: string - logprob: - type: number - bytes: - anyOf: - - type: array - items: - type: number - - type: 'null' - required: - - token - - logprob - - bytes - required: - - token - - logprob - - bytes - - top_logprobs - ChatMessageTokenLogprobs: - type: object - properties: - content: - anyOf: - - type: array - items: - $ref: '#/components/schemas/ChatMessageTokenLogprob' - - type: 'null' - refusal: - anyOf: - - type: array - items: - $ref: '#/components/schemas/ChatMessageTokenLogprob' - - type: 'null' - required: - - content - - refusal - ChatGenerationTokenUsage: - type: object - properties: - completion_tokens: - type: number - prompt_tokens: - type: number - total_tokens: - type: number - completion_tokens_details: - anyOf: - - type: object - properties: - reasoning_tokens: - anyOf: - - type: number - - type: 'null' - audio_tokens: - anyOf: - - type: number - - type: 'null' - accepted_prediction_tokens: - anyOf: - - type: number - - type: 'null' - rejected_prediction_tokens: - anyOf: - - type: 
number - - type: 'null' - - type: 'null' - prompt_tokens_details: - anyOf: - - type: object - properties: - cached_tokens: - type: number - cache_write_tokens: - type: number - audio_tokens: - type: number - video_tokens: - type: number - - type: 'null' + enum: + - json_object required: - - completion_tokens - - prompt_tokens - - total_tokens - ChatCompletionFinishReason: - type: string - enum: - - tool_calls - - stop - - length - - content_filter - - error + - type + description: JSON object response format + example: + type: json_object JSONSchemaConfig: type: object properties: name: type: string maxLength: 64 + description: Schema name (a-z, A-Z, 0-9, underscores, dashes, max 64 chars) + example: math_response description: type: string + description: Schema description for the model + example: A mathematical response schema: type: object - propertyNames: - type: string - additionalProperties: {} + additionalProperties: + nullable: true + description: JSON Schema object + example: + type: object + properties: + answer: + type: number + required: + - answer strict: - anyOf: - - type: boolean - - type: 'null' + type: boolean + nullable: true + description: Enable strict schema adherence + example: false required: - name + description: JSON Schema configuration object + example: + name: math_response + description: A mathematical response + schema: + type: object + properties: + answer: + type: number + required: + - answer + strict: true ResponseFormatJSONSchema: type: object properties: type: type: string - const: json_schema + enum: + - json_schema json_schema: $ref: '#/components/schemas/JSONSchemaConfig' required: - type - json_schema + description: JSON Schema response format for structured outputs + example: + type: json_schema + json_schema: + name: math_response + schema: + type: object + properties: + answer: + type: number + required: + - answer ResponseFormatTextGrammar: type: object properties: type: type: string - const: grammar + enum: + - grammar 
grammar: type: string + description: Custom grammar for text generation + example: root ::= "yes" | "no" required: - type - grammar - ChatMessageContentItemCacheControl: + description: Custom grammar response format + example: + type: grammar + grammar: root ::= "yes" | "no" + ResponseFormatTextPython: type: object properties: type: - type: string - const: ephemeral - ttl: type: string enum: - - 5m - - 1h + - python required: - type - SystemMessage: + description: Python code response format + example: + type: python + ChatStreamOptions: type: object + nullable: true properties: - role: - type: string - const: system - content: - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItemText' - name: - type: string - required: - - role - - content - UserMessage: + include_usage: + type: boolean + description: 'Deprecated: This field has no effect. Full usage details are always included.' + example: true + deprecated: true + description: Streaming configuration options + example: + include_usage: true + NamedToolChoice: type: object properties: - role: - type: string - const: user - content: - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItem' - name: + type: type: string + enum: + - function + function: + type: object + properties: + name: + type: string + description: Function name to call + example: get_weather + required: + - name required: - - role - - content - DeveloperMessage: - type: object - properties: - role: - type: string - const: developer - content: - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItemText' - name: - type: string - required: - - role - - content - AssistantMessage: - type: object - properties: - role: - type: string - const: assistant - content: - anyOf: - - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItem' - - type: 'null' - name: - type: 
string - tool_calls: - type: array - items: - $ref: '#/components/schemas/ChatMessageToolCall' - refusal: - anyOf: - - type: string - - type: 'null' - reasoning: - anyOf: - - type: string - - type: 'null' - reasoning_details: - type: array - items: - $ref: '#/components/schemas/__schema19' - images: - type: array - items: - type: object - properties: - image_url: - type: object - properties: - url: - type: string - required: - - url - required: - - image_url - required: - - role - ToolResponseMessage: - type: object - properties: - role: - type: string - const: tool - content: - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItem' - tool_call_id: - type: string - required: - - role - - content - - tool_call_id - Message: - oneOf: - - $ref: '#/components/schemas/SystemMessage' - - $ref: '#/components/schemas/UserMessage' - - $ref: '#/components/schemas/DeveloperMessage' - - $ref: '#/components/schemas/AssistantMessage' - - $ref: '#/components/schemas/ToolResponseMessage' - type: object - discriminator: - propertyName: role - mapping: - system: '#/components/schemas/SystemMessage' - user: '#/components/schemas/UserMessage' - developer: '#/components/schemas/DeveloperMessage' - assistant: '#/components/schemas/AssistantMessage' - tool: '#/components/schemas/ToolResponseMessage' + - type + - function + description: Named tool choice for specific function + example: + type: function + function: + name: get_weather + ToolChoiceOption: + anyOf: + - type: string + enum: + - none + - type: string + enum: + - auto + - type: string + enum: + - required + - $ref: '#/components/schemas/NamedToolChoice' + description: Tool choice configuration + example: auto ToolDefinitionJson: type: object properties: type: type: string - const: function + enum: + - function function: type: object properties: name: type: string maxLength: 64 + description: Function name (a-z, A-Z, 0-9, underscores, dashes, max 64 chars) + example: get_weather 
description: type: string + description: Function description for the model + example: Get the current weather for a location parameters: type: object - propertyNames: - type: string - additionalProperties: {} + additionalProperties: + nullable: true + description: Function parameters as JSON Schema object + example: + type: object + properties: + location: + type: string + description: City name + required: + - location strict: - anyOf: - - type: boolean - - type: 'null' + type: boolean + nullable: true + description: Enable strict schema adherence + example: false required: - name + description: Function definition for tool calling + example: + name: get_weather + description: Get the current weather for a location + parameters: + type: object + properties: + location: + type: string + description: City name + required: + - location + cache_control: + $ref: '#/components/schemas/ChatMessageContentItemCacheControl' required: - type - function - NamedToolChoice: - type: object - properties: - type: - type: string - const: function + description: Tool definition for function calling + example: + type: function function: - type: object - properties: - name: - type: string - required: - - name - required: - - type - - function - ToolChoiceOption: - anyOf: - - type: string - const: none - - type: string - const: auto - - type: string - const: required - - $ref: '#/components/schemas/NamedToolChoice' - ChatStreamOptions: + name: get_weather + description: Get the current weather for a location + parameters: + type: object + properties: + location: + type: string + description: City name + unit: + type: string + enum: + - celsius + - fahrenheit + required: + - location + DebugOptions: type: object properties: - include_usage: + echo_upstream_body: type: boolean + description: >- + If true, includes the transformed upstream request body in a debug chunk at the start of the stream. Only + works with streaming mode. 
+ example: true + description: Debug options for inspecting request transformations (streaming only) + example: + echo_upstream_body: true ChatGenerationParams: type: object properties: provider: - description: When multiple model providers are available, optionally indicate your routing preference. - $ref: '#/components/schemas/__schema0' - plugins: - description: Plugins you want to enable for this request, including their settings. - $ref: '#/components/schemas/__schema17' - route: - anyOf: - - type: string - enum: - - fallback - - sort - - type: 'null' - user: - type: string - session_id: - description: >- - A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for - observability. If provided in both the request body and the x-session-id header, the body value takes - precedence. Maximum of 128 characters. - $ref: '#/components/schemas/__schema18' - messages: - minItems: 1 - type: array - items: - $ref: '#/components/schemas/Message' - model: - $ref: '#/components/schemas/ModelName' - models: - type: array - items: - $ref: '#/components/schemas/ModelName' - frequency_penalty: - anyOf: - - type: number - minimum: -2 - maximum: 2 - - type: 'null' - logit_bias: - anyOf: - - type: object - propertyNames: - type: string - additionalProperties: - type: number - - type: 'null' - logprobs: - anyOf: - - type: boolean - - type: 'null' - top_logprobs: - anyOf: - - type: number - minimum: 0 - maximum: 20 - - type: 'null' - max_completion_tokens: - anyOf: - - type: number - minimum: 1 - - type: 'null' - max_tokens: - anyOf: - - type: number - minimum: 1 - - type: 'null' - metadata: - type: object - propertyNames: - type: string - additionalProperties: - type: string - presence_penalty: - anyOf: - - type: number - minimum: -2 - maximum: 2 - - type: 'null' - reasoning: type: object + nullable: true properties: - effort: - anyOf: - - type: string - enum: - - xhigh - - high - - medium - - low - - minimal - - none - - type: 'null' - summary: 
- anyOf: - - $ref: '#/components/schemas/ReasoningSummaryVerbosity' - - type: 'null' - response_format: - oneOf: - - type: object - properties: - type: - type: string - const: text - required: - - type - - type: object - properties: - type: - type: string - const: json_object - required: - - type - - $ref: '#/components/schemas/ResponseFormatJSONSchema' - - $ref: '#/components/schemas/ResponseFormatTextGrammar' - - type: object - properties: - type: - type: string - const: python - required: - - type - type: object - seed: - anyOf: - - type: integer - minimum: -9007199254740991 - maximum: 9007199254740991 - - type: 'null' - stop: - anyOf: - - anyOf: - - type: string - - maxItems: 4 - type: array - items: - $ref: '#/components/schemas/ModelName' - - type: 'null' - stream: - default: false - type: boolean - stream_options: - anyOf: - - $ref: '#/components/schemas/ChatStreamOptions' - - type: 'null' - temperature: - default: 1 - anyOf: - - type: number - minimum: 0 - maximum: 2 - - type: 'null' - tool_choice: - $ref: '#/components/schemas/ToolChoiceOption' - tools: - type: array - items: - $ref: '#/components/schemas/ToolDefinitionJson' - top_p: - default: 1 - anyOf: - - type: number - minimum: 0 - maximum: 1 - - type: 'null' - debug: + allow_fallbacks: + type: boolean + nullable: true + description: > + Whether to allow backup providers to serve requests + + - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use + the next best provider. + + - false: use only the primary/custom provider, and return the upstream error if it's unavailable. + require_parameters: + type: boolean + nullable: true + description: >- + Whether to filter providers to only those that support the parameters you've provided. If this setting + is omitted or set to false, then providers will receive only the parameters they support, and ignore the + rest. 
+ data_collection: + $ref: '#/components/schemas/DataCollection' + zdr: + type: boolean + nullable: true + description: >- + Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that + do not retain prompts will be used. + example: true + enforce_distillable_text: + type: boolean + nullable: true + description: >- + Whether to restrict routing to only models that allow text distillation. When true, only models where + the author has allowed distillation will be used. + example: true + order: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + An ordered list of provider slugs. The router will attempt to use the first provider in the subset of + this list that supports your requested model, and fall back to the next if it is unavailable. If no + providers are available, the request will fail with an error message. + only: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + List of provider slugs to allow. If provided, this list is merged with your account-wide allowed + provider settings for this request. + ignore: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored + provider settings for this request. + quantizations: + type: array + nullable: true + items: + $ref: '#/components/schemas/Quantization' + description: A list of quantization levels to filter the provider by. + sort: + allOf: + - $ref: '#/components/schemas/ProviderSort' + - anyOf: + - $ref: '#/components/schemas/ProviderSort' + - $ref: '#/components/schemas/ProviderSortConfig' + - nullable: true + description: >- + The sorting strategy to use for this request, if "order" is not specified. 
When set, no load + balancing is performed. + max_price: + type: object + properties: + prompt: + $ref: '#/components/schemas/BigNumberUnion' + completion: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per million completion tokens + image: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per image + audio: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per audio unit + request: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per request + description: >- + The object specifying the maximum price you want to pay for this request. USD price per million tokens, + for prompt and completion. + preferred_min_throughput: + $ref: '#/components/schemas/PreferredMinThroughput' + preferred_max_latency: + $ref: '#/components/schemas/PreferredMaxLatency' + additionalProperties: false + description: When multiple model providers are available, optionally indicate your routing preference. + plugins: + type: array + items: + oneOf: + - type: object + properties: + id: + type: string + enum: + - auto-router + enabled: + type: boolean + description: Set to false to disable the auto-router plugin for this request. Defaults to true. + allowed_models: + type: array + items: + type: string + description: >- + List of model patterns to filter which models the auto-router can route between. Supports + wildcards (e.g., "anthropic/*" matches all Anthropic models). When not specified, uses the default + supported models list. + example: + - anthropic/* + - openai/gpt-4o + - google/* + required: + - id + - type: object + properties: + id: + type: string + enum: + - moderation + required: + - id + - type: object + properties: + id: + type: string + enum: + - web + enabled: + type: boolean + description: Set to false to disable the web-search plugin for this request. Defaults to true. 
+ max_results: + type: number + search_prompt: + type: string + engine: + $ref: '#/components/schemas/WebSearchEngine' + required: + - id + - type: object + properties: + id: + type: string + enum: + - file-parser + enabled: + type: boolean + description: Set to false to disable the file-parser plugin for this request. Defaults to true. + pdf: + $ref: '#/components/schemas/PDFParserOptions' + required: + - id + - type: object + properties: + id: + type: string + enum: + - response-healing + enabled: + type: boolean + description: Set to false to disable the response-healing plugin for this request. Defaults to true. + required: + - id + description: Plugins you want to enable for this request, including their settings. + route: + type: string + nullable: true + enum: + - fallback + - sort + deprecated: true + description: >- + **DEPRECATED** Use providers.sort.partition instead. Backwards-compatible alias for + providers.sort.partition. Accepts legacy values: "fallback" (maps to "model"), "sort" (maps to "none"). + x-speakeasy-deprecation-message: Use providers.sort.partition instead. + x-speakeasy-ignore: true + x-fern-ignore: true + user: + type: string + description: Unique user identifier + example: user-123 + session_id: + type: string + maxLength: 128 + description: >- + A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for + observability. If provided in both the request body and the x-session-id header, the body value takes + precedence. Maximum of 128 characters. + trace: type: object properties: - echo_upstream_body: - type: boolean - image_config: + trace_id: + type: string + trace_name: + type: string + span_name: + type: string + generation_name: + type: string + parent_span_id: + type: string + additionalProperties: + nullable: true + description: >- + Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, + parent_span_id) have special handling. 
Additional keys are passed through as custom metadata to configured + broadcast destinations. + messages: + type: array + items: + $ref: '#/components/schemas/Message' + minItems: 1 + description: List of messages for the conversation + example: + - role: user + content: Hello! + model: + $ref: '#/components/schemas/ModelName' + models: + $ref: '#/components/schemas/ModelNames' + frequency_penalty: + type: number + nullable: true + minimum: -2 + maximum: 2 + description: Frequency penalty (-2.0 to 2.0) + example: 0 + logit_bias: type: object - propertyNames: - type: string + nullable: true additionalProperties: - anyOf: - - type: string - - type: number - - type: array - items: {} - modalities: + type: number + description: Token logit bias adjustments + example: + '50256': -100 + logprobs: + type: boolean + nullable: true + description: Return log probabilities + example: false + top_logprobs: + type: number + nullable: true + minimum: 0 + maximum: 20 + description: Number of top log probabilities to return (0-20) + example: 5 + max_completion_tokens: + type: number + nullable: true + minimum: 1 + description: Maximum tokens in completion + example: 100 + max_tokens: + type: number + nullable: true + minimum: 1 + description: Maximum tokens (deprecated, use max_completion_tokens) + example: 100 + metadata: + type: object + additionalProperties: + type: string + description: Key-value pairs for additional object information (max 16 pairs, 64 char keys, 512 char values) + example: + user_id: user-123 + session_id: session-456 + presence_penalty: + type: number + nullable: true + minimum: -2 + maximum: 2 + description: Presence penalty (-2.0 to 2.0) + example: 0 + reasoning: + type: object + properties: + effort: + type: string + nullable: true + enum: + - xhigh + - high + - medium + - low + - minimal + - none + description: Constrains effort on reasoning for reasoning models + example: medium + summary: + anyOf: + - $ref: 
'#/components/schemas/ReasoningSummaryVerbosity' + - nullable: true + - nullable: true + description: Configuration options for reasoning models + example: + effort: medium + summary: concise + response_format: + oneOf: + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: '#/components/schemas/ResponseFormatJSONObject' + - $ref: '#/components/schemas/ResponseFormatJSONSchema' + - $ref: '#/components/schemas/ResponseFormatTextGrammar' + - $ref: '#/components/schemas/ResponseFormatTextPython' + discriminator: + propertyName: type + description: Response format configuration + example: + type: json_object + seed: + type: integer + nullable: true + description: Random seed for deterministic outputs + example: 42 + stop: + anyOf: + - type: string + - type: array + items: + type: string + maxItems: 4 + - nullable: true + description: Stop sequences (up to 4) + example: + - |+ + + stream: + type: boolean + default: false + description: Enable streaming response + example: false + stream_options: + $ref: '#/components/schemas/ChatStreamOptions' + temperature: + type: number + nullable: true + minimum: 0 + maximum: 2 + default: 1 + description: Sampling temperature (0-2) + example: 0.7 + parallel_tool_calls: + type: boolean + nullable: true + tool_choice: + $ref: '#/components/schemas/ToolChoiceOption' + tools: + type: array + items: + $ref: '#/components/schemas/ToolDefinitionJson' + description: Available tools for function calling + example: + - type: function + function: + name: get_weather + description: Get weather + top_p: + type: number + nullable: true + minimum: 0 + maximum: 1 + default: 1 + description: Nucleus sampling parameter (0-1) + example: 1 + debug: + $ref: '#/components/schemas/DebugOptions' + image_config: + type: object + additionalProperties: + anyOf: + - type: string + - type: number + - type: array + items: + nullable: true + description: >- + Provider-specific image configuration options. Keys and values vary by model/provider. 
See + https://openrouter.ai/docs/guides/overview/multimodal/image-generation for more details. + example: + aspect_ratio: '16:9' + modalities: + type: array + items: + type: string + enum: + - text + - image + description: Output modalities for the response. Supported values are "text" and "image". + example: + - text + - image + required: + - messages + description: Chat completion request parameters + example: + messages: + - role: system + content: You are a helpful assistant. + - role: user + content: What is the capital of France? + model: openai/gpt-4 + temperature: 0.7 + max_tokens: 150 + CreateChargeRequest: + type: object + properties: + amount: + type: number + sender: + type: string + chain_id: + type: integer + enum: + - 1 + - 137 + - 8453 + required: + - amount + - sender + - chain_id + description: Create a Coinbase charge for crypto payment + example: + amount: 100 + sender: '0x1234567890123456789012345678901234567890' + chain_id: 1 + ProviderPreferences: + type: object + properties: + allow_fallbacks: + type: boolean + nullable: true + description: > + Whether to allow backup providers to serve requests + + - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use the + next best provider. + + - false: use only the primary/custom provider, and return the upstream error if it's unavailable. + require_parameters: + type: boolean + nullable: true + description: >- + Whether to filter providers to only those that support the parameters you've provided. If this setting is + omitted or set to false, then providers will receive only the parameters they support, and ignore the rest. + data_collection: + $ref: '#/components/schemas/DataCollection' + zdr: + type: boolean + nullable: true + description: >- + Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that do + not retain prompts will be used. 
+ example: true + enforce_distillable_text: + type: boolean + nullable: true + description: >- + Whether to restrict routing to only models that allow text distillation. When true, only models where the + author has allowed distillation will be used. + example: true + order: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this + list that supports your requested model, and fall back to the next if it is unavailable. If no providers are + available, the request will fail with an error message. + only: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider + settings for this request. + ignore: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider + settings for this request. + quantizations: + type: array + nullable: true + items: + $ref: '#/components/schemas/Quantization' + description: A list of quantization levels to filter the provider by. + sort: + allOf: + - $ref: '#/components/schemas/ProviderSort' + - anyOf: + - $ref: '#/components/schemas/ProviderSort' + - $ref: '#/components/schemas/ProviderSortConfig' + - nullable: true + description: >- + The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing + is performed. 
+ max_price: + type: object + properties: + prompt: + $ref: '#/components/schemas/BigNumberUnion' + completion: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per million completion tokens + image: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per image + audio: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per audio unit + request: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per request + description: >- + The object specifying the maximum price you want to pay for this request. USD price per million tokens, for + prompt and completion. + preferred_min_throughput: + $ref: '#/components/schemas/PreferredMinThroughput' + preferred_max_latency: + $ref: '#/components/schemas/PreferredMaxLatency' + description: Provider routing preferences for the request. + PublicPricing: + type: object + properties: + prompt: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + completion: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + request: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image_token: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image_output: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + audio: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + audio_output: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - 
description: A number or string value representing a large number + input_audio_cache: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + web_search: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + internal_reasoning: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_cache_read: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_cache_write: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + discount: + type: number + required: + - prompt + - completion + description: Pricing information for the model + example: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + ModelGroup: + type: string + enum: + - Router + - Media + - Other + - GPT + - Claude + - Gemini + - Grok + - Cohere + - Nova + - Qwen + - Yi + - DeepSeek + - Mistral + - Llama2 + - Llama3 + - Llama4 + - PaLM + - RWKV + - Qwen3 + example: GPT + description: Tokenizer type used by the model + InputModality: + type: string + enum: + - text + - image + - file + - audio + - video + example: text + OutputModality: + type: string + enum: + - text + - image + - embeddings + - audio + example: text + ModelArchitecture: + type: object + properties: + tokenizer: + $ref: '#/components/schemas/ModelGroup' + instruct_type: + type: string + nullable: true + enum: + - none + - airoboros + - alpaca + - alpaca-modif + - chatml + - claude + - code-llama + - gemma + - llama2 + - llama3 + - mistral + - nemotron + - neural + - openchat + - phi3 + - rwkv + - vicuna + - zephyr + - deepseek-r1 + - deepseek-v3.1 + - qwq + - qwen3 + example: chatml + description: Instruction format type + modality: + type: string 
+ nullable: true + description: Primary modality of the model + example: text->text + input_modalities: + type: array + items: + $ref: '#/components/schemas/InputModality' + description: Supported input modalities + output_modalities: + type: array + items: + $ref: '#/components/schemas/OutputModality' + description: Supported output modalities + required: + - modality + - input_modalities + - output_modalities + description: Model architecture information + example: + tokenizer: GPT + instruct_type: chatml + modality: text->text + input_modalities: + - text + output_modalities: + - text + TopProviderInfo: + type: object + properties: + context_length: + type: number + nullable: true + description: Context length from the top provider + example: 8192 + max_completion_tokens: + type: number + nullable: true + description: Maximum completion tokens from the top provider + example: 4096 + is_moderated: + type: boolean + description: Whether the top provider moderates content + example: true + required: + - is_moderated + description: Information about the top provider for this model + example: + context_length: 8192 + max_completion_tokens: 4096 + is_moderated: true + PerRequestLimits: + type: object + nullable: true + properties: + prompt_tokens: + type: number + example: 1000 + description: Maximum prompt tokens per request + completion_tokens: + type: number + example: 1000 + description: Maximum completion tokens per request + required: + - prompt_tokens + - completion_tokens + description: Per-request token limits + example: + prompt_tokens: 1000 + completion_tokens: 1000 + Parameter: + type: string + enum: + - temperature + - top_p + - top_k + - min_p + - top_a + - frequency_penalty + - presence_penalty + - repetition_penalty + - max_tokens + - logit_bias + - logprobs + - top_logprobs + - seed + - response_format + - structured_outputs + - stop + - tools + - tool_choice + - parallel_tool_calls + - include_reasoning + - reasoning + - reasoning_effort + - 
web_search_options + - verbosity + example: temperature + DefaultParameters: + type: object + nullable: true + properties: + temperature: + type: number + nullable: true + minimum: 0 + maximum: 2 + top_p: + type: number + nullable: true + minimum: 0 + maximum: 1 + frequency_penalty: + type: number + nullable: true + minimum: -2 + maximum: 2 + additionalProperties: false + description: Default parameters for this model + example: + temperature: 0.7 + top_p: 0.9 + frequency_penalty: 0 + Model: + type: object + properties: + id: + type: string + description: Unique identifier for the model + example: openai/gpt-4 + canonical_slug: + type: string + description: Canonical slug for the model + example: openai/gpt-4 + hugging_face_id: + type: string + nullable: true + description: Hugging Face model identifier, if applicable + example: microsoft/DialoGPT-medium + name: + type: string + description: Display name of the model + example: GPT-4 + created: + type: number + description: Unix timestamp of when the model was created + example: 1692901234 + description: + type: string + description: Description of the model + example: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. + pricing: + $ref: '#/components/schemas/PublicPricing' + context_length: + type: number + nullable: true + description: Maximum context length in tokens + example: 8192 + architecture: + $ref: '#/components/schemas/ModelArchitecture' + top_provider: + $ref: '#/components/schemas/TopProviderInfo' + per_request_limits: + $ref: '#/components/schemas/PerRequestLimits' + supported_parameters: type: array items: - type: string - enum: - - text - - image + $ref: '#/components/schemas/Parameter' + description: List of supported parameters for this model + default_parameters: + $ref: '#/components/schemas/DefaultParameters' + expiration_date: + type: string + nullable: true + description: The date after which the model may be removed. 
ISO 8601 date string (YYYY-MM-DD) or null if no expiration. + example: '2025-06-01' required: - - messages - ProviderSortUnion: - anyOf: - - $ref: '#/components/schemas/ProviderSort' - - $ref: '#/components/schemas/ProviderSortConfig' - ChatResponseChoice: + - id + - canonical_slug + - name + - created + - pricing + - context_length + - architecture + - top_provider + - per_request_limits + - supported_parameters + - default_parameters + description: Information about an AI model available on OpenRouter + example: + id: openai/gpt-4 + canonical_slug: openai/gpt-4 + name: GPT-4 + created: 1692901234 + description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. + pricing: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + context_length: 8192 + architecture: + tokenizer: GPT + instruct_type: chatml + modality: text->text + input_modalities: + - text + output_modalities: + - text + top_provider: + context_length: 8192 + max_completion_tokens: 4096 + is_moderated: true + per_request_limits: null + supported_parameters: + - temperature + - top_p + - max_tokens + default_parameters: null + expiration_date: null + ModelsListResponseData: + type: array + items: + $ref: '#/components/schemas/Model' + description: List of available models + ModelsListResponse: type: object properties: - finish_reason: - $ref: '#/components/schemas/__schema25' - index: - type: number - message: - $ref: '#/components/schemas/AssistantMessage' - logprobs: - anyOf: - - $ref: '#/components/schemas/ChatMessageTokenLogprobs' - - type: 'null' + data: + $ref: '#/components/schemas/ModelsListResponseData' required: - - finish_reason - - index - - message - ChatStreamingMessageToolCall: + - data + description: List of available models + example: + data: + - id: openai/gpt-4 + canonical_slug: openai/gpt-4 + name: GPT-4 + created: 1692901234 + description: GPT-4 is a large multimodal model that can solve difficult problems with greater 
accuracy. + pricing: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + context_length: 8192 + architecture: + tokenizer: GPT + instruct_type: chatml + modality: text->text + input_modalities: + - text + output_modalities: + - text + top_provider: + context_length: 8192 + max_completion_tokens: 4096 + is_moderated: true + per_request_limits: null + supported_parameters: + - temperature + - top_p + - max_tokens + - frequency_penalty + - presence_penalty + default_parameters: null + expiration_date: null + ModelsCountResponse: type: object properties: - index: - type: number - id: - type: string - type: - type: string - const: function - function: + data: type: object properties: - name: - type: string - arguments: - type: string + count: + type: number + description: Total number of available models + example: 150 + required: + - count + description: Model count data + example: + count: 150 required: - - index - ChatStreamingMessageChunk: - type: object - properties: - role: - type: string - enum: - - assistant - content: - anyOf: - - type: string - - type: 'null' - reasoning: - anyOf: - - type: string - - type: 'null' - refusal: - anyOf: - - type: string - - type: 'null' - tool_calls: - type: array - items: - $ref: '#/components/schemas/ChatStreamingMessageToolCall' - reasoning_details: - type: array - items: - $ref: '#/components/schemas/__schema19' - ChatStreamingChoice: + - data + description: Model count data + example: + data: + count: 150 + InstructType: + type: string + nullable: true + enum: + - none + - airoboros + - alpaca + - alpaca-modif + - chatml + - claude + - code-llama + - gemma + - llama2 + - llama3 + - mistral + - nemotron + - neural + - openchat + - phi3 + - rwkv + - vicuna + - zephyr + - deepseek-r1 + - deepseek-v3.1 + - qwq + - qwen3 + description: Instruction format type + EndpointStatus: + type: integer + enum: + - 0 + - -1 + - -2 + - -3 + - -5 + - -10 + example: 0 + PercentileStats: type: object + nullable: true 
properties: - delta: - $ref: '#/components/schemas/ChatStreamingMessageChunk' - finish_reason: - $ref: '#/components/schemas/__schema25' - index: + p50: + type: number + description: Median (50th percentile) + example: 25.5 + p75: + type: number + description: 75th percentile + example: 35.2 + p90: type: number - logprobs: - anyOf: - - $ref: '#/components/schemas/ChatMessageTokenLogprobs' - - type: 'null' + description: 90th percentile + example: 48.7 + p99: + type: number + description: 99th percentile + example: 85.3 required: - - delta - - finish_reason - - index - ChatError: + - p50 + - p75 + - p90 + - p99 + description: >- + Latency percentiles in milliseconds over the last 30 minutes. Latency measures time to first token. Only visible + when authenticated with an API key or cookie; returns null for unauthenticated requests. + PublicEndpoint: type: object properties: - error: + name: + type: string + model_id: + type: string + description: The unique identifier for the model (permaslug) + example: openai/gpt-4 + model_name: + type: string + context_length: + type: number + pricing: type: object properties: - code: - anyOf: - - anyOf: - - type: string - - type: number - - type: 'null' - message: - type: string - param: - anyOf: - - type: string - - type: 'null' - type: - anyOf: - - type: string - - type: 'null' + prompt: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + completion: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + request: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image_token: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string 
value representing a large number + image_output: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + audio: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + audio_output: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_audio_cache: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + web_search: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + internal_reasoning: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_cache_read: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_cache_write: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + discount: + type: number required: - - code - - message - additionalProperties: false + - prompt + - completion + provider_name: + $ref: '#/components/schemas/ProviderName' + tag: + type: string + quantization: + allOf: + - $ref: '#/components/schemas/Quantization' + - nullable: true + max_completion_tokens: + type: number + nullable: true + max_prompt_tokens: + type: number + nullable: true + supported_parameters: + type: array + items: + $ref: '#/components/schemas/Parameter' + status: + $ref: '#/components/schemas/EndpointStatus' + uptime_last_30m: + type: number + nullable: true + supports_implicit_caching: + type: boolean + latency_last_30m: + $ref: '#/components/schemas/PercentileStats' + throughput_last_30m: + allOf: + - $ref: '#/components/schemas/PercentileStats' + - description: >- + 
Throughput percentiles in tokens per second over the last 30 minutes. Throughput measures output token + generation speed. Only visible when authenticated with an API key or cookie; returns null for + unauthenticated requests. required: - - error - additionalProperties: false - ChatResponse: + - name + - model_id + - model_name + - context_length + - pricing + - provider_name + - tag + - quantization + - max_completion_tokens + - max_prompt_tokens + - supported_parameters + - uptime_last_30m + - supports_implicit_caching + - latency_last_30m + - throughput_last_30m + description: Information about a specific model endpoint + example: + name: 'OpenAI: GPT-4' + model_id: openai/gpt-4 + model_name: GPT-4 + context_length: 8192 + pricing: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + provider_name: OpenAI + tag: openai + quantization: fp16 + max_completion_tokens: 4096 + max_prompt_tokens: 8192 + supported_parameters: + - temperature + - top_p + - max_tokens + status: 0 + uptime_last_30m: 99.5 + supports_implicit_caching: true + latency_last_30m: + p50: 0.25 + p75: 0.35 + p90: 0.48 + p99: 0.85 + throughput_last_30m: + p50: 45.2 + p75: 38.5 + p90: 28.3 + p99: 15.1 + ListEndpointsResponse: type: object properties: id: type: string - choices: - type: array - items: - $ref: '#/components/schemas/ChatResponseChoice' + description: Unique identifier for the model + example: openai/gpt-4 + name: + type: string + description: Display name of the model + example: GPT-4 created: type: number - model: - type: string - object: + description: Unix timestamp of when the model was created + example: 1692901234 + description: type: string - const: chat.completion - system_fingerprint: - anyOf: - - type: string - - type: 'null' - usage: - $ref: '#/components/schemas/ChatGenerationTokenUsage' - required: - - id - - choices - - created - - model - - object - additionalProperties: false - ChatStreamingResponseChunk: - type: object - properties: - data: - type: 
object - properties: - id: - type: string - choices: - type: array - items: - $ref: '#/components/schemas/ChatStreamingChoice' - created: - type: number - model: - type: string - object: - type: string - const: chat.completion.chunk - system_fingerprint: - anyOf: - - type: string - - type: 'null' - error: - type: object - properties: - message: + description: Description of the model + example: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. + architecture: + allOf: + - $ref: '#/components/schemas/ModelArchitecture' + - properties: + tokenizer: + allOf: + - $ref: '#/components/schemas/ModelGroup' + - nullable: true + instruct_type: + $ref: '#/components/schemas/InstructType' + modality: type: string - code: - type: number + nullable: true + description: Primary modality of the model + example: text + input_modalities: + type: array + items: + $ref: '#/components/schemas/InputModality' + description: Supported input modalities + output_modalities: + type: array + items: + $ref: '#/components/schemas/OutputModality' + description: Supported output modalities required: - - message - - code - additionalProperties: false - usage: - $ref: '#/components/schemas/ChatGenerationTokenUsage' - required: - - id - - choices - - created - - model - - object - additionalProperties: false + - tokenizer + - instruct_type + - modality + - input_modalities + - output_modalities + example: + tokenizer: GPT + instruct_type: chatml + modality: text + endpoints: + type: array + items: + $ref: '#/components/schemas/PublicEndpoint' + description: List of available endpoints for this model required: - - data - additionalProperties: false + - id + - name + - created + - description + - architecture + - endpoints + description: List of available endpoints for a model + example: + id: openai/gpt-4 + name: GPT-4 + created: 1692901234 + description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. 
+ architecture: + tokenizer: GPT + instruct_type: chatml + modality: text->text + input_modalities: + - text + output_modalities: + - text + endpoints: + - name: 'OpenAI: GPT-4' + model_name: GPT-4 + context_length: 8192 + pricing: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + provider_name: OpenAI + tag: openai + quantization: fp16 + max_completion_tokens: 4096 + max_prompt_tokens: 8192 + supported_parameters: + - temperature + - top_p + - max_tokens + - frequency_penalty + - presence_penalty + status: 0 + uptime_last_30m: 99.5 + supports_implicit_caching: true + latency_last_30m: + p50: 0.25 + p75: 0.35 + p90: 0.48 + p99: 0.85 + throughput_last_30m: + p50: 45.2 + p75: 38.5 + p90: 28.3 + p99: 15.1 parameters: {} securitySchemes: - apiKey: - type: http - scheme: bearer - description: API key as bearer token in Authorization header bearer: type: http scheme: bearer @@ -9885,6 +10270,118 @@ paths: application/json: schema: $ref: '#/components/schemas/InternalServerResponse' + /chat/completions: + post: + x-speakeasy-group: chat + x-speakeasy-name-override: send + x-speakeasy-stream-request-field: stream + tags: + - Chat + summary: Create a chat completion + operationId: sendChatCompletionRequest + description: >- + Sends a request for a model response for the given chat conversation. Supports both streaming and non-streaming + modes.
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatGenerationParams' + required: true + responses: + '200': + description: Successful chat completion response + content: + application/json: + schema: + $ref: '#/components/schemas/ChatResponse' + text/event-stream: + schema: + type: object + properties: + data: + $ref: '#/components/schemas/ChatStreamingResponseChunk' + required: + - data + x-speakeasy-sse-sentinel: '[DONE]' + '400': + description: Bad Request - Invalid request parameters or malformed input + content: + application/json: + schema: + $ref: '#/components/schemas/BadRequestResponse' + '401': + description: Unauthorized - Authentication required or invalid credentials + content: + application/json: + schema: + $ref: '#/components/schemas/UnauthorizedResponse' + '402': + description: Payment Required - Insufficient credits or quota to complete request + content: + application/json: + schema: + $ref: '#/components/schemas/PaymentRequiredResponse' + '404': + description: Not Found - Resource does not exist + content: + application/json: + schema: + $ref: '#/components/schemas/NotFoundResponse' + '408': + description: Request Timeout - Operation exceeded time limit + content: + application/json: + schema: + $ref: '#/components/schemas/RequestTimeoutResponse' + '413': + description: Payload Too Large - Request payload exceeds size limits + content: + application/json: + schema: + $ref: '#/components/schemas/PayloadTooLargeResponse' + '422': + description: Unprocessable Entity - Semantic validation failure + content: + application/json: + schema: + $ref: '#/components/schemas/UnprocessableEntityResponse' + '429': + description: Too Many Requests - Rate limit exceeded + content: + application/json: + schema: + $ref: '#/components/schemas/TooManyRequestsResponse' + '500': + description: Internal Server Error - Unexpected server error + content: + application/json: + schema: + $ref: 
'#/components/schemas/InternalServerResponse' + '502': + description: Bad Gateway - Provider/upstream API failure + content: + application/json: + schema: + $ref: '#/components/schemas/BadGatewayResponse' + '503': + description: Service Unavailable - Service temporarily unavailable + content: + application/json: + schema: + $ref: '#/components/schemas/ServiceUnavailableResponse' + '524': + description: Infrastructure Timeout - Request timed out at our edge network + content: + application/json: + schema: + $ref: '#/components/schemas/EdgeNetworkTimeoutResponse' + '529': + description: Provider Overloaded - Provider is temporarily overloaded + content: + application/json: + schema: + $ref: '#/components/schemas/ProviderOverloadedResponse' /credits: get: x-speakeasy-name-override: getCredits @@ -10558,6 +11055,7 @@ paths: - Inceptron - InferenceNet - Infermatic + - Io Net - Inflection - Liquid - Mara @@ -13710,6 +14208,19 @@ paths: nullable: true format: date-time description: Optional expiration time for the API key to be created + key_label: + type: string + maxLength: 100 + description: Optional custom label for the API key. Defaults to the app name if not provided. + example: My Custom Key + usage_limit_type: + type: string + enum: + - daily + - weekly + - monthly + description: Optional credit limit reset interval. When set, the credit limit resets on this interval. + example: monthly required: - callback_url example: @@ -13771,70 +14282,6 @@ paths: schema: $ref: '#/components/schemas/InternalServerResponse' operationId: createAuthKeysCode - /chat/completions: - post: - summary: Create a chat completion - operationId: sendChatCompletionRequest - x-speakeasy-group: chat - x-speakeasy-name-override: send - x-speakeasy-stream-request-field: stream - description: >- - Sends a request for a model response for the given chat conversation. Supports both streaming and non-streaming - modes. 
- tags: - - Chat - requestBody: - required: true - description: Chat completion request parameters - content: - application/json: - schema: - $ref: '#/components/schemas/ChatGenerationParams' - responses: - '200': - description: Successful chat completion response - content: - application/json: - schema: - $ref: '#/components/schemas/ChatResponse' - description: Chat completion response - text/event-stream: - x-speakeasy-sse-sentinel: '[DONE]' - schema: - $ref: '#/components/schemas/ChatStreamingResponseChunk' - '400': - description: Bad request - invalid parameters - content: - application/json: - schema: - $ref: '#/components/schemas/ChatError' - '401': - description: Unauthorized - invalid API key - content: - application/json: - schema: - $ref: '#/components/schemas/ChatError' - '429': - description: Too many requests - rate limit exceeded - content: - application/json: - schema: - $ref: '#/components/schemas/ChatError' - '500': - description: Internal server error - content: - application/json: - schema: - $ref: '#/components/schemas/ChatError' -servers: - - url: https://openrouter.ai/api/v1 - description: Production server - x-speakeasy-server-id: production -security: - - apiKey: [] -externalDocs: - description: OpenRouter Documentation - url: https://openrouter.ai/docs tags: - name: API Keys description: API key management endpoints diff --git a/.speakeasy/out.openapi.yaml b/.speakeasy/out.openapi.yaml index a8742654..9092a593 100644 --- a/.speakeasy/out.openapi.yaml +++ b/.speakeasy/out.openapi.yaml @@ -10,6 +10,15 @@ info: license: name: MIT url: https://opensource.org/licenses/MIT +servers: + - url: https://openrouter.ai/api/v1 + description: Production server + x-speakeasy-server-id: production +security: + - apiKey: [] +externalDocs: + description: OpenRouter Documentation + url: https://openrouter.ai/docs components: schemas: OpenAIResponsesResponseStatus: @@ -1128,10 +1137,12 @@ components: x-speakeasy-unknown-values: allow ReasoningSummaryVerbosity: 
type: string + nullable: true enum: - auto - concise - detailed + example: auto x-speakeasy-unknown-values: allow OpenAIResponsesReasoningConfig: type: object @@ -1170,18 +1181,6 @@ components: description: Plain text response format example: type: text - ResponsesFormatJSONObject: - type: object - properties: - type: - type: string - enum: - - json_object - required: - - type - description: JSON object response format - example: - type: json_object ResponsesFormatTextJSONSchemaConfig: type: object properties: @@ -1221,7 +1220,7 @@ components: ResponseFormatTextConfig: anyOf: - $ref: '#/components/schemas/ResponsesFormatText' - - $ref: '#/components/schemas/ResponsesFormatJSONObject' + - $ref: '#/components/schemas/ResponseFormatJSONObject' - $ref: '#/components/schemas/ResponsesFormatTextJSONSchemaConfig' description: Text response format configuration example: @@ -3593,6 +3592,7 @@ components: - Inceptron - InferenceNet - Infermatic + - Io Net - Inflection - Liquid - Mara @@ -3652,22 +3652,36 @@ components: - price - throughput - latency + description: The provider sorting strategy (price, throughput, latency) + example: price x-speakeasy-unknown-values: allow ProviderSortConfig: type: object properties: by: - anyOf: - - $ref: '#/components/schemas/ProviderSort' - - type: 'null' + type: string + nullable: true + enum: + - price + - throughput + - latency + description: The provider sorting strategy (price, throughput, latency) + example: price + x-speakeasy-unknown-values: allow partition: - anyOf: - - type: string - enum: - - model - - none - x-speakeasy-unknown-values: allow - - type: 'null' + type: string + nullable: true + enum: + - model + - none + description: >- + Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. 
+ example: model + x-speakeasy-unknown-values: allow + description: The provider sorting strategy (price, throughput, latency) + example: + by: price + partition: model BigNumberUnion: type: string description: Price per million prompt tokens @@ -4097,6 +4111,23 @@ components: maxLength: 128 description: >- A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. + trace: + type: object + properties: + trace_id: + type: string + trace_name: + type: string + span_name: + type: string + generation_name: + type: string + parent_span_id: + type: string + additionalProperties: + nullable: true + description: >- + Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. description: Request schema for Responses endpoint example: model: anthropic/claude-4.5-sonnet-20250929 @@ -7209,6 +7240,23 @@ components: maxLength: 128 description: >- A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. + trace: + type: object + properties: + trace_id: + type: string + trace_name: + type: string + span_name: + type: string + generation_name: + type: string + parent_span_id: + type: string + additionalProperties: + nullable: true + description: >- + Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. 
models: type: array items: @@ -7332,2149 +7380,2480 @@ components: error: code: 403 message: Only management keys can perform this operation - CreateChargeRequest: + ChatCompletionFinishReason: + type: string + enum: + - tool_calls + - stop + - length + - content_filter + - error + example: stop + x-speakeasy-unknown-values: allow + ChatMessageContentItemCacheControl: type: object properties: - amount: - type: number - sender: + type: type: string - chain_id: - type: integer enum: - - 1 - - 137 - - 8453 + - ephemeral + ttl: + type: string + enum: + - 5m + - 1h x-speakeasy-unknown-values: allow required: - - amount - - sender - - chain_id - description: Create a Coinbase charge for crypto payment + - type + description: Cache control for the content part example: - amount: 100 - sender: '0x1234567890123456789012345678901234567890' - chain_id: 1 - ProviderPreferences: + type: ephemeral + ttl: 5m + ChatMessageContentItemText: type: object properties: - allow_fallbacks: - type: boolean - nullable: true - description: > - Whether to allow backup providers to serve requests - - - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use the next best provider. - - - false: use only the primary/custom provider, and return the upstream error if it's unavailable. - - require_parameters: - type: boolean - nullable: true - description: >- - Whether to filter providers to only those that support the parameters you've provided. If this setting is omitted or set to false, then providers will receive only the parameters they support, and ignore the rest. - data_collection: - $ref: '#/components/schemas/DataCollection' - zdr: - type: boolean - nullable: true - description: >- - Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that do not retain prompts will be used. 
- example: true - enforce_distillable_text: - type: boolean - nullable: true - description: >- - Whether to restrict routing to only models that allow text distillation. When true, only models where the author has allowed distillation will be used. - example: true - order: - type: array - nullable: true - items: - anyOf: - - $ref: '#/components/schemas/ProviderName' - - type: string - description: >- - An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message. - only: - type: array - nullable: true - items: - anyOf: - - $ref: '#/components/schemas/ProviderName' - - type: string - description: >- - List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request. - ignore: - type: array - nullable: true - items: - anyOf: - - $ref: '#/components/schemas/ProviderName' - - type: string - description: >- - List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request. - quantizations: - type: array - nullable: true - items: - $ref: '#/components/schemas/Quantization' - description: A list of quantization levels to filter the provider by. - sort: - allOf: - - $ref: '#/components/schemas/ProviderSort' - - anyOf: - - $ref: '#/components/schemas/ProviderSort' - - $ref: '#/components/schemas/ProviderSortConfig' - - nullable: true - description: >- - The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing is performed. - max_price: + type: + type: string + enum: + - text + text: + type: string + cache_control: + $ref: '#/components/schemas/ChatMessageContentItemCacheControl' + required: + - type + - text + description: Text content part + example: + type: text + text: Hello, world! 
+ ChatMessageContentItemImage: + type: object + properties: + type: + type: string + enum: + - image_url + image_url: type: object properties: - prompt: - $ref: '#/components/schemas/BigNumberUnion' - completion: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: Price per million completion tokens - image: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: Price per image - audio: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: Price per audio unit - request: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: Price per request - description: >- - The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. - preferred_min_throughput: - $ref: '#/components/schemas/PreferredMinThroughput' - preferred_max_latency: - $ref: '#/components/schemas/PreferredMaxLatency' - description: Provider routing preferences for the request. - PublicPricing: + url: + type: string + description: 'URL of the image (data: URLs supported)' + detail: + type: string + enum: + - auto + - low + - high + description: Image detail level for vision models + x-speakeasy-unknown-values: allow + required: + - url + required: + - type + - image_url + description: Image content part for vision models + example: + type: image_url + image_url: + url: https://example.com/image.jpg + detail: auto + ChatMessageContentItemAudio: type: object properties: - prompt: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - completion: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - request: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A 
number or string value representing a large number - image_token: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image_output: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - audio: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - audio_output: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_audio_cache: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - web_search: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - internal_reasoning: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_cache_read: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_cache_write: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - discount: - type: number + type: + type: string + enum: + - input_audio + input_audio: + type: object + properties: + data: + type: string + description: Base64 encoded audio data + format: + type: string + description: >- + Audio format (e.g., wav, mp3, flac, m4a, ogg, aiff, aac, pcm16, pcm24). Supported formats vary by provider. + required: + - data + - format required: - - prompt - - completion - additionalProperties: false - description: Pricing information for the model + - type + - input_audio + description: Audio input content part. Supported audio formats vary by provider. 
example: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - ModelGroup: - type: string - enum: - - Router - - Media - - Other - - GPT - - Claude - - Gemini - - Grok - - Cohere - - Nova - - Qwen - - Yi - - DeepSeek - - Mistral - - Llama2 - - Llama3 - - Llama4 - - PaLM - - RWKV - - Qwen3 - example: GPT - description: Tokenizer type used by the model - x-speakeasy-unknown-values: allow - InputModality: - type: string - enum: - - text - - image - - file - - audio - - video - example: text - x-speakeasy-unknown-values: allow - OutputModality: - type: string - enum: - - text - - image - - embeddings - - audio - example: text - x-speakeasy-unknown-values: allow - ModelArchitecture: + type: input_audio + input_audio: + data: SGVsbG8gV29ybGQ= + format: wav + VideoInput: type: object properties: - tokenizer: - $ref: '#/components/schemas/ModelGroup' - instruct_type: + url: type: string - nullable: true - enum: - - none - - airoboros - - alpaca - - alpaca-modif - - chatml - - claude - - code-llama - - gemma - - llama2 - - llama3 - - mistral - - nemotron - - neural - - openchat - - phi3 - - rwkv - - vicuna - - zephyr - - deepseek-r1 - - deepseek-v3.1 - - qwq - - qwen3 - example: chatml - description: Instruction format type - x-speakeasy-unknown-values: allow - modality: + description: 'URL of the video (data: URLs supported)' + required: + - url + description: Video input object + ChatMessageContentItemVideoLegacy: + type: object + properties: + type: type: string - nullable: true - description: Primary modality of the model - example: text->text - input_modalities: - type: array - items: - $ref: '#/components/schemas/InputModality' - description: Supported input modalities - output_modalities: - type: array - items: - $ref: '#/components/schemas/OutputModality' - description: Supported output modalities + enum: + - input_video + video_url: + $ref: '#/components/schemas/VideoInput' required: - - modality - - input_modalities - - output_modalities - 
description: Model architecture information + - type + - video_url + description: Video input content part (legacy format - deprecated) + deprecated: true example: - tokenizer: GPT - instruct_type: chatml - modality: text->text - input_modalities: - - text - output_modalities: - - text - TopProviderInfo: + type: input_video + video_url: + url: https://example.com/video.mp4 + ChatMessageContentItemVideo: type: object properties: - context_length: - type: number - nullable: true - description: Context length from the top provider - example: 8192 - max_completion_tokens: - type: number - nullable: true - description: Maximum completion tokens from the top provider - example: 4096 - is_moderated: - type: boolean - description: Whether the top provider moderates content - example: true + type: + type: string + enum: + - video_url + video_url: + $ref: '#/components/schemas/VideoInput' required: - - is_moderated - description: Information about the top provider for this model + - type + - video_url + description: Video input content part example: - context_length: 8192 - max_completion_tokens: 4096 - is_moderated: true - PerRequestLimits: + type: video_url + video_url: + url: https://example.com/video.mp4 + ChatMessageContentItem: + oneOf: + - $ref: '#/components/schemas/ChatMessageContentItemText' + - $ref: '#/components/schemas/ChatMessageContentItemImage' + - $ref: '#/components/schemas/ChatMessageContentItemAudio' + - oneOf: + - $ref: '#/components/schemas/ChatMessageContentItemVideoLegacy' + - $ref: '#/components/schemas/ChatMessageContentItemVideo' + discriminator: + propertyName: type + mapping: + input_video: '#/components/schemas/ChatMessageContentItemVideoLegacy' + video_url: '#/components/schemas/ChatMessageContentItemVideo' + discriminator: + propertyName: type + description: Content part for chat completion messages + example: + type: text + text: Hello, world! 
+ ChatMessageToolCall: type: object - nullable: true properties: - prompt_tokens: - type: number - example: 1000 - description: Maximum prompt tokens per request - completion_tokens: - type: number - example: 1000 - description: Maximum completion tokens per request + id: + type: string + description: Tool call identifier + type: + type: string + enum: + - function + function: + type: object + properties: + name: + type: string + description: Function name to call + arguments: + type: string + description: Function arguments as JSON string + required: + - name + - arguments required: - - prompt_tokens - - completion_tokens - description: Per-request token limits + - id + - type + - function + description: Tool call made by the assistant example: - prompt_tokens: 1000 - completion_tokens: 1000 - Parameter: - type: string - enum: - - temperature - - top_p - - top_k - - min_p - - top_a - - frequency_penalty - - presence_penalty - - repetition_penalty - - max_tokens - - logit_bias - - logprobs - - top_logprobs - - seed - - response_format - - structured_outputs - - stop - - tools - - tool_choice - - parallel_tool_calls - - include_reasoning - - reasoning - - reasoning_effort - - web_search_options - - verbosity - example: temperature - x-speakeasy-unknown-values: allow - DefaultParameters: + id: call_abc123 + type: function + function: + name: get_current_weather + arguments: '{"location": "Boston, MA"}' + ReasoningDetailSummary: type: object - nullable: true properties: - temperature: - type: number + type: + type: string + enum: + - reasoning.summary + summary: + type: string + id: + type: string nullable: true - minimum: 0 - maximum: 2 - top_p: - type: number + format: + type: string nullable: true - minimum: 0 - maximum: 1 - frequency_penalty: + enum: + - unknown + - openai-responses-v1 + - azure-openai-responses-v1 + - xai-responses-v1 + - anthropic-claude-v1 + - google-gemini-v1 + x-speakeasy-unknown-values: allow + index: type: number - nullable: true - minimum: 
-2 - maximum: 2 - additionalProperties: false - description: Default parameters for this model + required: + - type + - summary + description: Reasoning detail summary schema example: - temperature: 0.7 - top_p: 0.9 - frequency_penalty: 0 - Model: + type: reasoning.summary + summary: The model analyzed the problem by first identifying key constraints, then evaluating possible solutions... + ReasoningDetailEncrypted: type: object properties: - id: + type: type: string - description: Unique identifier for the model - example: openai/gpt-4 - canonical_slug: + enum: + - reasoning.encrypted + data: type: string - description: Canonical slug for the model - example: openai/gpt-4 - hugging_face_id: + id: type: string nullable: true - description: Hugging Face model identifier, if applicable - example: microsoft/DialoGPT-medium - name: + format: type: string - description: Display name of the model - example: GPT-4 - created: + nullable: true + enum: + - unknown + - openai-responses-v1 + - azure-openai-responses-v1 + - xai-responses-v1 + - anthropic-claude-v1 + - google-gemini-v1 + x-speakeasy-unknown-values: allow + index: type: number - description: Unix timestamp of when the model was created - example: 1692901234 - description: + required: + - type + - data + description: Reasoning detail encrypted schema + example: + type: reasoning.encrypted + data: encrypted data + ReasoningDetailText: + type: object + properties: + type: + type: string + enum: + - reasoning.text + text: type: string - description: Description of the model - example: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. 
- pricing: - $ref: '#/components/schemas/PublicPricing' - context_length: - type: number nullable: true - description: Maximum context length in tokens - example: 8192 - architecture: - $ref: '#/components/schemas/ModelArchitecture' - top_provider: - $ref: '#/components/schemas/TopProviderInfo' - per_request_limits: - $ref: '#/components/schemas/PerRequestLimits' - supported_parameters: - type: array - items: - $ref: '#/components/schemas/Parameter' - description: List of supported parameters for this model - default_parameters: - $ref: '#/components/schemas/DefaultParameters' - expiration_date: + signature: type: string nullable: true - description: The date after which the model may be removed. ISO 8601 date string (YYYY-MM-DD) or null if no expiration. - example: '2025-06-01' + id: + type: string + nullable: true + format: + type: string + nullable: true + enum: + - unknown + - openai-responses-v1 + - azure-openai-responses-v1 + - xai-responses-v1 + - anthropic-claude-v1 + - google-gemini-v1 + x-speakeasy-unknown-values: allow + index: + type: number required: - - id - - canonical_slug - - name - - created - - pricing - - context_length - - architecture - - top_provider - - per_request_limits - - supported_parameters - - default_parameters - description: Information about an AI model available on OpenRouter + - type + description: Reasoning detail text schema example: - id: openai/gpt-4 - canonical_slug: openai/gpt-4 - name: GPT-4 - created: 1692901234 - description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. 
- pricing: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - context_length: 8192 - architecture: - tokenizer: GPT - instruct_type: chatml - modality: text->text - input_modalities: - - text - output_modalities: - - text - top_provider: - context_length: 8192 - max_completion_tokens: 4096 - is_moderated: true - per_request_limits: null - supported_parameters: - - temperature - - top_p - - max_tokens - default_parameters: null - expiration_date: null - ModelsListResponseData: + type: reasoning.text + text: The model analyzed the problem by first identifying key constraints, then evaluating possible solutions... + signature: signature + ReasoningDetailUnion: + oneOf: + - $ref: '#/components/schemas/ReasoningDetailSummary' + - $ref: '#/components/schemas/ReasoningDetailEncrypted' + - $ref: '#/components/schemas/ReasoningDetailText' + discriminator: + propertyName: type + mapping: + reasoning.summary: '#/components/schemas/ReasoningDetailSummary' + reasoning.encrypted: '#/components/schemas/ReasoningDetailEncrypted' + reasoning.text: '#/components/schemas/ReasoningDetailText' + description: Reasoning detail union schema + example: + type: reasoning.summary + summary: The model analyzed the problem by first identifying key constraints, then evaluating possible solutions... + AssistantMessageReasoningDetails: type: array items: - $ref: '#/components/schemas/Model' - description: List of available models - ModelsListResponse: + $ref: '#/components/schemas/ReasoningDetailUnion' + description: Reasoning details for extended thinking models + AssistantMessageImages: + type: array + items: + type: object + properties: + image_url: + type: object + properties: + url: + type: string + description: URL or base64-encoded data of the generated image + required: + - url + required: + - image_url + description: Generated images from image generation models + example: + - image_url: + url: data:image/png;base64,iVBORw0KGgo... 
+ AssistantMessage: type: object properties: - data: - $ref: '#/components/schemas/ModelsListResponseData' + role: + type: string + enum: + - assistant + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItem' + - nullable: true + description: Assistant message content + name: + type: string + description: Optional name for the assistant + tool_calls: + type: array + items: + $ref: '#/components/schemas/ChatMessageToolCall' + description: Tool calls made by the assistant + refusal: + type: string + nullable: true + description: Refusal message if content was refused + reasoning: + type: string + nullable: true + description: Reasoning output + reasoning_details: + $ref: '#/components/schemas/AssistantMessageReasoningDetails' + images: + $ref: '#/components/schemas/AssistantMessageImages' required: - - data - description: List of available models + - role + description: Assistant message for requests and responses example: - data: - - id: openai/gpt-4 - canonical_slug: openai/gpt-4 - name: GPT-4 - created: 1692901234 - description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. - pricing: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - context_length: 8192 - architecture: - tokenizer: GPT - instruct_type: chatml - modality: text->text - input_modalities: - - text - output_modalities: - - text - top_provider: - context_length: 8192 - max_completion_tokens: 4096 - is_moderated: true - per_request_limits: null - supported_parameters: - - temperature - - top_p - - max_tokens - - frequency_penalty - - presence_penalty - default_parameters: null - expiration_date: null - ModelsCountResponse: + role: assistant + content: The capital of France is Paris. 
+ ChatMessageTokenLogprob: type: object properties: - data: - type: object - properties: - count: - type: number - description: Total number of available models - example: 150 - required: - - count - description: Model count data - example: - count: 150 + token: + type: string + description: The token + logprob: + type: number + description: Log probability of the token + bytes: + type: array + nullable: true + items: + type: number + description: UTF-8 bytes of the token + top_logprobs: + type: array + items: + type: object + properties: + token: + type: string + logprob: + type: number + bytes: + type: array + nullable: true + items: + type: number + required: + - token + - logprob + - bytes + description: Top alternative tokens with probabilities required: - - data - description: Model count data + - token + - logprob + - bytes + - top_logprobs + description: Token log probability information example: - data: - count: 150 - InstructType: - type: string - nullable: true - enum: - - none - - airoboros - - alpaca - - alpaca-modif - - chatml - - claude - - code-llama - - gemma - - llama2 - - llama3 - - mistral - - nemotron - - neural - - openchat - - phi3 - - rwkv - - vicuna - - zephyr - - deepseek-r1 - - deepseek-v3.1 - - qwq - - qwen3 - description: Instruction format type - x-speakeasy-unknown-values: allow - EndpointStatus: - type: integer - enum: - - 0 - - -1 - - -2 - - -3 - - -5 - - -10 - example: 0 - x-speakeasy-unknown-values: allow - PercentileStats: + token: ' Hello' + logprob: -0.612345 + bytes: null + top_logprobs: + - token: ' Hello' + logprob: -0.612345 + bytes: null + ChatMessageTokenLogprobs: type: object nullable: true properties: - p50: + content: + type: array + nullable: true + items: + $ref: '#/components/schemas/ChatMessageTokenLogprob' + description: Log probabilities for content tokens + refusal: + type: array + nullable: true + items: + $ref: '#/components/schemas/ChatMessageTokenLogprob' + description: Log probabilities for refusal tokens + 
required: + - content + - refusal + description: Log probabilities for the completion + example: + content: + - token: ' Hello' + logprob: -0.612345 + bytes: null + top_logprobs: [] + refusal: null + ChatResponseChoice: + type: object + properties: + finish_reason: + anyOf: + - $ref: '#/components/schemas/ChatCompletionFinishReason' + - nullable: true + - nullable: true + index: type: number - description: Median (50th percentile) - example: 25.5 - p75: + description: Choice index + example: 0 + message: + $ref: '#/components/schemas/AssistantMessage' + logprobs: + $ref: '#/components/schemas/ChatMessageTokenLogprobs' + required: + - finish_reason + - index + - message + description: Chat completion choice + example: + finish_reason: stop + index: 0 + message: + role: assistant + content: The capital of France is Paris. + logprobs: null + ChatGenerationTokenUsage: + type: object + properties: + completion_tokens: type: number - description: 75th percentile - example: 35.2 - p90: + description: Number of tokens in the completion + prompt_tokens: type: number - description: 90th percentile - example: 48.7 - p99: + description: Number of tokens in the prompt + total_tokens: type: number - description: 99th percentile - example: 85.3 + description: Total number of tokens + completion_tokens_details: + type: object + nullable: true + properties: + reasoning_tokens: + type: number + nullable: true + description: Tokens used for reasoning + audio_tokens: + type: number + nullable: true + description: Tokens used for audio output + accepted_prediction_tokens: + type: number + nullable: true + description: Accepted prediction tokens + rejected_prediction_tokens: + type: number + nullable: true + description: Rejected prediction tokens + description: Detailed completion token usage + prompt_tokens_details: + type: object + nullable: true + properties: + cached_tokens: + type: number + description: Cached prompt tokens + cache_write_tokens: + type: number + description: 
Tokens written to cache. Only returned for models with explicit caching and cache write pricing. + audio_tokens: + type: number + description: Audio input tokens + video_tokens: + type: number + description: Video input tokens + description: Detailed prompt token usage required: - - p50 - - p75 - - p90 - - p99 - description: >- - Latency percentiles in milliseconds over the last 30 minutes. Latency measures time to first token. Only visible when authenticated with an API key or cookie; returns null for unauthenticated requests. - PublicEndpoint: + - completion_tokens + - prompt_tokens + - total_tokens + description: Token usage statistics + example: + completion_tokens: 15 + prompt_tokens: 10 + total_tokens: 25 + completion_tokens_details: + reasoning_tokens: 5 + prompt_tokens_details: + cached_tokens: 2 + ChatResponse: type: object properties: - name: + id: type: string - model_id: + description: Unique completion identifier + example: chatcmpl-123 + choices: + type: array + items: + $ref: '#/components/schemas/ChatResponseChoice' + description: List of completion choices + created: + type: number + description: Unix timestamp of creation + example: 1677652288 + model: type: string - description: The unique identifier for the model (permaslug) + description: Model used for completion example: openai/gpt-4 - model_name: + object: type: string - context_length: + enum: + - chat.completion + system_fingerprint: + type: string + nullable: true + description: System fingerprint + example: fp_44709d6fcb + usage: + $ref: '#/components/schemas/ChatGenerationTokenUsage' + required: + - id + - choices + - created + - model + - object + description: Chat completion response + example: + id: chatcmpl-123 + object: chat.completion + created: 1677652288 + model: openai/gpt-4 + choices: + - index: 0 + message: + role: assistant + content: The capital of France is Paris. 
+ finish_reason: stop + usage: + prompt_tokens: 10 + completion_tokens: 15 + total_tokens: 25 + ChatStreamingMessageToolCall: + type: object + properties: + index: type: number - pricing: + description: Tool call index in the array + example: 0 + id: + type: string + description: Tool call identifier + example: call_abc123 + type: + type: string + enum: + - function + description: Tool call type + example: function + function: type: object properties: - prompt: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - completion: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - request: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image_token: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - image_output: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - audio: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - audio_output: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_audio_cache: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - web_search: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - internal_reasoning: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a 
large number - input_cache_read: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - input_cache_write: - allOf: - - $ref: '#/components/schemas/BigNumberUnion' - - description: A number or string value representing a large number - discount: - type: number - required: - - prompt - - completion - additionalProperties: false - provider_name: - $ref: '#/components/schemas/ProviderName' - tag: + name: + type: string + description: Function name + example: get_weather + arguments: + type: string + description: Function arguments as JSON string + example: '{"location":' + description: Function call details + required: + - index + description: Tool call delta for streaming responses + example: + index: 0 + id: call_abc123 + type: function + function: + name: get_weather + arguments: '{"location":' + ChatStreamingMessageChunk: + type: object + properties: + role: + type: string + enum: + - assistant + description: The role of the message author + example: assistant + content: type: string - quantization: - allOf: - - $ref: '#/components/schemas/Quantization' - - nullable: true - max_completion_tokens: - type: number nullable: true - max_prompt_tokens: - type: number + description: Message content delta + example: Hello + reasoning: + type: string nullable: true - supported_parameters: + description: Reasoning content delta + example: I need to + refusal: + type: string + nullable: true + description: Refusal message delta + example: null + tool_calls: type: array items: - $ref: '#/components/schemas/Parameter' - status: - $ref: '#/components/schemas/EndpointStatus' - uptime_last_30m: + $ref: '#/components/schemas/ChatStreamingMessageToolCall' + description: Tool calls delta + reasoning_details: + $ref: '#/components/schemas/AssistantMessageReasoningDetails' + description: Delta changes in streaming response + example: + role: assistant + content: Hello + ChatStreamingChoice: + type: object + 
properties: + delta: + $ref: '#/components/schemas/ChatStreamingMessageChunk' + finish_reason: + anyOf: + - $ref: '#/components/schemas/ChatCompletionFinishReason' + - nullable: true + - nullable: true + index: type: number - nullable: true - supports_implicit_caching: - type: boolean - latency_last_30m: - $ref: '#/components/schemas/PercentileStats' - throughput_last_30m: - allOf: - - $ref: '#/components/schemas/PercentileStats' - - description: >- - Throughput percentiles in tokens per second over the last 30 minutes. Throughput measures output token generation speed. Only visible when authenticated with an API key or cookie; returns null for unauthenticated requests. + description: Choice index + example: 0 + logprobs: + $ref: '#/components/schemas/ChatMessageTokenLogprobs' required: - - name - - model_id - - model_name - - context_length - - pricing - - provider_name - - tag - - quantization - - max_completion_tokens - - max_prompt_tokens - - supported_parameters - - uptime_last_30m - - supports_implicit_caching - - latency_last_30m - - throughput_last_30m - description: Information about a specific model endpoint + - delta + - finish_reason + - index + description: Streaming completion choice chunk example: - name: 'OpenAI: GPT-4' - model_id: openai/gpt-4 - model_name: GPT-4 - context_length: 8192 - pricing: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - provider_name: OpenAI - tag: openai - quantization: fp16 - max_completion_tokens: 4096 - max_prompt_tokens: 8192 - supported_parameters: - - temperature - - top_p - - max_tokens - status: 0 - uptime_last_30m: 99.5 - supports_implicit_caching: true - latency_last_30m: - p50: 0.25 - p75: 0.35 - p90: 0.48 - p99: 0.85 - throughput_last_30m: - p50: 45.2 - p75: 38.5 - p90: 28.3 - p99: 15.1 - ListEndpointsResponse: + index: 0 + delta: + role: assistant + content: Hello + finish_reason: null + ChatStreamingResponseChunk: type: object properties: id: type: string - description: Unique 
identifier for the model - example: openai/gpt-4 - name: - type: string - description: Display name of the model - example: GPT-4 + description: Unique chunk identifier + example: chatcmpl-123 + choices: + type: array + items: + $ref: '#/components/schemas/ChatStreamingChoice' + description: List of streaming chunk choices created: type: number - description: Unix timestamp of when the model was created - example: 1692901234 - description: + description: Unix timestamp of creation + example: 1677652288 + model: type: string - description: Description of the model - example: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. - architecture: - allOf: - - $ref: '#/components/schemas/ModelArchitecture' - - properties: - tokenizer: - allOf: - - $ref: '#/components/schemas/ModelGroup' - - nullable: true - instruct_type: - $ref: '#/components/schemas/InstructType' - modality: - type: string - nullable: true - description: Primary modality of the model - example: text - input_modalities: - type: array - items: - $ref: '#/components/schemas/InputModality' - description: Supported input modalities - output_modalities: - type: array - items: - $ref: '#/components/schemas/OutputModality' - description: Supported output modalities - required: - - tokenizer - - instruct_type - - modality - - input_modalities - - output_modalities - example: - tokenizer: GPT - instruct_type: chatml - modality: text - endpoints: - type: array - items: - $ref: '#/components/schemas/PublicEndpoint' - description: List of available endpoints for this model + description: Model used for completion + example: openai/gpt-4 + object: + type: string + enum: + - chat.completion.chunk + system_fingerprint: + type: string + nullable: true + description: System fingerprint + example: fp_44709d6fcb + error: + type: object + properties: + message: + type: string + description: Error message + example: Rate limit exceeded + code: + type: number + description: Error code + 
example: 429 + required: + - message + - code + description: Error information + example: + message: Rate limit exceeded + code: 429 + usage: + $ref: '#/components/schemas/ChatGenerationTokenUsage' required: - id - - name + - choices - created - - description - - architecture - - endpoints - description: List of available endpoints for a model + - model + - object + description: Streaming chat completion chunk + x-speakeasy-entity: ChatStreamChunk example: - id: openai/gpt-4 - name: GPT-4 - created: 1692901234 - description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. - architecture: - tokenizer: GPT - instruct_type: chatml - modality: text->text - input_modalities: - - text - output_modalities: - - text - endpoints: - - name: 'OpenAI: GPT-4' - model_name: GPT-4 - context_length: 8192 - pricing: - prompt: '0.00003' - completion: '0.00006' - request: '0' - image: '0' - provider_name: OpenAI - tag: openai - quantization: fp16 - max_completion_tokens: 4096 - max_prompt_tokens: 8192 - supported_parameters: - - temperature - - top_p - - max_tokens - - frequency_penalty - - presence_penalty - status: default - uptime_last_30m: 99.5 - supports_implicit_caching: true - latency_last_30m: - p50: 0.25 - p75: 0.35 - p90: 0.48 - p99: 0.85 - throughput_last_30m: - p50: 45.2 - p75: 38.5 - p90: 28.3 - p99: 15.1 - __schema0: - anyOf: - - type: object - properties: - allow_fallbacks: - description: > - Whether to allow backup providers to serve requests - - - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use the next best provider. - - - false: use only the primary/custom provider, and return the upstream error if it's unavailable. - - $ref: '#/components/schemas/__schema1' - require_parameters: - description: >- - Whether to filter providers to only those that support the parameters you've provided. 
If this setting is omitted or set to false, then providers will receive only the parameters they support, and ignore the rest. - $ref: '#/components/schemas/__schema1' - data_collection: - description: >- - Data collection setting. If no available model provider meets the requirement, your request will return an error. - - - allow: (default) allow providers which store user data non-transiently and may train on it - - - - deny: use only providers which do not collect user data. - $ref: '#/components/schemas/__schema3' - zdr: - anyOf: - - type: boolean - - type: 'null' - enforce_distillable_text: - anyOf: - - type: boolean - - type: 'null' - order: - description: >- - An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message. - $ref: '#/components/schemas/__schema4' - only: - description: >- - List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request. - $ref: '#/components/schemas/__schema4' - ignore: - description: >- - List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request. - $ref: '#/components/schemas/__schema4' - quantizations: - description: A list of quantization levels to filter the provider by. - $ref: '#/components/schemas/__schema8' - sort: - description: >- - The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing is performed. - $ref: '#/components/schemas/__schema9' - max_price: - description: >- - The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. 
- $ref: '#/components/schemas/__schema10' - preferred_min_throughput: - description: >- - Preferred minimum throughput (in tokens per second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints below the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold. - $ref: '#/components/schemas/__schema15' - preferred_max_latency: - description: >- - Preferred maximum latency (in seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints above the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold. - $ref: '#/components/schemas/__schema15' - additionalProperties: false - - type: 'null' - __schema1: - anyOf: - - type: boolean - - type: 'null' - __schema3: - anyOf: - - type: string - enum: - - deny - - allow - x-speakeasy-unknown-values: allow - - type: 'null' - __schema4: - anyOf: - - $ref: '#/components/schemas/__schema5' - - type: 'null' - __schema5: - type: array - items: - anyOf: - - type: string - enum: - - AI21 - - AionLabs - - Alibaba - - Ambient - - Amazon Bedrock - - Amazon Nova - - Anthropic - - Arcee AI - - AtlasCloud - - Avian - - Azure - - BaseTen - - BytePlus - - Black Forest Labs - - Cerebras - - Chutes - - Cirrascale - - Clarifai - - Cloudflare - - Cohere - - Crusoe - - DeepInfra - - DeepSeek - - Featherless - - Fireworks - - Friendli - - GMICloud - - Google - - Google AI Studio - - Groq - - Hyperbolic - - Inception - - Inceptron - - InferenceNet - - Infermatic - - Inflection - - Liquid - - Mara - - Mancer 2 - - Minimax - - ModelRun - - Mistral - - Modular - - Moonshot AI - - Morph - - NCompass - - Nebius - - NextBit - - Novita - - Nvidia - - OpenAI - - OpenInference - - Parasail - - Perplexity - - Phala 
- - Relace - - SambaNova - - Seed - - SiliconFlow - - Sourceful - - StepFun - - Stealth - - StreamLake - - Switchpoint - - Together - - Upstage - - Venice - - WandB - - Xiaomi - - xAI - - Z.AI - - FakeProvider - x-speakeasy-unknown-values: allow - - type: string - __schema8: - anyOf: - - type: array - items: - type: string - enum: - - int4 - - int8 - - fp4 - - fp6 - - fp8 - - fp16 - - bf16 - - fp32 - - unknown - x-speakeasy-unknown-values: allow - - type: 'null' - __schema9: - anyOf: - - $ref: '#/components/schemas/ProviderSortUnion' - - type: 'null' - __schema10: + id: chatcmpl-123 + object: chat.completion.chunk + created: 1677652288 + model: openai/gpt-4 + choices: + - index: 0 + delta: + role: assistant + content: Hello + finish_reason: null + SystemMessage: type: object properties: - prompt: - anyOf: - - $ref: '#/components/schemas/__schema11' - - $ref: '#/components/schemas/ModelName' - - $ref: '#/components/schemas/__schema13' - completion: - anyOf: - - $ref: '#/components/schemas/__schema11' - - $ref: '#/components/schemas/ModelName' - - $ref: '#/components/schemas/__schema13' - image: - $ref: '#/components/schemas/__schema14' - audio: - $ref: '#/components/schemas/__schema14' - request: - $ref: '#/components/schemas/__schema14' - __schema11: - type: number - __schema13: {} - __schema14: - anyOf: - - $ref: '#/components/schemas/__schema11' - - $ref: '#/components/schemas/ModelName' - - $ref: '#/components/schemas/__schema13' - __schema15: - anyOf: - - anyOf: - - type: number - - type: object - properties: - p50: - anyOf: - - type: number - - type: 'null' - p75: - anyOf: - - type: number - - type: 'null' - p90: - anyOf: - - type: number - - type: 'null' - p99: - anyOf: - - type: number - - type: 'null' - - type: 'null' - __schema17: - type: array - items: - oneOf: - - type: object - properties: - id: - type: string - const: auto-router - enabled: - type: boolean - allowed_models: - type: array - items: - type: string - required: - - id - - type: object - 
properties: - id: - type: string - const: moderation - required: - - id - - type: object - properties: - id: - type: string - const: web - enabled: - type: boolean - max_results: - type: number - search_prompt: - type: string - engine: - type: string - enum: - - native - - exa - x-speakeasy-unknown-values: allow - required: - - id - - type: object - properties: - id: - type: string - const: file-parser - enabled: - type: boolean - pdf: - type: object - properties: - engine: - type: string - enum: - - mistral-ocr - - pdf-text - - native - x-speakeasy-unknown-values: allow - required: - - id - - type: object - properties: - id: - type: string - const: response-healing - enabled: - type: boolean - required: - - id - type: object - __schema18: - type: string - maxLength: 128 - __schema19: - oneOf: - - type: object - properties: - type: - type: string - const: reasoning.summary - summary: - type: string - id: - $ref: '#/components/schemas/__schema20' - format: - $ref: '#/components/schemas/__schema21' - index: - $ref: '#/components/schemas/__schema11' - required: - - type - - summary - - type: object - properties: - type: - type: string - const: reasoning.encrypted - data: - type: string - id: - $ref: '#/components/schemas/__schema20' - format: - $ref: '#/components/schemas/__schema21' - index: - $ref: '#/components/schemas/__schema11' - required: - - type - - data - - type: object - properties: - type: - type: string - const: reasoning.text - text: - anyOf: - - type: string - - type: 'null' - signature: - anyOf: - - type: string - - type: 'null' - id: - $ref: '#/components/schemas/__schema20' - format: - $ref: '#/components/schemas/__schema21' - index: - $ref: '#/components/schemas/__schema11' - required: - - type - type: object - __schema20: - anyOf: - - type: string - - type: 'null' - __schema21: - anyOf: - - type: string + role: + type: string enum: - - unknown - - openai-responses-v1 - - azure-openai-responses-v1 - - xai-responses-v1 - - anthropic-claude-v1 - - 
google-gemini-v1 - x-speakeasy-unknown-values: allow - - type: 'null' - __schema25: - anyOf: - - $ref: '#/components/schemas/ChatCompletionFinishReason' - - type: 'null' - ModelName: - type: string - ChatMessageContentItemText: + - system + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItemText' + description: System message content + example: You are a helpful assistant. + name: + type: string + description: Optional name for the system message + example: Assistant Config + required: + - role + - content + description: System message for setting behavior + example: + role: system + content: You are a helpful assistant. + name: Assistant Config + UserMessage: type: object properties: - type: + role: type: string - const: text - text: + enum: + - user + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItem' + description: User message content + example: What is the capital of France? + name: type: string - cache_control: - $ref: '#/components/schemas/ChatMessageContentItemCacheControl' + description: Optional name for the user + example: User required: - - type - - text - ChatMessageContentItemImage: + - role + - content + description: User message + example: + role: user + content: What is the capital of France? + DeveloperMessage: type: object properties: - type: + role: type: string - const: image_url - image_url: - type: object - properties: - url: - type: string - detail: - type: string - enum: - - auto - - low - - high - x-speakeasy-unknown-values: allow - required: - - url + enum: + - developer + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItemText' + description: Developer message content + example: This is a message from the developer. 
+ name: + type: string + description: Optional name for the developer message + example: Developer required: - - type - - image_url - ChatMessageContentItemAudio: + - role + - content + description: Developer message + example: + role: developer + content: This is a message from the developer. + ToolResponseMessage: type: object properties: - type: + role: type: string - const: input_audio - input_audio: - type: object - properties: - data: - type: string - format: - type: string - required: - - data - - format + enum: + - tool + content: + anyOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/ChatMessageContentItem' + description: Tool response content + example: The weather in San Francisco is 72°F and sunny. + tool_call_id: + type: string + description: ID of the assistant message tool call this message responds to + example: call_abc123 required: - - type - - input_audio - ChatMessageContentItemVideo: - oneOf: - - type: object - properties: - type: - type: string - const: input_video - video_url: - type: object - properties: - url: - type: string - required: - - url - required: - - type - - video_url - - type: object - properties: - type: - type: string - const: video_url - video_url: - type: object - properties: - url: - type: string - required: - - url - required: - - type - - video_url - type: object - ChatMessageContentItem: + - role + - content + - tool_call_id + description: Tool response message + example: + role: tool + content: The weather in San Francisco is 72°F and sunny. 
+ tool_call_id: call_abc123 + Message: oneOf: - - $ref: '#/components/schemas/ChatMessageContentItemText' - - $ref: '#/components/schemas/ChatMessageContentItemImage' - - $ref: '#/components/schemas/ChatMessageContentItemAudio' - - $ref: '#/components/schemas/ChatMessageContentItemVideo' - type: object + - $ref: '#/components/schemas/SystemMessage' + - $ref: '#/components/schemas/UserMessage' + - $ref: '#/components/schemas/DeveloperMessage' + - $ref: '#/components/schemas/AssistantMessage' + - $ref: '#/components/schemas/ToolResponseMessage' discriminator: - propertyName: type - mapping: - text: '#/components/schemas/ChatMessageContentItemText' - image_url: '#/components/schemas/ChatMessageContentItemImage' - input_audio: '#/components/schemas/ChatMessageContentItemAudio' - input_video: '#/components/schemas/ChatMessageContentItemVideo' - video_url: '#/components/schemas/ChatMessageContentItemVideo' - ChatMessageToolCall: + propertyName: role + description: Chat completion message with role-based discrimination + example: + role: user + content: What is the capital of France? 
+ ModelName: + type: string + description: Model to use for completion + example: openai/gpt-4 + ModelNames: + type: array + items: + allOf: + - $ref: '#/components/schemas/ModelName' + - description: Available OpenRouter chat completion models + description: Models to use for completion + example: + - openai/gpt-4 + - openai/gpt-4o + ResponseFormatText: type: object properties: - id: - type: string type: type: string - const: function - function: - type: object - properties: - name: - type: string - arguments: - type: string - required: - - name - - arguments + enum: + - text required: - - id - type - - function - ChatMessageTokenLogprob: + description: Default text response format + example: + type: text + ResponseFormatJSONObject: type: object properties: - token: + type: type: string - logprob: - type: number - bytes: - anyOf: - - type: array - items: - type: number - - type: 'null' - top_logprobs: - type: array - items: - type: object - properties: - token: - type: string - logprob: - type: number - bytes: - anyOf: - - type: array - items: - type: number - - type: 'null' - required: - - token - - logprob - - bytes - required: - - token - - logprob - - bytes - - top_logprobs - ChatMessageTokenLogprobs: - type: object - properties: - content: - anyOf: - - type: array - items: - $ref: '#/components/schemas/ChatMessageTokenLogprob' - - type: 'null' - refusal: - anyOf: - - type: array - items: - $ref: '#/components/schemas/ChatMessageTokenLogprob' - - type: 'null' - required: - - content - - refusal - ChatGenerationTokenUsage: - type: object - properties: - completion_tokens: - type: number - prompt_tokens: - type: number - total_tokens: - type: number - completion_tokens_details: - anyOf: - - type: object - properties: - reasoning_tokens: - anyOf: - - type: number - - type: 'null' - audio_tokens: - anyOf: - - type: number - - type: 'null' - accepted_prediction_tokens: - anyOf: - - type: number - - type: 'null' - rejected_prediction_tokens: - anyOf: - - type: 
number - - type: 'null' - - type: 'null' - prompt_tokens_details: - anyOf: - - type: object - properties: - cached_tokens: - type: number - cache_write_tokens: - type: number - audio_tokens: - type: number - video_tokens: - type: number - - type: 'null' + enum: + - json_object required: - - completion_tokens - - prompt_tokens - - total_tokens - ChatCompletionFinishReason: - type: string - enum: - - tool_calls - - stop - - length - - content_filter - - error - x-speakeasy-unknown-values: allow + - type + description: JSON object response format + example: + type: json_object JSONSchemaConfig: type: object properties: name: type: string maxLength: 64 + description: Schema name (a-z, A-Z, 0-9, underscores, dashes, max 64 chars) + example: math_response description: type: string + description: Schema description for the model + example: A mathematical response schema: type: object - propertyNames: - type: string - additionalProperties: {} + additionalProperties: + nullable: true + description: JSON Schema object + example: + type: object + properties: + answer: + type: number + required: + - answer strict: - anyOf: - - type: boolean - - type: 'null' + type: boolean + nullable: true + description: Enable strict schema adherence + example: false required: - name + description: JSON Schema configuration object + example: + name: math_response + description: A mathematical response + schema: + type: object + properties: + answer: + type: number + required: + - answer + strict: true ResponseFormatJSONSchema: type: object properties: type: type: string - const: json_schema + enum: + - json_schema json_schema: $ref: '#/components/schemas/JSONSchemaConfig' required: - type - json_schema + description: JSON Schema response format for structured outputs + example: + type: json_schema + json_schema: + name: math_response + schema: + type: object + properties: + answer: + type: number + required: + - answer ResponseFormatTextGrammar: type: object properties: type: type: string - 
const: grammar + enum: + - grammar grammar: type: string + description: Custom grammar for text generation + example: root ::= "yes" | "no" required: - type - grammar - ChatMessageContentItemCacheControl: + description: Custom grammar response format + example: + type: grammar + grammar: root ::= "yes" | "no" + ResponseFormatTextPython: type: object properties: type: - type: string - const: ephemeral - ttl: type: string enum: - - 5m - - 1h - x-speakeasy-unknown-values: allow + - python required: - type - SystemMessage: - type: object - properties: - role: - type: string - const: system - content: - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItemText' - name: - type: string - required: - - role - - content - UserMessage: + description: Python code response format + example: + type: python + ChatStreamOptions: type: object + nullable: true properties: - role: - type: string - const: user - content: - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItem' - name: - type: string - required: - - role - - content - DeveloperMessage: + include_usage: + type: boolean + description: 'Deprecated: This field has no effect. Full usage details are always included.' 
+ example: true + deprecated: true + description: Streaming configuration options + example: + include_usage: true + NamedToolChoice: type: object properties: - role: + type: type: string - const: developer - content: - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItemText' - name: - type: string - required: - - role - - content - AssistantMessage: - type: object - properties: - role: - type: string - const: assistant - content: - anyOf: - - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItem' - - type: 'null' - name: - type: string - tool_calls: - type: array - items: - $ref: '#/components/schemas/ChatMessageToolCall' - refusal: - anyOf: - - type: string - - type: 'null' - reasoning: - anyOf: - - type: string - - type: 'null' - reasoning_details: - type: array - items: - $ref: '#/components/schemas/__schema19' - images: - type: array - items: - type: object - properties: - image_url: - type: object - properties: - url: - type: string - required: - - url - required: - - image_url - required: - - role - ToolResponseMessage: - type: object - properties: - role: - type: string - const: tool - content: - anyOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/ChatMessageContentItem' - tool_call_id: - type: string - required: - - role - - content - - tool_call_id - Message: - oneOf: - - $ref: '#/components/schemas/SystemMessage' - - $ref: '#/components/schemas/UserMessage' - - $ref: '#/components/schemas/DeveloperMessage' - - $ref: '#/components/schemas/AssistantMessage' - - $ref: '#/components/schemas/ToolResponseMessage' - type: object - discriminator: - propertyName: role - mapping: - system: '#/components/schemas/SystemMessage' - user: '#/components/schemas/UserMessage' - developer: '#/components/schemas/DeveloperMessage' - assistant: '#/components/schemas/AssistantMessage' - tool: '#/components/schemas/ToolResponseMessage' - 
ToolDefinitionJson: - type: object - properties: - type: - type: string - const: function + enum: + - function function: type: object properties: name: type: string - maxLength: 64 - description: - type: string - parameters: - type: object - propertyNames: - type: string - additionalProperties: {} - strict: - anyOf: - - type: boolean - - type: 'null' + description: Function name to call + example: get_weather required: - name required: - type - function - NamedToolChoice: + description: Named tool choice for specific function + example: + type: function + function: + name: get_weather + ToolChoiceOption: + anyOf: + - type: string + enum: + - none + - type: string + enum: + - auto + - type: string + enum: + - required + - $ref: '#/components/schemas/NamedToolChoice' + description: Tool choice configuration + example: auto + ToolDefinitionJson: type: object properties: type: type: string - const: function + enum: + - function function: type: object properties: name: type: string + maxLength: 64 + description: Function name (a-z, A-Z, 0-9, underscores, dashes, max 64 chars) + example: get_weather + description: + type: string + description: Function description for the model + example: Get the current weather for a location + parameters: + type: object + additionalProperties: + nullable: true + description: Function parameters as JSON Schema object + example: + type: object + properties: + location: + type: string + description: City name + required: + - location + strict: + type: boolean + nullable: true + description: Enable strict schema adherence + example: false required: - name + description: Function definition for tool calling + example: + name: get_weather + description: Get the current weather for a location + parameters: + type: object + properties: + location: + type: string + description: City name + required: + - location + cache_control: + $ref: '#/components/schemas/ChatMessageContentItemCacheControl' required: - type - function - ToolChoiceOption: - 
anyOf: - - type: string - const: none - - type: string - const: auto - - type: string - const: required - - $ref: '#/components/schemas/NamedToolChoice' - ChatStreamOptions: + description: Tool definition for function calling + example: + type: function + function: + name: get_weather + description: Get the current weather for a location + parameters: + type: object + properties: + location: + type: string + description: City name + unit: + type: string + enum: + - celsius + - fahrenheit + x-speakeasy-unknown-values: allow + required: + - location + DebugOptions: type: object properties: - include_usage: + echo_upstream_body: type: boolean + description: >- + If true, includes the transformed upstream request body in a debug chunk at the start of the stream. Only works with streaming mode. + example: true + description: Debug options for inspecting request transformations (streaming only) + example: + echo_upstream_body: true ChatGenerationParams: type: object properties: provider: - description: When multiple model providers are available, optionally indicate your routing preference. - $ref: '#/components/schemas/__schema0' - plugins: - description: Plugins you want to enable for this request, including their settings. - $ref: '#/components/schemas/__schema17' - route: - anyOf: - - type: string - enum: - - fallback - - sort - x-speakeasy-unknown-values: allow - - type: 'null' - user: - type: string - session_id: - description: >- - A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. 
- $ref: '#/components/schemas/__schema18' - messages: - minItems: 1 - type: array - items: - $ref: '#/components/schemas/Message' - model: - $ref: '#/components/schemas/ModelName' - models: - type: array - items: - $ref: '#/components/schemas/ModelName' - frequency_penalty: - anyOf: - - type: number - minimum: -2 - maximum: 2 - - type: 'null' - logit_bias: - anyOf: - - type: object - propertyNames: - type: string - additionalProperties: - type: number - - type: 'null' - logprobs: - anyOf: - - type: boolean - - type: 'null' - top_logprobs: - anyOf: - - type: number - minimum: 0 - maximum: 20 - - type: 'null' - max_completion_tokens: - anyOf: - - type: number - minimum: 1 - - type: 'null' - max_tokens: - anyOf: - - type: number - minimum: 1 - - type: 'null' - metadata: - type: object - propertyNames: - type: string - additionalProperties: - type: string - presence_penalty: - anyOf: - - type: number - minimum: -2 - maximum: 2 - - type: 'null' - reasoning: type: object + nullable: true properties: - effort: - anyOf: - - type: string - enum: - - xhigh - - high - - medium - - low - - minimal - - none - x-speakeasy-unknown-values: allow - - type: 'null' - summary: - anyOf: - - $ref: '#/components/schemas/ReasoningSummaryVerbosity' - - type: 'null' - response_format: - oneOf: - - type: object - properties: - type: - type: string - const: text - required: - - type - - type: object - properties: - type: - type: string - const: json_object - required: - - type - - $ref: '#/components/schemas/ResponseFormatJSONSchema' - - $ref: '#/components/schemas/ResponseFormatTextGrammar' - - type: object - properties: - type: - type: string - const: python - required: - - type - type: object - seed: - anyOf: - - type: integer - minimum: -9007199254740991 - maximum: 9007199254740991 - - type: 'null' - stop: - anyOf: - - anyOf: - - type: string - - maxItems: 4 - type: array - items: - $ref: '#/components/schemas/ModelName' - - type: 'null' - stream: - default: false - type: boolean - 
stream_options: - anyOf: - - $ref: '#/components/schemas/ChatStreamOptions' - - type: 'null' - temperature: - default: 1 - anyOf: - - type: number - minimum: 0 - maximum: 2 - - type: 'null' - tool_choice: - $ref: '#/components/schemas/ToolChoiceOption' - tools: - type: array - items: - $ref: '#/components/schemas/ToolDefinitionJson' - top_p: - default: 1 - anyOf: - - type: number - minimum: 0 - maximum: 1 - - type: 'null' - debug: + allow_fallbacks: + type: boolean + nullable: true + description: > + Whether to allow backup providers to serve requests + + - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use the next best provider. + + - false: use only the primary/custom provider, and return the upstream error if it's unavailable. + + require_parameters: + type: boolean + nullable: true + description: >- + Whether to filter providers to only those that support the parameters you've provided. If this setting is omitted or set to false, then providers will receive only the parameters they support, and ignore the rest. + data_collection: + $ref: '#/components/schemas/DataCollection' + zdr: + type: boolean + nullable: true + description: >- + Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that do not retain prompts will be used. + example: true + enforce_distillable_text: + type: boolean + nullable: true + description: >- + Whether to restrict routing to only models that allow text distillation. When true, only models where the author has allowed distillation will be used. + example: true + order: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. 
If no providers are available, the request will fail with an error message. + only: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request. + ignore: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request. + quantizations: + type: array + nullable: true + items: + $ref: '#/components/schemas/Quantization' + description: A list of quantization levels to filter the provider by. + sort: + allOf: + - $ref: '#/components/schemas/ProviderSort' + - anyOf: + - $ref: '#/components/schemas/ProviderSort' + - $ref: '#/components/schemas/ProviderSortConfig' + - nullable: true + description: >- + The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing is performed. + max_price: + type: object + properties: + prompt: + $ref: '#/components/schemas/BigNumberUnion' + completion: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per million completion tokens + image: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per image + audio: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per audio unit + request: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per request + description: >- + The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. 
+ preferred_min_throughput: + $ref: '#/components/schemas/PreferredMinThroughput' + preferred_max_latency: + $ref: '#/components/schemas/PreferredMaxLatency' + additionalProperties: false + description: When multiple model providers are available, optionally indicate your routing preference. + plugins: + type: array + items: + oneOf: + - type: object + properties: + id: + type: string + enum: + - auto-router + enabled: + type: boolean + description: Set to false to disable the auto-router plugin for this request. Defaults to true. + allowed_models: + type: array + items: + type: string + description: >- + List of model patterns to filter which models the auto-router can route between. Supports wildcards (e.g., "anthropic/*" matches all Anthropic models). When not specified, uses the default supported models list. + example: + - anthropic/* + - openai/gpt-4o + - google/* + required: + - id + - type: object + properties: + id: + type: string + enum: + - moderation + required: + - id + - type: object + properties: + id: + type: string + enum: + - web + enabled: + type: boolean + description: Set to false to disable the web-search plugin for this request. Defaults to true. + max_results: + type: number + search_prompt: + type: string + engine: + $ref: '#/components/schemas/WebSearchEngine' + required: + - id + - type: object + properties: + id: + type: string + enum: + - file-parser + enabled: + type: boolean + description: Set to false to disable the file-parser plugin for this request. Defaults to true. + pdf: + $ref: '#/components/schemas/PDFParserOptions' + required: + - id + - type: object + properties: + id: + type: string + enum: + - response-healing + enabled: + type: boolean + description: Set to false to disable the response-healing plugin for this request. Defaults to true. + required: + - id + description: Plugins you want to enable for this request, including their settings. 
+ route: + type: string + nullable: true + enum: + - fallback + - sort + deprecated: true + description: >- + **DEPRECATED** Use providers.sort.partition instead. Backwards-compatible alias for providers.sort.partition. Accepts legacy values: "fallback" (maps to "model"), "sort" (maps to "none"). + x-speakeasy-deprecation-message: Use providers.sort.partition instead. + x-speakeasy-ignore: true + x-fern-ignore: true + x-speakeasy-unknown-values: allow + user: + type: string + description: Unique user identifier + example: user-123 + session_id: + type: string + maxLength: 128 + description: >- + A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. + trace: type: object properties: - echo_upstream_body: - type: boolean - image_config: + trace_id: + type: string + trace_name: + type: string + span_name: + type: string + generation_name: + type: string + parent_span_id: + type: string + additionalProperties: + nullable: true + description: >- + Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. + messages: + type: array + items: + $ref: '#/components/schemas/Message' + minItems: 1 + description: List of messages for the conversation + example: + - role: user + content: Hello! 
+ model: + $ref: '#/components/schemas/ModelName' + models: + $ref: '#/components/schemas/ModelNames' + frequency_penalty: + type: number + nullable: true + minimum: -2 + maximum: 2 + description: Frequency penalty (-2.0 to 2.0) + example: 0 + logit_bias: type: object - propertyNames: - type: string + nullable: true additionalProperties: - anyOf: - - type: string - - type: number - - type: array - items: {} - modalities: + type: number + description: Token logit bias adjustments + example: + '50256': -100 + logprobs: + type: boolean + nullable: true + description: Return log probabilities + example: false + top_logprobs: + type: number + nullable: true + minimum: 0 + maximum: 20 + description: Number of top log probabilities to return (0-20) + example: 5 + max_completion_tokens: + type: number + nullable: true + minimum: 1 + description: Maximum tokens in completion + example: 100 + max_tokens: + type: number + nullable: true + minimum: 1 + description: Maximum tokens (deprecated, use max_completion_tokens) + example: 100 + metadata: + type: object + additionalProperties: + type: string + description: Key-value pairs for additional object information (max 16 pairs, 64 char keys, 512 char values) + example: + user_id: user-123 + session_id: session-456 + presence_penalty: + type: number + nullable: true + minimum: -2 + maximum: 2 + description: Presence penalty (-2.0 to 2.0) + example: 0 + reasoning: + type: object + properties: + effort: + type: string + nullable: true + enum: + - xhigh + - high + - medium + - low + - minimal + - none + description: Constrains effort on reasoning for reasoning models + example: medium + x-speakeasy-unknown-values: allow + summary: + anyOf: + - $ref: '#/components/schemas/ReasoningSummaryVerbosity' + - nullable: true + - nullable: true + description: Configuration options for reasoning models + example: + effort: medium + summary: concise + response_format: + oneOf: + - $ref: '#/components/schemas/ResponseFormatText' + - $ref: 
'#/components/schemas/ResponseFormatJSONObject' + - $ref: '#/components/schemas/ResponseFormatJSONSchema' + - $ref: '#/components/schemas/ResponseFormatTextGrammar' + - $ref: '#/components/schemas/ResponseFormatTextPython' + discriminator: + propertyName: type + description: Response format configuration + example: + type: json_object + seed: + type: integer + nullable: true + description: Random seed for deterministic outputs + example: 42 + stop: + anyOf: + - type: string + - type: array + items: + type: string + maxItems: 4 + - nullable: true + description: Stop sequences (up to 4) + example: + - "" + stream: + type: boolean + default: false + description: Enable streaming response + example: false + stream_options: + $ref: '#/components/schemas/ChatStreamOptions' + temperature: + type: number + nullable: true + minimum: 0 + maximum: 2 + default: 1 + description: Sampling temperature (0-2) + example: 0.7 + parallel_tool_calls: + type: boolean + nullable: true + tool_choice: + $ref: '#/components/schemas/ToolChoiceOption' + tools: + type: array + items: + $ref: '#/components/schemas/ToolDefinitionJson' + description: Available tools for function calling + example: + - type: function + function: + name: get_weather + description: Get weather + top_p: + type: number + nullable: true + minimum: 0 + maximum: 1 + default: 1 + description: Nucleus sampling parameter (0-1) + example: 1 + debug: + $ref: '#/components/schemas/DebugOptions' + image_config: + type: object + additionalProperties: + anyOf: + - type: string + - type: number + - type: array + items: + nullable: true + description: >- + Provider-specific image configuration options. Keys and values vary by model/provider. See https://openrouter.ai/docs/guides/overview/multimodal/image-generation for more details. + example: + aspect_ratio: '16:9' + modalities: + type: array + items: + type: string + enum: + - text + - image + x-speakeasy-unknown-values: allow + description: Output modalities for the response. 
Supported values are "text" and "image". + example: + - text + - image + required: + - messages + description: Chat completion request parameters + example: + messages: + - role: system + content: You are a helpful assistant. + - role: user + content: What is the capital of France? + model: openai/gpt-4 + temperature: 0.7 + max_tokens: 150 + CreateChargeRequest: + type: object + properties: + amount: + type: number + sender: + type: string + chain_id: + type: integer + enum: + - 1 + - 137 + - 8453 + x-speakeasy-unknown-values: allow + required: + - amount + - sender + - chain_id + description: Create a Coinbase charge for crypto payment + example: + amount: 100 + sender: '0x1234567890123456789012345678901234567890' + chain_id: 1 + ProviderPreferences: + type: object + properties: + allow_fallbacks: + type: boolean + nullable: true + description: > + Whether to allow backup providers to serve requests + + - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use the next best provider. + + - false: use only the primary/custom provider, and return the upstream error if it's unavailable. + + require_parameters: + type: boolean + nullable: true + description: >- + Whether to filter providers to only those that support the parameters you've provided. If this setting is omitted or set to false, then providers will receive only the parameters they support, and ignore the rest. + data_collection: + $ref: '#/components/schemas/DataCollection' + zdr: + type: boolean + nullable: true + description: >- + Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that do not retain prompts will be used. + example: true + enforce_distillable_text: + type: boolean + nullable: true + description: >- + Whether to restrict routing to only models that allow text distillation. When true, only models where the author has allowed distillation will be used. 
+ example: true + order: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message. + only: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request. + ignore: + type: array + nullable: true + items: + anyOf: + - $ref: '#/components/schemas/ProviderName' + - type: string + description: >- + List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request. + quantizations: + type: array + nullable: true + items: + $ref: '#/components/schemas/Quantization' + description: A list of quantization levels to filter the provider by. + sort: + allOf: + - $ref: '#/components/schemas/ProviderSort' + - anyOf: + - $ref: '#/components/schemas/ProviderSort' + - $ref: '#/components/schemas/ProviderSortConfig' + - nullable: true + description: >- + The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing is performed. 
+ max_price: + type: object + properties: + prompt: + $ref: '#/components/schemas/BigNumberUnion' + completion: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per million completion tokens + image: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per image + audio: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per audio unit + request: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: Price per request + description: >- + The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. + preferred_min_throughput: + $ref: '#/components/schemas/PreferredMinThroughput' + preferred_max_latency: + $ref: '#/components/schemas/PreferredMaxLatency' + description: Provider routing preferences for the request. + PublicPricing: + type: object + properties: + prompt: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + completion: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + request: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image_token: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image_output: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + audio: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + audio_output: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - 
description: A number or string value representing a large number + input_audio_cache: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + web_search: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + internal_reasoning: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_cache_read: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_cache_write: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + discount: + type: number + required: + - prompt + - completion + description: Pricing information for the model + example: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + ModelGroup: + type: string + enum: + - Router + - Media + - Other + - GPT + - Claude + - Gemini + - Grok + - Cohere + - Nova + - Qwen + - Yi + - DeepSeek + - Mistral + - Llama2 + - Llama3 + - Llama4 + - PaLM + - RWKV + - Qwen3 + example: GPT + description: Tokenizer type used by the model + x-speakeasy-unknown-values: allow + InputModality: + type: string + enum: + - text + - image + - file + - audio + - video + example: text + x-speakeasy-unknown-values: allow + OutputModality: + type: string + enum: + - text + - image + - embeddings + - audio + example: text + x-speakeasy-unknown-values: allow + ModelArchitecture: + type: object + properties: + tokenizer: + $ref: '#/components/schemas/ModelGroup' + instruct_type: + type: string + nullable: true + enum: + - none + - airoboros + - alpaca + - alpaca-modif + - chatml + - claude + - code-llama + - gemma + - llama2 + - llama3 + - mistral + - nemotron + - neural + - openchat + - phi3 + - rwkv + - vicuna + - zephyr + - deepseek-r1 + - 
deepseek-v3.1 + - qwq + - qwen3 + example: chatml + description: Instruction format type + x-speakeasy-unknown-values: allow + modality: + type: string + nullable: true + description: Primary modality of the model + example: text->text + input_modalities: + type: array + items: + $ref: '#/components/schemas/InputModality' + description: Supported input modalities + output_modalities: + type: array + items: + $ref: '#/components/schemas/OutputModality' + description: Supported output modalities + required: + - modality + - input_modalities + - output_modalities + description: Model architecture information + example: + tokenizer: GPT + instruct_type: chatml + modality: text->text + input_modalities: + - text + output_modalities: + - text + TopProviderInfo: + type: object + properties: + context_length: + type: number + nullable: true + description: Context length from the top provider + example: 8192 + max_completion_tokens: + type: number + nullable: true + description: Maximum completion tokens from the top provider + example: 4096 + is_moderated: + type: boolean + description: Whether the top provider moderates content + example: true + required: + - is_moderated + description: Information about the top provider for this model + example: + context_length: 8192 + max_completion_tokens: 4096 + is_moderated: true + PerRequestLimits: + type: object + nullable: true + properties: + prompt_tokens: + type: number + example: 1000 + description: Maximum prompt tokens per request + completion_tokens: + type: number + example: 1000 + description: Maximum completion tokens per request + required: + - prompt_tokens + - completion_tokens + description: Per-request token limits + example: + prompt_tokens: 1000 + completion_tokens: 1000 + Parameter: + type: string + enum: + - temperature + - top_p + - top_k + - min_p + - top_a + - frequency_penalty + - presence_penalty + - repetition_penalty + - max_tokens + - logit_bias + - logprobs + - top_logprobs + - seed + - response_format 
+ - structured_outputs + - stop + - tools + - tool_choice + - parallel_tool_calls + - include_reasoning + - reasoning + - reasoning_effort + - web_search_options + - verbosity + example: temperature + x-speakeasy-unknown-values: allow + DefaultParameters: + type: object + nullable: true + properties: + temperature: + type: number + nullable: true + minimum: 0 + maximum: 2 + top_p: + type: number + nullable: true + minimum: 0 + maximum: 1 + frequency_penalty: + type: number + nullable: true + minimum: -2 + maximum: 2 + additionalProperties: false + description: Default parameters for this model + example: + temperature: 0.7 + top_p: 0.9 + frequency_penalty: 0 + Model: + type: object + properties: + id: + type: string + description: Unique identifier for the model + example: openai/gpt-4 + canonical_slug: + type: string + description: Canonical slug for the model + example: openai/gpt-4 + hugging_face_id: + type: string + nullable: true + description: Hugging Face model identifier, if applicable + example: microsoft/DialoGPT-medium + name: + type: string + description: Display name of the model + example: GPT-4 + created: + type: number + description: Unix timestamp of when the model was created + example: 1692901234 + description: + type: string + description: Description of the model + example: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. 
+ pricing: + $ref: '#/components/schemas/PublicPricing' + context_length: + type: number + nullable: true + description: Maximum context length in tokens + example: 8192 + architecture: + $ref: '#/components/schemas/ModelArchitecture' + top_provider: + $ref: '#/components/schemas/TopProviderInfo' + per_request_limits: + $ref: '#/components/schemas/PerRequestLimits' + supported_parameters: type: array items: - type: string - enum: - - text - - image - x-speakeasy-unknown-values: allow + $ref: '#/components/schemas/Parameter' + description: List of supported parameters for this model + default_parameters: + $ref: '#/components/schemas/DefaultParameters' + expiration_date: + type: string + nullable: true + description: The date after which the model may be removed. ISO 8601 date string (YYYY-MM-DD) or null if no expiration. + example: '2025-06-01' required: - - messages - ProviderSortUnion: - anyOf: - - $ref: '#/components/schemas/ProviderSort' - - $ref: '#/components/schemas/ProviderSortConfig' - ChatResponseChoice: + - id + - canonical_slug + - name + - created + - pricing + - context_length + - architecture + - top_provider + - per_request_limits + - supported_parameters + - default_parameters + description: Information about an AI model available on OpenRouter + example: + id: openai/gpt-4 + canonical_slug: openai/gpt-4 + name: GPT-4 + created: 1692901234 + description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. 
+ pricing: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + context_length: 8192 + architecture: + tokenizer: GPT + instruct_type: chatml + modality: text->text + input_modalities: + - text + output_modalities: + - text + top_provider: + context_length: 8192 + max_completion_tokens: 4096 + is_moderated: true + per_request_limits: null + supported_parameters: + - temperature + - top_p + - max_tokens + default_parameters: null + expiration_date: null + ModelsListResponseData: + type: array + items: + $ref: '#/components/schemas/Model' + description: List of available models + ModelsListResponse: type: object properties: - finish_reason: - $ref: '#/components/schemas/__schema25' - index: - type: number - message: - $ref: '#/components/schemas/AssistantMessage' - logprobs: - anyOf: - - $ref: '#/components/schemas/ChatMessageTokenLogprobs' - - type: 'null' + data: + $ref: '#/components/schemas/ModelsListResponseData' required: - - finish_reason - - index - - message - ChatStreamingMessageToolCall: + - data + description: List of available models + example: + data: + - id: openai/gpt-4 + canonical_slug: openai/gpt-4 + name: GPT-4 + created: 1692901234 + description: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. 
+ pricing: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + context_length: 8192 + architecture: + tokenizer: GPT + instruct_type: chatml + modality: text->text + input_modalities: + - text + output_modalities: + - text + top_provider: + context_length: 8192 + max_completion_tokens: 4096 + is_moderated: true + per_request_limits: null + supported_parameters: + - temperature + - top_p + - max_tokens + - frequency_penalty + - presence_penalty + default_parameters: null + expiration_date: null + ModelsCountResponse: type: object properties: - index: - type: number - id: - type: string - type: - type: string - const: function - function: + data: type: object properties: - name: - type: string - arguments: - type: string + count: + type: number + description: Total number of available models + example: 150 + required: + - count + description: Model count data + example: + count: 150 required: - - index - ChatStreamingMessageChunk: - type: object - properties: - role: - type: string - enum: - - assistant - content: - anyOf: - - type: string - - type: 'null' - reasoning: - anyOf: - - type: string - - type: 'null' - refusal: - anyOf: - - type: string - - type: 'null' - tool_calls: - type: array - items: - $ref: '#/components/schemas/ChatStreamingMessageToolCall' - reasoning_details: - type: array - items: - $ref: '#/components/schemas/__schema19' - ChatStreamingChoice: + - data + description: Model count data + example: + data: + count: 150 + InstructType: + type: string + nullable: true + enum: + - none + - airoboros + - alpaca + - alpaca-modif + - chatml + - claude + - code-llama + - gemma + - llama2 + - llama3 + - mistral + - nemotron + - neural + - openchat + - phi3 + - rwkv + - vicuna + - zephyr + - deepseek-r1 + - deepseek-v3.1 + - qwq + - qwen3 + description: Instruction format type + x-speakeasy-unknown-values: allow + EndpointStatus: + type: integer + enum: + - 0 + - -1 + - -2 + - -3 + - -5 + - -10 + example: 0 + 
x-speakeasy-unknown-values: allow + PercentileStats: type: object + nullable: true properties: - delta: - $ref: '#/components/schemas/ChatStreamingMessageChunk' - finish_reason: - $ref: '#/components/schemas/__schema25' - index: + p50: + type: number + description: Median (50th percentile) + example: 25.5 + p75: + type: number + description: 75th percentile + example: 35.2 + p90: type: number - logprobs: - anyOf: - - $ref: '#/components/schemas/ChatMessageTokenLogprobs' - - type: 'null' + description: 90th percentile + example: 48.7 + p99: + type: number + description: 99th percentile + example: 85.3 required: - - delta - - finish_reason - - index - ChatError: + - p50 + - p75 + - p90 + - p99 + description: >- + Latency percentiles in milliseconds over the last 30 minutes. Latency measures time to first token. Only visible when authenticated with an API key or cookie; returns null for unauthenticated requests. + PublicEndpoint: type: object properties: - error: + name: + type: string + model_id: + type: string + description: The unique identifier for the model (permaslug) + example: openai/gpt-4 + model_name: + type: string + context_length: + type: number + pricing: type: object properties: - code: - anyOf: - - anyOf: - - type: string - - type: number - - type: 'null' - message: - type: string - param: - anyOf: - - type: string - - type: 'null' - type: - anyOf: - - type: string - - type: 'null' + prompt: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + completion: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + request: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image_token: + allOf: + - 
$ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + image_output: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + audio: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + audio_output: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_audio_cache: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + web_search: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + internal_reasoning: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_cache_read: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + input_cache_write: + allOf: + - $ref: '#/components/schemas/BigNumberUnion' + - description: A number or string value representing a large number + discount: + type: number required: - - code - - message - additionalProperties: false + - prompt + - completion + provider_name: + $ref: '#/components/schemas/ProviderName' + tag: + type: string + quantization: + allOf: + - $ref: '#/components/schemas/Quantization' + - nullable: true + max_completion_tokens: + type: number + nullable: true + max_prompt_tokens: + type: number + nullable: true + supported_parameters: + type: array + items: + $ref: '#/components/schemas/Parameter' + status: + $ref: '#/components/schemas/EndpointStatus' + uptime_last_30m: + type: number + nullable: true + supports_implicit_caching: + type: boolean + latency_last_30m: + $ref: '#/components/schemas/PercentileStats' + throughput_last_30m: + 
allOf: + - $ref: '#/components/schemas/PercentileStats' + - description: >- + Throughput percentiles in tokens per second over the last 30 minutes. Throughput measures output token generation speed. Only visible when authenticated with an API key or cookie; returns null for unauthenticated requests. required: - - error - additionalProperties: false - ChatResponse: + - name + - model_id + - model_name + - context_length + - pricing + - provider_name + - tag + - quantization + - max_completion_tokens + - max_prompt_tokens + - supported_parameters + - uptime_last_30m + - supports_implicit_caching + - latency_last_30m + - throughput_last_30m + description: Information about a specific model endpoint + example: + name: 'OpenAI: GPT-4' + model_id: openai/gpt-4 + model_name: GPT-4 + context_length: 8192 + pricing: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + provider_name: OpenAI + tag: openai + quantization: fp16 + max_completion_tokens: 4096 + max_prompt_tokens: 8192 + supported_parameters: + - temperature + - top_p + - max_tokens + status: 0 + uptime_last_30m: 99.5 + supports_implicit_caching: true + latency_last_30m: + p50: 0.25 + p75: 0.35 + p90: 0.48 + p99: 0.85 + throughput_last_30m: + p50: 45.2 + p75: 38.5 + p90: 28.3 + p99: 15.1 + ListEndpointsResponse: type: object properties: id: type: string - choices: - type: array - items: - $ref: '#/components/schemas/ChatResponseChoice' + description: Unique identifier for the model + example: openai/gpt-4 + name: + type: string + description: Display name of the model + example: GPT-4 created: type: number - model: - type: string - object: + description: Unix timestamp of when the model was created + example: 1692901234 + description: type: string - const: chat.completion - system_fingerprint: - anyOf: - - type: string - - type: 'null' - usage: - $ref: '#/components/schemas/ChatGenerationTokenUsage' - required: - - id - - choices - - created - - model - - object - additionalProperties: false - 
ChatStreamingResponseChunk: - type: object - properties: - data: - type: object - properties: - id: - type: string - choices: - type: array - items: - $ref: '#/components/schemas/ChatStreamingChoice' - created: - type: number - model: - type: string - object: - type: string - const: chat.completion.chunk - system_fingerprint: - anyOf: - - type: string - - type: 'null' - error: - type: object - properties: - message: + description: Description of the model + example: GPT-4 is a large multimodal model that can solve difficult problems with greater accuracy. + architecture: + allOf: + - $ref: '#/components/schemas/ModelArchitecture' + - properties: + tokenizer: + allOf: + - $ref: '#/components/schemas/ModelGroup' + - nullable: true + instruct_type: + $ref: '#/components/schemas/InstructType' + modality: type: string - code: - type: number + nullable: true + description: Primary modality of the model + example: text + input_modalities: + type: array + items: + $ref: '#/components/schemas/InputModality' + description: Supported input modalities + output_modalities: + type: array + items: + $ref: '#/components/schemas/OutputModality' + description: Supported output modalities required: - - message - - code - additionalProperties: false - usage: - $ref: '#/components/schemas/ChatGenerationTokenUsage' - required: - - id - - choices - - created - - model - - object - additionalProperties: false + - tokenizer + - instruct_type + - modality + - input_modalities + - output_modalities + example: + tokenizer: GPT + instruct_type: chatml + modality: text + endpoints: + type: array + items: + $ref: '#/components/schemas/PublicEndpoint' + description: List of available endpoints for this model required: - - data - additionalProperties: false + - id + - name + - created + - description + - architecture + - endpoints + description: List of available endpoints for a model + example: + id: openai/gpt-4 + name: GPT-4 + created: 1692901234 + description: GPT-4 is a large multimodal model 
that can solve difficult problems with greater accuracy. + architecture: + tokenizer: GPT + instruct_type: chatml + modality: text->text + input_modalities: + - text + output_modalities: + - text + endpoints: + - name: 'OpenAI: GPT-4' + model_id: openai/gpt-4 + model_name: GPT-4 + context_length: 8192 + pricing: + prompt: '0.00003' + completion: '0.00006' + request: '0' + image: '0' + provider_name: OpenAI + tag: openai + quantization: fp16 + max_completion_tokens: 4096 + max_prompt_tokens: 8192 + supported_parameters: + - temperature + - top_p + - max_tokens + - frequency_penalty + - presence_penalty + status: 0 + uptime_last_30m: 99.5 + supports_implicit_caching: true + latency_last_30m: + p50: 0.25 + p75: 0.35 + p90: 0.48 + p99: 0.85 + throughput_last_30m: + p50: 45.2 + p75: 38.5 + p90: 28.3 + p99: 15.1 parameters: AppIdentifier: name: HTTP-Referer @@ -9492,10 +9871,6 @@ components: description: | The app display name allows you to customize how your app appears in OpenRouter's dashboard. securitySchemes: - apiKey: - type: http - scheme: bearer - description: API key as bearer token in Authorization header bearer: type: http scheme: bearer @@ -9916,6 +10291,120 @@ paths: parameters: - $ref: "#/components/parameters/AppIdentifier" - $ref: "#/components/parameters/AppDisplayName" + /chat/completions: + post: + x-speakeasy-group: chat + x-speakeasy-name-override: send + x-speakeasy-stream-request-field: stream + tags: + - Chat + summary: Create a chat completion + operationId: sendChatCompletionRequest + description: >- + Sends a request for a model response for the given chat conversation. Supports both streaming and non-streaming modes. 
+ requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ChatGenerationParams' + required: true + responses: + '200': + description: Successful chat completion response + content: + application/json: + schema: + $ref: '#/components/schemas/ChatResponse' + text/event-stream: + schema: + type: object + properties: + data: + $ref: '#/components/schemas/ChatStreamingResponseChunk' + required: + - data + x-speakeasy-sse-sentinel: '[DONE]' + '400': + description: Bad Request - Invalid request parameters or malformed input + content: + application/json: + schema: + $ref: '#/components/schemas/BadRequestResponse' + '401': + description: Unauthorized - Authentication required or invalid credentials + content: + application/json: + schema: + $ref: '#/components/schemas/UnauthorizedResponse' + '402': + description: Payment Required - Insufficient credits or quota to complete request + content: + application/json: + schema: + $ref: '#/components/schemas/PaymentRequiredResponse' + '404': + description: Not Found - Resource does not exist + content: + application/json: + schema: + $ref: '#/components/schemas/NotFoundResponse' + '408': + description: Request Timeout - Operation exceeded time limit + content: + application/json: + schema: + $ref: '#/components/schemas/RequestTimeoutResponse' + '413': + description: Payload Too Large - Request payload exceeds size limits + content: + application/json: + schema: + $ref: '#/components/schemas/PayloadTooLargeResponse' + '422': + description: Unprocessable Entity - Semantic validation failure + content: + application/json: + schema: + $ref: '#/components/schemas/UnprocessableEntityResponse' + '429': + description: Too Many Requests - Rate limit exceeded + content: + application/json: + schema: + $ref: '#/components/schemas/TooManyRequestsResponse' + '500': + description: Internal Server Error - Unexpected server error + content: + application/json: + schema: + $ref: 
'#/components/schemas/InternalServerResponse' + '502': + description: Bad Gateway - Provider/upstream API failure + content: + application/json: + schema: + $ref: '#/components/schemas/BadGatewayResponse' + '503': + description: Service Unavailable - Service temporarily unavailable + content: + application/json: + schema: + $ref: '#/components/schemas/ServiceUnavailableResponse' + '524': + description: Infrastructure Timeout - Request timed out at our edge network + content: + application/json: + schema: + $ref: '#/components/schemas/EdgeNetworkTimeoutResponse' + '529': + description: Provider Overloaded - Provider is temporarily overloaded + content: + application/json: + schema: + $ref: '#/components/schemas/ProviderOverloadedResponse' + parameters: + - $ref: "#/components/parameters/AppIdentifier" + - $ref: "#/components/parameters/AppDisplayName" /credits: get: x-speakeasy-name-override: getCredits @@ -10602,6 +11091,7 @@ paths: - Inceptron - InferenceNet - Infermatic + - Io Net - Inflection - Liquid - Mara @@ -13790,6 +14280,20 @@ paths: nullable: true format: date-time description: Optional expiration time for the API key to be created + key_label: + type: string + maxLength: 100 + description: Optional custom label for the API key. Defaults to the app name if not provided. + example: My Custom Key + usage_limit_type: + type: string + enum: + - daily + - weekly + - monthly + description: Optional credit limit reset interval. When set, the credit limit resets on this interval. 
+ example: monthly + x-speakeasy-unknown-values: allow required: - callback_url example: @@ -13854,72 +14358,6 @@ paths: parameters: - $ref: "#/components/parameters/AppIdentifier" - $ref: "#/components/parameters/AppDisplayName" - /chat/completions: - post: - summary: Create a chat completion - operationId: sendChatCompletionRequest - x-speakeasy-group: chat - x-speakeasy-name-override: send - x-speakeasy-stream-request-field: stream - description: >- - Sends a request for a model response for the given chat conversation. Supports both streaming and non-streaming modes. - tags: - - Chat - requestBody: - required: true - description: Chat completion request parameters - content: - application/json: - schema: - $ref: '#/components/schemas/ChatGenerationParams' - responses: - '200': - description: Successful chat completion response - content: - application/json: - schema: - $ref: '#/components/schemas/ChatResponse' - description: Chat completion response - text/event-stream: - x-speakeasy-sse-sentinel: '[DONE]' - schema: - $ref: '#/components/schemas/ChatStreamingResponseChunk' - '400': - description: Bad request - invalid parameters - content: - application/json: - schema: - $ref: '#/components/schemas/ChatError' - '401': - description: Unauthorized - invalid API key - content: - application/json: - schema: - $ref: '#/components/schemas/ChatError' - '429': - description: Too many requests - rate limit exceeded - content: - application/json: - schema: - $ref: '#/components/schemas/ChatError' - '500': - description: Internal server error - content: - application/json: - schema: - $ref: '#/components/schemas/ChatError' - parameters: - - $ref: "#/components/parameters/AppIdentifier" - - $ref: "#/components/parameters/AppDisplayName" -servers: - - url: https://openrouter.ai/api/v1 - description: Production server - x-speakeasy-server-id: production -security: - - apiKey: [] -externalDocs: - description: OpenRouter Documentation - url: https://openrouter.ai/docs tags: - 
name: API Keys description: API key management endpoints diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index cc4a5d99..aab665fc 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,8 +2,8 @@ speakeasyVersion: 1.680.0 sources: OpenRouter API: sourceNamespace: open-router-chat-completions-api - sourceRevisionDigest: sha256:270250dd1c0a82a50bdbdbd9bcc22d6b7c859aa05de55451931cd7b7d1e89373 - sourceBlobDigest: sha256:92b2bc8a211cdec86645642bf778db7ba41ae73cee2d1fc0671b7057f45d0203 + sourceRevisionDigest: sha256:22df1300691236b8e160db07a8abae8fba8a102a49b33b49906d5ac7e71e5b85 + sourceBlobDigest: sha256:be6799b85a1e49da7dedea08cffa510807bd72b0b8e41e4fc1a7035e254d29cb tags: - latest - main @@ -12,8 +12,8 @@ targets: openrouter: source: OpenRouter API sourceNamespace: open-router-chat-completions-api - sourceRevisionDigest: sha256:270250dd1c0a82a50bdbdbd9bcc22d6b7c859aa05de55451931cd7b7d1e89373 - sourceBlobDigest: sha256:92b2bc8a211cdec86645642bf778db7ba41ae73cee2d1fc0671b7057f45d0203 + sourceRevisionDigest: sha256:22df1300691236b8e160db07a8abae8fba8a102a49b33b49906d5ac7e71e5b85 + sourceBlobDigest: sha256:be6799b85a1e49da7dedea08cffa510807bd72b0b8e41e4fc1a7035e254d29cb workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.680.0 diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md index 76711bea..fefb4206 100644 --- a/docs/models/assistantmessage.md +++ b/docs/models/assistantmessage.md @@ -1,5 +1,7 @@ # AssistantMessage +Assistant message for requests and responses + ## Example Usage ```typescript @@ -12,13 +14,13 @@ let value: AssistantMessage = { ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `role` | *"assistant"* | 
:heavy_check_mark: | N/A | -| `content` | *models.AssistantMessageContent* | :heavy_minus_sign: | N/A | -| `name` | *string* | :heavy_minus_sign: | N/A | -| `toolCalls` | [models.ChatMessageToolCall](../models/chatmessagetoolcall.md)[] | :heavy_minus_sign: | N/A | -| `refusal` | *string* | :heavy_minus_sign: | N/A | -| `reasoning` | *string* | :heavy_minus_sign: | N/A | -| `reasoningDetails` | *models.Schema19*[] | :heavy_minus_sign: | N/A | -| `images` | [models.Image](../models/image.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | [models.AssistantMessageRole](../models/assistantmessagerole.md) | :heavy_check_mark: | N/A | | +| `content` | *models.AssistantMessageContent* | :heavy_minus_sign: | Assistant message content | | +| `name` | *string* | :heavy_minus_sign: | Optional name for the assistant | | +| `toolCalls` | [models.ChatMessageToolCall](../models/chatmessagetoolcall.md)[] | :heavy_minus_sign: | Tool calls made by the assistant | | +| `refusal` | *string* | :heavy_minus_sign: | Refusal message if content was refused | | +| `reasoning` | *string* | :heavy_minus_sign: | Reasoning output | | +| `reasoningDetails` | *models.ReasoningDetailUnion*[] | :heavy_minus_sign: | Reasoning details for extended thinking models | | +| `images` | [models.AssistantMessageImages](../models/assistantmessageimages.md)[] | :heavy_minus_sign: | Generated images from image generation models | [
{
"image_url": {
"url": "data:image/png;base64,iVBORw0KGgo..."
}
}
] | \ No newline at end of file diff --git a/docs/models/assistantmessagecontent.md b/docs/models/assistantmessagecontent.md index 30a5052e..b3a4c9eb 100644 --- a/docs/models/assistantmessagecontent.md +++ b/docs/models/assistantmessagecontent.md @@ -1,5 +1,7 @@ # AssistantMessageContent +Assistant message content + ## Supported Types @@ -14,11 +16,15 @@ const value: string = ""; ```typescript const value: models.ChatMessageContentItem[] = [ { - type: "image_url", - imageUrl: { - url: "https://pretty-reservation.org", - }, + type: "text", + text: "Hello, world!", }, ]; ``` +### `any` + +```typescript +const value: any = ""; +``` + diff --git a/docs/models/assistantmessageimages.md b/docs/models/assistantmessageimages.md new file mode 100644 index 00000000..3d865524 --- /dev/null +++ b/docs/models/assistantmessageimages.md @@ -0,0 +1,19 @@ +# AssistantMessageImages + +## Example Usage + +```typescript +import { AssistantMessageImages } from "@openrouter/sdk/models"; + +let value: AssistantMessageImages = { + imageUrl: { + url: "https://whole-tribe.net", + }, +}; +``` + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `imageUrl` | [models.AssistantMessageImagesImageUrl](../models/assistantmessageimagesimageurl.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/assistantmessageimagesimageurl.md b/docs/models/assistantmessageimagesimageurl.md new file mode 100644 index 00000000..a58509e9 --- /dev/null +++ b/docs/models/assistantmessageimagesimageurl.md @@ -0,0 +1,17 @@ +# AssistantMessageImagesImageUrl + +## Example Usage + +```typescript +import { AssistantMessageImagesImageUrl } 
from "@openrouter/sdk/models"; + +let value: AssistantMessageImagesImageUrl = { + url: "https://mysterious-exterior.com/", +}; +``` + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- | +| `url` | *string* | :heavy_check_mark: | URL or base64-encoded data of the generated image | \ No newline at end of file diff --git a/docs/models/assistantmessageimageurl.md b/docs/models/assistantmessageimageurl.md deleted file mode 100644 index 9d590578..00000000 --- a/docs/models/assistantmessageimageurl.md +++ /dev/null @@ -1,17 +0,0 @@ -# AssistantMessageImageUrl - -## Example Usage - -```typescript -import { AssistantMessageImageUrl } from "@openrouter/sdk/models"; - -let value: AssistantMessageImageUrl = { - url: "https://winding-fraudster.net", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `url` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/assistantmessagerole.md b/docs/models/assistantmessagerole.md new file mode 100644 index 00000000..7f02f827 --- /dev/null +++ b/docs/models/assistantmessagerole.md @@ -0,0 +1,15 @@ +# AssistantMessageRole + +## Example Usage + +```typescript +import { AssistantMessageRole } from "@openrouter/sdk/models"; + +let value: AssistantMessageRole = "assistant"; +``` + +## Values + +```typescript +"assistant" +``` \ No newline at end of file diff --git a/docs/models/sortenum.md b/docs/models/by.md similarity index 62% rename from docs/models/sortenum.md rename to docs/models/by.md index ecee27db..8e95225b 100644 --- a/docs/models/sortenum.md +++ b/docs/models/by.md @@ -1,11 +1,13 @@ -# SortEnum +# By + +The provider sorting strategy (price, throughput, latency) ## Example Usage 
```typescript -import { SortEnum } from "@openrouter/sdk/models"; +import { By } from "@openrouter/sdk/models"; -let value: SortEnum = "throughput"; +let value: By = "price"; ``` ## Values diff --git a/docs/models/chatcompletionfinishreason.md b/docs/models/chatcompletionfinishreason.md index b1301308..8c39f973 100644 --- a/docs/models/chatcompletionfinishreason.md +++ b/docs/models/chatcompletionfinishreason.md @@ -5,7 +5,7 @@ ```typescript import { ChatCompletionFinishReason } from "@openrouter/sdk/models"; -let value: ChatCompletionFinishReason = "error"; +let value: ChatCompletionFinishReason = "stop"; ``` ## Values diff --git a/docs/models/chaterrorerror.md b/docs/models/chaterrorerror.md deleted file mode 100644 index f0436c70..00000000 --- a/docs/models/chaterrorerror.md +++ /dev/null @@ -1,21 +0,0 @@ -# ChatErrorError - -## Example Usage - -```typescript -import { ChatErrorError } from "@openrouter/sdk/models"; - -let value: ChatErrorError = { - code: "", - message: "", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `code` | *models.Code* | :heavy_check_mark: | N/A | -| `message` | *string* | :heavy_check_mark: | N/A | -| `param` | *string* | :heavy_minus_sign: | N/A | -| `type` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatgenerationparams.md b/docs/models/chatgenerationparams.md index c5802561..c7929769 100644 --- a/docs/models/chatgenerationparams.md +++ b/docs/models/chatgenerationparams.md @@ -1,5 +1,7 @@ # ChatGenerationParams +Chat completion request parameters + ## Example Usage ```typescript @@ -9,42 +11,48 @@ let value: ChatGenerationParams = { messages: [ { role: "system", - content: "", + content: "You are a helpful assistant.", + }, + { + role: "user", + content: "What is the capital of France?", }, ], + temperature: 0.7, }; ``` ## Fields -| Field | Type | Required | Description | -| 
-------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -| `provider` | [models.Schema0](../models/schema0.md) | :heavy_minus_sign: | N/A | -| `plugins` | *models.Schema17*[] | :heavy_minus_sign: | N/A | -| `route` | [models.Route](../models/route.md) | :heavy_minus_sign: | N/A | -| `user` | *string* | :heavy_minus_sign: | N/A | -| `sessionId` | *string* | :heavy_minus_sign: | N/A | -| `messages` | *models.Message*[] | :heavy_check_mark: | N/A | -| `model` | *string* | :heavy_minus_sign: | N/A | -| `models` | *string*[] | :heavy_minus_sign: | N/A | -| `frequencyPenalty` | *number* | :heavy_minus_sign: | N/A | -| `logitBias` | Record | :heavy_minus_sign: | N/A | -| `logprobs` | *boolean* | :heavy_minus_sign: | N/A | -| `topLogprobs` | *number* | :heavy_minus_sign: | N/A | -| `maxCompletionTokens` | *number* | :heavy_minus_sign: | N/A | -| `maxTokens` | *number* | :heavy_minus_sign: | N/A | -| `metadata` | Record | :heavy_minus_sign: | N/A | -| `presencePenalty` | *number* | :heavy_minus_sign: | N/A | -| `reasoning` | [models.Reasoning](../models/reasoning.md) | :heavy_minus_sign: | N/A | -| `responseFormat` | *models.ResponseFormat* | :heavy_minus_sign: | N/A | -| `seed` | *number* | :heavy_minus_sign: | N/A | -| `stop` | *models.Stop* | :heavy_minus_sign: | N/A | -| `stream` | *boolean* | :heavy_minus_sign: | N/A | -| `streamOptions` | [models.ChatStreamOptions](../models/chatstreamoptions.md) | :heavy_minus_sign: | N/A | -| `temperature` | *number* | :heavy_minus_sign: | N/A | -| `toolChoice` | *any* | :heavy_minus_sign: | N/A | -| `tools` | [models.ToolDefinitionJson](../models/tooldefinitionjson.md)[] | :heavy_minus_sign: | N/A | -| `topP` | *number* | :heavy_minus_sign: | N/A | -| `debug` | [models.Debug](../models/debug.md) | :heavy_minus_sign: | N/A | -| 
`imageConfig` | Record | :heavy_minus_sign: | N/A | -| `modalities` | [models.Modality](../models/modality.md)[] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `provider` | [models.ChatGenerationParamsProvider](../models/chatgenerationparamsprovider.md) | :heavy_minus_sign: | When multiple model providers are available, optionally indicate your routing preference. | | +| `plugins` | *models.ChatGenerationParamsPluginUnion*[] | :heavy_minus_sign: | Plugins you want to enable for this request, including their settings. 
| | +| `user` | *string* | :heavy_minus_sign: | Unique user identifier | user-123 | +| `sessionId` | *string* | :heavy_minus_sign: | A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. | | +| `trace` | [models.ChatGenerationParamsTrace](../models/chatgenerationparamstrace.md) | :heavy_minus_sign: | Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. | | +| `messages` | *models.Message*[] | :heavy_check_mark: | List of messages for the conversation | [
{
"role": "user",
"content": "Hello!"
}
] | +| `model` | *string* | :heavy_minus_sign: | Model to use for completion | openai/gpt-4 | +| `models` | *string*[] | :heavy_minus_sign: | Models to use for completion | [
"openai/gpt-4",
"openai/gpt-4o"
] | +| `frequencyPenalty` | *number* | :heavy_minus_sign: | Frequency penalty (-2.0 to 2.0) | 0 | +| `logitBias` | Record | :heavy_minus_sign: | Token logit bias adjustments | {
"50256": -100
} | +| `logprobs` | *boolean* | :heavy_minus_sign: | Return log probabilities | false | +| `topLogprobs` | *number* | :heavy_minus_sign: | Number of top log probabilities to return (0-20) | 5 | +| `maxCompletionTokens` | *number* | :heavy_minus_sign: | Maximum tokens in completion | 100 | +| `maxTokens` | *number* | :heavy_minus_sign: | Maximum tokens (deprecated, use max_completion_tokens) | 100 | +| `metadata` | Record | :heavy_minus_sign: | Key-value pairs for additional object information (max 16 pairs, 64 char keys, 512 char values) | {
"user_id": "user-123",
"session_id": "session-456"
} | +| `presencePenalty` | *number* | :heavy_minus_sign: | Presence penalty (-2.0 to 2.0) | 0 | +| `reasoning` | [models.Reasoning](../models/reasoning.md) | :heavy_minus_sign: | Configuration options for reasoning models | {
"effort": "medium",
"summary": "concise"
} | +| `responseFormat` | *models.ResponseFormat* | :heavy_minus_sign: | Response format configuration | {
"type": "json_object"
} | +| `seed` | *number* | :heavy_minus_sign: | Random seed for deterministic outputs | 42 | +| `stop` | *models.Stop* | :heavy_minus_sign: | Stop sequences (up to 4) | [
""
] | +| `stream` | *boolean* | :heavy_minus_sign: | Enable streaming response | false | +| `streamOptions` | [models.ChatStreamOptions](../models/chatstreamoptions.md) | :heavy_minus_sign: | Streaming configuration options | {
"include_usage": true
} | +| `temperature` | *number* | :heavy_minus_sign: | Sampling temperature (0-2) | 0.7 | +| `parallelToolCalls` | *boolean* | :heavy_minus_sign: | N/A | | +| `toolChoice` | *models.ToolChoiceOption* | :heavy_minus_sign: | Tool choice configuration | auto | +| `tools` | [models.ToolDefinitionJson](../models/tooldefinitionjson.md)[] | :heavy_minus_sign: | Available tools for function calling | [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get weather"
}
}
] | +| `topP` | *number* | :heavy_minus_sign: | Nucleus sampling parameter (0-1) | 1 | +| `debug` | [models.DebugOptions](../models/debugoptions.md) | :heavy_minus_sign: | Debug options for inspecting request transformations (streaming only) | {
"echo_upstream_body": true
} | +| `imageConfig` | Record | :heavy_minus_sign: | Provider-specific image configuration options. Keys and values vary by model/provider. See https://openrouter.ai/docs/guides/overview/multimodal/image-generation for more details. | {
"aspect_ratio": "16:9"
} | +| `modalities` | [models.Modality](../models/modality.md)[] | :heavy_minus_sign: | Output modalities for the response. Supported values are "text" and "image". | [
"text",
"image"
] | \ No newline at end of file diff --git a/docs/models/chatgenerationparamsby.md b/docs/models/chatgenerationparamsby.md new file mode 100644 index 00000000..77543175 --- /dev/null +++ b/docs/models/chatgenerationparamsby.md @@ -0,0 +1,19 @@ +# ChatGenerationParamsBy + +The provider sorting strategy (price, throughput, latency) + +## Example Usage + +```typescript +import { ChatGenerationParamsBy } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsBy = "price"; +``` + +## Values + +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. + +```typescript +"price" | "throughput" | "latency" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/chatgenerationparamsignore.md b/docs/models/chatgenerationparamsignore.md new file mode 100644 index 00000000..beb05ef9 --- /dev/null +++ b/docs/models/chatgenerationparamsignore.md @@ -0,0 +1,17 @@ +# ChatGenerationParamsIgnore + + +## Supported Types + +### `models.ProviderName` + +```typescript +const value: models.ProviderName = "OpenAI"; +``` + +### `string` + +```typescript +const value: string = ""; +``` + diff --git a/docs/models/chatgenerationparamsmaxprice.md b/docs/models/chatgenerationparamsmaxprice.md new file mode 100644 index 00000000..e35aa9d9 --- /dev/null +++ b/docs/models/chatgenerationparamsmaxprice.md @@ -0,0 +1,21 @@ +# ChatGenerationParamsMaxPrice + +The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. 
+ +## Example Usage + +```typescript +import { ChatGenerationParamsMaxPrice } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsMaxPrice = {}; +``` + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| `prompt` | *string* | :heavy_minus_sign: | Price per million prompt tokens | 1000 | +| `completion` | *string* | :heavy_minus_sign: | N/A | 1000 | +| `image` | *string* | :heavy_minus_sign: | N/A | 1000 | +| `audio` | *string* | :heavy_minus_sign: | N/A | 1000 | +| `request` | *string* | :heavy_minus_sign: | N/A | 1000 | \ No newline at end of file diff --git a/docs/models/schema5.md b/docs/models/chatgenerationparamsonly.md similarity index 52% rename from docs/models/schema5.md rename to docs/models/chatgenerationparamsonly.md index c8bd7fbc..10fe5def 100644 --- a/docs/models/schema5.md +++ b/docs/models/chatgenerationparamsonly.md @@ -1,12 +1,12 @@ -# Schema5 +# ChatGenerationParamsOnly ## Supported Types -### `models.Schema5Enum` +### `models.ProviderName` ```typescript -const value: models.Schema5Enum = "Novita"; +const value: models.ProviderName = "OpenAI"; ``` ### `string` diff --git a/docs/models/chatgenerationparamsorder.md b/docs/models/chatgenerationparamsorder.md new file mode 100644 index 00000000..b01d92ed --- /dev/null +++ b/docs/models/chatgenerationparamsorder.md @@ -0,0 +1,17 @@ +# ChatGenerationParamsOrder + + +## Supported Types + +### `models.ProviderName` + +```typescript +const value: models.ProviderName = "OpenAI"; +``` + +### `string` + +```typescript +const value: string = ""; +``` + diff --git a/docs/models/chatgenerationparamspartition.md b/docs/models/chatgenerationparamspartition.md new file mode 100644 index 00000000..0245cc5c --- /dev/null +++ b/docs/models/chatgenerationparamspartition.md @@ -0,0 +1,19 @@ +# 
ChatGenerationParamsPartition + +Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. + +## Example Usage + +```typescript +import { ChatGenerationParamsPartition } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsPartition = "model"; +``` + +## Values + +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. + +```typescript +"model" | "none" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/pluginautorouter.md b/docs/models/chatgenerationparamspluginautorouter.md similarity index 96% rename from docs/models/pluginautorouter.md rename to docs/models/chatgenerationparamspluginautorouter.md index fa36cf54..bc272191 100644 --- a/docs/models/pluginautorouter.md +++ b/docs/models/chatgenerationparamspluginautorouter.md @@ -1,11 +1,11 @@ -# PluginAutoRouter +# ChatGenerationParamsPluginAutoRouter ## Example Usage ```typescript -import { PluginAutoRouter } from "@openrouter/sdk/models"; +import { ChatGenerationParamsPluginAutoRouter } from "@openrouter/sdk/models"; -let value: PluginAutoRouter = { +let value: ChatGenerationParamsPluginAutoRouter = { id: "auto-router", }; ``` diff --git a/docs/models/pluginfileparser.md b/docs/models/chatgenerationparamspluginfileparser.md similarity index 91% rename from docs/models/pluginfileparser.md rename to docs/models/chatgenerationparamspluginfileparser.md index 0fe4dc1b..4ac46f9c 100644 --- a/docs/models/pluginfileparser.md +++ b/docs/models/chatgenerationparamspluginfileparser.md @@ -1,11 +1,11 @@ -# PluginFileParser +# ChatGenerationParamsPluginFileParser ## Example Usage ```typescript -import { PluginFileParser } from "@openrouter/sdk/models"; +import { ChatGenerationParamsPluginFileParser } from "@openrouter/sdk/models"; -let value: PluginFileParser = { +let value: ChatGenerationParamsPluginFileParser = { id: 
"file-parser", }; ``` diff --git a/docs/models/schema17moderation.md b/docs/models/chatgenerationparamspluginmoderation.md similarity index 66% rename from docs/models/schema17moderation.md rename to docs/models/chatgenerationparamspluginmoderation.md index 0e5427bd..651179b6 100644 --- a/docs/models/schema17moderation.md +++ b/docs/models/chatgenerationparamspluginmoderation.md @@ -1,11 +1,11 @@ -# Schema17Moderation +# ChatGenerationParamsPluginModeration ## Example Usage ```typescript -import { Schema17Moderation } from "@openrouter/sdk/models"; +import { ChatGenerationParamsPluginModeration } from "@openrouter/sdk/models"; -let value: Schema17Moderation = { +let value: ChatGenerationParamsPluginModeration = { id: "moderation", }; ``` diff --git a/docs/models/pluginresponsehealing.md b/docs/models/chatgenerationparamspluginresponsehealing.md similarity index 89% rename from docs/models/pluginresponsehealing.md rename to docs/models/chatgenerationparamspluginresponsehealing.md index 440949a0..7688b175 100644 --- a/docs/models/pluginresponsehealing.md +++ b/docs/models/chatgenerationparamspluginresponsehealing.md @@ -1,11 +1,11 @@ -# PluginResponseHealing +# ChatGenerationParamsPluginResponseHealing ## Example Usage ```typescript -import { PluginResponseHealing } from "@openrouter/sdk/models"; +import { ChatGenerationParamsPluginResponseHealing } from "@openrouter/sdk/models"; -let value: PluginResponseHealing = { +let value: ChatGenerationParamsPluginResponseHealing = { id: "response-healing", }; ``` diff --git a/docs/models/chatgenerationparamspluginunion.md b/docs/models/chatgenerationparamspluginunion.md new file mode 100644 index 00000000..fcecb0ab --- /dev/null +++ b/docs/models/chatgenerationparamspluginunion.md @@ -0,0 +1,45 @@ +# ChatGenerationParamsPluginUnion + + +## Supported Types + +### `models.ChatGenerationParamsPluginAutoRouter` + +```typescript +const value: models.ChatGenerationParamsPluginAutoRouter = { + id: "auto-router", +}; +``` + +### 
`models.ChatGenerationParamsPluginModeration` + +```typescript +const value: models.ChatGenerationParamsPluginModeration = { + id: "moderation", +}; +``` + +### `models.ChatGenerationParamsPluginWeb` + +```typescript +const value: models.ChatGenerationParamsPluginWeb = { + id: "web", +}; +``` + +### `models.ChatGenerationParamsPluginFileParser` + +```typescript +const value: models.ChatGenerationParamsPluginFileParser = { + id: "file-parser", +}; +``` + +### `models.ChatGenerationParamsPluginResponseHealing` + +```typescript +const value: models.ChatGenerationParamsPluginResponseHealing = { + id: "response-healing", +}; +``` + diff --git a/docs/models/pluginweb.md b/docs/models/chatgenerationparamspluginweb.md similarity index 94% rename from docs/models/pluginweb.md rename to docs/models/chatgenerationparamspluginweb.md index 9f1c1ed0..cd592c9e 100644 --- a/docs/models/pluginweb.md +++ b/docs/models/chatgenerationparamspluginweb.md @@ -1,11 +1,11 @@ -# PluginWeb +# ChatGenerationParamsPluginWeb ## Example Usage ```typescript -import { PluginWeb } from "@openrouter/sdk/models"; +import { ChatGenerationParamsPluginWeb } from "@openrouter/sdk/models"; -let value: PluginWeb = { +let value: ChatGenerationParamsPluginWeb = { id: "web", }; ``` diff --git a/docs/models/chatgenerationparamsprovider.md b/docs/models/chatgenerationparamsprovider.md new file mode 100644 index 00000000..5e19af77 --- /dev/null +++ b/docs/models/chatgenerationparamsprovider.md @@ -0,0 +1,29 @@ +# ChatGenerationParamsProvider + +When multiple model providers are available, optionally indicate your routing preference. 
+ +## Example Usage + +```typescript +import { ChatGenerationParamsProvider } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsProvider = {}; +``` + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `allowFallbacks` | 
*boolean* | :heavy_minus_sign: | Whether to allow backup providers to serve requests
- true: (default) when the primary provider (or your custom providers in "order") is unavailable, use the next best provider.
- false: use only the primary/custom provider, and return the upstream error if it's unavailable.
| | +| `requireParameters` | *boolean* | :heavy_minus_sign: | Whether to filter providers to only those that support the parameters you've provided. If this setting is omitted or set to false, then providers will receive only the parameters they support, and ignore the rest. | | +| `dataCollection` | [models.DataCollection](../models/datacollection.md) | :heavy_minus_sign: | Data collection setting. If no available model provider meets the requirement, your request will return an error.
- allow: (default) allow providers which store user data non-transiently and may train on it

- deny: use only providers which do not collect user data. | allow | +| `zdr` | *boolean* | :heavy_minus_sign: | Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that do not retain prompts will be used. | true | +| `enforceDistillableText` | *boolean* | :heavy_minus_sign: | Whether to restrict routing to only models that allow text distillation. When true, only models where the author has allowed distillation will be used. | true | +| `order` | *models.ChatGenerationParamsOrder*[] | :heavy_minus_sign: | An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message. | | +| `only` | *models.ChatGenerationParamsOnly*[] | :heavy_minus_sign: | List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request. | | +| `ignore` | *models.ChatGenerationParamsIgnore*[] | :heavy_minus_sign: | List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request. | | +| `quantizations` | [models.Quantization](../models/quantization.md)[] | :heavy_minus_sign: | A list of quantization levels to filter the provider by. | | +| `sort` | *models.ChatGenerationParamsSortUnion* | :heavy_minus_sign: | N/A | price | +| `maxPrice` | [models.ChatGenerationParamsMaxPrice](../models/chatgenerationparamsmaxprice.md) | :heavy_minus_sign: | The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. | | +| `preferredMinThroughput` | *models.PreferredMinThroughput* | :heavy_minus_sign: | Preferred minimum throughput (in tokens per second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. 
Endpoints below the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold. | 100 | +| `preferredMaxLatency` | *models.PreferredMaxLatency* | :heavy_minus_sign: | Preferred maximum latency (in seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints above the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold. | 5 | \ No newline at end of file diff --git a/docs/models/chatgenerationparamsprovidersort.md b/docs/models/chatgenerationparamsprovidersort.md new file mode 100644 index 00000000..49d01bd7 --- /dev/null +++ b/docs/models/chatgenerationparamsprovidersort.md @@ -0,0 +1,19 @@ +# ChatGenerationParamsProviderSort + +The provider sorting strategy (price, throughput, latency) + +## Example Usage + +```typescript +import { ChatGenerationParamsProviderSort } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsProviderSort = "price"; +``` + +## Values + +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
+ +```typescript +"price" | "throughput" | "latency" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/chatgenerationparamsprovidersortconfig.md b/docs/models/chatgenerationparamsprovidersortconfig.md new file mode 100644 index 00000000..6fabc8e6 --- /dev/null +++ b/docs/models/chatgenerationparamsprovidersortconfig.md @@ -0,0 +1,16 @@ +# ChatGenerationParamsProviderSortConfig + +## Example Usage + +```typescript +import { ChatGenerationParamsProviderSortConfig } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsProviderSortConfig = {}; +``` + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `by` | [models.ChatGenerationParamsBy](../models/chatgenerationparamsby.md) | :heavy_minus_sign: | The provider sorting strategy (price, throughput, latency) | price | +| `partition` | [models.ChatGenerationParamsPartition](../models/chatgenerationparamspartition.md) | :heavy_minus_sign: | Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback 
models remain fallbacks), "none" sorts all endpoints together regardless of model. | model | \ No newline at end of file diff --git a/docs/models/chatgenerationparamsprovidersortconfigenum.md b/docs/models/chatgenerationparamsprovidersortconfigenum.md new file mode 100644 index 00000000..a194eedc --- /dev/null +++ b/docs/models/chatgenerationparamsprovidersortconfigenum.md @@ -0,0 +1,15 @@ +# ChatGenerationParamsProviderSortConfigEnum + +## Example Usage + +```typescript +import { ChatGenerationParamsProviderSortConfigEnum } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsProviderSortConfigEnum = "latency"; +``` + +## Values + +```typescript +"price" | "throughput" | "latency" +``` \ No newline at end of file diff --git a/docs/models/chatgenerationparamsprovidersortconfigunion.md b/docs/models/chatgenerationparamsprovidersortconfigunion.md new file mode 100644 index 00000000..9c9e2bdb --- /dev/null +++ b/docs/models/chatgenerationparamsprovidersortconfigunion.md @@ -0,0 +1,17 @@ +# ChatGenerationParamsProviderSortConfigUnion + + +## Supported Types + +### `models.ChatGenerationParamsProviderSortConfig` + +```typescript +const value: models.ChatGenerationParamsProviderSortConfig = {}; +``` + +### `models.ChatGenerationParamsProviderSortConfigEnum` + +```typescript +const value: models.ChatGenerationParamsProviderSortConfigEnum = "throughput"; +``` + diff --git a/docs/models/chatgenerationparamssortenum.md b/docs/models/chatgenerationparamssortenum.md new file mode 100644 index 00000000..586baae6 --- /dev/null +++ b/docs/models/chatgenerationparamssortenum.md @@ -0,0 +1,17 @@ +# ChatGenerationParamsSortEnum + +## Example Usage + +```typescript +import { ChatGenerationParamsSortEnum } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsSortEnum = "price"; +``` + +## Values + +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
+ +```typescript +"price" | "throughput" | "latency" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/chatgenerationparamssortunion.md b/docs/models/chatgenerationparamssortunion.md new file mode 100644 index 00000000..bc8cf793 --- /dev/null +++ b/docs/models/chatgenerationparamssortunion.md @@ -0,0 +1,25 @@ +# ChatGenerationParamsSortUnion + +The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing is performed. + + +## Supported Types + +### `models.ChatGenerationParamsProviderSort` + +```typescript +const value: models.ChatGenerationParamsProviderSort = "price"; +``` + +### `models.ChatGenerationParamsProviderSortConfigUnion` + +```typescript +const value: models.ChatGenerationParamsProviderSortConfigUnion = {}; +``` + +### `models.ChatGenerationParamsSortEnum` + +```typescript +const value: models.ChatGenerationParamsSortEnum = "price"; +``` + diff --git a/docs/models/chatgenerationparamstrace.md b/docs/models/chatgenerationparamstrace.md new file mode 100644 index 00000000..62191c84 --- /dev/null +++ b/docs/models/chatgenerationparamstrace.md @@ -0,0 +1,22 @@ +# ChatGenerationParamsTrace + +Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. 
+ +## Example Usage + +```typescript +import { ChatGenerationParamsTrace } from "@openrouter/sdk/models"; + +let value: ChatGenerationParamsTrace = {}; +``` + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `traceId` | *string* | :heavy_minus_sign: | N/A | +| `traceName` | *string* | :heavy_minus_sign: | N/A | +| `spanName` | *string* | :heavy_minus_sign: | N/A | +| `generationName` | *string* | :heavy_minus_sign: | N/A | +| `parentSpanId` | *string* | :heavy_minus_sign: | N/A | +| `additionalProperties` | Record | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatgenerationtokenusage.md b/docs/models/chatgenerationtokenusage.md index e5ba6582..8b5bd765 100644 --- a/docs/models/chatgenerationtokenusage.md +++ b/docs/models/chatgenerationtokenusage.md @@ -1,14 +1,16 @@ # ChatGenerationTokenUsage +Token usage statistics + ## Example Usage ```typescript import { ChatGenerationTokenUsage } from "@openrouter/sdk/models"; let value: ChatGenerationTokenUsage = { - completionTokens: 9399.77, - promptTokens: 9559.6, - totalTokens: 7060.03, + completionTokens: 15, + promptTokens: 10, + totalTokens: 25, }; ``` @@ -16,8 +18,8 @@ let value: ChatGenerationTokenUsage = { | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `completionTokens` | *number* | :heavy_check_mark: | N/A | -| `promptTokens` | *number* | :heavy_check_mark: | N/A | -| `totalTokens` | *number* | :heavy_check_mark: | N/A | -| `completionTokensDetails` | [models.CompletionTokensDetails](../models/completiontokensdetails.md) | :heavy_minus_sign: | N/A | -| `promptTokensDetails` | 
[models.PromptTokensDetails](../models/prompttokensdetails.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `completionTokens` | *number* | :heavy_check_mark: | Number of tokens in the completion | +| `promptTokens` | *number* | :heavy_check_mark: | Number of tokens in the prompt | +| `totalTokens` | *number* | :heavy_check_mark: | Total number of tokens | +| `completionTokensDetails` | [models.CompletionTokensDetails](../models/completiontokensdetails.md) | :heavy_minus_sign: | Detailed completion token usage | +| `promptTokensDetails` | [models.PromptTokensDetails](../models/prompttokensdetails.md) | :heavy_minus_sign: | Detailed prompt token usage | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitem.md b/docs/models/chatmessagecontentitem.md index c66c9f77..60b3ec3f 100644 --- a/docs/models/chatmessagecontentitem.md +++ b/docs/models/chatmessagecontentitem.md @@ -1,5 +1,7 @@ # ChatMessageContentItem +Content part for chat completion messages + ## Supported Types @@ -8,7 +10,7 @@ ```typescript const value: models.ChatMessageContentItemText = { type: "text", - text: "", + text: "Hello, world!", }; ``` @@ -18,7 +20,7 @@ const value: models.ChatMessageContentItemText = { const value: models.ChatMessageContentItemImage = { type: "image_url", imageUrl: { - url: "https://pretty-reservation.org", + url: "https://better-nephew.com/", }, }; ``` @@ -35,24 +37,13 @@ const value: models.ChatMessageContentItemAudio = { }; ``` -### `models.ChatMessageContentItemVideo` - -```typescript -const value: models.ChatMessageContentItemVideo = { - type: "video_url", - videoUrl: { - url: "https://palatable-subexpression.com/", - }, -}; -``` - -### `models.ChatMessageContentItemVideo` +### `models.ChatMessageContentItem1` ```typescript -const value: models.ChatMessageContentItemVideo = { +const value: models.ChatMessageContentItem1 = { type: "video_url", videoUrl: { - url: "https://palatable-subexpression.com/", + url: "https://standard-step.net/", 
}, }; ``` diff --git a/docs/models/chatmessagecontentitem1.md b/docs/models/chatmessagecontentitem1.md new file mode 100644 index 00000000..f975ea8e --- /dev/null +++ b/docs/models/chatmessagecontentitem1.md @@ -0,0 +1,27 @@ +# ChatMessageContentItem1 + + +## Supported Types + +### `models.ChatMessageContentItemVideoLegacy` + +```typescript +const value: models.ChatMessageContentItemVideoLegacy = { + type: "input_video", + videoUrl: { + url: "https://standard-step.net/", + }, +}; +``` + +### `models.ChatMessageContentItemVideo` + +```typescript +const value: models.ChatMessageContentItemVideo = { + type: "video_url", + videoUrl: { + url: "https://standard-step.net/", + }, +}; +``` + diff --git a/docs/models/chatmessagecontentitemaudio.md b/docs/models/chatmessagecontentitemaudio.md index 5553e7f3..8952ff6e 100644 --- a/docs/models/chatmessagecontentitemaudio.md +++ b/docs/models/chatmessagecontentitemaudio.md @@ -1,5 +1,7 @@ # ChatMessageContentItemAudio +Audio input content part. Supported audio formats vary by provider. 
+ ## Example Usage ```typescript @@ -18,5 +20,5 @@ let value: ChatMessageContentItemAudio = { | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `type` | *"input_audio"* | :heavy_check_mark: | N/A | +| `type` | [models.ChatMessageContentItemAudioType](../models/chatmessagecontentitemaudiotype.md) | :heavy_check_mark: | N/A | | `inputAudio` | [models.ChatMessageContentItemAudioInputAudio](../models/chatmessagecontentitemaudioinputaudio.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemaudioinputaudio.md b/docs/models/chatmessagecontentitemaudioinputaudio.md index 6bc501cd..67f13de3 100644 --- a/docs/models/chatmessagecontentitemaudioinputaudio.md +++ b/docs/models/chatmessagecontentitemaudioinputaudio.md @@ -13,7 +13,7 @@ let value: ChatMessageContentItemAudioInputAudio = { ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `data` | *string* | :heavy_check_mark: | N/A | -| `format` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | 
+| `data` | *string* | :heavy_check_mark: | Base64 encoded audio data | +| `format` | *string* | :heavy_check_mark: | Audio format (e.g., wav, mp3, flac, m4a, ogg, aiff, aac, pcm16, pcm24). Supported formats vary by provider. | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemaudiotype.md b/docs/models/chatmessagecontentitemaudiotype.md new file mode 100644 index 00000000..8b322e99 --- /dev/null +++ b/docs/models/chatmessagecontentitemaudiotype.md @@ -0,0 +1,15 @@ +# ChatMessageContentItemAudioType + +## Example Usage + +```typescript +import { ChatMessageContentItemAudioType } from "@openrouter/sdk/models"; + +let value: ChatMessageContentItemAudioType = "input_audio"; +``` + +## Values + +```typescript +"input_audio" +``` \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemcachecontrol.md b/docs/models/chatmessagecontentitemcachecontrol.md index 1ed1fa42..7af306bc 100644 --- a/docs/models/chatmessagecontentitemcachecontrol.md +++ b/docs/models/chatmessagecontentitemcachecontrol.md @@ -1,5 +1,7 @@ # ChatMessageContentItemCacheControl +Cache control for the content part + ## Example Usage ```typescript @@ -12,7 +14,7 @@ let value: ChatMessageContentItemCacheControl = { ## Fields -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `type` | *"ephemeral"* | :heavy_check_mark: | N/A | -| `ttl` | [models.Ttl](../models/ttl.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------- | +| `type` | [models.ChatMessageContentItemCacheControlType](../models/chatmessagecontentitemcachecontroltype.md) | :heavy_check_mark: | N/A | +| `ttl` | [models.Ttl](../models/ttl.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemcachecontroltype.md b/docs/models/chatmessagecontentitemcachecontroltype.md new file mode 100644 index 00000000..9fa295c5 --- /dev/null +++ b/docs/models/chatmessagecontentitemcachecontroltype.md @@ -0,0 +1,15 @@ +# ChatMessageContentItemCacheControlType + +## Example Usage + +```typescript +import { ChatMessageContentItemCacheControlType } from "@openrouter/sdk/models"; + +let value: ChatMessageContentItemCacheControlType = "ephemeral"; +``` + +## Values + +```typescript +"ephemeral" +``` \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemimage.md b/docs/models/chatmessagecontentitemimage.md index 662a771c..7e24ee55 100644 --- a/docs/models/chatmessagecontentitemimage.md +++ b/docs/models/chatmessagecontentitemimage.md @@ -1,5 +1,7 @@ # ChatMessageContentItemImage +Image content part for vision models + ## Example Usage ```typescript @@ -8,7 +10,7 @@ import { ChatMessageContentItemImage } from "@openrouter/sdk/models"; let value: ChatMessageContentItemImage = { type: "image_url", imageUrl: { - url: "https://pretty-reservation.org", + url: "https://better-nephew.com/", }, }; ``` @@ -17,5 +19,5 @@ let value: ChatMessageContentItemImage = { | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------- | -| `type` | *"image_url"* | :heavy_check_mark: | N/A | +| `type` | [models.ChatMessageContentItemImageType](../models/chatmessagecontentitemimagetype.md) | :heavy_check_mark: | N/A | | `imageUrl` | [models.ChatMessageContentItemImageImageUrl](../models/chatmessagecontentitemimageimageurl.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemimagedetail.md b/docs/models/chatmessagecontentitemimagedetail.md index 20db7846..109e3c15 100644 --- a/docs/models/chatmessagecontentitemimagedetail.md +++ b/docs/models/chatmessagecontentitemimagedetail.md @@ -1,5 +1,7 @@ # ChatMessageContentItemImageDetail +Image detail level for vision models + ## Example Usage ```typescript diff --git a/docs/models/chatmessagecontentitemimageimageurl.md b/docs/models/chatmessagecontentitemimageimageurl.md index a3891c14..697ca9f4 100644 --- a/docs/models/chatmessagecontentitemimageimageurl.md +++ b/docs/models/chatmessagecontentitemimageimageurl.md @@ -14,5 +14,5 @@ let value: ChatMessageContentItemImageImageUrl = { | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `url` | *string* | :heavy_check_mark: | N/A | -| `detail` | [models.ChatMessageContentItemImageDetail](../models/chatmessagecontentitemimagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `url` | *string* | :heavy_check_mark: | URL of the image (data: URLs supported) | +| `detail` | [models.ChatMessageContentItemImageDetail](../models/chatmessagecontentitemimagedetail.md) | :heavy_minus_sign: | 
Image detail level for vision models | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemimagetype.md b/docs/models/chatmessagecontentitemimagetype.md new file mode 100644 index 00000000..3c1a6e9f --- /dev/null +++ b/docs/models/chatmessagecontentitemimagetype.md @@ -0,0 +1,15 @@ +# ChatMessageContentItemImageType + +## Example Usage + +```typescript +import { ChatMessageContentItemImageType } from "@openrouter/sdk/models"; + +let value: ChatMessageContentItemImageType = "image_url"; +``` + +## Values + +```typescript +"image_url" +``` \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemtext.md b/docs/models/chatmessagecontentitemtext.md index 1aa324dd..c401763a 100644 --- a/docs/models/chatmessagecontentitemtext.md +++ b/docs/models/chatmessagecontentitemtext.md @@ -1,5 +1,7 @@ # ChatMessageContentItemText +Text content part + ## Example Usage ```typescript @@ -7,14 +9,14 @@ import { ChatMessageContentItemText } from "@openrouter/sdk/models"; let value: ChatMessageContentItemText = { type: "text", - text: "", + text: "Hello, world!", }; ``` ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `type` | *"text"* | :heavy_check_mark: | N/A | -| `text` | *string* | :heavy_check_mark: | N/A | -| `cacheControl` | [models.ChatMessageContentItemCacheControl](../models/chatmessagecontentitemcachecontrol.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `type` | [models.ChatMessageContentItemTextType](../models/chatmessagecontentitemtexttype.md) | :heavy_check_mark: | N/A | | +| `text` | *string* | :heavy_check_mark: | N/A | | +| `cacheControl` | [models.ChatMessageContentItemCacheControl](../models/chatmessagecontentitemcachecontrol.md) | :heavy_minus_sign: | Cache control for the content part | {
"type": "ephemeral",
"ttl": "5m"
} | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemtexttype.md b/docs/models/chatmessagecontentitemtexttype.md new file mode 100644 index 00000000..78d04703 --- /dev/null +++ b/docs/models/chatmessagecontentitemtexttype.md @@ -0,0 +1,15 @@ +# ChatMessageContentItemTextType + +## Example Usage + +```typescript +import { ChatMessageContentItemTextType } from "@openrouter/sdk/models"; + +let value: ChatMessageContentItemTextType = "text"; +``` + +## Values + +```typescript +"text" +``` \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemvideo.md b/docs/models/chatmessagecontentitemvideo.md index 8bcb8beb..75c62e55 100644 --- a/docs/models/chatmessagecontentitemvideo.md +++ b/docs/models/chatmessagecontentitemvideo.md @@ -1,27 +1,23 @@ # ChatMessageContentItemVideo +Video input content part -## Supported Types - -### `models.ChatMessageContentItemVideoInputVideo` +## Example Usage ```typescript -const value: models.ChatMessageContentItemVideoInputVideo = { - type: "input_video", - videoUrl: { - url: "https://salty-diversity.biz", - }, -}; -``` - -### `models.ChatMessageContentItemVideoVideoURL` +import { ChatMessageContentItemVideo } from "@openrouter/sdk/models"; -```typescript -const value: models.ChatMessageContentItemVideoVideoURL = { +let value: ChatMessageContentItemVideo = { type: "video_url", videoUrl: { - url: "https://palatable-subexpression.com/", + url: "https://standard-step.net/", }, }; ``` +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `type` | *"video_url"* | :heavy_check_mark: | N/A | +| `videoUrl` | [models.VideoInput](../models/videoinput.md) | :heavy_check_mark: | Video input object | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemvideoinputvideo.md 
b/docs/models/chatmessagecontentitemvideoinputvideo.md deleted file mode 100644 index 44fd3739..00000000 --- a/docs/models/chatmessagecontentitemvideoinputvideo.md +++ /dev/null @@ -1,21 +0,0 @@ -# ChatMessageContentItemVideoInputVideo - -## Example Usage - -```typescript -import { ChatMessageContentItemVideoInputVideo } from "@openrouter/sdk/models"; - -let value: ChatMessageContentItemVideoInputVideo = { - type: "input_video", - videoUrl: { - url: "https://salty-diversity.biz", - }, -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `type` | *"input_video"* | :heavy_check_mark: | N/A | -| `videoUrl` | [models.VideoUrl1](../models/videourl1.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemvideolegacy.md b/docs/models/chatmessagecontentitemvideolegacy.md new file mode 100644 index 00000000..8b6d6266 --- /dev/null +++ b/docs/models/chatmessagecontentitemvideolegacy.md @@ -0,0 +1,25 @@ +# ~~ChatMessageContentItemVideoLegacy~~ + +Video input content part (legacy format - deprecated) + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. 
+ +## Example Usage + +```typescript +import { ChatMessageContentItemVideoLegacy } from "@openrouter/sdk/models"; + +let value: ChatMessageContentItemVideoLegacy = { + type: "input_video", + videoUrl: { + url: "https://standard-step.net/", + }, +}; +``` + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `type` | *"input_video"* | :heavy_check_mark: | N/A | +| `videoUrl` | [models.VideoInput](../models/videoinput.md) | :heavy_check_mark: | Video input object | \ No newline at end of file diff --git a/docs/models/chatmessagecontentitemvideovideourl.md b/docs/models/chatmessagecontentitemvideovideourl.md deleted file mode 100644 index 6853983e..00000000 --- a/docs/models/chatmessagecontentitemvideovideourl.md +++ /dev/null @@ -1,21 +0,0 @@ -# ChatMessageContentItemVideoVideoURL - -## Example Usage - -```typescript -import { ChatMessageContentItemVideoVideoURL } from "@openrouter/sdk/models"; - -let value: ChatMessageContentItemVideoVideoURL = { - type: "video_url", - videoUrl: { - url: "https://palatable-subexpression.com/", - }, -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `type` | *"video_url"* | :heavy_check_mark: | N/A | -| `videoUrl` | [models.VideoUrl2](../models/videourl2.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatmessagetokenlogprob.md b/docs/models/chatmessagetokenlogprob.md index 886381e6..9062deea 100644 --- a/docs/models/chatmessagetokenlogprob.md +++ b/docs/models/chatmessagetokenlogprob.md @@ -1,21 +1,21 @@ # ChatMessageTokenLogprob +Token log probability information + ## Example Usage ```typescript import 
{ ChatMessageTokenLogprob } from "@openrouter/sdk/models"; let value: ChatMessageTokenLogprob = { - token: "", - logprob: 8717.76, - bytes: [], + token: " Hello", + logprob: -0.612345, + bytes: null, topLogprobs: [ { - token: "", - logprob: 9715.54, - bytes: [ - 7041.35, - ], + token: " Hello", + logprob: -0.612345, + bytes: null, }, ], }; @@ -25,7 +25,7 @@ let value: ChatMessageTokenLogprob = { | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `token` | *string* | :heavy_check_mark: | N/A | -| `logprob` | *number* | :heavy_check_mark: | N/A | -| `bytes` | *number*[] | :heavy_check_mark: | N/A | -| `topLogprobs` | [models.ChatMessageTokenLogprobTopLogprob](../models/chatmessagetokenlogprobtoplogprob.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file +| `token` | *string* | :heavy_check_mark: | The token | +| `logprob` | *number* | :heavy_check_mark: | Log probability of the token | +| `bytes` | *number*[] | :heavy_check_mark: | UTF-8 bytes of the token | +| `topLogprobs` | [models.ChatMessageTokenLogprobTopLogprob](../models/chatmessagetokenlogprobtoplogprob.md)[] | :heavy_check_mark: | Top alternative tokens with probabilities | \ No newline at end of file diff --git a/docs/models/chatmessagetokenlogprobs.md b/docs/models/chatmessagetokenlogprobs.md index 01cd792f..e125f27d 100644 --- a/docs/models/chatmessagetokenlogprobs.md +++ b/docs/models/chatmessagetokenlogprobs.md @@ -1,5 +1,7 @@ # ChatMessageTokenLogprobs +Log probabilities for the completion + ## Example Usage ```typescript @@ -8,31 +10,13 @@ import { ChatMessageTokenLogprobs } from "@openrouter/sdk/models"; let value: 
ChatMessageTokenLogprobs = { content: [ { - token: "", - logprob: 2764.68, - bytes: [ - 1199.17, - 6426.57, - ], - topLogprobs: [ - { - token: "", - logprob: 9715.54, - bytes: [ - 7041.35, - ], - }, - ], - }, - ], - refusal: [ - { - token: "", - logprob: 9280.35, - bytes: [], + token: " Hello", + logprob: -0.612345, + bytes: null, topLogprobs: [], }, ], + refusal: null, }; ``` @@ -40,5 +24,5 @@ let value: ChatMessageTokenLogprobs = { | Field | Type | Required | Description | | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | -| `content` | [models.ChatMessageTokenLogprob](../models/chatmessagetokenlogprob.md)[] | :heavy_check_mark: | N/A | -| `refusal` | [models.ChatMessageTokenLogprob](../models/chatmessagetokenlogprob.md)[] | :heavy_check_mark: | N/A | \ No newline at end of file +| `content` | [models.ChatMessageTokenLogprob](../models/chatmessagetokenlogprob.md)[] | :heavy_check_mark: | Log probabilities for content tokens | +| `refusal` | [models.ChatMessageTokenLogprob](../models/chatmessagetokenlogprob.md)[] | :heavy_check_mark: | Log probabilities for refusal tokens | \ No newline at end of file diff --git a/docs/models/chatmessagetoolcall.md b/docs/models/chatmessagetoolcall.md index a56e6bc8..98c9ec0f 100644 --- a/docs/models/chatmessagetoolcall.md +++ b/docs/models/chatmessagetoolcall.md @@ -1,16 +1,18 @@ # ChatMessageToolCall +Tool call made by the assistant + ## Example Usage ```typescript import { ChatMessageToolCall } from "@openrouter/sdk/models"; let value: ChatMessageToolCall = { - id: "", + id: "call_abc123", type: "function", function: { - name: "", - arguments: "", + name: "get_current_weather", + arguments: "{\"location\": \"Boston, MA\"}", }, }; ``` @@ -19,6 +21,6 @@ let value: 
ChatMessageToolCall = { | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `id` | *string* | :heavy_check_mark: | N/A | -| `type` | *"function"* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_check_mark: | Tool call identifier | +| `type` | [models.ChatMessageToolCallType](../models/chatmessagetoolcalltype.md) | :heavy_check_mark: | N/A | | `function` | [models.ChatMessageToolCallFunction](../models/chatmessagetoolcallfunction.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatmessagetoolcallfunction.md b/docs/models/chatmessagetoolcallfunction.md index 124477b6..02aada4f 100644 --- a/docs/models/chatmessagetoolcallfunction.md +++ b/docs/models/chatmessagetoolcallfunction.md @@ -13,7 +13,7 @@ let value: ChatMessageToolCallFunction = { ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `name` | *string* | :heavy_check_mark: | N/A | -| `arguments` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------------- | --------------------------------- | --------------------------------- | --------------------------------- | +| `name` | *string* | :heavy_check_mark: | Function name to call | +| `arguments` | *string* | :heavy_check_mark: | Function arguments as JSON string | \ No newline at end of file diff --git a/docs/models/chatmessagetoolcalltype.md b/docs/models/chatmessagetoolcalltype.md new file mode 100644 index 00000000..527e75da --- /dev/null +++ b/docs/models/chatmessagetoolcalltype.md @@ -0,0 +1,15 @@ +# ChatMessageToolCallType 
+ +## Example Usage + +```typescript +import { ChatMessageToolCallType } from "@openrouter/sdk/models"; + +let value: ChatMessageToolCallType = "function"; +``` + +## Values + +```typescript +"function" +``` \ No newline at end of file diff --git a/docs/models/chatresponse.md b/docs/models/chatresponse.md index 2cab9bbc..89d69ec1 100644 --- a/docs/models/chatresponse.md +++ b/docs/models/chatresponse.md @@ -1,27 +1,37 @@ # ChatResponse +Chat completion response + ## Example Usage ```typescript import { ChatResponse } from "@openrouter/sdk/models"; let value: ChatResponse = { - id: "", - choices: [], - created: 9184.01, - model: "Focus", + id: "chatcmpl-123", + choices: [ + { + finishReason: "stop", + index: 0, + message: { + role: "assistant", + }, + }, + ], + created: 1677652288, + model: "openai/gpt-4", object: "chat.completion", }; ``` ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | -| `id` | *string* | :heavy_check_mark: | N/A | -| `choices` | [models.ChatResponseChoice](../models/chatresponsechoice.md)[] | :heavy_check_mark: | N/A | -| `created` | *number* | :heavy_check_mark: | N/A | -| `model` | *string* | :heavy_check_mark: | N/A | -| `object` | *"chat.completion"* | :heavy_check_mark: | N/A | -| `systemFingerprint` | *string* | :heavy_minus_sign: | N/A | -| `usage` | [models.ChatGenerationTokenUsage](../models/chatgenerationtokenusage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *string* | :heavy_check_mark: | Unique completion identifier | chatcmpl-123 | +| `choices` | [models.ChatResponseChoice](../models/chatresponsechoice.md)[] | :heavy_check_mark: | List of completion choices | | +| `created` | *number* | :heavy_check_mark: | Unix timestamp of creation | 1677652288 | +| `model` | *string* | :heavy_check_mark: | Model used for completion | openai/gpt-4 | +| `object` | [models.ChatResponseObject](../models/chatresponseobject.md) | :heavy_check_mark: | N/A | | +| `systemFingerprint` | *string* | :heavy_minus_sign: | System fingerprint | fp_44709d6fcb | +| `usage` | [models.ChatGenerationTokenUsage](../models/chatgenerationtokenusage.md) | :heavy_minus_sign: | Token usage statistics | {
"completion_tokens": 15,
"prompt_tokens": 10,
"total_tokens": 25,
"completion_tokens_details": {
"reasoning_tokens": 5
},
"prompt_tokens_details": {
"cached_tokens": 2
}
} | \ No newline at end of file diff --git a/docs/models/chatresponsechoice.md b/docs/models/chatresponsechoice.md index 58451f44..bccdba82 100644 --- a/docs/models/chatresponsechoice.md +++ b/docs/models/chatresponsechoice.md @@ -1,5 +1,7 @@ # ChatResponseChoice +Chat completion choice + ## Example Usage ```typescript @@ -7,7 +9,7 @@ import { ChatResponseChoice } from "@openrouter/sdk/models"; let value: ChatResponseChoice = { finishReason: "stop", - index: 2823.76, + index: 0, message: { role: "assistant", }, @@ -16,9 +18,9 @@ let value: ChatResponseChoice = { ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `finishReason` | [models.ChatCompletionFinishReason](../models/chatcompletionfinishreason.md) | :heavy_check_mark: | N/A | -| `index` | *number* | :heavy_check_mark: | N/A | -| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | -| `logprobs` | [models.ChatMessageTokenLogprobs](../models/chatmessagetokenlogprobs.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------ | +| `finishReason` | *any* | :heavy_check_mark: | N/A | | +| `index` | *number* | :heavy_check_mark: | Choice index | 0 | +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | Assistant message for requests and responses | {
"role": "user",
"content": "What is the capital of France?"
} | +| `logprobs` | [models.ChatMessageTokenLogprobs](../models/chatmessagetokenlogprobs.md) | :heavy_minus_sign: | Log probabilities for the completion | {
"content": [
{
"token": " Hello",
"logprob": -0.612345,
"bytes": null,
"top_logprobs": []
}
],
"refusal": null
} | \ No newline at end of file diff --git a/docs/models/chatresponseobject.md b/docs/models/chatresponseobject.md new file mode 100644 index 00000000..cefb86e3 --- /dev/null +++ b/docs/models/chatresponseobject.md @@ -0,0 +1,15 @@ +# ChatResponseObject + +## Example Usage + +```typescript +import { ChatResponseObject } from "@openrouter/sdk/models"; + +let value: ChatResponseObject = "chat.completion"; +``` + +## Values + +```typescript +"chat.completion" +``` \ No newline at end of file diff --git a/docs/models/chatstreamingchoice.md b/docs/models/chatstreamingchoice.md index 8751bd5a..c915f1ae 100644 --- a/docs/models/chatstreamingchoice.md +++ b/docs/models/chatstreamingchoice.md @@ -1,5 +1,7 @@ # ChatStreamingChoice +Streaming completion choice chunk + ## Example Usage ```typescript @@ -7,16 +9,16 @@ import { ChatStreamingChoice } from "@openrouter/sdk/models"; let value: ChatStreamingChoice = { delta: {}, - finishReason: "error", - index: 3511.86, + finishReason: null, + index: 0, }; ``` ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `delta` | [models.ChatStreamingMessageChunk](../models/chatstreamingmessagechunk.md) | :heavy_check_mark: | N/A | -| `finishReason` | [models.ChatCompletionFinishReason](../models/chatcompletionfinishreason.md) | :heavy_check_mark: | N/A | -| `index` | *number* | :heavy_check_mark: | N/A | -| `logprobs` | [models.ChatMessageTokenLogprobs](../models/chatmessagetokenlogprobs.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | +| `delta` | [models.ChatStreamingMessageChunk](../models/chatstreamingmessagechunk.md) | :heavy_check_mark: | Delta changes in streaming response | {
"role": "assistant",
"content": "Hello"
} | +| `finishReason` | *any* | :heavy_check_mark: | N/A | | +| `index` | *number* | :heavy_check_mark: | Choice index | 0 | +| `logprobs` | [models.ChatMessageTokenLogprobs](../models/chatmessagetokenlogprobs.md) | :heavy_minus_sign: | Log probabilities for the completion | {
"content": [
{
"token": " Hello",
"logprob": -0.612345,
"bytes": null,
"top_logprobs": []
}
],
"refusal": null
} | \ No newline at end of file diff --git a/docs/models/chatstreamingmessagechunk.md b/docs/models/chatstreamingmessagechunk.md index 09d63c5b..7cb663d9 100644 --- a/docs/models/chatstreamingmessagechunk.md +++ b/docs/models/chatstreamingmessagechunk.md @@ -1,5 +1,7 @@ # ChatStreamingMessageChunk +Delta changes in streaming response + ## Example Usage ```typescript @@ -10,11 +12,11 @@ let value: ChatStreamingMessageChunk = {}; ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `role` | [models.ChatStreamingMessageChunkRole](../models/chatstreamingmessagechunkrole.md) | :heavy_minus_sign: | N/A | -| `content` | *string* | :heavy_minus_sign: | N/A | -| `reasoning` | *string* | :heavy_minus_sign: | N/A | -| `refusal` | *string* | :heavy_minus_sign: | N/A | -| `toolCalls` | [models.ChatStreamingMessageToolCall](../models/chatstreamingmessagetoolcall.md)[] | :heavy_minus_sign: | N/A | -| `reasoningDetails` | *models.Schema19*[] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `role` | [models.ChatStreamingMessageChunkRole](../models/chatstreamingmessagechunkrole.md) | :heavy_minus_sign: | The role of the message author | assistant | +| `content` 
| *string* | :heavy_minus_sign: | Message content delta | Hello | +| `reasoning` | *string* | :heavy_minus_sign: | Reasoning content delta | I need to | +| `refusal` | *string* | :heavy_minus_sign: | Refusal message delta | | +| `toolCalls` | [models.ChatStreamingMessageToolCall](../models/chatstreamingmessagetoolcall.md)[] | :heavy_minus_sign: | Tool calls delta | | +| `reasoningDetails` | *models.ReasoningDetailUnion*[] | :heavy_minus_sign: | Reasoning details for extended thinking models | | \ No newline at end of file diff --git a/docs/models/chatstreamingmessagechunkrole.md b/docs/models/chatstreamingmessagechunkrole.md index db525ff5..3c82f4ed 100644 --- a/docs/models/chatstreamingmessagechunkrole.md +++ b/docs/models/chatstreamingmessagechunkrole.md @@ -1,5 +1,7 @@ # ChatStreamingMessageChunkRole +The role of the message author + ## Example Usage ```typescript diff --git a/docs/models/chatstreamingmessagetoolcall.md b/docs/models/chatstreamingmessagetoolcall.md index d090a9e6..791022be 100644 --- a/docs/models/chatstreamingmessagetoolcall.md +++ b/docs/models/chatstreamingmessagetoolcall.md @@ -1,20 +1,22 @@ # ChatStreamingMessageToolCall +Tool call delta for streaming responses + ## Example Usage ```typescript import { ChatStreamingMessageToolCall } from "@openrouter/sdk/models"; let value: ChatStreamingMessageToolCall = { - index: 3974.82, + index: 0, }; ``` ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | -| `index` | *number* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_minus_sign: | N/A | -| `type` | *"function"* | :heavy_minus_sign: | N/A | -| 
`function` | [models.ChatStreamingMessageToolCallFunction](../models/chatstreamingmessagetoolcallfunction.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `index` | *number* | :heavy_check_mark: | Tool call index in the array | 0 | +| `id` | *string* | :heavy_minus_sign: | Tool call identifier | call_abc123 | +| `type` | [models.ChatStreamingMessageToolCallType](../models/chatstreamingmessagetoolcalltype.md) | :heavy_minus_sign: | Tool call type | function | +| `function` | [models.ChatStreamingMessageToolCallFunction](../models/chatstreamingmessagetoolcallfunction.md) | :heavy_minus_sign: | Function call details | | \ No newline at end of file diff --git a/docs/models/chatstreamingmessagetoolcallfunction.md b/docs/models/chatstreamingmessagetoolcallfunction.md index 8c28ab9f..253733e3 100644 --- a/docs/models/chatstreamingmessagetoolcallfunction.md +++ b/docs/models/chatstreamingmessagetoolcallfunction.md @@ -1,5 +1,7 @@ # ChatStreamingMessageToolCallFunction +Function call details + ## Example Usage ```typescript @@ -10,7 +12,7 @@ let value: ChatStreamingMessageToolCallFunction = {}; ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `name` | *string* | :heavy_minus_sign: | N/A | -| `arguments` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| 
--------------------------------- | --------------------------------- | --------------------------------- | --------------------------------- | --------------------------------- | +| `name` | *string* | :heavy_minus_sign: | Function name | get_weather | +| `arguments` | *string* | :heavy_minus_sign: | Function arguments as JSON string | {"location": | \ No newline at end of file diff --git a/docs/models/chatstreamingmessagetoolcalltype.md b/docs/models/chatstreamingmessagetoolcalltype.md new file mode 100644 index 00000000..231785a3 --- /dev/null +++ b/docs/models/chatstreamingmessagetoolcalltype.md @@ -0,0 +1,17 @@ +# ChatStreamingMessageToolCallType + +Tool call type + +## Example Usage + +```typescript +import { ChatStreamingMessageToolCallType } from "@openrouter/sdk/models"; + +let value: ChatStreamingMessageToolCallType = "function"; +``` + +## Values + +```typescript +"function" +``` \ No newline at end of file diff --git a/docs/models/chatstreamingresponsechunk.md b/docs/models/chatstreamingresponsechunk.md index 21f49bbb..ac04d77f 100644 --- a/docs/models/chatstreamingresponsechunk.md +++ b/docs/models/chatstreamingresponsechunk.md @@ -1,29 +1,36 @@ # ChatStreamingResponseChunk +Streaming chat completion chunk + ## Example Usage ```typescript import { ChatStreamingResponseChunk } from "@openrouter/sdk/models"; let value: ChatStreamingResponseChunk = { - data: { - id: "", - choices: [ - { - delta: {}, - finishReason: "error", - index: 3793.72, - }, - ], - created: 932.78, - model: "Ranchero", - object: "chat.completion.chunk", - }, + id: "chatcmpl-123", + choices: [ + { + delta: {}, + finishReason: null, + index: 0, + }, + ], + created: 1677652288, + model: "openai/gpt-4", + object: "chat.completion.chunk", }; ``` ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `data` | [models.ChatStreamingResponseChunkData](../models/chatstreamingresponsechunkdata.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *string* | :heavy_check_mark: | Unique chunk identifier | chatcmpl-123 | +| `choices` | [models.ChatStreamingChoice](../models/chatstreamingchoice.md)[] | :heavy_check_mark: | List of streaming chunk choices | | +| `created` | *number* | :heavy_check_mark: | Unix timestamp of creation | 1677652288 | +| `model` | *string* | :heavy_check_mark: | Model used for completion | openai/gpt-4 | +| `object` | [models.ChatStreamingResponseChunkObject](../models/chatstreamingresponsechunkobject.md) | :heavy_check_mark: | N/A | | +| `systemFingerprint` | *string* | :heavy_minus_sign: | System fingerprint | fp_44709d6fcb | +| `error` | [models.ErrorT](../models/errort.md) | :heavy_minus_sign: | Error information | {
"message": "Rate limit exceeded",
"code": 429
} | +| `usage` | [models.ChatGenerationTokenUsage](../models/chatgenerationtokenusage.md) | :heavy_minus_sign: | Token usage statistics | {
"completion_tokens": 15,
"prompt_tokens": 10,
"total_tokens": 25,
"completion_tokens_details": {
"reasoning_tokens": 5
},
"prompt_tokens_details": {
"cached_tokens": 2
}
} | \ No newline at end of file diff --git a/docs/models/chatstreamingresponsechunkdata.md b/docs/models/chatstreamingresponsechunkdata.md deleted file mode 100644 index 65dbdc79..00000000 --- a/docs/models/chatstreamingresponsechunkdata.md +++ /dev/null @@ -1,28 +0,0 @@ -# ChatStreamingResponseChunkData - -## Example Usage - -```typescript -import { ChatStreamingResponseChunkData } from "@openrouter/sdk/models"; - -let value: ChatStreamingResponseChunkData = { - id: "", - choices: [], - created: 3957.6, - model: "F-150", - object: "chat.completion.chunk", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `id` | *string* | :heavy_check_mark: | N/A | -| `choices` | [models.ChatStreamingChoice](../models/chatstreamingchoice.md)[] | :heavy_check_mark: | N/A | -| `created` | *number* | :heavy_check_mark: | N/A | -| `model` | *string* | :heavy_check_mark: | N/A | -| `object` | *"chat.completion.chunk"* | :heavy_check_mark: | N/A | -| `systemFingerprint` | *string* | :heavy_minus_sign: | N/A | -| `error` | [models.ChatStreamingResponseChunkError](../models/chatstreamingresponsechunkerror.md) | :heavy_minus_sign: | N/A | -| `usage` | [models.ChatGenerationTokenUsage](../models/chatgenerationtokenusage.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatstreamingresponsechunkerror.md b/docs/models/chatstreamingresponsechunkerror.md deleted file mode 100644 index fe862efc..00000000 --- a/docs/models/chatstreamingresponsechunkerror.md +++ /dev/null @@ -1,19 +0,0 @@ -# ChatStreamingResponseChunkError - -## Example Usage - -```typescript -import { 
ChatStreamingResponseChunkError } from "@openrouter/sdk/models"; - -let value: ChatStreamingResponseChunkError = { - message: "", - code: 669.92, -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `message` | *string* | :heavy_check_mark: | N/A | -| `code` | *number* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatstreamingresponsechunkobject.md b/docs/models/chatstreamingresponsechunkobject.md new file mode 100644 index 00000000..f812dd47 --- /dev/null +++ b/docs/models/chatstreamingresponsechunkobject.md @@ -0,0 +1,15 @@ +# ChatStreamingResponseChunkObject + +## Example Usage + +```typescript +import { ChatStreamingResponseChunkObject } from "@openrouter/sdk/models"; + +let value: ChatStreamingResponseChunkObject = "chat.completion.chunk"; +``` + +## Values + +```typescript +"chat.completion.chunk" +``` \ No newline at end of file diff --git a/docs/models/chatstreamoptions.md b/docs/models/chatstreamoptions.md index 09880475..0c3c5436 100644 --- a/docs/models/chatstreamoptions.md +++ b/docs/models/chatstreamoptions.md @@ -1,5 +1,7 @@ # ChatStreamOptions +Streaming configuration options + ## Example Usage ```typescript @@ -10,6 +12,6 @@ let value: ChatStreamOptions = {}; ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `includeUsage` | *boolean* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| ~~`includeUsage`~~ | *boolean* | :heavy_minus_sign: | : warning: ** DEPRECATED **: This will be removed in a future release, please migrate away from it as soon as possible.

Deprecated: This field has no effect. Full usage details are always included. | true | \ No newline at end of file diff --git a/docs/models/code.md b/docs/models/code.md index bf5f59b6..7a9e7e3c 100644 --- a/docs/models/code.md +++ b/docs/models/code.md @@ -1,17 +1,17 @@ # Code - -## Supported Types - -### `string` +## Example Usage ```typescript -const value: string = ""; +import { Code } from "@openrouter/sdk/models"; + +let value: Code = "server_error"; ``` -### `number` +## Values -```typescript -const value: number = 1284.03; -``` +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. +```typescript +"server_error" | "rate_limit_exceeded" | "invalid_prompt" | "vector_store_timeout" | "invalid_image" | "invalid_image_format" | "invalid_base64_image" | "invalid_image_url" | "image_too_large" | "image_too_small" | "image_parse_error" | "image_content_policy_violation" | "invalid_image_mode" | "image_file_too_large" | "unsupported_image_media_type" | "empty_image_file" | "failed_to_download_image" | "image_file_not_found" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/codeenum.md b/docs/models/codeenum.md deleted file mode 100644 index ad2c971b..00000000 --- a/docs/models/codeenum.md +++ /dev/null @@ -1,17 +0,0 @@ -# CodeEnum - -## Example Usage - -```typescript -import { CodeEnum } from "@openrouter/sdk/models"; - -let value: CodeEnum = "unsupported_image_media_type"; -``` - -## Values - -This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
- -```typescript -"server_error" | "rate_limit_exceeded" | "invalid_prompt" | "vector_store_timeout" | "invalid_image" | "invalid_image_format" | "invalid_base64_image" | "invalid_image_url" | "image_too_large" | "image_too_small" | "image_parse_error" | "image_content_policy_violation" | "invalid_image_mode" | "image_file_too_large" | "unsupported_image_media_type" | "empty_image_file" | "failed_to_download_image" | "image_file_not_found" | Unrecognized -``` \ No newline at end of file diff --git a/docs/models/completion.md b/docs/models/completion.md deleted file mode 100644 index 2abe861d..00000000 --- a/docs/models/completion.md +++ /dev/null @@ -1,23 +0,0 @@ -# Completion - - -## Supported Types - -### `number` - -```typescript -const value: number = 1284.03; -``` - -### `string` - -```typescript -const value: string = ""; -``` - -### `any` - -```typescript -const value: any = ""; -``` - diff --git a/docs/models/completiontokensdetails.md b/docs/models/completiontokensdetails.md index 3c89a927..f5a794e0 100644 --- a/docs/models/completiontokensdetails.md +++ b/docs/models/completiontokensdetails.md @@ -1,5 +1,7 @@ # CompletionTokensDetails +Detailed completion token usage + ## Example Usage ```typescript @@ -10,9 +12,9 @@ let value: CompletionTokensDetails = {}; ## Fields -| Field | Type | Required | Description | -| -------------------------- | -------------------------- | -------------------------- | -------------------------- | -| `reasoningTokens` | *number* | :heavy_minus_sign: | N/A | -| `audioTokens` | *number* | :heavy_minus_sign: | N/A | -| `acceptedPredictionTokens` | *number* | :heavy_minus_sign: | N/A | -| `rejectedPredictionTokens` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `reasoningTokens` | *number* | :heavy_minus_sign: | Tokens used for reasoning | 
+| `audioTokens` | *number* | :heavy_minus_sign: | Tokens used for audio output | +| `acceptedPredictionTokens` | *number* | :heavy_minus_sign: | Accepted prediction tokens | +| `rejectedPredictionTokens` | *number* | :heavy_minus_sign: | Rejected prediction tokens | \ No newline at end of file diff --git a/docs/models/modelscountresponsedata.md b/docs/models/data.md similarity index 82% rename from docs/models/modelscountresponsedata.md rename to docs/models/data.md index ac261a10..fa243aa9 100644 --- a/docs/models/modelscountresponsedata.md +++ b/docs/models/data.md @@ -1,13 +1,13 @@ -# ModelsCountResponseData +# Data Model count data ## Example Usage ```typescript -import { ModelsCountResponseData } from "@openrouter/sdk/models"; +import { Data } from "@openrouter/sdk/models"; -let value: ModelsCountResponseData = { +let value: Data = { count: 150, }; ``` diff --git a/docs/models/debug.md b/docs/models/debug.md deleted file mode 100644 index 5e67ef89..00000000 --- a/docs/models/debug.md +++ /dev/null @@ -1,15 +0,0 @@ -# Debug - -## Example Usage - -```typescript -import { Debug } from "@openrouter/sdk/models"; - -let value: Debug = {}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `echoUpstreamBody` | *boolean* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/debugoptions.md b/docs/models/debugoptions.md new file mode 100644 index 00000000..a32e2f49 --- /dev/null +++ b/docs/models/debugoptions.md @@ -0,0 +1,17 @@ +# DebugOptions + +Debug options for inspecting request transformations (streaming only) + +## Example Usage + +```typescript +import { DebugOptions } from "@openrouter/sdk/models"; + +let value: DebugOptions = {}; +``` + +## Fields + +| Field | Type | Required | Description | Example | +| 
------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | +| `echoUpstreamBody` | *boolean* | :heavy_minus_sign: | If true, includes the transformed upstream request body in a debug chunk at the start of the stream. Only works with streaming mode. | true | \ No newline at end of file diff --git a/docs/models/developermessage.md b/docs/models/developermessage.md index 977250d1..b835eb71 100644 --- a/docs/models/developermessage.md +++ b/docs/models/developermessage.md @@ -1,5 +1,7 @@ # DeveloperMessage +Developer message + ## Example Usage ```typescript @@ -7,14 +9,14 @@ import { DeveloperMessage } from "@openrouter/sdk/models"; let value: DeveloperMessage = { role: "developer", - content: [], + content: "What is the capital of France?", }; ``` ## Fields -| Field | Type | Required | Description | -| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | -| `role` | *"developer"* | :heavy_check_mark: | N/A | -| `content` | *models.DeveloperMessageContent* | :heavy_check_mark: | N/A | -| `name` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- 
| --------------------------------------- | +| `role` | *"developer"* | :heavy_check_mark: | N/A | | +| `content` | *models.DeveloperMessageContent* | :heavy_check_mark: | Developer message content | This is a message from the developer. | +| `name` | *string* | :heavy_minus_sign: | Optional name for the developer message | Developer | \ No newline at end of file diff --git a/docs/models/developermessagecontent.md b/docs/models/developermessagecontent.md index ed6f7e59..50dfe3f4 100644 --- a/docs/models/developermessagecontent.md +++ b/docs/models/developermessagecontent.md @@ -1,12 +1,14 @@ # DeveloperMessageContent +Developer message content + ## Supported Types ### `string` ```typescript -const value: string = ""; +const value: string = "This is a message from the developer."; ``` ### `models.ChatMessageContentItemText[]` diff --git a/docs/models/effort.md b/docs/models/effort.md index adcb3b7a..64797c3e 100644 --- a/docs/models/effort.md +++ b/docs/models/effort.md @@ -1,11 +1,13 @@ # Effort +Constrains effort on reasoning for reasoning models + ## Example Usage ```typescript import { Effort } from "@openrouter/sdk/models"; -let value: Effort = "high"; +let value: Effort = "medium"; ``` ## Values diff --git a/docs/models/engine.md b/docs/models/engine.md deleted file mode 100644 index 0c80b2fb..00000000 --- a/docs/models/engine.md +++ /dev/null @@ -1,17 +0,0 @@ -# Engine - -## Example Usage - -```typescript -import { Engine } from "@openrouter/sdk/models"; - -let value: Engine = "exa"; -``` - -## Values - -This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
- -```typescript -"native" | "exa" | Unrecognized -``` \ No newline at end of file diff --git a/docs/models/errors/chaterror.md b/docs/models/errors/chaterror.md deleted file mode 100644 index 192cf484..00000000 --- a/docs/models/errors/chaterror.md +++ /dev/null @@ -1,15 +0,0 @@ -# ChatError - -## Example Usage - -```typescript -import { ChatError } from "@openrouter/sdk/models/errors"; - -// No examples available for this model -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| `error` | [models.ChatErrorError](../../models/chaterrorerror.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/errort.md b/docs/models/errort.md new file mode 100644 index 00000000..60ef38cc --- /dev/null +++ b/docs/models/errort.md @@ -0,0 +1,21 @@ +# ErrorT + +Error information + +## Example Usage + +```typescript +import { ErrorT } from "@openrouter/sdk/models"; + +let value: ErrorT = { + message: "Rate limit exceeded", + code: 429, +}; +``` + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `message` | *string* | :heavy_check_mark: | Error message | Rate limit exceeded | +| `code` | *number* | :heavy_check_mark: | Error code | 429 | \ No newline at end of file diff --git a/docs/models/image.md b/docs/models/image.md deleted file mode 100644 index 54823863..00000000 --- a/docs/models/image.md +++ /dev/null @@ -1,19 +0,0 @@ -# Image - -## Example Usage - -```typescript -import { Image } from "@openrouter/sdk/models"; - -let value: Image = { - imageUrl: { - url: "https://inexperienced-derby.org/", - }, -}; -``` - -## Fields - -| Field | Type | Required | Description | -| 
------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | -| `imageUrl` | [models.AssistantMessageImageUrl](../models/assistantmessageimageurl.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jsonschemaconfig.md b/docs/models/jsonschemaconfig.md index aa34788e..04d51304 100644 --- a/docs/models/jsonschemaconfig.md +++ b/docs/models/jsonschemaconfig.md @@ -1,20 +1,22 @@ # JSONSchemaConfig +JSON Schema configuration object + ## Example Usage ```typescript import { JSONSchemaConfig } from "@openrouter/sdk/models"; let value: JSONSchemaConfig = { - name: "", + name: "math_response", }; ``` ## Fields -| Field | Type | Required | Description | -| --------------------- | --------------------- | --------------------- | --------------------- | -| `name` | *string* | :heavy_check_mark: | N/A | -| `description` | *string* | :heavy_minus_sign: | N/A | -| `schema` | Record | :heavy_minus_sign: | N/A | -| `strict` | *boolean* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `name` | *string* | :heavy_check_mark: | Schema name (a-z, A-Z, 0-9, underscores, dashes, max 64 chars) | math_response | +| `description` | *string* | :heavy_minus_sign: | Schema 
description for the model | A mathematical response | +| `schema` | Record | :heavy_minus_sign: | JSON Schema object | {
"type": "object",
"properties": {
"answer": {
"type": "number"
}
},
"required": [
"answer"
]
} | +| `strict` | *boolean* | :heavy_minus_sign: | Enable strict schema adherence | false | \ No newline at end of file diff --git a/docs/models/message.md b/docs/models/message.md index 32d64363..351b6c0e 100644 --- a/docs/models/message.md +++ b/docs/models/message.md @@ -1,5 +1,7 @@ # Message +Chat completion message with role-based discrimination + ## Supported Types @@ -8,7 +10,7 @@ ```typescript const value: models.SystemMessage = { role: "system", - content: [], + content: "What is the capital of France?", }; ``` @@ -17,7 +19,7 @@ const value: models.SystemMessage = { ```typescript const value: models.UserMessage = { role: "user", - content: "", + content: "What is the capital of France?", }; ``` @@ -26,7 +28,7 @@ const value: models.UserMessage = { ```typescript const value: models.DeveloperMessage = { role: "developer", - content: [], + content: "What is the capital of France?", }; ``` @@ -43,8 +45,8 @@ const value: models.AssistantMessage = { ```typescript const value: models.ToolResponseMessage = { role: "tool", - content: [], - toolCallId: "", + content: "What is the capital of France?", + toolCallId: "call_abc123", }; ``` diff --git a/docs/models/modelscountresponse.md b/docs/models/modelscountresponse.md index fb30c852..caf9b9a3 100644 --- a/docs/models/modelscountresponse.md +++ b/docs/models/modelscountresponse.md @@ -16,6 +16,6 @@ let value: ModelsCountResponse = { ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `data` | [models.ModelsCountResponseData](../models/modelscountresponsedata.md) | :heavy_check_mark: | Model count data | {
"count": 150
} | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `data` | [models.Data](../models/data.md) | :heavy_check_mark: | Model count data | {
"count": 150
} | \ No newline at end of file diff --git a/docs/models/namedtoolchoice.md b/docs/models/namedtoolchoice.md index a1fb1102..44c711f4 100644 --- a/docs/models/namedtoolchoice.md +++ b/docs/models/namedtoolchoice.md @@ -1,5 +1,7 @@ # NamedToolChoice +Named tool choice for specific function + ## Example Usage ```typescript @@ -8,7 +10,7 @@ import { NamedToolChoice } from "@openrouter/sdk/models"; let value: NamedToolChoice = { type: "function", function: { - name: "", + name: "get_weather", }, }; ``` @@ -17,5 +19,5 @@ let value: NamedToolChoice = { | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `type` | *"function"* | :heavy_check_mark: | N/A | +| `type` | [models.NamedToolChoiceType](../models/namedtoolchoicetype.md) | :heavy_check_mark: | N/A | | `function` | [models.NamedToolChoiceFunction](../models/namedtoolchoicefunction.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/namedtoolchoicefunction.md b/docs/models/namedtoolchoicefunction.md index bfa5de0d..46e429c0 100644 --- a/docs/models/namedtoolchoicefunction.md +++ b/docs/models/namedtoolchoicefunction.md @@ -6,12 +6,12 @@ import { NamedToolChoiceFunction } from "@openrouter/sdk/models"; let value: NamedToolChoiceFunction = { - name: "", + name: "get_weather", }; ``` ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `name` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------- | --------------------- | --------------------- | --------------------- | --------------------- | +| `name` | *string* | 
:heavy_check_mark: | Function name to call | get_weather | \ No newline at end of file diff --git a/docs/models/namedtoolchoicetype.md b/docs/models/namedtoolchoicetype.md new file mode 100644 index 00000000..933269bd --- /dev/null +++ b/docs/models/namedtoolchoicetype.md @@ -0,0 +1,15 @@ +# NamedToolChoiceType + +## Example Usage + +```typescript +import { NamedToolChoiceType } from "@openrouter/sdk/models"; + +let value: NamedToolChoiceType = "function"; +``` + +## Values + +```typescript +"function" +``` \ No newline at end of file diff --git a/docs/models/objectt.md b/docs/models/objectt.md deleted file mode 100644 index d614af0c..00000000 --- a/docs/models/objectt.md +++ /dev/null @@ -1,15 +0,0 @@ -# ObjectT - -## Example Usage - -```typescript -import { ObjectT } from "@openrouter/sdk/models"; - -let value: ObjectT = "response"; -``` - -## Values - -```typescript -"response" -``` \ No newline at end of file diff --git a/docs/models/openairesponsesreasoningconfig.md b/docs/models/openairesponsesreasoningconfig.md index d16880b5..1f1555f4 100644 --- a/docs/models/openairesponsesreasoningconfig.md +++ b/docs/models/openairesponsesreasoningconfig.md @@ -10,7 +10,7 @@ let value: OpenAIResponsesReasoningConfig = {}; ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `effort` | [models.OpenAIResponsesReasoningEffort](../models/openairesponsesreasoningeffort.md) | :heavy_minus_sign: | N/A | -| `summary` | [models.ReasoningSummaryVerbosity](../models/reasoningsummaryverbosity.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| 
------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `effort` | [models.OpenAIResponsesReasoningEffort](../models/openairesponsesreasoningeffort.md) | :heavy_minus_sign: | N/A | | +| `summary` | [models.ReasoningSummaryVerbosity](../models/reasoningsummaryverbosity.md) | :heavy_minus_sign: | N/A | auto | \ No newline at end of file diff --git a/docs/models/openresponsesnonstreamingresponse.md b/docs/models/openresponsesnonstreamingresponse.md index 3d8dd191..7a9ff6c5 100644 --- a/docs/models/openresponsesnonstreamingresponse.md +++ b/docs/models/openresponsesnonstreamingresponse.md @@ -46,7 +46,7 @@ let value: OpenResponsesNonStreamingResponse = { | Field | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `id` | *string* | :heavy_check_mark: | N/A | | -| `object` | [models.ObjectT](../models/objectt.md) | :heavy_check_mark: | N/A | | +| `object` | [models.OpenResponsesNonStreamingResponseObject](../models/openresponsesnonstreamingresponseobject.md) | :heavy_check_mark: | N/A | | | `createdAt` | *number* | :heavy_check_mark: | N/A | | | `model` | *string* | :heavy_check_mark: | N/A | | | `status` | [models.OpenAIResponsesResponseStatus](../models/openairesponsesresponsestatus.md) | :heavy_check_mark: | N/A | | diff --git a/docs/models/openresponsesnonstreamingresponseobject.md b/docs/models/openresponsesnonstreamingresponseobject.md new file mode 100644 index 00000000..c92ea5fc --- /dev/null +++ b/docs/models/openresponsesnonstreamingresponseobject.md @@ -0,0 +1,15 @@ +# OpenResponsesNonStreamingResponseObject + +## Example Usage + +```typescript +import { OpenResponsesNonStreamingResponseObject } from "@openrouter/sdk/models"; + +let value: OpenResponsesNonStreamingResponseObject = "response"; +``` + +## Values + +```typescript 
+"response" +``` \ No newline at end of file diff --git a/docs/models/openresponsesreasoningconfig.md b/docs/models/openresponsesreasoningconfig.md index 04d610fe..3af97c46 100644 --- a/docs/models/openresponsesreasoningconfig.md +++ b/docs/models/openresponsesreasoningconfig.md @@ -12,9 +12,9 @@ let value: OpenResponsesReasoningConfig = {}; ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `effort` | [models.OpenAIResponsesReasoningEffort](../models/openairesponsesreasoningeffort.md) | :heavy_minus_sign: | N/A | -| `summary` | [models.ReasoningSummaryVerbosity](../models/reasoningsummaryverbosity.md) | :heavy_minus_sign: | N/A | -| `maxTokens` | *number* | :heavy_minus_sign: | N/A | -| `enabled` | *boolean* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `effort` | [models.OpenAIResponsesReasoningEffort](../models/openairesponsesreasoningeffort.md) | :heavy_minus_sign: | N/A | | +| `summary` | [models.ReasoningSummaryVerbosity](../models/reasoningsummaryverbosity.md) | :heavy_minus_sign: | N/A | auto | +| `maxTokens` | *number* | :heavy_minus_sign: | N/A | | +| `enabled` | *boolean* | :heavy_minus_sign: | N/A | | \ No 
newline at end of file diff --git a/docs/models/openresponsesrequest.md b/docs/models/openresponsesrequest.md index 15c30012..3134fc56 100644 --- a/docs/models/openresponsesrequest.md +++ b/docs/models/openresponsesrequest.md @@ -44,7 +44,8 @@ let value: OpenResponsesRequest = {}; | `serviceTier` | [models.ServiceTier](../models/servicetier.md) | :heavy_minus_sign: | N/A | | | `truncation` | [models.Truncation](../models/truncation.md) | :heavy_minus_sign: | N/A | auto | | `stream` | *boolean* | :heavy_minus_sign: | N/A | | -| `provider` | [models.Provider](../models/provider.md) | :heavy_minus_sign: | When multiple model providers are available, optionally indicate your routing preference. | | -| `plugins` | *models.Plugin*[] | :heavy_minus_sign: | Plugins you want to enable for this request, including their settings. | | +| `provider` | [models.OpenResponsesRequestProvider](../models/openresponsesrequestprovider.md) | :heavy_minus_sign: | When multiple model providers are available, optionally indicate your routing preference. | | +| `plugins` | *models.OpenResponsesRequestPluginUnion*[] | :heavy_minus_sign: | Plugins you want to enable for this request, including their settings. | | | `user` | *string* | :heavy_minus_sign: | A unique identifier representing your end-user, which helps distinguish between different users of your app. This allows your app to identify specific users in case of abuse reports, preventing your entire app from being affected by the actions of individual users. Maximum of 128 characters. | | -| `sessionId` | *string* | :heavy_minus_sign: | A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. 
| | \ No newline at end of file +| `sessionId` | *string* | :heavy_minus_sign: | A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. | | +| `trace` | [models.OpenResponsesRequestTrace](../models/openresponsesrequesttrace.md) | :heavy_minus_sign: | Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. | | \ No newline at end of file diff --git a/docs/models/openresponsesrequestpluginautorouter.md b/docs/models/openresponsesrequestpluginautorouter.md new file mode 100644 index 00000000..9d4f2c14 --- /dev/null +++ b/docs/models/openresponsesrequestpluginautorouter.md @@ -0,0 +1,19 @@ +# OpenResponsesRequestPluginAutoRouter + +## Example Usage + +```typescript +import { OpenResponsesRequestPluginAutoRouter } from "@openrouter/sdk/models"; + +let value: OpenResponsesRequestPluginAutoRouter = { + id: "auto-router", +}; +``` + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *"auto-router"* | :heavy_check_mark: | N/A | | +| `enabled` | *boolean* | :heavy_minus_sign: | Set to false to disable the auto-router plugin for this request. Defaults to true. | | +| `allowedModels` | *string*[] | :heavy_minus_sign: | List of model patterns to filter which models the auto-router can route between. Supports wildcards (e.g., "anthropic/*" matches all Anthropic models). When not specified, uses the default supported models list. | [
"anthropic/*",
"openai/gpt-4o",
"google/*"
] | \ No newline at end of file diff --git a/docs/models/openresponsesrequestpluginfileparser.md b/docs/models/openresponsesrequestpluginfileparser.md new file mode 100644 index 00000000..759a754b --- /dev/null +++ b/docs/models/openresponsesrequestpluginfileparser.md @@ -0,0 +1,19 @@ +# OpenResponsesRequestPluginFileParser + +## Example Usage + +```typescript +import { OpenResponsesRequestPluginFileParser } from "@openrouter/sdk/models"; + +let value: OpenResponsesRequestPluginFileParser = { + id: "file-parser", +}; +``` + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `id` | *"file-parser"* | :heavy_check_mark: | N/A | +| `enabled` | *boolean* | :heavy_minus_sign: | Set to false to disable the file-parser plugin for this request. Defaults to true. | +| `pdf` | [models.PDFParserOptions](../models/pdfparseroptions.md) | :heavy_minus_sign: | Options for PDF parsing. 
| \ No newline at end of file diff --git a/docs/models/pluginmoderation.md b/docs/models/openresponsesrequestpluginmoderation.md similarity index 66% rename from docs/models/pluginmoderation.md rename to docs/models/openresponsesrequestpluginmoderation.md index 69a9dba0..f0117c5f 100644 --- a/docs/models/pluginmoderation.md +++ b/docs/models/openresponsesrequestpluginmoderation.md @@ -1,11 +1,11 @@ -# PluginModeration +# OpenResponsesRequestPluginModeration ## Example Usage ```typescript -import { PluginModeration } from "@openrouter/sdk/models"; +import { OpenResponsesRequestPluginModeration } from "@openrouter/sdk/models"; -let value: PluginModeration = { +let value: OpenResponsesRequestPluginModeration = { id: "moderation", }; ``` diff --git a/docs/models/openresponsesrequestpluginresponsehealing.md b/docs/models/openresponsesrequestpluginresponsehealing.md new file mode 100644 index 00000000..91584751 --- /dev/null +++ b/docs/models/openresponsesrequestpluginresponsehealing.md @@ -0,0 +1,18 @@ +# OpenResponsesRequestPluginResponseHealing + +## Example Usage + +```typescript +import { OpenResponsesRequestPluginResponseHealing } from "@openrouter/sdk/models"; + +let value: OpenResponsesRequestPluginResponseHealing = { + id: "response-healing", +}; +``` + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `id` | *"response-healing"* | :heavy_check_mark: | N/A | +| `enabled` | *boolean* | :heavy_minus_sign: | Set to false to disable the response-healing plugin for this request. Defaults to true. 
| \ No newline at end of file diff --git a/docs/models/openresponsesrequestpluginunion.md b/docs/models/openresponsesrequestpluginunion.md new file mode 100644 index 00000000..ebf50062 --- /dev/null +++ b/docs/models/openresponsesrequestpluginunion.md @@ -0,0 +1,45 @@ +# OpenResponsesRequestPluginUnion + + +## Supported Types + +### `models.OpenResponsesRequestPluginAutoRouter` + +```typescript +const value: models.OpenResponsesRequestPluginAutoRouter = { + id: "auto-router", +}; +``` + +### `models.OpenResponsesRequestPluginModeration` + +```typescript +const value: models.OpenResponsesRequestPluginModeration = { + id: "moderation", +}; +``` + +### `models.OpenResponsesRequestPluginWeb` + +```typescript +const value: models.OpenResponsesRequestPluginWeb = { + id: "web", +}; +``` + +### `models.OpenResponsesRequestPluginFileParser` + +```typescript +const value: models.OpenResponsesRequestPluginFileParser = { + id: "file-parser", +}; +``` + +### `models.OpenResponsesRequestPluginResponseHealing` + +```typescript +const value: models.OpenResponsesRequestPluginResponseHealing = { + id: "response-healing", +}; +``` + diff --git a/docs/models/openresponsesrequestpluginweb.md b/docs/models/openresponsesrequestpluginweb.md new file mode 100644 index 00000000..333dc3c7 --- /dev/null +++ b/docs/models/openresponsesrequestpluginweb.md @@ -0,0 +1,21 @@ +# OpenResponsesRequestPluginWeb + +## Example Usage + +```typescript +import { OpenResponsesRequestPluginWeb } from "@openrouter/sdk/models"; + +let value: OpenResponsesRequestPluginWeb = { + id: "web", +}; +``` + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `id` | *"web"* | 
:heavy_check_mark: | N/A | +| `enabled` | *boolean* | :heavy_minus_sign: | Set to false to disable the web-search plugin for this request. Defaults to true. | +| `maxResults` | *number* | :heavy_minus_sign: | N/A | +| `searchPrompt` | *string* | :heavy_minus_sign: | N/A | +| `engine` | [models.WebSearchEngine](../models/websearchengine.md) | :heavy_minus_sign: | The search engine to use for web search. | \ No newline at end of file diff --git a/docs/models/provider.md b/docs/models/openresponsesrequestprovider.md similarity index 99% rename from docs/models/provider.md rename to docs/models/openresponsesrequestprovider.md index 999451dd..b66f084e 100644 --- a/docs/models/provider.md +++ b/docs/models/openresponsesrequestprovider.md @@ -1,13 +1,13 @@ -# Provider +# OpenResponsesRequestProvider When multiple model providers are available, optionally indicate your routing preference. ## Example Usage ```typescript -import { Provider } from "@openrouter/sdk/models"; +import { OpenResponsesRequestProvider } from "@openrouter/sdk/models"; -let value: Provider = {}; +let value: OpenResponsesRequestProvider = {}; ``` ## Fields diff --git a/docs/models/openresponsesrequesttrace.md b/docs/models/openresponsesrequesttrace.md new file mode 100644 index 00000000..35fe5c2c --- /dev/null +++ b/docs/models/openresponsesrequesttrace.md @@ -0,0 +1,22 @@ +# OpenResponsesRequestTrace + +Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. 
+ +## Example Usage + +```typescript +import { OpenResponsesRequestTrace } from "@openrouter/sdk/models"; + +let value: OpenResponsesRequestTrace = {}; +``` + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `traceId` | *string* | :heavy_minus_sign: | N/A | +| `traceName` | *string* | :heavy_minus_sign: | N/A | +| `spanName` | *string* | :heavy_minus_sign: | N/A | +| `generationName` | *string* | :heavy_minus_sign: | N/A | +| `parentSpanId` | *string* | :heavy_minus_sign: | N/A | +| `additionalProperties` | Record | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createauthkeyscoderequestbody.md b/docs/models/operations/createauthkeyscoderequestbody.md index dbb3cde4..9e51f134 100644 --- a/docs/models/operations/createauthkeyscoderequestbody.md +++ b/docs/models/operations/createauthkeyscoderequestbody.md @@ -18,4 +18,6 @@ let value: CreateAuthKeysCodeRequestBody = { | `codeChallenge` | *string* | :heavy_minus_sign: | PKCE code challenge for enhanced security | E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM | | `codeChallengeMethod` | [operations.CreateAuthKeysCodeCodeChallengeMethod](../../models/operations/createauthkeyscodecodechallengemethod.md) | :heavy_minus_sign: | The method used to generate the code challenge | S256 | | `limit` | *number* | :heavy_minus_sign: | Credit limit for the API key to be created | 100 | -| `expiresAt` | [Date](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date) | :heavy_minus_sign: | Optional expiration time for the API key to be created | | \ No newline at end of file +| `expiresAt` | [Date](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date) | :heavy_minus_sign: | Optional expiration time for the API key to be created | | +| `keyLabel` | *string* | :heavy_minus_sign: | Optional custom label for the API key. 
Defaults to the app name if not provided. | My Custom Key | +| `usageLimitType` | [operations.UsageLimitType](../../models/operations/usagelimittype.md) | :heavy_minus_sign: | Optional credit limit reset interval. When set, the credit limit resets on this interval. | monthly | \ No newline at end of file diff --git a/docs/models/operations/providername.md b/docs/models/operations/providername.md index 0bf0432a..b2bfaaf5 100644 --- a/docs/models/operations/providername.md +++ b/docs/models/operations/providername.md @@ -13,5 +13,5 @@ let value: ProviderName = "Lambda"; This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. ```typescript -"AnyScale" | "Atoma" | "Cent-ML" | "CrofAI" | "Enfer" | "GoPomelo" | "HuggingFace" | "Hyperbolic 2" | "InoCloud" | "Kluster" | "Lambda" | "Lepton" | "Lynn 2" | "Lynn" | "Mancer" | "Meta" | "Modal" | "Nineteen" | "OctoAI" | "Recursal" | "Reflection" | "Replicate" | "SambaNova 2" | "SF Compute" | "Targon" | "Together 2" | "Ubicloud" | "01.AI" | "AI21" | "AionLabs" | "Alibaba" | "Ambient" | "Amazon Bedrock" | "Amazon Nova" | "Anthropic" | "Arcee AI" | "AtlasCloud" | "Avian" | "Azure" | "BaseTen" | "BytePlus" | "Black Forest Labs" | "Cerebras" | "Chutes" | "Cirrascale" | "Clarifai" | "Cloudflare" | "Cohere" | "Crusoe" | "DeepInfra" | "DeepSeek" | "Featherless" | "Fireworks" | "Friendli" | "GMICloud" | "Google" | "Google AI Studio" | "Groq" | "Hyperbolic" | "Inception" | "Inceptron" | "InferenceNet" | "Infermatic" | "Inflection" | "Liquid" | "Mara" | "Mancer 2" | "Minimax" | "ModelRun" | "Mistral" | "Modular" | "Moonshot AI" | "Morph" | "NCompass" | "Nebius" | "NextBit" | "Novita" | "Nvidia" | "OpenAI" | "OpenInference" | "Parasail" | "Perplexity" | "Phala" | "Relace" | "SambaNova" | "Seed" | "SiliconFlow" | "Sourceful" | "StepFun" | "Stealth" | "StreamLake" | "Switchpoint" | "Together" | "Upstage" | "Venice" | "WandB" | "Xiaomi" | "xAI" | "Z.AI" | "FakeProvider" | Unrecognized +"AnyScale" | 
"Atoma" | "Cent-ML" | "CrofAI" | "Enfer" | "GoPomelo" | "HuggingFace" | "Hyperbolic 2" | "InoCloud" | "Kluster" | "Lambda" | "Lepton" | "Lynn 2" | "Lynn" | "Mancer" | "Meta" | "Modal" | "Nineteen" | "OctoAI" | "Recursal" | "Reflection" | "Replicate" | "SambaNova 2" | "SF Compute" | "Targon" | "Together 2" | "Ubicloud" | "01.AI" | "AI21" | "AionLabs" | "Alibaba" | "Ambient" | "Amazon Bedrock" | "Amazon Nova" | "Anthropic" | "Arcee AI" | "AtlasCloud" | "Avian" | "Azure" | "BaseTen" | "BytePlus" | "Black Forest Labs" | "Cerebras" | "Chutes" | "Cirrascale" | "Clarifai" | "Cloudflare" | "Cohere" | "Crusoe" | "DeepInfra" | "DeepSeek" | "Featherless" | "Fireworks" | "Friendli" | "GMICloud" | "Google" | "Google AI Studio" | "Groq" | "Hyperbolic" | "Inception" | "Inceptron" | "InferenceNet" | "Infermatic" | "Io Net" | "Inflection" | "Liquid" | "Mara" | "Mancer 2" | "Minimax" | "ModelRun" | "Mistral" | "Modular" | "Moonshot AI" | "Morph" | "NCompass" | "Nebius" | "NextBit" | "Novita" | "Nvidia" | "OpenAI" | "OpenInference" | "Parasail" | "Perplexity" | "Phala" | "Relace" | "SambaNova" | "Seed" | "SiliconFlow" | "Sourceful" | "StepFun" | "Stealth" | "StreamLake" | "Switchpoint" | "Together" | "Upstage" | "Venice" | "WandB" | "Xiaomi" | "xAI" | "Z.AI" | "FakeProvider" | Unrecognized ``` \ No newline at end of file diff --git a/docs/models/operations/sendchatcompletionrequestrequest.md b/docs/models/operations/sendchatcompletionrequestrequest.md index 095d1e16..2f37d1c2 100644 --- a/docs/models/operations/sendchatcompletionrequestrequest.md +++ b/docs/models/operations/sendchatcompletionrequestrequest.md @@ -8,19 +8,24 @@ import { SendChatCompletionRequestRequest } from "@openrouter/sdk/models/operati let value: SendChatCompletionRequestRequest = { chatGenerationParams: { messages: [ + { + role: "system", + content: "You are a helpful assistant.", + }, { role: "user", - content: "", + content: "What is the capital of France?", }, ], + temperature: 0.7, }, }; ``` ## Fields -| 
Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | -| `httpReferer` | *string* | :heavy_minus_sign: | The app identifier should be your app's URL and is used as the primary identifier for rankings.
This is used to track API usage per application.
| -| `xTitle` | *string* | :heavy_minus_sign: | The app display name allows you to customize how your app appears in OpenRouter's dashboard.
| -| `chatGenerationParams` | [models.ChatGenerationParams](../../models/chatgenerationparams.md) | :heavy_check_mark: | Chat completion request parameters | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `httpReferer` | *string* | :heavy_minus_sign: | The app identifier should be your app's URL and is used as the primary identifier for rankings.
This is used to track API usage per application.
| | +| `xTitle` | *string* | :heavy_minus_sign: | The app display name allows you to customize how your app appears in OpenRouter's dashboard.
| | +| `chatGenerationParams` | [models.ChatGenerationParams](../../models/chatgenerationparams.md) | :heavy_check_mark: | N/A | {
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "What is the capital of France?"
}
],
"model": "openai/gpt-4",
"temperature": 0.7,
"max_tokens": 150
} | \ No newline at end of file diff --git a/docs/models/operations/sendchatcompletionrequestresponse.md b/docs/models/operations/sendchatcompletionrequestresponse.md index 0f0bc60a..4a5ce2ef 100644 --- a/docs/models/operations/sendchatcompletionrequestresponse.md +++ b/docs/models/operations/sendchatcompletionrequestresponse.md @@ -7,13 +7,21 @@ ```typescript const value: models.ChatResponse = { - id: "", - choices: [], - created: 9184.01, - model: "Focus", + id: "chatcmpl-123", + choices: [ + { + finishReason: "stop", + index: 0, + message: { + role: "assistant", + }, + }, + ], + created: 1677652288, + model: "openai/gpt-4", object: "chat.completion", }; ``` -### `EventStream` +### `EventStream` diff --git a/docs/models/operations/sendchatcompletionrequestresponsebody.md b/docs/models/operations/sendchatcompletionrequestresponsebody.md new file mode 100644 index 00000000..2143a66a --- /dev/null +++ b/docs/models/operations/sendchatcompletionrequestresponsebody.md @@ -0,0 +1,31 @@ +# SendChatCompletionRequestResponseBody + +Successful chat completion response + +## Example Usage + +```typescript +import { SendChatCompletionRequestResponseBody } from "@openrouter/sdk/models/operations"; + +let value: SendChatCompletionRequestResponseBody = { + data: { + id: "chatcmpl-123", + choices: [ + { + delta: {}, + finishReason: null, + index: 0, + }, + ], + created: 1677652288, + model: "openai/gpt-4", + object: "chat.completion.chunk", + }, +}; +``` + +## Fields + +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `data` | [models.ChatStreamingResponseChunk](../../models/chatstreamingresponsechunk.md) | :heavy_check_mark: | Streaming chat completion chunk | {
"id": "chatcmpl-123",
"object": "chat.completion.chunk",
"created": 1677652288,
"model": "openai/gpt-4",
"choices": [
{
"index": 0,
"delta": {
"role": "assistant",
"content": "Hello"
},
"finish_reason": null
}
]
} | \ No newline at end of file diff --git a/docs/models/operations/usagelimittype.md b/docs/models/operations/usagelimittype.md new file mode 100644 index 00000000..d408cad7 --- /dev/null +++ b/docs/models/operations/usagelimittype.md @@ -0,0 +1,19 @@ +# UsageLimitType + +Optional credit limit reset interval. When set, the credit limit resets on this interval. + +## Example Usage + +```typescript +import { UsageLimitType } from "@openrouter/sdk/models/operations"; + +let value: UsageLimitType = "monthly"; +``` + +## Values + +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. + +```typescript +"daily" | "weekly" | "monthly" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/partition.md b/docs/models/partition.md index d95933ac..f2b9277d 100644 --- a/docs/models/partition.md +++ b/docs/models/partition.md @@ -1,11 +1,13 @@ # Partition +Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. 
+ ## Example Usage ```typescript import { Partition } from "@openrouter/sdk/models"; -let value: Partition = "none"; +let value: Partition = "model"; ``` ## Values diff --git a/docs/models/pdf.md b/docs/models/pdf.md deleted file mode 100644 index 67850913..00000000 --- a/docs/models/pdf.md +++ /dev/null @@ -1,15 +0,0 @@ -# Pdf - -## Example Usage - -```typescript -import { Pdf } from "@openrouter/sdk/models"; - -let value: Pdf = {}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `engine` | [models.PdfEngine](../models/pdfengine.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/pdfengine.md b/docs/models/pdfengine.md deleted file mode 100644 index 5781ce26..00000000 --- a/docs/models/pdfengine.md +++ /dev/null @@ -1,17 +0,0 @@ -# PdfEngine - -## Example Usage - -```typescript -import { PdfEngine } from "@openrouter/sdk/models"; - -let value: PdfEngine = "pdf-text"; -``` - -## Values - -This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
- -```typescript -"mistral-ocr" | "pdf-text" | "native" | Unrecognized -``` \ No newline at end of file diff --git a/docs/models/plugin.md b/docs/models/plugin.md deleted file mode 100644 index 632bac23..00000000 --- a/docs/models/plugin.md +++ /dev/null @@ -1,45 +0,0 @@ -# Plugin - - -## Supported Types - -### `models.PluginAutoRouter` - -```typescript -const value: models.PluginAutoRouter = { - id: "auto-router", -}; -``` - -### `models.PluginModeration` - -```typescript -const value: models.PluginModeration = { - id: "moderation", -}; -``` - -### `models.PluginWeb` - -```typescript -const value: models.PluginWeb = { - id: "web", -}; -``` - -### `models.PluginFileParser` - -```typescript -const value: models.PluginFileParser = { - id: "file-parser", -}; -``` - -### `models.PluginResponseHealing` - -```typescript -const value: models.PluginResponseHealing = { - id: "response-healing", -}; -``` - diff --git a/docs/models/prompt.md b/docs/models/prompt.md deleted file mode 100644 index 9b0ca48a..00000000 --- a/docs/models/prompt.md +++ /dev/null @@ -1,23 +0,0 @@ -# Prompt - - -## Supported Types - -### `number` - -```typescript -const value: number = 1284.03; -``` - -### `string` - -```typescript -const value: string = ""; -``` - -### `any` - -```typescript -const value: any = ""; -``` - diff --git a/docs/models/prompttokensdetails.md b/docs/models/prompttokensdetails.md index 6b96206e..ccc55e39 100644 --- a/docs/models/prompttokensdetails.md +++ b/docs/models/prompttokensdetails.md @@ -1,5 +1,7 @@ # PromptTokensDetails +Detailed prompt token usage + ## Example Usage ```typescript @@ -10,9 +12,9 @@ let value: PromptTokensDetails = {}; ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `cachedTokens` | *number* | :heavy_minus_sign: | N/A | -| `cacheWriteTokens` | *number* | :heavy_minus_sign: | N/A | -| `audioTokens` | *number* | :heavy_minus_sign: | N/A | -| `videoTokens` | 
*number* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `cachedTokens` | *number* | :heavy_minus_sign: | Cached prompt tokens | +| `cacheWriteTokens` | *number* | :heavy_minus_sign: | Tokens written to cache. Only returned for models with explicit caching and cache write pricing. | +| `audioTokens` | *number* | :heavy_minus_sign: | Audio input tokens | +| `videoTokens` | *number* | :heavy_minus_sign: | Video input tokens | \ No newline at end of file diff --git a/docs/models/providername.md b/docs/models/providername.md index 0483a53e..0e34e26b 100644 --- a/docs/models/providername.md +++ b/docs/models/providername.md @@ -13,5 +13,5 @@ let value: ProviderName = "OpenAI"; This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
```typescript -"AI21" | "AionLabs" | "Alibaba" | "Ambient" | "Amazon Bedrock" | "Amazon Nova" | "Anthropic" | "Arcee AI" | "AtlasCloud" | "Avian" | "Azure" | "BaseTen" | "BytePlus" | "Black Forest Labs" | "Cerebras" | "Chutes" | "Cirrascale" | "Clarifai" | "Cloudflare" | "Cohere" | "Crusoe" | "DeepInfra" | "DeepSeek" | "Featherless" | "Fireworks" | "Friendli" | "GMICloud" | "Google" | "Google AI Studio" | "Groq" | "Hyperbolic" | "Inception" | "Inceptron" | "InferenceNet" | "Infermatic" | "Inflection" | "Liquid" | "Mara" | "Mancer 2" | "Minimax" | "ModelRun" | "Mistral" | "Modular" | "Moonshot AI" | "Morph" | "NCompass" | "Nebius" | "NextBit" | "Novita" | "Nvidia" | "OpenAI" | "OpenInference" | "Parasail" | "Perplexity" | "Phala" | "Relace" | "SambaNova" | "Seed" | "SiliconFlow" | "Sourceful" | "StepFun" | "Stealth" | "StreamLake" | "Switchpoint" | "Together" | "Upstage" | "Venice" | "WandB" | "Xiaomi" | "xAI" | "Z.AI" | "FakeProvider" | Unrecognized +"AI21" | "AionLabs" | "Alibaba" | "Ambient" | "Amazon Bedrock" | "Amazon Nova" | "Anthropic" | "Arcee AI" | "AtlasCloud" | "Avian" | "Azure" | "BaseTen" | "BytePlus" | "Black Forest Labs" | "Cerebras" | "Chutes" | "Cirrascale" | "Clarifai" | "Cloudflare" | "Cohere" | "Crusoe" | "DeepInfra" | "DeepSeek" | "Featherless" | "Fireworks" | "Friendli" | "GMICloud" | "Google" | "Google AI Studio" | "Groq" | "Hyperbolic" | "Inception" | "Inceptron" | "InferenceNet" | "Infermatic" | "Io Net" | "Inflection" | "Liquid" | "Mara" | "Mancer 2" | "Minimax" | "ModelRun" | "Mistral" | "Modular" | "Moonshot AI" | "Morph" | "NCompass" | "Nebius" | "NextBit" | "Novita" | "Nvidia" | "OpenAI" | "OpenInference" | "Parasail" | "Perplexity" | "Phala" | "Relace" | "SambaNova" | "Seed" | "SiliconFlow" | "Sourceful" | "StepFun" | "Stealth" | "StreamLake" | "Switchpoint" | "Together" | "Upstage" | "Venice" | "WandB" | "Xiaomi" | "xAI" | "Z.AI" | "FakeProvider" | Unrecognized ``` \ No newline at end of file diff --git 
a/docs/models/providerpreferences.md b/docs/models/providerpreferences.md index 029880ce..306c9db4 100644 --- a/docs/models/providerpreferences.md +++ b/docs/models/providerpreferences.md @@ -23,7 +23,7 @@ let value: ProviderPreferences = {}; | `only` | *models.ProviderPreferencesOnly*[] | :heavy_minus_sign: | List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request. | | | `ignore` | *models.ProviderPreferencesIgnore*[] | :heavy_minus_sign: | List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request. | | | `quantizations` | [models.Quantization](../models/quantization.md)[] | :heavy_minus_sign: | A list of quantization levels to filter the provider by. | | -| `sort` | *models.ProviderPreferencesSortUnion* | :heavy_minus_sign: | N/A | | +| `sort` | *models.ProviderPreferencesSortUnion* | :heavy_minus_sign: | N/A | price | | `maxPrice` | [models.ProviderPreferencesMaxPrice](../models/providerpreferencesmaxprice.md) | :heavy_minus_sign: | The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. | | | `preferredMinThroughput` | *models.PreferredMinThroughput* | :heavy_minus_sign: | Preferred minimum throughput (in tokens per second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints below the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold. | 100 | | `preferredMaxLatency` | *models.PreferredMaxLatency* | :heavy_minus_sign: | Preferred maximum latency (in seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints above the threshold(s) may still be used, but are deprioritized in routing. 
When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold. | 5 | \ No newline at end of file diff --git a/docs/models/providerpreferencesby.md b/docs/models/providerpreferencesby.md new file mode 100644 index 00000000..3fe7a639 --- /dev/null +++ b/docs/models/providerpreferencesby.md @@ -0,0 +1,19 @@ +# ProviderPreferencesBy + +The provider sorting strategy (price, throughput, latency) + +## Example Usage + +```typescript +import { ProviderPreferencesBy } from "@openrouter/sdk/models"; + +let value: ProviderPreferencesBy = "price"; +``` + +## Values + +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. + +```typescript +"price" | "throughput" | "latency" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/providerpreferencespartition.md b/docs/models/providerpreferencespartition.md index b47e3e07..20b636f3 100644 --- a/docs/models/providerpreferencespartition.md +++ b/docs/models/providerpreferencespartition.md @@ -1,5 +1,7 @@ # ProviderPreferencesPartition +Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. 
+ ## Example Usage ```typescript diff --git a/docs/models/providerpreferencesprovidersort.md b/docs/models/providerpreferencesprovidersort.md index cc1cd8a3..9b947c3e 100644 --- a/docs/models/providerpreferencesprovidersort.md +++ b/docs/models/providerpreferencesprovidersort.md @@ -1,11 +1,13 @@ # ProviderPreferencesProviderSort +The provider sorting strategy (price, throughput, latency) + ## Example Usage ```typescript import { ProviderPreferencesProviderSort } from "@openrouter/sdk/models"; -let value: ProviderPreferencesProviderSort = "latency"; +let value: ProviderPreferencesProviderSort = "price"; ``` ## Values diff --git a/docs/models/providerpreferencesprovidersortconfig.md b/docs/models/providerpreferencesprovidersortconfig.md index 8387bc41..5fe0b816 100644 --- a/docs/models/providerpreferencesprovidersortconfig.md +++ b/docs/models/providerpreferencesprovidersortconfig.md @@ -10,7 +10,7 @@ let value: ProviderPreferencesProviderSortConfig = {}; ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `by` | [models.ProviderSort](../models/providersort.md) | :heavy_minus_sign: | N/A | price | -| `partition` | [models.ProviderPreferencesPartition](../models/providerpreferencespartition.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `by` | [models.ProviderPreferencesBy](../models/providerpreferencesby.md) | :heavy_minus_sign: | The provider sorting strategy (price, throughput, latency) | price | +| `partition` | [models.ProviderPreferencesPartition](../models/providerpreferencespartition.md) | :heavy_minus_sign: | Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. 
| model | \ No newline at end of file diff --git a/docs/models/providerpreferencesprovidersortconfigenum.md b/docs/models/providerpreferencesprovidersortconfigenum.md new file mode 100644 index 00000000..1fefe429 --- /dev/null +++ b/docs/models/providerpreferencesprovidersortconfigenum.md @@ -0,0 +1,15 @@ +# ProviderPreferencesProviderSortConfigEnum + +## Example Usage + +```typescript +import { ProviderPreferencesProviderSortConfigEnum } from "@openrouter/sdk/models"; + +let value: ProviderPreferencesProviderSortConfigEnum = "price"; +``` + +## Values + +```typescript +"price" | "throughput" | "latency" +``` \ No newline at end of file diff --git a/docs/models/providersortconfigunion.md b/docs/models/providerpreferencesprovidersortconfigunion.md similarity index 50% rename from docs/models/providersortconfigunion.md rename to docs/models/providerpreferencesprovidersortconfigunion.md index bcd482db..1acd3e02 100644 --- a/docs/models/providersortconfigunion.md +++ b/docs/models/providerpreferencesprovidersortconfigunion.md @@ -1,4 +1,4 @@ -# ProviderSortConfigUnion +# ProviderPreferencesProviderSortConfigUnion ## Supported Types @@ -9,9 +9,9 @@ const value: models.ProviderPreferencesProviderSortConfig = {}; ``` -### `models.ProviderSortConfigEnum` +### `models.ProviderPreferencesProviderSortConfigEnum` ```typescript -const value: models.ProviderSortConfigEnum = "throughput"; +const value: models.ProviderPreferencesProviderSortConfigEnum = "latency"; ``` diff --git a/docs/models/providerpreferencessortenum.md b/docs/models/providerpreferencessortenum.md new file mode 100644 index 00000000..3feec340 --- /dev/null +++ b/docs/models/providerpreferencessortenum.md @@ -0,0 +1,17 @@ +# ProviderPreferencesSortEnum + +## Example Usage + +```typescript +import { ProviderPreferencesSortEnum } from "@openrouter/sdk/models"; + +let value: ProviderPreferencesSortEnum = "price"; +``` + +## Values + +This is an open enum. 
Unrecognized values will be captured as the `Unrecognized` branded type. + +```typescript +"price" | "throughput" | "latency" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/providerpreferencessortunion.md b/docs/models/providerpreferencessortunion.md index e75e3fd7..0532bfa0 100644 --- a/docs/models/providerpreferencessortunion.md +++ b/docs/models/providerpreferencessortunion.md @@ -8,18 +8,18 @@ The sorting strategy to use for this request, if "order" is not specified. When ### `models.ProviderPreferencesProviderSort` ```typescript -const value: models.ProviderPreferencesProviderSort = "throughput"; +const value: models.ProviderPreferencesProviderSort = "price"; ``` -### `models.ProviderSortConfigUnion` +### `models.ProviderPreferencesProviderSortConfigUnion` ```typescript -const value: models.ProviderSortConfigUnion = "latency"; +const value: models.ProviderPreferencesProviderSortConfigUnion = "throughput"; ``` -### `models.SortEnum` +### `models.ProviderPreferencesSortEnum` ```typescript -const value: models.SortEnum = "throughput"; +const value: models.ProviderPreferencesSortEnum = "price"; ``` diff --git a/docs/models/providersort.md b/docs/models/providersort.md index c4f9aa4a..33c90282 100644 --- a/docs/models/providersort.md +++ b/docs/models/providersort.md @@ -1,5 +1,7 @@ # ProviderSort +The provider sorting strategy (price, throughput, latency) + ## Example Usage ```typescript diff --git a/docs/models/providersortconfig.md b/docs/models/providersortconfig.md index 217c157d..4b676349 100644 --- a/docs/models/providersortconfig.md +++ b/docs/models/providersortconfig.md @@ -1,5 +1,7 @@ # ProviderSortConfig +The provider sorting strategy (price, throughput, latency) + ## Example Usage ```typescript @@ -10,7 +12,7 @@ let value: ProviderSortConfig = {}; ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------ | ------------------------------------------------ | 
------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | -| `by` | [models.ProviderSort](../models/providersort.md) | :heavy_minus_sign: | N/A | price | -| `partition` | [models.Partition](../models/partition.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `by` | [models.By](../models/by.md) | :heavy_minus_sign: | The provider sorting strategy (price, throughput, latency) | price | +| `partition` | [models.Partition](../models/partition.md) | :heavy_minus_sign: | Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. 
| model | \ No newline at end of file diff --git a/docs/models/providersortconfigenum.md b/docs/models/providersortconfigenum.md deleted file mode 100644 index 61ad06a2..00000000 --- a/docs/models/providersortconfigenum.md +++ /dev/null @@ -1,15 +0,0 @@ -# ProviderSortConfigEnum - -## Example Usage - -```typescript -import { ProviderSortConfigEnum } from "@openrouter/sdk/models"; - -let value: ProviderSortConfigEnum = "throughput"; -``` - -## Values - -```typescript -"price" | "throughput" | "latency" -``` \ No newline at end of file diff --git a/docs/models/providersortunion.md b/docs/models/providersortunion.md deleted file mode 100644 index 1a6cae19..00000000 --- a/docs/models/providersortunion.md +++ /dev/null @@ -1,17 +0,0 @@ -# ProviderSortUnion - - -## Supported Types - -### `models.ProviderSort` - -```typescript -const value: models.ProviderSort = "price"; -``` - -### `models.ProviderSortConfig` - -```typescript -const value: models.ProviderSortConfig = {}; -``` - diff --git a/docs/models/reasoning.md b/docs/models/reasoning.md index f62c63cc..f595b98f 100644 --- a/docs/models/reasoning.md +++ b/docs/models/reasoning.md @@ -1,5 +1,7 @@ # Reasoning +Configuration options for reasoning models + ## Example Usage ```typescript @@ -10,7 +12,7 @@ let value: Reasoning = {}; ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `effort` | [models.Effort](../models/effort.md) | :heavy_minus_sign: | N/A | -| `summary` | [models.ReasoningSummaryVerbosity](../models/reasoningsummaryverbosity.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------- | 
--------------------------------------------------- | --------------------------------------------------- | --------------------------------------------------- | --------------------------------------------------- | +| `effort` | [models.Effort](../models/effort.md) | :heavy_minus_sign: | Constrains effort on reasoning for reasoning models | medium | +| `summary` | *any* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/reasoningdetailencrypted.md b/docs/models/reasoningdetailencrypted.md new file mode 100644 index 00000000..42f226cb --- /dev/null +++ b/docs/models/reasoningdetailencrypted.md @@ -0,0 +1,24 @@ +# ReasoningDetailEncrypted + +Reasoning detail encrypted schema + +## Example Usage + +```typescript +import { ReasoningDetailEncrypted } from "@openrouter/sdk/models"; + +let value: ReasoningDetailEncrypted = { + type: "reasoning.encrypted", + data: "", +}; +``` + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `type` | *"reasoning.encrypted"* | :heavy_check_mark: | N/A | +| `data` | *string* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_minus_sign: | N/A | +| `format` | [models.ReasoningDetailEncryptedFormat](../models/reasoningdetailencryptedformat.md) | :heavy_minus_sign: | N/A | +| `index` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/reasoningdetailencryptedformat.md b/docs/models/reasoningdetailencryptedformat.md new file mode 100644 index 00000000..dee604b0 --- /dev/null +++ b/docs/models/reasoningdetailencryptedformat.md @@ -0,0 +1,17 @@ +# ReasoningDetailEncryptedFormat + +## Example Usage + 
+```typescript +import { ReasoningDetailEncryptedFormat } from "@openrouter/sdk/models"; + +let value: ReasoningDetailEncryptedFormat = "xai-responses-v1"; +``` + +## Values + +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. + +```typescript +"unknown" | "openai-responses-v1" | "azure-openai-responses-v1" | "xai-responses-v1" | "anthropic-claude-v1" | "google-gemini-v1" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/reasoningdetailsummary.md b/docs/models/reasoningdetailsummary.md new file mode 100644 index 00000000..85c377e2 --- /dev/null +++ b/docs/models/reasoningdetailsummary.md @@ -0,0 +1,25 @@ +# ReasoningDetailSummary + +Reasoning detail summary schema + +## Example Usage + +```typescript +import { ReasoningDetailSummary } from "@openrouter/sdk/models"; + +let value: ReasoningDetailSummary = { + type: "reasoning.summary", + summary: + "The model analyzed the problem by first identifying key constraints, then evaluating possible solutions...", +}; +``` + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *"reasoning.summary"* | :heavy_check_mark: | N/A | +| `summary` | *string* | :heavy_check_mark: | N/A | +| `id` | *string* | :heavy_minus_sign: | N/A | +| `format` | [models.ReasoningDetailSummaryFormat](../models/reasoningdetailsummaryformat.md) | :heavy_minus_sign: | N/A | +| `index` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema21.md b/docs/models/reasoningdetailsummaryformat.md similarity index 67% rename from docs/models/schema21.md rename to docs/models/reasoningdetailsummaryformat.md 
index 7cddb122..74b3a21c 100644 --- a/docs/models/schema21.md +++ b/docs/models/reasoningdetailsummaryformat.md @@ -1,11 +1,11 @@ -# Schema21 +# ReasoningDetailSummaryFormat ## Example Usage ```typescript -import { Schema21 } from "@openrouter/sdk/models"; +import { ReasoningDetailSummaryFormat } from "@openrouter/sdk/models"; -let value: Schema21 = "openai-responses-v1"; +let value: ReasoningDetailSummaryFormat = "unknown"; ``` ## Values diff --git a/docs/models/reasoningdetailtext.md b/docs/models/reasoningdetailtext.md new file mode 100644 index 00000000..0a428c24 --- /dev/null +++ b/docs/models/reasoningdetailtext.md @@ -0,0 +1,24 @@ +# ReasoningDetailText + +Reasoning detail text schema + +## Example Usage + +```typescript +import { ReasoningDetailText } from "@openrouter/sdk/models"; + +let value: ReasoningDetailText = { + type: "reasoning.text", +}; +``` + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `type` | *"reasoning.text"* | :heavy_check_mark: | N/A | +| `text` | *string* | :heavy_minus_sign: | N/A | +| `signature` | *string* | :heavy_minus_sign: | N/A | +| `id` | *string* | :heavy_minus_sign: | N/A | +| `format` | [models.ReasoningDetailTextFormat](../models/reasoningdetailtextformat.md) | :heavy_minus_sign: | N/A | +| `index` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/reasoningdetailtextformat.md b/docs/models/reasoningdetailtextformat.md new file mode 100644 index 00000000..fc27098c --- /dev/null +++ b/docs/models/reasoningdetailtextformat.md @@ -0,0 +1,17 @@ +# ReasoningDetailTextFormat + +## Example Usage + +```typescript +import { ReasoningDetailTextFormat } from 
"@openrouter/sdk/models"; + +let value: ReasoningDetailTextFormat = "xai-responses-v1"; +``` + +## Values + +This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. + +```typescript +"unknown" | "openai-responses-v1" | "azure-openai-responses-v1" | "xai-responses-v1" | "anthropic-claude-v1" | "google-gemini-v1" | Unrecognized +``` \ No newline at end of file diff --git a/docs/models/reasoningdetailunion.md b/docs/models/reasoningdetailunion.md new file mode 100644 index 00000000..88328d82 --- /dev/null +++ b/docs/models/reasoningdetailunion.md @@ -0,0 +1,34 @@ +# ReasoningDetailUnion + +Reasoning detail union schema + + +## Supported Types + +### `models.ReasoningDetailSummary` + +```typescript +const value: models.ReasoningDetailSummary = { + type: "reasoning.summary", + summary: + "The model analyzed the problem by first identifying key constraints, then evaluating possible solutions...", +}; +``` + +### `models.ReasoningDetailEncrypted` + +```typescript +const value: models.ReasoningDetailEncrypted = { + type: "reasoning.encrypted", + data: "", +}; +``` + +### `models.ReasoningDetailText` + +```typescript +const value: models.ReasoningDetailText = { + type: "reasoning.text", +}; +``` + diff --git a/docs/models/reasoningsummaryverbosity.md b/docs/models/reasoningsummaryverbosity.md index bcb3d98e..a7281603 100644 --- a/docs/models/reasoningsummaryverbosity.md +++ b/docs/models/reasoningsummaryverbosity.md @@ -5,7 +5,7 @@ ```typescript import { ReasoningSummaryVerbosity } from "@openrouter/sdk/models"; -let value: ReasoningSummaryVerbosity = "detailed"; +let value: ReasoningSummaryVerbosity = "auto"; ``` ## Values diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md index ce8782c7..9e0c8027 100644 --- a/docs/models/responseformat.md +++ b/docs/models/responseformat.md @@ -1,5 +1,7 @@ # ResponseFormat +Response format configuration + ## Supported Types @@ -25,7 +27,7 @@ const value: 
models.ResponseFormatJSONObject = { const value: models.ResponseFormatJSONSchema = { type: "json_schema", jsonSchema: { - name: "", + name: "math_response", }, }; ``` @@ -35,14 +37,14 @@ const value: models.ResponseFormatJSONSchema = { ```typescript const value: models.ResponseFormatTextGrammar = { type: "grammar", - grammar: "", + grammar: "root ::= \"yes\" | \"no\"", }; ``` -### `models.ResponseFormatPython` +### `models.ResponseFormatTextPython` ```typescript -const value: models.ResponseFormatPython = { +const value: models.ResponseFormatTextPython = { type: "python", }; ``` diff --git a/docs/models/responseformatjsonobject.md b/docs/models/responseformatjsonobject.md index bdc5e7fc..cfdb215f 100644 --- a/docs/models/responseformatjsonobject.md +++ b/docs/models/responseformatjsonobject.md @@ -1,5 +1,7 @@ # ResponseFormatJSONObject +JSON object response format + ## Example Usage ```typescript diff --git a/docs/models/responseformatjsonschema.md b/docs/models/responseformatjsonschema.md index 176cbbe3..30c38b52 100644 --- a/docs/models/responseformatjsonschema.md +++ b/docs/models/responseformatjsonschema.md @@ -1,5 +1,7 @@ # ResponseFormatJSONSchema +JSON Schema response format for structured outputs + ## Example Usage ```typescript @@ -8,14 +10,14 @@ import { ResponseFormatJSONSchema } from "@openrouter/sdk/models"; let value: ResponseFormatJSONSchema = { type: "json_schema", jsonSchema: { - name: "", + name: "math_response", }, }; ``` ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `type` | *"json_schema"* | :heavy_check_mark: | N/A | -| `jsonSchema` | [models.JSONSchemaConfig](../models/jsonschemaconfig.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | 
+| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `type` | *"json_schema"* | :heavy_check_mark: | N/A | | +| `jsonSchema` | [models.JSONSchemaConfig](../models/jsonschemaconfig.md) | :heavy_check_mark: | JSON Schema configuration object | {
"name": "math_response",
"description": "A mathematical response",
"schema": {
"type": "object",
"properties": {
"answer": {
"type": "number"
}
},
"required": [
"answer"
]
},
"strict": true
} | \ No newline at end of file diff --git a/docs/models/responseformattext.md b/docs/models/responseformattext.md index 31d567a0..974957da 100644 --- a/docs/models/responseformattext.md +++ b/docs/models/responseformattext.md @@ -1,5 +1,7 @@ # ResponseFormatText +Default text response format + ## Example Usage ```typescript diff --git a/docs/models/responseformattextconfig.md b/docs/models/responseformattextconfig.md index 4f72b4d3..ffbadc4a 100644 --- a/docs/models/responseformattextconfig.md +++ b/docs/models/responseformattextconfig.md @@ -13,10 +13,10 @@ const value: models.ResponsesFormatText = { }; ``` -### `models.ResponsesFormatJSONObject` +### `models.ResponseFormatJSONObject` ```typescript -const value: models.ResponsesFormatJSONObject = { +const value: models.ResponseFormatJSONObject = { type: "json_object", }; ``` diff --git a/docs/models/responseformattextgrammar.md b/docs/models/responseformattextgrammar.md index b3176d02..e4fe1611 100644 --- a/docs/models/responseformattextgrammar.md +++ b/docs/models/responseformattextgrammar.md @@ -1,5 +1,7 @@ # ResponseFormatTextGrammar +Custom grammar response format + ## Example Usage ```typescript @@ -7,13 +9,13 @@ import { ResponseFormatTextGrammar } from "@openrouter/sdk/models"; let value: ResponseFormatTextGrammar = { type: "grammar", - grammar: "", + grammar: "root ::= \"yes\" | \"no\"", }; ``` ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `type` | *"grammar"* | :heavy_check_mark: | N/A | -| `grammar` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- | +| `type` | *"grammar"* | :heavy_check_mark: | N/A | | +| `grammar` | *string* | :heavy_check_mark: | 
Custom grammar for text generation | root ::= "yes" \| "no" | \ No newline at end of file diff --git a/docs/models/responseformatpython.md b/docs/models/responseformattextpython.md similarity index 66% rename from docs/models/responseformatpython.md rename to docs/models/responseformattextpython.md index b9dfc226..4bda61a2 100644 --- a/docs/models/responseformatpython.md +++ b/docs/models/responseformattextpython.md @@ -1,11 +1,13 @@ -# ResponseFormatPython +# ResponseFormatTextPython + +Python code response format ## Example Usage ```typescript -import { ResponseFormatPython } from "@openrouter/sdk/models"; +import { ResponseFormatTextPython } from "@openrouter/sdk/models"; -let value: ResponseFormatPython = { +let value: ResponseFormatTextPython = { type: "python", }; ``` diff --git a/docs/models/responseserrorfield.md b/docs/models/responseserrorfield.md index 2596ef9c..b29a01e1 100644 --- a/docs/models/responseserrorfield.md +++ b/docs/models/responseserrorfield.md @@ -15,7 +15,7 @@ let value: ResponsesErrorField = { ## Fields -| Field | Type | Required | Description | -| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | -| `code` | [models.CodeEnum](../models/codeenum.md) | :heavy_check_mark: | N/A | -| `message` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `code` | [models.Code](../models/code.md) | :heavy_check_mark: | N/A | +| `message` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsesformatjsonobject.md b/docs/models/responsesformatjsonobject.md deleted file mode 100644 index 8cd57234..00000000 --- a/docs/models/responsesformatjsonobject.md +++ /dev/null @@ -1,19 +0,0 @@ -# 
ResponsesFormatJSONObject - -JSON object response format - -## Example Usage - -```typescript -import { ResponsesFormatJSONObject } from "@openrouter/sdk/models"; - -let value: ResponsesFormatJSONObject = { - type: "json_object", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `type` | *"json_object"* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/route.md b/docs/models/route.md deleted file mode 100644 index 5f434fff..00000000 --- a/docs/models/route.md +++ /dev/null @@ -1,17 +0,0 @@ -# Route - -## Example Usage - -```typescript -import { Route } from "@openrouter/sdk/models"; - -let value: Route = "fallback"; -``` - -## Values - -This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. - -```typescript -"fallback" | "sort" | Unrecognized -``` \ No newline at end of file diff --git a/docs/models/schema0.md b/docs/models/schema0.md deleted file mode 100644 index 8e622e20..00000000 --- a/docs/models/schema0.md +++ /dev/null @@ -1,27 +0,0 @@ -# Schema0 - -## Example Usage - -```typescript -import { Schema0 } from "@openrouter/sdk/models"; - -let value: Schema0 = {}; -``` - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | -| `allowFallbacks` | *boolean* | :heavy_minus_sign: | N/A | -| `requireParameters` | *boolean* | :heavy_minus_sign: | N/A | -| `dataCollection` | [models.Schema3](../models/schema3.md) | :heavy_minus_sign: | N/A | -| `zdr` | *boolean* | :heavy_minus_sign: | N/A | -| `enforceDistillableText` | *boolean* | :heavy_minus_sign: | N/A | -| `order` | *models.Schema5*[] | :heavy_minus_sign: | N/A | -| `only` | *models.Schema5*[] | :heavy_minus_sign: | N/A | -| `ignore` | *models.Schema5*[] | 
:heavy_minus_sign: | N/A | -| `quantizations` | [models.Schema8](../models/schema8.md)[] | :heavy_minus_sign: | N/A | -| `sort` | *models.ProviderSortUnion* | :heavy_minus_sign: | N/A | -| `maxPrice` | [models.Schema10](../models/schema10.md) | :heavy_minus_sign: | N/A | -| `preferredMinThroughput` | *models.Schema15Union* | :heavy_minus_sign: | N/A | -| `preferredMaxLatency` | *models.Schema15Union* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema10.md b/docs/models/schema10.md deleted file mode 100644 index 5d5cc367..00000000 --- a/docs/models/schema10.md +++ /dev/null @@ -1,19 +0,0 @@ -# Schema10 - -## Example Usage - -```typescript -import { Schema10 } from "@openrouter/sdk/models"; - -let value: Schema10 = {}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt` | *models.Prompt* | :heavy_minus_sign: | N/A | -| `completion` | *models.Completion* | :heavy_minus_sign: | N/A | -| `image` | *models.Schema14* | :heavy_minus_sign: | N/A | -| `audio` | *models.Schema14* | :heavy_minus_sign: | N/A | -| `request` | *models.Schema14* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema14.md b/docs/models/schema14.md deleted file mode 100644 index f9993743..00000000 --- a/docs/models/schema14.md +++ /dev/null @@ -1,23 +0,0 @@ -# Schema14 - - -## Supported Types - -### `number` - -```typescript -const value: number = 1284.03; -``` - -### `string` - -```typescript -const value: string = ""; -``` - -### `any` - -```typescript -const value: any = ""; -``` - diff --git a/docs/models/schema15.md b/docs/models/schema15.md deleted file mode 100644 index a9bf2ca0..00000000 --- a/docs/models/schema15.md +++ /dev/null @@ -1,18 +0,0 @@ -# Schema15 - -## Example Usage - -```typescript -import { Schema15 } from "@openrouter/sdk/models"; - -let value: Schema15 = {}; -``` - -## Fields - -| Field | 
Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `p50` | *number* | :heavy_minus_sign: | N/A | -| `p75` | *number* | :heavy_minus_sign: | N/A | -| `p90` | *number* | :heavy_minus_sign: | N/A | -| `p99` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema15union.md b/docs/models/schema15union.md deleted file mode 100644 index cb5077a8..00000000 --- a/docs/models/schema15union.md +++ /dev/null @@ -1,17 +0,0 @@ -# Schema15Union - - -## Supported Types - -### `number` - -```typescript -const value: number = 1284.03; -``` - -### `models.Schema15` - -```typescript -const value: models.Schema15 = {}; -``` - diff --git a/docs/models/schema17.md b/docs/models/schema17.md deleted file mode 100644 index 5534020d..00000000 --- a/docs/models/schema17.md +++ /dev/null @@ -1,45 +0,0 @@ -# Schema17 - - -## Supported Types - -### `models.Schema17AutoRouter` - -```typescript -const value: models.Schema17AutoRouter = { - id: "auto-router", -}; -``` - -### `models.Schema17Moderation` - -```typescript -const value: models.Schema17Moderation = { - id: "moderation", -}; -``` - -### `models.Schema17Web` - -```typescript -const value: models.Schema17Web = { - id: "web", -}; -``` - -### `models.Schema17FileParser` - -```typescript -const value: models.Schema17FileParser = { - id: "file-parser", -}; -``` - -### `models.Schema17ResponseHealing` - -```typescript -const value: models.Schema17ResponseHealing = { - id: "response-healing", -}; -``` - diff --git a/docs/models/schema17autorouter.md b/docs/models/schema17autorouter.md deleted file mode 100644 index b84df676..00000000 --- a/docs/models/schema17autorouter.md +++ /dev/null @@ -1,19 +0,0 @@ -# Schema17AutoRouter - -## Example Usage - -```typescript -import { Schema17AutoRouter } from "@openrouter/sdk/models"; - -let value: Schema17AutoRouter = { - id: "auto-router", -}; -``` - -## Fields - -| Field | Type | Required | 
Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `id` | *"auto-router"* | :heavy_check_mark: | N/A | -| `enabled` | *boolean* | :heavy_minus_sign: | N/A | -| `allowedModels` | *string*[] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema17fileparser.md b/docs/models/schema17fileparser.md deleted file mode 100644 index 1115ee3b..00000000 --- a/docs/models/schema17fileparser.md +++ /dev/null @@ -1,19 +0,0 @@ -# Schema17FileParser - -## Example Usage - -```typescript -import { Schema17FileParser } from "@openrouter/sdk/models"; - -let value: Schema17FileParser = { - id: "file-parser", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `id` | *"file-parser"* | :heavy_check_mark: | N/A | -| `enabled` | *boolean* | :heavy_minus_sign: | N/A | -| `pdf` | [models.Pdf](../models/pdf.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema17responsehealing.md b/docs/models/schema17responsehealing.md deleted file mode 100644 index ac17a1f8..00000000 --- a/docs/models/schema17responsehealing.md +++ /dev/null @@ -1,18 +0,0 @@ -# Schema17ResponseHealing - -## Example Usage - -```typescript -import { Schema17ResponseHealing } from "@openrouter/sdk/models"; - -let value: Schema17ResponseHealing = { - id: "response-healing", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| -------------------- | -------------------- | -------------------- | -------------------- | -| `id` | *"response-healing"* | :heavy_check_mark: | N/A | -| `enabled` | *boolean* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema17web.md b/docs/models/schema17web.md deleted file mode 100644 index 39f9a71b..00000000 --- a/docs/models/schema17web.md +++ /dev/null @@ -1,21 +0,0 @@ -# 
Schema17Web - -## Example Usage - -```typescript -import { Schema17Web } from "@openrouter/sdk/models"; - -let value: Schema17Web = { - id: "web", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | -| `id` | *"web"* | :heavy_check_mark: | N/A | -| `enabled` | *boolean* | :heavy_minus_sign: | N/A | -| `maxResults` | *number* | :heavy_minus_sign: | N/A | -| `searchPrompt` | *string* | :heavy_minus_sign: | N/A | -| `engine` | [models.Engine](../models/engine.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema19.md b/docs/models/schema19.md deleted file mode 100644 index aa2d2931..00000000 --- a/docs/models/schema19.md +++ /dev/null @@ -1,31 +0,0 @@ -# Schema19 - - -## Supported Types - -### `models.Schema19ReasoningSummary` - -```typescript -const value: models.Schema19ReasoningSummary = { - type: "reasoning.summary", - summary: "", -}; -``` - -### `models.Schema19ReasoningEncrypted` - -```typescript -const value: models.Schema19ReasoningEncrypted = { - type: "reasoning.encrypted", - data: "", -}; -``` - -### `models.Schema19ReasoningText` - -```typescript -const value: models.Schema19ReasoningText = { - type: "reasoning.text", -}; -``` - diff --git a/docs/models/schema19reasoningencrypted.md b/docs/models/schema19reasoningencrypted.md deleted file mode 100644 index 2ef1668e..00000000 --- a/docs/models/schema19reasoningencrypted.md +++ /dev/null @@ -1,22 +0,0 @@ -# Schema19ReasoningEncrypted - -## Example Usage - -```typescript -import { Schema19ReasoningEncrypted } from "@openrouter/sdk/models"; - -let value: Schema19ReasoningEncrypted = { - type: "reasoning.encrypted", - data: "", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------- | ---------------------------------------- | 
---------------------------------------- | ---------------------------------------- | -| `type` | *"reasoning.encrypted"* | :heavy_check_mark: | N/A | -| `data` | *string* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_minus_sign: | N/A | -| `format` | [models.Schema21](../models/schema21.md) | :heavy_minus_sign: | N/A | -| `index` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema19reasoningsummary.md b/docs/models/schema19reasoningsummary.md deleted file mode 100644 index 9595a9de..00000000 --- a/docs/models/schema19reasoningsummary.md +++ /dev/null @@ -1,22 +0,0 @@ -# Schema19ReasoningSummary - -## Example Usage - -```typescript -import { Schema19ReasoningSummary } from "@openrouter/sdk/models"; - -let value: Schema19ReasoningSummary = { - type: "reasoning.summary", - summary: "", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | -| `type` | *"reasoning.summary"* | :heavy_check_mark: | N/A | -| `summary` | *string* | :heavy_check_mark: | N/A | -| `id` | *string* | :heavy_minus_sign: | N/A | -| `format` | [models.Schema21](../models/schema21.md) | :heavy_minus_sign: | N/A | -| `index` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema19reasoningtext.md b/docs/models/schema19reasoningtext.md deleted file mode 100644 index d9fbcced..00000000 --- a/docs/models/schema19reasoningtext.md +++ /dev/null @@ -1,22 +0,0 @@ -# Schema19ReasoningText - -## Example Usage - -```typescript -import { Schema19ReasoningText } from "@openrouter/sdk/models"; - -let value: Schema19ReasoningText = { - type: "reasoning.text", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------- | ---------------------------------------- | 
---------------------------------------- | ---------------------------------------- | -| `type` | *"reasoning.text"* | :heavy_check_mark: | N/A | -| `text` | *string* | :heavy_minus_sign: | N/A | -| `signature` | *string* | :heavy_minus_sign: | N/A | -| `id` | *string* | :heavy_minus_sign: | N/A | -| `format` | [models.Schema21](../models/schema21.md) | :heavy_minus_sign: | N/A | -| `index` | *number* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/schema3.md b/docs/models/schema3.md deleted file mode 100644 index 891a40d0..00000000 --- a/docs/models/schema3.md +++ /dev/null @@ -1,17 +0,0 @@ -# Schema3 - -## Example Usage - -```typescript -import { Schema3 } from "@openrouter/sdk/models"; - -let value: Schema3 = "allow"; -``` - -## Values - -This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. - -```typescript -"deny" | "allow" | Unrecognized -``` \ No newline at end of file diff --git a/docs/models/schema5enum.md b/docs/models/schema5enum.md deleted file mode 100644 index cd07a0e9..00000000 --- a/docs/models/schema5enum.md +++ /dev/null @@ -1,17 +0,0 @@ -# Schema5Enum - -## Example Usage - -```typescript -import { Schema5Enum } from "@openrouter/sdk/models"; - -let value: Schema5Enum = "Xiaomi"; -``` - -## Values - -This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
- -```typescript -"AI21" | "AionLabs" | "Alibaba" | "Ambient" | "Amazon Bedrock" | "Amazon Nova" | "Anthropic" | "Arcee AI" | "AtlasCloud" | "Avian" | "Azure" | "BaseTen" | "BytePlus" | "Black Forest Labs" | "Cerebras" | "Chutes" | "Cirrascale" | "Clarifai" | "Cloudflare" | "Cohere" | "Crusoe" | "DeepInfra" | "DeepSeek" | "Featherless" | "Fireworks" | "Friendli" | "GMICloud" | "Google" | "Google AI Studio" | "Groq" | "Hyperbolic" | "Inception" | "Inceptron" | "InferenceNet" | "Infermatic" | "Inflection" | "Liquid" | "Mara" | "Mancer 2" | "Minimax" | "ModelRun" | "Mistral" | "Modular" | "Moonshot AI" | "Morph" | "NCompass" | "Nebius" | "NextBit" | "Novita" | "Nvidia" | "OpenAI" | "OpenInference" | "Parasail" | "Perplexity" | "Phala" | "Relace" | "SambaNova" | "Seed" | "SiliconFlow" | "Sourceful" | "StepFun" | "Stealth" | "StreamLake" | "Switchpoint" | "Together" | "Upstage" | "Venice" | "WandB" | "Xiaomi" | "xAI" | "Z.AI" | "FakeProvider" | Unrecognized -``` \ No newline at end of file diff --git a/docs/models/schema8.md b/docs/models/schema8.md deleted file mode 100644 index bbf20ad7..00000000 --- a/docs/models/schema8.md +++ /dev/null @@ -1,17 +0,0 @@ -# Schema8 - -## Example Usage - -```typescript -import { Schema8 } from "@openrouter/sdk/models"; - -let value: Schema8 = "fp6"; -``` - -## Values - -This is an open enum. Unrecognized values will be captured as the `Unrecognized` branded type. 
- -```typescript -"int4" | "int8" | "fp4" | "fp6" | "fp8" | "fp16" | "bf16" | "fp32" | "unknown" | Unrecognized -``` \ No newline at end of file diff --git a/docs/models/stop.md b/docs/models/stop.md index 53db1523..7e111968 100644 --- a/docs/models/stop.md +++ b/docs/models/stop.md @@ -1,20 +1,29 @@ # Stop +Stop sequences (up to 4) + ## Supported Types ### `string` ```typescript -const value: string = ""; +const value: string = "[\"\"]"; ``` ### `string[]` ```typescript const value: string[] = [ - "", - "", + "", +]; +``` + +### `any` + +```typescript +const value: any = [ + "", ]; ``` diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md index 5f7c34b4..274288da 100644 --- a/docs/models/systemmessage.md +++ b/docs/models/systemmessage.md @@ -1,5 +1,7 @@ # SystemMessage +System message for setting behavior + ## Example Usage ```typescript @@ -7,14 +9,14 @@ import { SystemMessage } from "@openrouter/sdk/models"; let value: SystemMessage = { role: "system", - content: [], + content: "What is the capital of France?", }; ``` ## Fields -| Field | Type | Required | Description | -| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | -| `role` | *"system"* | :heavy_check_mark: | N/A | -| `content` | *models.SystemMessageContent* | :heavy_check_mark: | N/A | -| `name` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `role` | *"system"* | :heavy_check_mark: | N/A | | +| `content` | *models.SystemMessageContent* | :heavy_check_mark: | System message content | You are a helpful assistant. 
| +| `name` | *string* | :heavy_minus_sign: | Optional name for the system message | Assistant Config | \ No newline at end of file diff --git a/docs/models/systemmessagecontent.md b/docs/models/systemmessagecontent.md index 47e563a3..31db158c 100644 --- a/docs/models/systemmessagecontent.md +++ b/docs/models/systemmessagecontent.md @@ -1,12 +1,14 @@ # SystemMessageContent +System message content + ## Supported Types ### `string` ```typescript -const value: string = ""; +const value: string = "You are a helpful assistant."; ``` ### `models.ChatMessageContentItemText[]` diff --git a/docs/models/toolchoiceoption.md b/docs/models/toolchoiceoption.md new file mode 100644 index 00000000..865edb69 --- /dev/null +++ b/docs/models/toolchoiceoption.md @@ -0,0 +1,36 @@ +# ToolChoiceOption + +Tool choice configuration + + +## Supported Types + +### `models.ToolChoiceOptionNone` + +```typescript +const value: models.ToolChoiceOptionNone = "none"; +``` + +### `models.ToolChoiceOptionAuto` + +```typescript +const value: models.ToolChoiceOptionAuto = "auto"; +``` + +### `models.ToolChoiceOptionRequired` + +```typescript +const value: models.ToolChoiceOptionRequired = "required"; +``` + +### `models.NamedToolChoice` + +```typescript +const value: models.NamedToolChoice = { + type: "function", + function: { + name: "get_weather", + }, +}; +``` + diff --git a/docs/models/toolchoiceoptionauto.md b/docs/models/toolchoiceoptionauto.md new file mode 100644 index 00000000..29e8d389 --- /dev/null +++ b/docs/models/toolchoiceoptionauto.md @@ -0,0 +1,15 @@ +# ToolChoiceOptionAuto + +## Example Usage + +```typescript +import { ToolChoiceOptionAuto } from "@openrouter/sdk/models"; + +let value: ToolChoiceOptionAuto = "auto"; +``` + +## Values + +```typescript +"auto" +``` \ No newline at end of file diff --git a/docs/models/toolchoiceoptionnone.md b/docs/models/toolchoiceoptionnone.md new file mode 100644 index 00000000..171706d7 --- /dev/null +++ b/docs/models/toolchoiceoptionnone.md @@ -0,0 
+1,15 @@ +# ToolChoiceOptionNone + +## Example Usage + +```typescript +import { ToolChoiceOptionNone } from "@openrouter/sdk/models"; + +let value: ToolChoiceOptionNone = "none"; +``` + +## Values + +```typescript +"none" +``` \ No newline at end of file diff --git a/docs/models/toolchoiceoptionrequired.md b/docs/models/toolchoiceoptionrequired.md new file mode 100644 index 00000000..e2db43c1 --- /dev/null +++ b/docs/models/toolchoiceoptionrequired.md @@ -0,0 +1,15 @@ +# ToolChoiceOptionRequired + +## Example Usage + +```typescript +import { ToolChoiceOptionRequired } from "@openrouter/sdk/models"; + +let value: ToolChoiceOptionRequired = "required"; +``` + +## Values + +```typescript +"required" +``` \ No newline at end of file diff --git a/docs/models/tooldefinitionjson.md b/docs/models/tooldefinitionjson.md index d8993e62..ab5f6870 100644 --- a/docs/models/tooldefinitionjson.md +++ b/docs/models/tooldefinitionjson.md @@ -1,5 +1,7 @@ # ToolDefinitionJson +Tool definition for function calling + ## Example Usage ```typescript @@ -8,14 +10,15 @@ import { ToolDefinitionJson } from "@openrouter/sdk/models"; let value: ToolDefinitionJson = { type: "function", function: { - name: "", + name: "get_weather", }, }; ``` ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `type` | *"function"* | :heavy_check_mark: | N/A | -| `function` | [models.ToolDefinitionJsonFunction](../models/tooldefinitionjsonfunction.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `type` | [models.ToolDefinitionJsonType](../models/tooldefinitionjsontype.md) | :heavy_check_mark: | N/A | | +| `function` | [models.ToolDefinitionJsonFunction](../models/tooldefinitionjsonfunction.md) | :heavy_check_mark: | Function definition for tool calling | {
"name": "get_weather",
"description": "Get the current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
}
},
"required": [
"location"
]
}
} | +| `cacheControl` | [models.ChatMessageContentItemCacheControl](../models/chatmessagecontentitemcachecontrol.md) | :heavy_minus_sign: | Cache control for the content part | {
"type": "ephemeral",
"ttl": "5m"
} | \ No newline at end of file diff --git a/docs/models/tooldefinitionjsonfunction.md b/docs/models/tooldefinitionjsonfunction.md index 645a829d..0da6b3ca 100644 --- a/docs/models/tooldefinitionjsonfunction.md +++ b/docs/models/tooldefinitionjsonfunction.md @@ -1,20 +1,22 @@ # ToolDefinitionJsonFunction +Function definition for tool calling + ## Example Usage ```typescript import { ToolDefinitionJsonFunction } from "@openrouter/sdk/models"; let value: ToolDefinitionJsonFunction = { - name: "", + name: "get_weather", }; ``` ## Fields -| Field | Type | Required | Description | -| --------------------- | --------------------- | --------------------- | --------------------- | -| `name` | *string* | :heavy_check_mark: | N/A | -| `description` | *string* | :heavy_minus_sign: | N/A | -| `parameters` | Record | :heavy_minus_sign: | N/A | -| `strict` | *boolean* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | +| `name` | *string* | :heavy_check_mark: | Function name (a-z, A-Z, 0-9, underscores, dashes, max 64 chars) | get_weather | +| `description` | *string* | :heavy_minus_sign: | Function description for the model | Get the current weather for a location | +| `parameters` | Record | :heavy_minus_sign: | Function parameters as JSON Schema object | {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
}
},
"required": [
"location"
]
} | +| `strict` | *boolean* | :heavy_minus_sign: | Enable strict schema adherence | false | \ No newline at end of file diff --git a/docs/models/tooldefinitionjsontype.md b/docs/models/tooldefinitionjsontype.md new file mode 100644 index 00000000..5a02c200 --- /dev/null +++ b/docs/models/tooldefinitionjsontype.md @@ -0,0 +1,15 @@ +# ToolDefinitionJsonType + +## Example Usage + +```typescript +import { ToolDefinitionJsonType } from "@openrouter/sdk/models"; + +let value: ToolDefinitionJsonType = "function"; +``` + +## Values + +```typescript +"function" +``` \ No newline at end of file diff --git a/docs/models/toolresponsemessage.md b/docs/models/toolresponsemessage.md index 0d66edc9..50404c94 100644 --- a/docs/models/toolresponsemessage.md +++ b/docs/models/toolresponsemessage.md @@ -1,5 +1,7 @@ # ToolResponseMessage +Tool response message + ## Example Usage ```typescript @@ -7,15 +9,15 @@ import { ToolResponseMessage } from "@openrouter/sdk/models"; let value: ToolResponseMessage = { role: "tool", - content: [], - toolCallId: "", + content: "What is the capital of France?", + toolCallId: "call_abc123", }; ``` ## Fields -| Field | Type | Required | Description | -| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | -| `role` | *"tool"* | :heavy_check_mark: | N/A | -| `content` | *models.ToolResponseMessageContent* | :heavy_check_mark: | N/A | -| `toolCallId` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `role` | *"tool"* | :heavy_check_mark: | N/A | | +| `content` | 
*models.ToolResponseMessageContent* | :heavy_check_mark: | Tool response content | The weather in San Francisco is 72°F and sunny. | +| `toolCallId` | *string* | :heavy_check_mark: | ID of the assistant message tool call this message responds to | call_abc123 | \ No newline at end of file diff --git a/docs/models/toolresponsemessagecontent.md b/docs/models/toolresponsemessagecontent.md index 7926f5c3..2d46ed55 100644 --- a/docs/models/toolresponsemessagecontent.md +++ b/docs/models/toolresponsemessagecontent.md @@ -1,12 +1,14 @@ # ToolResponseMessageContent +Tool response content + ## Supported Types ### `string` ```typescript -const value: string = ""; +const value: string = "The weather in San Francisco is 72°F and sunny."; ``` ### `models.ChatMessageContentItem[]` @@ -14,9 +16,10 @@ const value: string = ""; ```typescript const value: models.ChatMessageContentItem[] = [ { - type: "image_url", - imageUrl: { - url: "https://pretty-reservation.org", + type: "input_audio", + inputAudio: { + data: "", + format: "", }, }, ]; diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md index 792d86b7..a9730290 100644 --- a/docs/models/usermessage.md +++ b/docs/models/usermessage.md @@ -1,5 +1,7 @@ # UserMessage +User message + ## Example Usage ```typescript @@ -7,14 +9,14 @@ import { UserMessage } from "@openrouter/sdk/models"; let value: UserMessage = { role: "user", - content: "", + content: "What is the capital of France?", }; ``` ## Fields -| Field | Type | Required | Description | -| --------------------------- | --------------------------- | --------------------------- | --------------------------- | -| `role` | *"user"* | :heavy_check_mark: | N/A | -| `content` | *models.UserMessageContent* | :heavy_check_mark: | N/A | -| `name` | *string* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------ | ------------------------------ | ------------------------------ | 
------------------------------ | ------------------------------ | +| `role` | *"user"* | :heavy_check_mark: | N/A | | +| `content` | *models.UserMessageContent* | :heavy_check_mark: | User message content | What is the capital of France? | +| `name` | *string* | :heavy_minus_sign: | Optional name for the user | User | \ No newline at end of file diff --git a/docs/models/usermessagecontent.md b/docs/models/usermessagecontent.md index 69eeecaf..70ea3af8 100644 --- a/docs/models/usermessagecontent.md +++ b/docs/models/usermessagecontent.md @@ -1,12 +1,14 @@ # UserMessageContent +User message content + ## Supported Types ### `string` ```typescript -const value: string = ""; +const value: string = "What is the capital of France?"; ``` ### `models.ChatMessageContentItem[]` @@ -14,9 +16,10 @@ const value: string = ""; ```typescript const value: models.ChatMessageContentItem[] = [ { - type: "image_url", - imageUrl: { - url: "https://pretty-reservation.org", + type: "input_audio", + inputAudio: { + data: "", + format: "", }, }, ]; diff --git a/docs/models/videoinput.md b/docs/models/videoinput.md new file mode 100644 index 00000000..776af932 --- /dev/null +++ b/docs/models/videoinput.md @@ -0,0 +1,19 @@ +# VideoInput + +Video input object + +## Example Usage + +```typescript +import { VideoInput } from "@openrouter/sdk/models"; + +let value: VideoInput = { + url: "https://funny-mainstream.org", +}; +``` + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | +| `url` | *string* | :heavy_check_mark: | URL of the video (data: URLs supported) | \ No newline at end of file diff --git a/docs/models/videourl1.md b/docs/models/videourl1.md deleted file mode 100644 index aa61751a..00000000 --- a/docs/models/videourl1.md +++ /dev/null @@ -1,17 +0,0 @@ -# VideoUrl1 - -## Example Usage - -```typescript -import { 
VideoUrl1 } from "@openrouter/sdk/models"; - -let value: VideoUrl1 = { - url: "https://colorless-milestone.net/", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `url` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/videourl2.md b/docs/models/videourl2.md deleted file mode 100644 index 0ec1780e..00000000 --- a/docs/models/videourl2.md +++ /dev/null @@ -1,17 +0,0 @@ -# VideoUrl2 - -## Example Usage - -```typescript -import { VideoUrl2 } from "@openrouter/sdk/models"; - -let value: VideoUrl2 = { - url: "https://triangular-soup.info", -}; -``` - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `url` | *string* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index fade8858..e53280e2 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -25,7 +25,17 @@ const openRouter = new OpenRouter({ async function run() { const result = await openRouter.chat.send({ chatGenerationParams: { - messages: [], + messages: [ + { + role: "system", + content: "You are a helpful assistant.", + }, + { + role: "user", + content: "What is the capital of France?", + }, + ], + temperature: 0.7, }, }); @@ -54,7 +64,17 @@ const openRouter = new OpenRouterCore({ async function run() { const res = await chatSend(openRouter, { chatGenerationParams: { - messages: [], + messages: [ + { + role: "system", + content: "You are a helpful assistant.", + }, + { + role: "user", + content: "What is the capital of France?", + }, + ], + temperature: 0.7, }, }); if (res.ok) { @@ -83,8 +103,19 @@ run(); ### Errors -| Error Type | Status Code | Content Type | -| ----------------------------- | ----------------------------- | ----------------------------- | -| errors.ChatError | 
400, 401, 429 | application/json | -| errors.ChatError | 500 | application/json | -| errors.OpenRouterDefaultError | 4XX, 5XX | \*/\* | \ No newline at end of file +| Error Type | Status Code | Content Type | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | +| errors.BadRequestResponseError | 400 | application/json | +| errors.UnauthorizedResponseError | 401 | application/json | +| errors.PaymentRequiredResponseError | 402 | application/json | +| errors.NotFoundResponseError | 404 | application/json | +| errors.RequestTimeoutResponseError | 408 | application/json | +| errors.PayloadTooLargeResponseError | 413 | application/json | +| errors.UnprocessableEntityResponseError | 422 | application/json | +| errors.TooManyRequestsResponseError | 429 | application/json | +| errors.InternalServerResponseError | 500 | application/json | +| errors.BadGatewayResponseError | 502 | application/json | +| errors.ServiceUnavailableResponseError | 503 | application/json | +| errors.EdgeNetworkTimeoutResponseError | 524 | application/json | +| errors.ProviderOverloadedResponseError | 529 | application/json | +| errors.OpenRouterDefaultError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/nextjs-example/package.json b/examples/nextjs-example/package.json index d3c89874..c19ed544 100644 --- a/examples/nextjs-example/package.json +++ b/examples/nextjs-example/package.json @@ -16,7 +16,7 @@ "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "lucide-react": "^0.544.0", - "next": "15.5.9", + "next": "15.5.10", "react": "19.1.0", "react-dom": "19.1.0", "tailwind-merge": "^3.3.1", @@ -29,7 +29,7 @@ "@types/react": "^19", "@types/react-dom": "^19", "eslint": "^9", - "eslint-config-next": "15.5.9", + "eslint-config-next": "15.5.10", "tailwindcss": "^4", "tw-animate-css": "^1.4.0", "typescript": "^5" diff --git a/jsr.json b/jsr.json index 2d1f7055..168f07e6 100644 --- a/jsr.json +++ 
b/jsr.json @@ -2,7 +2,7 @@ { "name": "@openrouter/sdk", - "version": "0.8.0", + "version": "0.9.4", "exports": { ".": "./src/index.ts", "./models/errors": "./src/models/errors/index.ts", diff --git a/package.json b/package.json index be1abdf1..7f042be9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@openrouter/sdk", - "version": "0.8.0", + "version": "0.9.4", "author": "OpenRouter", "description": "The OpenRouter TypeScript SDK is a type-safe toolkit for building AI applications with access to 300+ language models through a unified API.", "keywords": [ diff --git a/src/funcs/chatSend.ts b/src/funcs/chatSend.ts index 4333f575..b72b43c7 100644 --- a/src/funcs/chatSend.ts +++ b/src/funcs/chatSend.ts @@ -43,7 +43,19 @@ export function chatSend( ): APIPromise< Result< models.ChatResponse, - | errors.ChatError + | errors.BadRequestResponseError + | errors.UnauthorizedResponseError + | errors.PaymentRequiredResponseError + | errors.NotFoundResponseError + | errors.RequestTimeoutResponseError + | errors.PayloadTooLargeResponseError + | errors.UnprocessableEntityResponseError + | errors.TooManyRequestsResponseError + | errors.InternalServerResponseError + | errors.BadGatewayResponseError + | errors.ServiceUnavailableResponseError + | errors.EdgeNetworkTimeoutResponseError + | errors.ProviderOverloadedResponseError | OpenRouterError | ResponseValidationError | ConnectionError @@ -62,8 +74,20 @@ export function chatSend( options?: RequestOptions, ): APIPromise< Result< - EventStream, - | errors.ChatError + EventStream, + | errors.BadRequestResponseError + | errors.UnauthorizedResponseError + | errors.PaymentRequiredResponseError + | errors.NotFoundResponseError + | errors.RequestTimeoutResponseError + | errors.PayloadTooLargeResponseError + | errors.UnprocessableEntityResponseError + | errors.TooManyRequestsResponseError + | errors.InternalServerResponseError + | errors.BadGatewayResponseError + | errors.ServiceUnavailableResponseError + | 
errors.EdgeNetworkTimeoutResponseError + | errors.ProviderOverloadedResponseError | OpenRouterError | ResponseValidationError | ConnectionError @@ -81,7 +105,19 @@ export function chatSend( ): APIPromise< Result< operations.SendChatCompletionRequestResponse, - | errors.ChatError + | errors.BadRequestResponseError + | errors.UnauthorizedResponseError + | errors.PaymentRequiredResponseError + | errors.NotFoundResponseError + | errors.RequestTimeoutResponseError + | errors.PayloadTooLargeResponseError + | errors.UnprocessableEntityResponseError + | errors.TooManyRequestsResponseError + | errors.InternalServerResponseError + | errors.BadGatewayResponseError + | errors.ServiceUnavailableResponseError + | errors.EdgeNetworkTimeoutResponseError + | errors.ProviderOverloadedResponseError | OpenRouterError | ResponseValidationError | ConnectionError @@ -99,7 +135,19 @@ export function chatSend( ): APIPromise< Result< operations.SendChatCompletionRequestResponse, - | errors.ChatError + | errors.BadRequestResponseError + | errors.UnauthorizedResponseError + | errors.PaymentRequiredResponseError + | errors.NotFoundResponseError + | errors.RequestTimeoutResponseError + | errors.PayloadTooLargeResponseError + | errors.UnprocessableEntityResponseError + | errors.TooManyRequestsResponseError + | errors.InternalServerResponseError + | errors.BadGatewayResponseError + | errors.ServiceUnavailableResponseError + | errors.EdgeNetworkTimeoutResponseError + | errors.ProviderOverloadedResponseError | OpenRouterError | ResponseValidationError | ConnectionError @@ -125,7 +173,19 @@ async function $do( [ Result< operations.SendChatCompletionRequestResponse, - | errors.ChatError + | errors.BadRequestResponseError + | errors.UnauthorizedResponseError + | errors.PaymentRequiredResponseError + | errors.NotFoundResponseError + | errors.RequestTimeoutResponseError + | errors.PayloadTooLargeResponseError + | errors.UnprocessableEntityResponseError + | errors.TooManyRequestsResponseError + | 
errors.InternalServerResponseError + | errors.BadGatewayResponseError + | errors.ServiceUnavailableResponseError + | errors.EdgeNetworkTimeoutResponseError + | errors.ProviderOverloadedResponseError | OpenRouterError | ResponseValidationError | ConnectionError @@ -207,7 +267,23 @@ async function $do( const doResult = await client._do(req, { context, - errorCodes: ["400", "401", "429", "4XX", "500", "5XX"], + errorCodes: [ + "400", + "401", + "402", + "404", + "408", + "413", + "422", + "429", + "4XX", + "500", + "502", + "503", + "524", + "529", + "5XX", + ], retryConfig: context.retryConfig, retryCodes: context.retryCodes, }); @@ -222,7 +298,19 @@ async function $do( const [result] = await M.match< operations.SendChatCompletionRequestResponse, - | errors.ChatError + | errors.BadRequestResponseError + | errors.UnauthorizedResponseError + | errors.PaymentRequiredResponseError + | errors.NotFoundResponseError + | errors.RequestTimeoutResponseError + | errors.PayloadTooLargeResponseError + | errors.UnprocessableEntityResponseError + | errors.TooManyRequestsResponseError + | errors.InternalServerResponseError + | errors.BadGatewayResponseError + | errors.ServiceUnavailableResponseError + | errors.EdgeNetworkTimeoutResponseError + | errors.ProviderOverloadedResponseError | OpenRouterError | ResponseValidationError | ConnectionError @@ -234,8 +322,19 @@ async function $do( >( M.json(200, operations.SendChatCompletionRequestResponse$inboundSchema), M.sse(200, operations.SendChatCompletionRequestResponse$inboundSchema), - M.jsonErr([400, 401, 429], errors.ChatError$inboundSchema), - M.jsonErr(500, errors.ChatError$inboundSchema), + M.jsonErr(400, errors.BadRequestResponseError$inboundSchema), + M.jsonErr(401, errors.UnauthorizedResponseError$inboundSchema), + M.jsonErr(402, errors.PaymentRequiredResponseError$inboundSchema), + M.jsonErr(404, errors.NotFoundResponseError$inboundSchema), + M.jsonErr(408, errors.RequestTimeoutResponseError$inboundSchema), + M.jsonErr(413, 
errors.PayloadTooLargeResponseError$inboundSchema), + M.jsonErr(422, errors.UnprocessableEntityResponseError$inboundSchema), + M.jsonErr(429, errors.TooManyRequestsResponseError$inboundSchema), + M.jsonErr(500, errors.InternalServerResponseError$inboundSchema), + M.jsonErr(502, errors.BadGatewayResponseError$inboundSchema), + M.jsonErr(503, errors.ServiceUnavailableResponseError$inboundSchema), + M.jsonErr(524, errors.EdgeNetworkTimeoutResponseError$inboundSchema), + M.jsonErr(529, errors.ProviderOverloadedResponseError$inboundSchema), M.fail("4XX"), M.fail("5XX"), )(response, req, { extraFields: responseFields }); diff --git a/src/lib/config.ts b/src/lib/config.ts index d6c3be8e..7e3e27f8 100644 --- a/src/lib/config.ts +++ b/src/lib/config.ts @@ -76,7 +76,7 @@ export function serverURLFromOptions(options: SDKOptions): URL | null { export const SDK_METADATA = { language: "typescript", openapiDocVersion: "1.0.0", - sdkVersion: "0.8.0", + sdkVersion: "0.9.4", genVersion: "2.788.4", - userAgent: "speakeasy-sdk/typescript 0.8.0 2.788.4 1.0.0 @openrouter/sdk", + userAgent: "speakeasy-sdk/typescript 0.9.4 2.788.4 1.0.0 @openrouter/sdk", } as const; diff --git a/src/lib/security.ts b/src/lib/security.ts index 93be85a2..ac10c99c 100644 --- a/src/lib/security.ts +++ b/src/lib/security.ts @@ -247,7 +247,7 @@ export function resolveGlobalSecurity( [ { fieldName: "Authorization", - type: "http:bearer", + type: "apiKey:header", value: security?.apiKey ?? 
env().OPENROUTER_API_KEY, }, ], diff --git a/src/models/assistantmessage.ts b/src/models/assistantmessage.ts index 81ba1846..40c885ce 100644 --- a/src/models/assistantmessage.ts +++ b/src/models/assistantmessage.ts @@ -6,7 +6,14 @@ import * as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; import { safeParse } from "../lib/schemas.js"; +import { ClosedEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; +import { + AssistantMessageImages, + AssistantMessageImages$inboundSchema, + AssistantMessageImages$Outbound, + AssistantMessageImages$outboundSchema, +} from "./assistantmessageimages.js"; import { ChatMessageContentItem, ChatMessageContentItem$inboundSchema, @@ -21,48 +28,93 @@ import { } from "./chatmessagetoolcall.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; import { - Schema19, - Schema19$inboundSchema, - Schema19$Outbound, - Schema19$outboundSchema, -} from "./schema19.js"; - -export type AssistantMessageContent = string | Array; - -export type AssistantMessageImageUrl = { - url: string; -}; - -export type Image = { - imageUrl: AssistantMessageImageUrl; -}; + ReasoningDetailUnion, + ReasoningDetailUnion$inboundSchema, + ReasoningDetailUnion$Outbound, + ReasoningDetailUnion$outboundSchema, +} from "./reasoningdetailunion.js"; + +export const AssistantMessageRole = { + Assistant: "assistant", +} as const; +export type AssistantMessageRole = ClosedEnum; + +/** + * Assistant message content + */ +export type AssistantMessageContent = + | string + | Array + | any; +/** + * Assistant message for requests and responses + */ export type AssistantMessage = { - role: "assistant"; - content?: string | Array | null | undefined; + role: AssistantMessageRole; + /** + * Assistant message content + */ + content?: string | Array | any | null | undefined; + /** + * Optional name for the assistant + */ name?: string | undefined; + /** + * Tool calls made by the assistant + */ 
toolCalls?: Array | undefined; + /** + * Refusal message if content was refused + */ refusal?: string | null | undefined; + /** + * Reasoning output + */ reasoning?: string | null | undefined; - reasoningDetails?: Array | undefined; - images?: Array | undefined; + /** + * Reasoning details for extended thinking models + */ + reasoningDetails?: Array | undefined; + /** + * Generated images from image generation models + */ + images?: Array | undefined; }; +/** @internal */ +export const AssistantMessageRole$inboundSchema: z.ZodEnum< + typeof AssistantMessageRole +> = z.enum(AssistantMessageRole); +/** @internal */ +export const AssistantMessageRole$outboundSchema: z.ZodEnum< + typeof AssistantMessageRole +> = AssistantMessageRole$inboundSchema; + /** @internal */ export const AssistantMessageContent$inboundSchema: z.ZodType< AssistantMessageContent, unknown -> = z.union([z.string(), z.array(ChatMessageContentItem$inboundSchema)]); +> = z.union([ + z.string(), + z.array(ChatMessageContentItem$inboundSchema), + z.any(), +]); /** @internal */ export type AssistantMessageContent$Outbound = | string - | Array; + | Array + | any; /** @internal */ export const AssistantMessageContent$outboundSchema: z.ZodType< AssistantMessageContent$Outbound, AssistantMessageContent -> = z.union([z.string(), z.array(ChatMessageContentItem$outboundSchema)]); +> = z.union([ + z.string(), + z.array(ChatMessageContentItem$outboundSchema), + z.any(), +]); export function assistantMessageContentToJSON( assistantMessageContent: AssistantMessageContent, @@ -81,93 +133,25 @@ export function assistantMessageContentFromJSON( ); } -/** @internal */ -export const AssistantMessageImageUrl$inboundSchema: z.ZodType< - AssistantMessageImageUrl, - unknown -> = z.object({ - url: z.string(), -}); -/** @internal */ -export type AssistantMessageImageUrl$Outbound = { - url: string; -}; - -/** @internal */ -export const AssistantMessageImageUrl$outboundSchema: z.ZodType< - AssistantMessageImageUrl$Outbound, - 
AssistantMessageImageUrl -> = z.object({ - url: z.string(), -}); - -export function assistantMessageImageUrlToJSON( - assistantMessageImageUrl: AssistantMessageImageUrl, -): string { - return JSON.stringify( - AssistantMessageImageUrl$outboundSchema.parse(assistantMessageImageUrl), - ); -} -export function assistantMessageImageUrlFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => AssistantMessageImageUrl$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'AssistantMessageImageUrl' from JSON`, - ); -} - -/** @internal */ -export const Image$inboundSchema: z.ZodType = z.object({ - image_url: z.lazy(() => AssistantMessageImageUrl$inboundSchema), -}).transform((v) => { - return remap$(v, { - "image_url": "imageUrl", - }); -}); -/** @internal */ -export type Image$Outbound = { - image_url: AssistantMessageImageUrl$Outbound; -}; - -/** @internal */ -export const Image$outboundSchema: z.ZodType = z.object({ - imageUrl: z.lazy(() => AssistantMessageImageUrl$outboundSchema), -}).transform((v) => { - return remap$(v, { - imageUrl: "image_url", - }); -}); - -export function imageToJSON(image: Image): string { - return JSON.stringify(Image$outboundSchema.parse(image)); -} -export function imageFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => Image$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'Image' from JSON`, - ); -} - /** @internal */ export const AssistantMessage$inboundSchema: z.ZodType< AssistantMessage, unknown > = z.object({ - role: z.literal("assistant"), + role: AssistantMessageRole$inboundSchema, content: z.nullable( - z.union([z.string(), z.array(ChatMessageContentItem$inboundSchema)]), + z.union([ + z.string(), + z.array(ChatMessageContentItem$inboundSchema), + z.any(), + ]), ).optional(), name: z.string().optional(), tool_calls: z.array(ChatMessageToolCall$inboundSchema).optional(), refusal: z.nullable(z.string()).optional(), reasoning: 
z.nullable(z.string()).optional(), - reasoning_details: z.array(Schema19$inboundSchema).optional(), - images: z.array(z.lazy(() => Image$inboundSchema)).optional(), + reasoning_details: z.array(ReasoningDetailUnion$inboundSchema).optional(), + images: z.array(AssistantMessageImages$inboundSchema).optional(), }).transform((v) => { return remap$(v, { "tool_calls": "toolCalls", @@ -176,14 +160,19 @@ export const AssistantMessage$inboundSchema: z.ZodType< }); /** @internal */ export type AssistantMessage$Outbound = { - role: "assistant"; - content?: string | Array | null | undefined; + role: string; + content?: + | string + | Array + | any + | null + | undefined; name?: string | undefined; tool_calls?: Array | undefined; refusal?: string | null | undefined; reasoning?: string | null | undefined; - reasoning_details?: Array | undefined; - images?: Array | undefined; + reasoning_details?: Array | undefined; + images?: Array | undefined; }; /** @internal */ @@ -191,16 +180,20 @@ export const AssistantMessage$outboundSchema: z.ZodType< AssistantMessage$Outbound, AssistantMessage > = z.object({ - role: z.literal("assistant"), + role: AssistantMessageRole$outboundSchema, content: z.nullable( - z.union([z.string(), z.array(ChatMessageContentItem$outboundSchema)]), + z.union([ + z.string(), + z.array(ChatMessageContentItem$outboundSchema), + z.any(), + ]), ).optional(), name: z.string().optional(), toolCalls: z.array(ChatMessageToolCall$outboundSchema).optional(), refusal: z.nullable(z.string()).optional(), reasoning: z.nullable(z.string()).optional(), - reasoningDetails: z.array(Schema19$outboundSchema).optional(), - images: z.array(z.lazy(() => Image$outboundSchema)).optional(), + reasoningDetails: z.array(ReasoningDetailUnion$outboundSchema).optional(), + images: z.array(AssistantMessageImages$outboundSchema).optional(), }).transform((v) => { return remap$(v, { toolCalls: "tool_calls", diff --git a/src/models/assistantmessageimages.ts b/src/models/assistantmessageimages.ts 
new file mode 100644 index 00000000..296c5068 --- /dev/null +++ b/src/models/assistantmessageimages.ts @@ -0,0 +1,105 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. + * @generated-id: 7b893584cc21 + */ + +import * as z from "zod/v4"; +import { remap as remap$ } from "../lib/primitives.js"; +import { safeParse } from "../lib/schemas.js"; +import { Result as SafeParseResult } from "../types/fp.js"; +import { SDKValidationError } from "./errors/sdkvalidationerror.js"; + +export type AssistantMessageImagesImageUrl = { + /** + * URL or base64-encoded data of the generated image + */ + url: string; +}; + +export type AssistantMessageImages = { + imageUrl: AssistantMessageImagesImageUrl; +}; + +/** @internal */ +export const AssistantMessageImagesImageUrl$inboundSchema: z.ZodType< + AssistantMessageImagesImageUrl, + unknown +> = z.object({ + url: z.string(), +}); +/** @internal */ +export type AssistantMessageImagesImageUrl$Outbound = { + url: string; +}; + +/** @internal */ +export const AssistantMessageImagesImageUrl$outboundSchema: z.ZodType< + AssistantMessageImagesImageUrl$Outbound, + AssistantMessageImagesImageUrl +> = z.object({ + url: z.string(), +}); + +export function assistantMessageImagesImageUrlToJSON( + assistantMessageImagesImageUrl: AssistantMessageImagesImageUrl, +): string { + return JSON.stringify( + AssistantMessageImagesImageUrl$outboundSchema.parse( + assistantMessageImagesImageUrl, + ), + ); +} +export function assistantMessageImagesImageUrlFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => AssistantMessageImagesImageUrl$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'AssistantMessageImagesImageUrl' from JSON`, + ); +} + +/** @internal */ +export const AssistantMessageImages$inboundSchema: z.ZodType< + AssistantMessageImages, + unknown +> = z.object({ + image_url: z.lazy(() => AssistantMessageImagesImageUrl$inboundSchema), +}).transform((v) => { + return remap$(v, { 
+ "image_url": "imageUrl", + }); +}); +/** @internal */ +export type AssistantMessageImages$Outbound = { + image_url: AssistantMessageImagesImageUrl$Outbound; +}; + +/** @internal */ +export const AssistantMessageImages$outboundSchema: z.ZodType< + AssistantMessageImages$Outbound, + AssistantMessageImages +> = z.object({ + imageUrl: z.lazy(() => AssistantMessageImagesImageUrl$outboundSchema), +}).transform((v) => { + return remap$(v, { + imageUrl: "image_url", + }); +}); + +export function assistantMessageImagesToJSON( + assistantMessageImages: AssistantMessageImages, +): string { + return JSON.stringify( + AssistantMessageImages$outboundSchema.parse(assistantMessageImages), + ); +} +export function assistantMessageImagesFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => AssistantMessageImages$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'AssistantMessageImages' from JSON`, + ); +} diff --git a/src/models/chaterror.ts b/src/models/chaterror.ts deleted file mode 100644 index 9f509fc9..00000000 --- a/src/models/chaterror.ts +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
- * @generated-id: b107ec938dc1 - */ - -import * as z from "zod/v4"; -import { safeParse } from "../lib/schemas.js"; -import { Result as SafeParseResult } from "../types/fp.js"; -import { SDKValidationError } from "./errors/sdkvalidationerror.js"; - -export type Code = string | number; - -export type ChatErrorError = { - code: string | number | null; - message: string; - param?: string | null | undefined; - type?: string | null | undefined; -}; - -/** @internal */ -export const Code$inboundSchema: z.ZodType = z.union([ - z.string(), - z.number(), -]); - -export function codeFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => Code$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'Code' from JSON`, - ); -} - -/** @internal */ -export const ChatErrorError$inboundSchema: z.ZodType = - z.object({ - code: z.nullable(z.union([z.string(), z.number()])), - message: z.string(), - param: z.nullable(z.string()).optional(), - type: z.nullable(z.string()).optional(), - }); - -export function chatErrorErrorFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => ChatErrorError$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'ChatErrorError' from JSON`, - ); -} diff --git a/src/models/chatgenerationparams.ts b/src/models/chatgenerationparams.ts index 5de6d658..01d4e661 100644 --- a/src/models/chatgenerationparams.ts +++ b/src/models/chatgenerationparams.ts @@ -6,107 +6,329 @@ import * as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; import * as openEnums from "../types/enums.js"; -import { OpenEnum } from "../types/enums.js"; +import { ClosedEnum, OpenEnum } from "../types/enums.js"; import { ChatStreamOptions, ChatStreamOptions$Outbound, ChatStreamOptions$outboundSchema, } from "./chatstreamoptions.js"; +import { + DataCollection, + DataCollection$outboundSchema, +} from "./datacollection.js"; +import { + DebugOptions, + DebugOptions$Outbound, + 
DebugOptions$outboundSchema, +} from "./debugoptions.js"; import { Message, Message$Outbound, Message$outboundSchema, } from "./message.js"; import { - ProviderSortUnion, - ProviderSortUnion$Outbound, - ProviderSortUnion$outboundSchema, -} from "./providersortunion.js"; + PDFParserOptions, + PDFParserOptions$Outbound, + PDFParserOptions$outboundSchema, +} from "./pdfparseroptions.js"; +import { + PreferredMaxLatency, + PreferredMaxLatency$Outbound, + PreferredMaxLatency$outboundSchema, +} from "./preferredmaxlatency.js"; +import { + PreferredMinThroughput, + PreferredMinThroughput$Outbound, + PreferredMinThroughput$outboundSchema, +} from "./preferredminthroughput.js"; +import { ProviderName, ProviderName$outboundSchema } from "./providername.js"; +import { Quantization, Quantization$outboundSchema } from "./quantization.js"; import { - ReasoningSummaryVerbosity, - ReasoningSummaryVerbosity$outboundSchema, -} from "./reasoningsummaryverbosity.js"; + ResponseFormatJSONObject, + ResponseFormatJSONObject$Outbound, + ResponseFormatJSONObject$outboundSchema, +} from "./responseformatjsonobject.js"; import { ResponseFormatJSONSchema, ResponseFormatJSONSchema$Outbound, ResponseFormatJSONSchema$outboundSchema, } from "./responseformatjsonschema.js"; +import { + ResponseFormatText, + ResponseFormatText$Outbound, + ResponseFormatText$outboundSchema, +} from "./responseformattext.js"; import { ResponseFormatTextGrammar, ResponseFormatTextGrammar$Outbound, ResponseFormatTextGrammar$outboundSchema, } from "./responseformattextgrammar.js"; import { - Schema10, - Schema10$Outbound, - Schema10$outboundSchema, -} from "./schema10.js"; -import { - Schema17, - Schema17$Outbound, - Schema17$outboundSchema, -} from "./schema17.js"; + ResponseFormatTextPython, + ResponseFormatTextPython$Outbound, + ResponseFormatTextPython$outboundSchema, +} from "./responseformattextpython.js"; import { - Schema5, - Schema5$Outbound, - Schema5$outboundSchema, -} from "./schema5.js"; + ToolChoiceOption, 
+ ToolChoiceOption$Outbound, + ToolChoiceOption$outboundSchema, +} from "./toolchoiceoption.js"; import { ToolDefinitionJson, ToolDefinitionJson$Outbound, ToolDefinitionJson$outboundSchema, } from "./tooldefinitionjson.js"; +import { + WebSearchEngine, + WebSearchEngine$outboundSchema, +} from "./websearchengine.js"; + +export type ChatGenerationParamsOrder = ProviderName | string; + +export type ChatGenerationParamsOnly = ProviderName | string; + +export type ChatGenerationParamsIgnore = ProviderName | string; -export const Schema3 = { - Deny: "deny", - Allow: "allow", +export const ChatGenerationParamsSortEnum = { + Price: "price", + Throughput: "throughput", + Latency: "latency", } as const; -export type Schema3 = OpenEnum; - -export const Schema8 = { - Int4: "int4", - Int8: "int8", - Fp4: "fp4", - Fp6: "fp6", - Fp8: "fp8", - Fp16: "fp16", - Bf16: "bf16", - Fp32: "fp32", - Unknown: "unknown", +export type ChatGenerationParamsSortEnum = OpenEnum< + typeof ChatGenerationParamsSortEnum +>; + +export const ChatGenerationParamsProviderSortConfigEnum = { + Price: "price", + Throughput: "throughput", + Latency: "latency", } as const; -export type Schema8 = OpenEnum; +export type ChatGenerationParamsProviderSortConfigEnum = ClosedEnum< + typeof ChatGenerationParamsProviderSortConfigEnum +>; -export type Schema15 = { - p50?: number | null | undefined; - p75?: number | null | undefined; - p90?: number | null | undefined; - p99?: number | null | undefined; +/** + * The provider sorting strategy (price, throughput, latency) + */ +export const ChatGenerationParamsBy = { + Price: "price", + Throughput: "throughput", + Latency: "latency", +} as const; +/** + * The provider sorting strategy (price, throughput, latency) + */ +export type ChatGenerationParamsBy = OpenEnum; + +/** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. 
+ */ +export const ChatGenerationParamsPartition = { + Model: "model", + None: "none", +} as const; +/** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. + */ +export type ChatGenerationParamsPartition = OpenEnum< + typeof ChatGenerationParamsPartition +>; + +export type ChatGenerationParamsProviderSortConfig = { + /** + * The provider sorting strategy (price, throughput, latency) + */ + by?: ChatGenerationParamsBy | null | undefined; + /** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. + */ + partition?: ChatGenerationParamsPartition | null | undefined; }; -export type Schema15Union = number | Schema15; +export type ChatGenerationParamsProviderSortConfigUnion = + | ChatGenerationParamsProviderSortConfig + | ChatGenerationParamsProviderSortConfigEnum; + +/** + * The provider sorting strategy (price, throughput, latency) + */ +export const ChatGenerationParamsProviderSort = { + Price: "price", + Throughput: "throughput", + Latency: "latency", +} as const; +/** + * The provider sorting strategy (price, throughput, latency) + */ +export type ChatGenerationParamsProviderSort = OpenEnum< + typeof ChatGenerationParamsProviderSort +>; + +/** + * The sorting strategy to use for this request, if "order" is not specified. When set, no load balancing is performed. + */ +export type ChatGenerationParamsSortUnion = + | ChatGenerationParamsProviderSort + | ChatGenerationParamsProviderSortConfig + | ChatGenerationParamsProviderSortConfigEnum + | ChatGenerationParamsSortEnum; + +/** + * The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. 
+ */ +export type ChatGenerationParamsMaxPrice = { + /** + * Price per million prompt tokens + */ + prompt?: string | undefined; + completion?: string | undefined; + image?: string | undefined; + audio?: string | undefined; + request?: string | undefined; +}; -export type Schema0 = { +/** + * When multiple model providers are available, optionally indicate your routing preference. + */ +export type ChatGenerationParamsProvider = { + /** + * Whether to allow backup providers to serve requests + * + * @remarks + * - true: (default) when the primary provider (or your custom providers in "order") is unavailable, use the next best provider. + * - false: use only the primary/custom provider, and return the upstream error if it's unavailable. + */ allowFallbacks?: boolean | null | undefined; + /** + * Whether to filter providers to only those that support the parameters you've provided. If this setting is omitted or set to false, then providers will receive only the parameters they support, and ignore the rest. + */ requireParameters?: boolean | null | undefined; - dataCollection?: Schema3 | null | undefined; + /** + * Data collection setting. If no available model provider meets the requirement, your request will return an error. + * + * @remarks + * - allow: (default) allow providers which store user data non-transiently and may train on it + * + * - deny: use only providers which do not collect user data. + */ + dataCollection?: DataCollection | null | undefined; + /** + * Whether to restrict routing to only ZDR (Zero Data Retention) endpoints. When true, only endpoints that do not retain prompts will be used. + */ zdr?: boolean | null | undefined; + /** + * Whether to restrict routing to only models that allow text distillation. When true, only models where the author has allowed distillation will be used. 
+ */ enforceDistillableText?: boolean | null | undefined; - order?: Array | null | undefined; - only?: Array | null | undefined; - ignore?: Array | null | undefined; - quantizations?: Array | null | undefined; - sort?: ProviderSortUnion | null | undefined; - maxPrice?: Schema10 | undefined; - preferredMinThroughput?: number | Schema15 | null | undefined; - preferredMaxLatency?: number | Schema15 | null | undefined; + /** + * An ordered list of provider slugs. The router will attempt to use the first provider in the subset of this list that supports your requested model, and fall back to the next if it is unavailable. If no providers are available, the request will fail with an error message. + */ + order?: Array | null | undefined; + /** + * List of provider slugs to allow. If provided, this list is merged with your account-wide allowed provider settings for this request. + */ + only?: Array | null | undefined; + /** + * List of provider slugs to ignore. If provided, this list is merged with your account-wide ignored provider settings for this request. + */ + ignore?: Array | null | undefined; + /** + * A list of quantization levels to filter the provider by. + */ + quantizations?: Array | null | undefined; + sort?: + | ChatGenerationParamsProviderSort + | ChatGenerationParamsProviderSortConfig + | ChatGenerationParamsProviderSortConfigEnum + | ChatGenerationParamsSortEnum + | null + | undefined; + /** + * The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. + */ + maxPrice?: ChatGenerationParamsMaxPrice | undefined; + /** + * Preferred minimum throughput (in tokens per second). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints below the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold. 
+ */ + preferredMinThroughput?: PreferredMinThroughput | null | undefined; + /** + * Preferred maximum latency (in seconds). Can be a number (applies to p50) or an object with percentile-specific cutoffs. Endpoints above the threshold(s) may still be used, but are deprioritized in routing. When using fallback models, this may cause a fallback model to be used instead of the primary model if it meets the threshold. + */ + preferredMaxLatency?: PreferredMaxLatency | null | undefined; }; -export const Route = { - Fallback: "fallback", - Sort: "sort", -} as const; -export type Route = OpenEnum; +export type ChatGenerationParamsPluginResponseHealing = { + id: "response-healing"; + /** + * Set to false to disable the response-healing plugin for this request. Defaults to true. + */ + enabled?: boolean | undefined; +}; + +export type ChatGenerationParamsPluginFileParser = { + id: "file-parser"; + /** + * Set to false to disable the file-parser plugin for this request. Defaults to true. + */ + enabled?: boolean | undefined; + /** + * Options for PDF parsing. + */ + pdf?: PDFParserOptions | undefined; +}; + +export type ChatGenerationParamsPluginWeb = { + id: "web"; + /** + * Set to false to disable the web-search plugin for this request. Defaults to true. + */ + enabled?: boolean | undefined; + maxResults?: number | undefined; + searchPrompt?: string | undefined; + /** + * The search engine to use for web search. + */ + engine?: WebSearchEngine | undefined; +}; + +export type ChatGenerationParamsPluginModeration = { + id: "moderation"; +}; + +export type ChatGenerationParamsPluginAutoRouter = { + id: "auto-router"; + /** + * Set to false to disable the auto-router plugin for this request. Defaults to true. + */ + enabled?: boolean | undefined; + /** + * List of model patterns to filter which models the auto-router can route between. Supports wildcards (e.g., "anthropic/*" matches all Anthropic models). When not specified, uses the default supported models list. 
+ */ + allowedModels?: Array | undefined; +}; + +export type ChatGenerationParamsPluginUnion = + | ChatGenerationParamsPluginAutoRouter + | ChatGenerationParamsPluginModeration + | ChatGenerationParamsPluginWeb + | ChatGenerationParamsPluginFileParser + | ChatGenerationParamsPluginResponseHealing; + +/** + * Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. + */ +export type ChatGenerationParamsTrace = { + traceId?: string | undefined; + traceName?: string | undefined; + spanName?: string | undefined; + generationName?: string | undefined; + parentSpanId?: string | undefined; + additionalProperties?: { [k: string]: any | null } | undefined; +}; +/** + * Constrains effort on reasoning for reasoning models + */ export const Effort = { Xhigh: "xhigh", High: "high", @@ -115,39 +337,41 @@ export const Effort = { Minimal: "minimal", None: "none", } as const; +/** + * Constrains effort on reasoning for reasoning models + */ export type Effort = OpenEnum; +/** + * Configuration options for reasoning models + */ export type Reasoning = { + /** + * Constrains effort on reasoning for reasoning models + */ effort?: Effort | null | undefined; - summary?: ReasoningSummaryVerbosity | null | undefined; -}; - -export type ResponseFormatPython = { - type: "python"; -}; - -export type ResponseFormatJSONObject = { - type: "json_object"; -}; - -export type ResponseFormatText = { - type: "text"; + summary?: any | null | undefined; }; +/** + * Response format configuration + */ export type ResponseFormat = | ResponseFormatText | ResponseFormatJSONObject | ResponseFormatJSONSchema | ResponseFormatTextGrammar - | ResponseFormatPython; - -export type Stop = string | Array; + | ResponseFormatTextPython; -export type Debug = { - echoUpstreamBody?: boolean | undefined; -}; +/** + * Stop sequences (up to 4) + */ 
+export type Stop = string | Array | any; -export type ChatGenerationParamsImageConfig = string | number | Array; +export type ChatGenerationParamsImageConfig = + | string + | number + | Array; export const Modality = { Text: "text", @@ -155,247 +379,677 @@ export const Modality = { } as const; export type Modality = OpenEnum; +/** + * Chat completion request parameters + */ export type ChatGenerationParams = { - provider?: Schema0 | null | undefined; - plugins?: Array | undefined; - route?: Route | null | undefined; + /** + * When multiple model providers are available, optionally indicate your routing preference. + */ + provider?: ChatGenerationParamsProvider | null | undefined; + /** + * Plugins you want to enable for this request, including their settings. + */ + plugins?: + | Array< + | ChatGenerationParamsPluginAutoRouter + | ChatGenerationParamsPluginModeration + | ChatGenerationParamsPluginWeb + | ChatGenerationParamsPluginFileParser + | ChatGenerationParamsPluginResponseHealing + > + | undefined; + /** + * Unique user identifier + */ user?: string | undefined; + /** + * A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. + */ sessionId?: string | undefined; + /** + * Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. 
+ */ + trace?: ChatGenerationParamsTrace | undefined; + /** + * List of messages for the conversation + */ messages: Array; + /** + * Model to use for completion + */ model?: string | undefined; + /** + * Models to use for completion + */ models?: Array | undefined; + /** + * Frequency penalty (-2.0 to 2.0) + */ frequencyPenalty?: number | null | undefined; + /** + * Token logit bias adjustments + */ logitBias?: { [k: string]: number } | null | undefined; + /** + * Return log probabilities + */ logprobs?: boolean | null | undefined; + /** + * Number of top log probabilities to return (0-20) + */ topLogprobs?: number | null | undefined; + /** + * Maximum tokens in completion + */ maxCompletionTokens?: number | null | undefined; + /** + * Maximum tokens (deprecated, use max_completion_tokens) + */ maxTokens?: number | null | undefined; + /** + * Key-value pairs for additional object information (max 16 pairs, 64 char keys, 512 char values) + */ metadata?: { [k: string]: string } | undefined; + /** + * Presence penalty (-2.0 to 2.0) + */ presencePenalty?: number | null | undefined; + /** + * Configuration options for reasoning models + */ reasoning?: Reasoning | undefined; + /** + * Response format configuration + */ responseFormat?: | ResponseFormatText | ResponseFormatJSONObject | ResponseFormatJSONSchema | ResponseFormatTextGrammar - | ResponseFormatPython + | ResponseFormatTextPython | undefined; + /** + * Random seed for deterministic outputs + */ seed?: number | null | undefined; - stop?: string | Array | null | undefined; + /** + * Stop sequences (up to 4) + */ + stop?: string | Array | any | null | undefined; + /** + * Enable streaming response + */ stream?: boolean | undefined; + /** + * Streaming configuration options + */ streamOptions?: ChatStreamOptions | null | undefined; + /** + * Sampling temperature (0-2) + */ temperature?: number | null | undefined; - toolChoice?: any | undefined; + parallelToolCalls?: boolean | null | undefined; + /** + * Tool 
choice configuration + */ + toolChoice?: ToolChoiceOption | undefined; + /** + * Available tools for function calling + */ tools?: Array | undefined; + /** + * Nucleus sampling parameter (0-1) + */ topP?: number | null | undefined; - debug?: Debug | undefined; - imageConfig?: { [k: string]: string | number | Array } | undefined; + /** + * Debug options for inspecting request transformations (streaming only) + */ + debug?: DebugOptions | undefined; + /** + * Provider-specific image configuration options. Keys and values vary by model/provider. See https://openrouter.ai/docs/guides/overview/multimodal/image-generation for more details. + */ + imageConfig?: + | { [k: string]: string | number | Array } + | undefined; + /** + * Output modalities for the response. Supported values are "text" and "image". + */ modalities?: Array | undefined; }; /** @internal */ -export const Schema3$outboundSchema: z.ZodType = openEnums - .outboundSchema(Schema3); +export type ChatGenerationParamsOrder$Outbound = string | string; + +/** @internal */ +export const ChatGenerationParamsOrder$outboundSchema: z.ZodType< + ChatGenerationParamsOrder$Outbound, + ChatGenerationParamsOrder +> = z.union([ProviderName$outboundSchema, z.string()]); + +export function chatGenerationParamsOrderToJSON( + chatGenerationParamsOrder: ChatGenerationParamsOrder, +): string { + return JSON.stringify( + ChatGenerationParamsOrder$outboundSchema.parse(chatGenerationParamsOrder), + ); +} /** @internal */ -export const Schema8$outboundSchema: z.ZodType = openEnums - .outboundSchema(Schema8); +export type ChatGenerationParamsOnly$Outbound = string | string; /** @internal */ -export type Schema15$Outbound = { - p50?: number | null | undefined; - p75?: number | null | undefined; - p90?: number | null | undefined; - p99?: number | null | undefined; +export const ChatGenerationParamsOnly$outboundSchema: z.ZodType< + ChatGenerationParamsOnly$Outbound, + ChatGenerationParamsOnly +> = z.union([ProviderName$outboundSchema, 
z.string()]); + +export function chatGenerationParamsOnlyToJSON( + chatGenerationParamsOnly: ChatGenerationParamsOnly, +): string { + return JSON.stringify( + ChatGenerationParamsOnly$outboundSchema.parse(chatGenerationParamsOnly), + ); +} + +/** @internal */ +export type ChatGenerationParamsIgnore$Outbound = string | string; + +/** @internal */ +export const ChatGenerationParamsIgnore$outboundSchema: z.ZodType< + ChatGenerationParamsIgnore$Outbound, + ChatGenerationParamsIgnore +> = z.union([ProviderName$outboundSchema, z.string()]); + +export function chatGenerationParamsIgnoreToJSON( + chatGenerationParamsIgnore: ChatGenerationParamsIgnore, +): string { + return JSON.stringify( + ChatGenerationParamsIgnore$outboundSchema.parse(chatGenerationParamsIgnore), + ); +} + +/** @internal */ +export const ChatGenerationParamsSortEnum$outboundSchema: z.ZodType< + string, + ChatGenerationParamsSortEnum +> = openEnums.outboundSchema(ChatGenerationParamsSortEnum); + +/** @internal */ +export const ChatGenerationParamsProviderSortConfigEnum$outboundSchema: + z.ZodEnum = z.enum( + ChatGenerationParamsProviderSortConfigEnum, + ); + +/** @internal */ +export const ChatGenerationParamsBy$outboundSchema: z.ZodType< + string, + ChatGenerationParamsBy +> = openEnums.outboundSchema(ChatGenerationParamsBy); + +/** @internal */ +export const ChatGenerationParamsPartition$outboundSchema: z.ZodType< + string, + ChatGenerationParamsPartition +> = openEnums.outboundSchema(ChatGenerationParamsPartition); + +/** @internal */ +export type ChatGenerationParamsProviderSortConfig$Outbound = { + by?: string | null | undefined; + partition?: string | null | undefined; }; /** @internal */ -export const Schema15$outboundSchema: z.ZodType = z - .object({ - p50: z.nullable(z.number()).optional(), - p75: z.nullable(z.number()).optional(), - p90: z.nullable(z.number()).optional(), - p99: z.nullable(z.number()).optional(), - }); +export const ChatGenerationParamsProviderSortConfig$outboundSchema: 
z.ZodType< + ChatGenerationParamsProviderSortConfig$Outbound, + ChatGenerationParamsProviderSortConfig +> = z.object({ + by: z.nullable(ChatGenerationParamsBy$outboundSchema).optional(), + partition: z.nullable(ChatGenerationParamsPartition$outboundSchema) + .optional(), +}); -export function schema15ToJSON(schema15: Schema15): string { - return JSON.stringify(Schema15$outboundSchema.parse(schema15)); +export function chatGenerationParamsProviderSortConfigToJSON( + chatGenerationParamsProviderSortConfig: + ChatGenerationParamsProviderSortConfig, +): string { + return JSON.stringify( + ChatGenerationParamsProviderSortConfig$outboundSchema.parse( + chatGenerationParamsProviderSortConfig, + ), + ); } /** @internal */ -export type Schema15Union$Outbound = number | Schema15$Outbound; +export type ChatGenerationParamsProviderSortConfigUnion$Outbound = + | ChatGenerationParamsProviderSortConfig$Outbound + | string; /** @internal */ -export const Schema15Union$outboundSchema: z.ZodType< - Schema15Union$Outbound, - Schema15Union -> = z.union([z.number(), z.lazy(() => Schema15$outboundSchema)]); +export const ChatGenerationParamsProviderSortConfigUnion$outboundSchema: + z.ZodType< + ChatGenerationParamsProviderSortConfigUnion$Outbound, + ChatGenerationParamsProviderSortConfigUnion + > = z.union([ + z.lazy(() => ChatGenerationParamsProviderSortConfig$outboundSchema), + ChatGenerationParamsProviderSortConfigEnum$outboundSchema, + ]); -export function schema15UnionToJSON(schema15Union: Schema15Union): string { - return JSON.stringify(Schema15Union$outboundSchema.parse(schema15Union)); +export function chatGenerationParamsProviderSortConfigUnionToJSON( + chatGenerationParamsProviderSortConfigUnion: + ChatGenerationParamsProviderSortConfigUnion, +): string { + return JSON.stringify( + ChatGenerationParamsProviderSortConfigUnion$outboundSchema.parse( + chatGenerationParamsProviderSortConfigUnion, + ), + ); } /** @internal */ -export type Schema0$Outbound = { +export const 
ChatGenerationParamsProviderSort$outboundSchema: z.ZodType< + string, + ChatGenerationParamsProviderSort +> = openEnums.outboundSchema(ChatGenerationParamsProviderSort); + +/** @internal */ +export type ChatGenerationParamsSortUnion$Outbound = + | string + | ChatGenerationParamsProviderSortConfig$Outbound + | string + | string; + +/** @internal */ +export const ChatGenerationParamsSortUnion$outboundSchema: z.ZodType< + ChatGenerationParamsSortUnion$Outbound, + ChatGenerationParamsSortUnion +> = z.union([ + ChatGenerationParamsProviderSort$outboundSchema, + z.union([ + z.lazy(() => ChatGenerationParamsProviderSortConfig$outboundSchema), + ChatGenerationParamsProviderSortConfigEnum$outboundSchema, + ]), + ChatGenerationParamsSortEnum$outboundSchema, +]); + +export function chatGenerationParamsSortUnionToJSON( + chatGenerationParamsSortUnion: ChatGenerationParamsSortUnion, +): string { + return JSON.stringify( + ChatGenerationParamsSortUnion$outboundSchema.parse( + chatGenerationParamsSortUnion, + ), + ); +} + +/** @internal */ +export type ChatGenerationParamsMaxPrice$Outbound = { + prompt?: string | undefined; + completion?: string | undefined; + image?: string | undefined; + audio?: string | undefined; + request?: string | undefined; +}; + +/** @internal */ +export const ChatGenerationParamsMaxPrice$outboundSchema: z.ZodType< + ChatGenerationParamsMaxPrice$Outbound, + ChatGenerationParamsMaxPrice +> = z.object({ + prompt: z.string().optional(), + completion: z.string().optional(), + image: z.string().optional(), + audio: z.string().optional(), + request: z.string().optional(), +}); + +export function chatGenerationParamsMaxPriceToJSON( + chatGenerationParamsMaxPrice: ChatGenerationParamsMaxPrice, +): string { + return JSON.stringify( + ChatGenerationParamsMaxPrice$outboundSchema.parse( + chatGenerationParamsMaxPrice, + ), + ); +} + +/** @internal */ +export type ChatGenerationParamsProvider$Outbound = { allow_fallbacks?: boolean | null | undefined; 
require_parameters?: boolean | null | undefined; data_collection?: string | null | undefined; zdr?: boolean | null | undefined; enforce_distillable_text?: boolean | null | undefined; - order?: Array | null | undefined; - only?: Array | null | undefined; - ignore?: Array | null | undefined; + order?: Array | null | undefined; + only?: Array | null | undefined; + ignore?: Array | null | undefined; quantizations?: Array | null | undefined; - sort?: ProviderSortUnion$Outbound | null | undefined; - max_price?: Schema10$Outbound | undefined; - preferred_min_throughput?: number | Schema15$Outbound | null | undefined; - preferred_max_latency?: number | Schema15$Outbound | null | undefined; + sort?: + | string + | ChatGenerationParamsProviderSortConfig$Outbound + | string + | string + | null + | undefined; + max_price?: ChatGenerationParamsMaxPrice$Outbound | undefined; + preferred_min_throughput?: PreferredMinThroughput$Outbound | null | undefined; + preferred_max_latency?: PreferredMaxLatency$Outbound | null | undefined; }; /** @internal */ -export const Schema0$outboundSchema: z.ZodType = z - .object({ - allowFallbacks: z.nullable(z.boolean()).optional(), - requireParameters: z.nullable(z.boolean()).optional(), - dataCollection: z.nullable(Schema3$outboundSchema).optional(), - zdr: z.nullable(z.boolean()).optional(), - enforceDistillableText: z.nullable(z.boolean()).optional(), - order: z.nullable(z.array(Schema5$outboundSchema)).optional(), - only: z.nullable(z.array(Schema5$outboundSchema)).optional(), - ignore: z.nullable(z.array(Schema5$outboundSchema)).optional(), - quantizations: z.nullable(z.array(Schema8$outboundSchema)).optional(), - sort: z.nullable(ProviderSortUnion$outboundSchema).optional(), - maxPrice: Schema10$outboundSchema.optional(), - preferredMinThroughput: z.nullable( - z.union([z.number(), z.lazy(() => Schema15$outboundSchema)]), - ).optional(), - preferredMaxLatency: z.nullable( - z.union([z.number(), z.lazy(() => Schema15$outboundSchema)]), - 
).optional(), - }).transform((v) => { - return remap$(v, { - allowFallbacks: "allow_fallbacks", - requireParameters: "require_parameters", - dataCollection: "data_collection", - enforceDistillableText: "enforce_distillable_text", - maxPrice: "max_price", - preferredMinThroughput: "preferred_min_throughput", - preferredMaxLatency: "preferred_max_latency", - }); +export const ChatGenerationParamsProvider$outboundSchema: z.ZodType< + ChatGenerationParamsProvider$Outbound, + ChatGenerationParamsProvider +> = z.object({ + allowFallbacks: z.nullable(z.boolean()).optional(), + requireParameters: z.nullable(z.boolean()).optional(), + dataCollection: z.nullable(DataCollection$outboundSchema).optional(), + zdr: z.nullable(z.boolean()).optional(), + enforceDistillableText: z.nullable(z.boolean()).optional(), + order: z.nullable(z.array(z.union([ProviderName$outboundSchema, z.string()]))) + .optional(), + only: z.nullable(z.array(z.union([ProviderName$outboundSchema, z.string()]))) + .optional(), + ignore: z.nullable( + z.array(z.union([ProviderName$outboundSchema, z.string()])), + ).optional(), + quantizations: z.nullable(z.array(Quantization$outboundSchema)).optional(), + sort: z.nullable( + z.union([ + ChatGenerationParamsProviderSort$outboundSchema, + z.union([ + z.lazy(() => ChatGenerationParamsProviderSortConfig$outboundSchema), + ChatGenerationParamsProviderSortConfigEnum$outboundSchema, + ]), + ChatGenerationParamsSortEnum$outboundSchema, + ]), + ).optional(), + maxPrice: z.lazy(() => ChatGenerationParamsMaxPrice$outboundSchema) + .optional(), + preferredMinThroughput: z.nullable(PreferredMinThroughput$outboundSchema) + .optional(), + preferredMaxLatency: z.nullable(PreferredMaxLatency$outboundSchema) + .optional(), +}).transform((v) => { + return remap$(v, { + allowFallbacks: "allow_fallbacks", + requireParameters: "require_parameters", + dataCollection: "data_collection", + enforceDistillableText: "enforce_distillable_text", + maxPrice: "max_price", + 
preferredMinThroughput: "preferred_min_throughput", + preferredMaxLatency: "preferred_max_latency", }); +}); -export function schema0ToJSON(schema0: Schema0): string { - return JSON.stringify(Schema0$outboundSchema.parse(schema0)); +export function chatGenerationParamsProviderToJSON( + chatGenerationParamsProvider: ChatGenerationParamsProvider, +): string { + return JSON.stringify( + ChatGenerationParamsProvider$outboundSchema.parse( + chatGenerationParamsProvider, + ), + ); } /** @internal */ -export const Route$outboundSchema: z.ZodType = openEnums - .outboundSchema(Route); +export type ChatGenerationParamsPluginResponseHealing$Outbound = { + id: "response-healing"; + enabled?: boolean | undefined; +}; /** @internal */ -export const Effort$outboundSchema: z.ZodType = openEnums - .outboundSchema(Effort); +export const ChatGenerationParamsPluginResponseHealing$outboundSchema: + z.ZodType< + ChatGenerationParamsPluginResponseHealing$Outbound, + ChatGenerationParamsPluginResponseHealing + > = z.object({ + id: z.literal("response-healing"), + enabled: z.boolean().optional(), + }); + +export function chatGenerationParamsPluginResponseHealingToJSON( + chatGenerationParamsPluginResponseHealing: + ChatGenerationParamsPluginResponseHealing, +): string { + return JSON.stringify( + ChatGenerationParamsPluginResponseHealing$outboundSchema.parse( + chatGenerationParamsPluginResponseHealing, + ), + ); +} /** @internal */ -export type Reasoning$Outbound = { - effort?: string | null | undefined; - summary?: string | null | undefined; +export type ChatGenerationParamsPluginFileParser$Outbound = { + id: "file-parser"; + enabled?: boolean | undefined; + pdf?: PDFParserOptions$Outbound | undefined; }; /** @internal */ -export const Reasoning$outboundSchema: z.ZodType< - Reasoning$Outbound, - Reasoning +export const ChatGenerationParamsPluginFileParser$outboundSchema: z.ZodType< + ChatGenerationParamsPluginFileParser$Outbound, + ChatGenerationParamsPluginFileParser > = z.object({ - 
effort: z.nullable(Effort$outboundSchema).optional(), - summary: z.nullable(ReasoningSummaryVerbosity$outboundSchema).optional(), + id: z.literal("file-parser"), + enabled: z.boolean().optional(), + pdf: PDFParserOptions$outboundSchema.optional(), }); -export function reasoningToJSON(reasoning: Reasoning): string { - return JSON.stringify(Reasoning$outboundSchema.parse(reasoning)); +export function chatGenerationParamsPluginFileParserToJSON( + chatGenerationParamsPluginFileParser: ChatGenerationParamsPluginFileParser, +): string { + return JSON.stringify( + ChatGenerationParamsPluginFileParser$outboundSchema.parse( + chatGenerationParamsPluginFileParser, + ), + ); } /** @internal */ -export type ResponseFormatPython$Outbound = { - type: "python"; +export type ChatGenerationParamsPluginWeb$Outbound = { + id: "web"; + enabled?: boolean | undefined; + max_results?: number | undefined; + search_prompt?: string | undefined; + engine?: string | undefined; }; /** @internal */ -export const ResponseFormatPython$outboundSchema: z.ZodType< - ResponseFormatPython$Outbound, - ResponseFormatPython +export const ChatGenerationParamsPluginWeb$outboundSchema: z.ZodType< + ChatGenerationParamsPluginWeb$Outbound, + ChatGenerationParamsPluginWeb > = z.object({ - type: z.literal("python"), + id: z.literal("web"), + enabled: z.boolean().optional(), + maxResults: z.number().optional(), + searchPrompt: z.string().optional(), + engine: WebSearchEngine$outboundSchema.optional(), +}).transform((v) => { + return remap$(v, { + maxResults: "max_results", + searchPrompt: "search_prompt", + }); }); -export function responseFormatPythonToJSON( - responseFormatPython: ResponseFormatPython, +export function chatGenerationParamsPluginWebToJSON( + chatGenerationParamsPluginWeb: ChatGenerationParamsPluginWeb, ): string { return JSON.stringify( - ResponseFormatPython$outboundSchema.parse(responseFormatPython), + ChatGenerationParamsPluginWeb$outboundSchema.parse( + chatGenerationParamsPluginWeb, + ), 
); } /** @internal */ -export type ResponseFormatJSONObject$Outbound = { - type: "json_object"; +export type ChatGenerationParamsPluginModeration$Outbound = { + id: "moderation"; }; /** @internal */ -export const ResponseFormatJSONObject$outboundSchema: z.ZodType< - ResponseFormatJSONObject$Outbound, - ResponseFormatJSONObject +export const ChatGenerationParamsPluginModeration$outboundSchema: z.ZodType< + ChatGenerationParamsPluginModeration$Outbound, + ChatGenerationParamsPluginModeration > = z.object({ - type: z.literal("json_object"), + id: z.literal("moderation"), }); -export function responseFormatJSONObjectToJSON( - responseFormatJSONObject: ResponseFormatJSONObject, +export function chatGenerationParamsPluginModerationToJSON( + chatGenerationParamsPluginModeration: ChatGenerationParamsPluginModeration, ): string { return JSON.stringify( - ResponseFormatJSONObject$outboundSchema.parse(responseFormatJSONObject), + ChatGenerationParamsPluginModeration$outboundSchema.parse( + chatGenerationParamsPluginModeration, + ), ); } /** @internal */ -export type ResponseFormatText$Outbound = { - type: "text"; +export type ChatGenerationParamsPluginAutoRouter$Outbound = { + id: "auto-router"; + enabled?: boolean | undefined; + allowed_models?: Array | undefined; }; /** @internal */ -export const ResponseFormatText$outboundSchema: z.ZodType< - ResponseFormatText$Outbound, - ResponseFormatText +export const ChatGenerationParamsPluginAutoRouter$outboundSchema: z.ZodType< + ChatGenerationParamsPluginAutoRouter$Outbound, + ChatGenerationParamsPluginAutoRouter > = z.object({ - type: z.literal("text"), + id: z.literal("auto-router"), + enabled: z.boolean().optional(), + allowedModels: z.array(z.string()).optional(), +}).transform((v) => { + return remap$(v, { + allowedModels: "allowed_models", + }); }); -export function responseFormatTextToJSON( - responseFormatText: ResponseFormatText, +export function chatGenerationParamsPluginAutoRouterToJSON( + 
chatGenerationParamsPluginAutoRouter: ChatGenerationParamsPluginAutoRouter, ): string { return JSON.stringify( - ResponseFormatText$outboundSchema.parse(responseFormatText), + ChatGenerationParamsPluginAutoRouter$outboundSchema.parse( + chatGenerationParamsPluginAutoRouter, + ), ); } +/** @internal */ +export type ChatGenerationParamsPluginUnion$Outbound = + | ChatGenerationParamsPluginAutoRouter$Outbound + | ChatGenerationParamsPluginModeration$Outbound + | ChatGenerationParamsPluginWeb$Outbound + | ChatGenerationParamsPluginFileParser$Outbound + | ChatGenerationParamsPluginResponseHealing$Outbound; + +/** @internal */ +export const ChatGenerationParamsPluginUnion$outboundSchema: z.ZodType< + ChatGenerationParamsPluginUnion$Outbound, + ChatGenerationParamsPluginUnion +> = z.union([ + z.lazy(() => ChatGenerationParamsPluginAutoRouter$outboundSchema), + z.lazy(() => ChatGenerationParamsPluginModeration$outboundSchema), + z.lazy(() => ChatGenerationParamsPluginWeb$outboundSchema), + z.lazy(() => ChatGenerationParamsPluginFileParser$outboundSchema), + z.lazy(() => ChatGenerationParamsPluginResponseHealing$outboundSchema), +]); + +export function chatGenerationParamsPluginUnionToJSON( + chatGenerationParamsPluginUnion: ChatGenerationParamsPluginUnion, +): string { + return JSON.stringify( + ChatGenerationParamsPluginUnion$outboundSchema.parse( + chatGenerationParamsPluginUnion, + ), + ); +} + +/** @internal */ +export type ChatGenerationParamsTrace$Outbound = { + trace_id?: string | undefined; + trace_name?: string | undefined; + span_name?: string | undefined; + generation_name?: string | undefined; + parent_span_id?: string | undefined; + [additionalProperties: string]: unknown; +}; + +/** @internal */ +export const ChatGenerationParamsTrace$outboundSchema: z.ZodType< + ChatGenerationParamsTrace$Outbound, + ChatGenerationParamsTrace +> = z.object({ + traceId: z.string().optional(), + traceName: z.string().optional(), + spanName: z.string().optional(), + 
generationName: z.string().optional(), + parentSpanId: z.string().optional(), + additionalProperties: z.record(z.string(), z.nullable(z.any())).optional(), +}).transform((v) => { + return { + ...v.additionalProperties, + ...remap$(v, { + traceId: "trace_id", + traceName: "trace_name", + spanName: "span_name", + generationName: "generation_name", + parentSpanId: "parent_span_id", + additionalProperties: null, + }), + }; +}); + +export function chatGenerationParamsTraceToJSON( + chatGenerationParamsTrace: ChatGenerationParamsTrace, +): string { + return JSON.stringify( + ChatGenerationParamsTrace$outboundSchema.parse(chatGenerationParamsTrace), + ); +} + +/** @internal */ +export const Effort$outboundSchema: z.ZodType = openEnums + .outboundSchema(Effort); + +/** @internal */ +export type Reasoning$Outbound = { + effort?: string | null | undefined; + summary?: any | null | undefined; +}; + +/** @internal */ +export const Reasoning$outboundSchema: z.ZodType< + Reasoning$Outbound, + Reasoning +> = z.object({ + effort: z.nullable(Effort$outboundSchema).optional(), + summary: z.nullable(z.any()).optional(), +}); + +export function reasoningToJSON(reasoning: Reasoning): string { + return JSON.stringify(Reasoning$outboundSchema.parse(reasoning)); +} + /** @internal */ export type ResponseFormat$Outbound = | ResponseFormatText$Outbound | ResponseFormatJSONObject$Outbound | ResponseFormatJSONSchema$Outbound | ResponseFormatTextGrammar$Outbound - | ResponseFormatPython$Outbound; + | ResponseFormatTextPython$Outbound; /** @internal */ export const ResponseFormat$outboundSchema: z.ZodType< ResponseFormat$Outbound, ResponseFormat > = z.union([ - z.lazy(() => ResponseFormatText$outboundSchema), - z.lazy(() => ResponseFormatJSONObject$outboundSchema), + ResponseFormatText$outboundSchema, + ResponseFormatJSONObject$outboundSchema, ResponseFormatJSONSchema$outboundSchema, ResponseFormatTextGrammar$outboundSchema, - z.lazy(() => ResponseFormatPython$outboundSchema), + 
ResponseFormatTextPython$outboundSchema, ]); export function responseFormatToJSON(responseFormat: ResponseFormat): string { @@ -403,47 +1057,30 @@ export function responseFormatToJSON(responseFormat: ResponseFormat): string { } /** @internal */ -export type Stop$Outbound = string | Array; +export type Stop$Outbound = string | Array | any; /** @internal */ export const Stop$outboundSchema: z.ZodType = z.union([ z.string(), z.array(z.string()), + z.any(), ]); export function stopToJSON(stop: Stop): string { return JSON.stringify(Stop$outboundSchema.parse(stop)); } -/** @internal */ -export type Debug$Outbound = { - echo_upstream_body?: boolean | undefined; -}; - -/** @internal */ -export const Debug$outboundSchema: z.ZodType = z.object({ - echoUpstreamBody: z.boolean().optional(), -}).transform((v) => { - return remap$(v, { - echoUpstreamBody: "echo_upstream_body", - }); -}); - -export function debugToJSON(debug: Debug): string { - return JSON.stringify(Debug$outboundSchema.parse(debug)); -} - /** @internal */ export type ChatGenerationParamsImageConfig$Outbound = | string | number - | Array; + | Array; /** @internal */ export const ChatGenerationParamsImageConfig$outboundSchema: z.ZodType< ChatGenerationParamsImageConfig$Outbound, ChatGenerationParamsImageConfig -> = z.union([z.string(), z.number(), z.array(z.any())]); +> = z.union([z.string(), z.number(), z.array(z.nullable(z.any()))]); export function chatGenerationParamsImageConfigToJSON( chatGenerationParamsImageConfig: ChatGenerationParamsImageConfig, @@ -461,11 +1098,19 @@ export const Modality$outboundSchema: z.ZodType = openEnums /** @internal */ export type ChatGenerationParams$Outbound = { - provider?: Schema0$Outbound | null | undefined; - plugins?: Array | undefined; - route?: string | null | undefined; + provider?: ChatGenerationParamsProvider$Outbound | null | undefined; + plugins?: + | Array< + | ChatGenerationParamsPluginAutoRouter$Outbound + | ChatGenerationParamsPluginModeration$Outbound + | 
ChatGenerationParamsPluginWeb$Outbound + | ChatGenerationParamsPluginFileParser$Outbound + | ChatGenerationParamsPluginResponseHealing$Outbound + > + | undefined; user?: string | undefined; session_id?: string | undefined; + trace?: ChatGenerationParamsTrace$Outbound | undefined; messages: Array; model?: string | undefined; models?: Array | undefined; @@ -483,18 +1128,21 @@ export type ChatGenerationParams$Outbound = { | ResponseFormatJSONObject$Outbound | ResponseFormatJSONSchema$Outbound | ResponseFormatTextGrammar$Outbound - | ResponseFormatPython$Outbound + | ResponseFormatTextPython$Outbound | undefined; seed?: number | null | undefined; - stop?: string | Array | null | undefined; + stop?: string | Array | any | null | undefined; stream: boolean; stream_options?: ChatStreamOptions$Outbound | null | undefined; - temperature?: number | null | undefined; - tool_choice?: any | undefined; + temperature: number | null; + parallel_tool_calls?: boolean | null | undefined; + tool_choice?: ToolChoiceOption$Outbound | undefined; tools?: Array | undefined; - top_p?: number | null | undefined; - debug?: Debug$Outbound | undefined; - image_config?: { [k: string]: string | number | Array } | undefined; + top_p: number | null; + debug?: DebugOptions$Outbound | undefined; + image_config?: + | { [k: string]: string | number | Array } + | undefined; modalities?: Array | undefined; }; @@ -503,11 +1151,21 @@ export const ChatGenerationParams$outboundSchema: z.ZodType< ChatGenerationParams$Outbound, ChatGenerationParams > = z.object({ - provider: z.nullable(z.lazy(() => Schema0$outboundSchema)).optional(), - plugins: z.array(Schema17$outboundSchema).optional(), - route: z.nullable(Route$outboundSchema).optional(), + provider: z.nullable( + z.lazy(() => ChatGenerationParamsProvider$outboundSchema), + ).optional(), + plugins: z.array( + z.union([ + z.lazy(() => ChatGenerationParamsPluginAutoRouter$outboundSchema), + z.lazy(() => ChatGenerationParamsPluginModeration$outboundSchema), + 
z.lazy(() => ChatGenerationParamsPluginWeb$outboundSchema), + z.lazy(() => ChatGenerationParamsPluginFileParser$outboundSchema), + z.lazy(() => ChatGenerationParamsPluginResponseHealing$outboundSchema), + ]), + ).optional(), user: z.string().optional(), sessionId: z.string().optional(), + trace: z.lazy(() => ChatGenerationParamsTrace$outboundSchema).optional(), messages: z.array(Message$outboundSchema), model: z.string().optional(), models: z.array(z.string()).optional(), @@ -521,24 +1179,26 @@ export const ChatGenerationParams$outboundSchema: z.ZodType< presencePenalty: z.nullable(z.number()).optional(), reasoning: z.lazy(() => Reasoning$outboundSchema).optional(), responseFormat: z.union([ - z.lazy(() => ResponseFormatText$outboundSchema), - z.lazy(() => ResponseFormatJSONObject$outboundSchema), + ResponseFormatText$outboundSchema, + ResponseFormatJSONObject$outboundSchema, ResponseFormatJSONSchema$outboundSchema, ResponseFormatTextGrammar$outboundSchema, - z.lazy(() => ResponseFormatPython$outboundSchema), + ResponseFormatTextPython$outboundSchema, ]).optional(), seed: z.nullable(z.int()).optional(), - stop: z.nullable(z.union([z.string(), z.array(z.string())])).optional(), + stop: z.nullable(z.union([z.string(), z.array(z.string()), z.any()])) + .optional(), stream: z.boolean().default(false), streamOptions: z.nullable(ChatStreamOptions$outboundSchema).optional(), - temperature: z.nullable(z.number()).optional(), - toolChoice: z.any().optional(), + temperature: z.nullable(z.number().default(1)), + parallelToolCalls: z.nullable(z.boolean()).optional(), + toolChoice: ToolChoiceOption$outboundSchema.optional(), tools: z.array(ToolDefinitionJson$outboundSchema).optional(), - topP: z.nullable(z.number()).optional(), - debug: z.lazy(() => Debug$outboundSchema).optional(), + topP: z.nullable(z.number().default(1)), + debug: DebugOptions$outboundSchema.optional(), imageConfig: z.record( z.string(), - z.union([z.string(), z.number(), z.array(z.any())]), + 
z.union([z.string(), z.number(), z.array(z.nullable(z.any()))]), ).optional(), modalities: z.array(Modality$outboundSchema).optional(), }).transform((v) => { @@ -552,6 +1212,7 @@ export const ChatGenerationParams$outboundSchema: z.ZodType< presencePenalty: "presence_penalty", responseFormat: "response_format", streamOptions: "stream_options", + parallelToolCalls: "parallel_tool_calls", toolChoice: "tool_choice", topP: "top_p", imageConfig: "image_config", diff --git a/src/models/chatgenerationtokenusage.ts b/src/models/chatgenerationtokenusage.ts index a2c30cf8..4509571b 100644 --- a/src/models/chatgenerationtokenusage.ts +++ b/src/models/chatgenerationtokenusage.ts @@ -9,25 +9,73 @@ import { safeParse } from "../lib/schemas.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +/** + * Detailed completion token usage + */ export type CompletionTokensDetails = { + /** + * Tokens used for reasoning + */ reasoningTokens?: number | null | undefined; + /** + * Tokens used for audio output + */ audioTokens?: number | null | undefined; + /** + * Accepted prediction tokens + */ acceptedPredictionTokens?: number | null | undefined; + /** + * Rejected prediction tokens + */ rejectedPredictionTokens?: number | null | undefined; }; +/** + * Detailed prompt token usage + */ export type PromptTokensDetails = { + /** + * Cached prompt tokens + */ cachedTokens?: number | undefined; + /** + * Tokens written to cache. Only returned for models with explicit caching and cache write pricing. 
+ */ cacheWriteTokens?: number | undefined; + /** + * Audio input tokens + */ audioTokens?: number | undefined; + /** + * Video input tokens + */ videoTokens?: number | undefined; }; +/** + * Token usage statistics + */ export type ChatGenerationTokenUsage = { + /** + * Number of tokens in the completion + */ completionTokens: number; + /** + * Number of tokens in the prompt + */ promptTokens: number; + /** + * Total number of tokens + */ totalTokens: number; + /** + * Detailed completion token usage + */ completionTokensDetails?: CompletionTokensDetails | null | undefined; + /** + * Detailed prompt token usage + */ promptTokensDetails?: PromptTokensDetails | null | undefined; }; diff --git a/src/models/chatmessagecontentitem.ts b/src/models/chatmessagecontentitem.ts index f1548c67..096ae163 100644 --- a/src/models/chatmessagecontentitem.ts +++ b/src/models/chatmessagecontentitem.ts @@ -30,14 +30,66 @@ import { ChatMessageContentItemVideo$Outbound, ChatMessageContentItemVideo$outboundSchema, } from "./chatmessagecontentitemvideo.js"; +import { + ChatMessageContentItemVideoLegacy, + ChatMessageContentItemVideoLegacy$inboundSchema, + ChatMessageContentItemVideoLegacy$Outbound, + ChatMessageContentItemVideoLegacy$outboundSchema, +} from "./chatmessagecontentitemvideolegacy.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +export type ChatMessageContentItem1 = + | ChatMessageContentItemVideoLegacy + | ChatMessageContentItemVideo; + +/** + * Content part for chat completion messages + */ export type ChatMessageContentItem = | ChatMessageContentItemText | ChatMessageContentItemImage | ChatMessageContentItemAudio - | (ChatMessageContentItemVideo & { type: "input_video" }) - | (ChatMessageContentItemVideo & { type: "video_url" }); + | ChatMessageContentItemVideoLegacy + | ChatMessageContentItemVideo; + +/** @internal */ +export const ChatMessageContentItem1$inboundSchema: z.ZodType< + ChatMessageContentItem1, + unknown +> = z.union([ + 
ChatMessageContentItemVideoLegacy$inboundSchema, + ChatMessageContentItemVideo$inboundSchema, +]); +/** @internal */ +export type ChatMessageContentItem1$Outbound = + | ChatMessageContentItemVideoLegacy$Outbound + | ChatMessageContentItemVideo$Outbound; + +/** @internal */ +export const ChatMessageContentItem1$outboundSchema: z.ZodType< + ChatMessageContentItem1$Outbound, + ChatMessageContentItem1 +> = z.union([ + ChatMessageContentItemVideoLegacy$outboundSchema, + ChatMessageContentItemVideo$outboundSchema, +]); + +export function chatMessageContentItem1ToJSON( + chatMessageContentItem1: ChatMessageContentItem1, +): string { + return JSON.stringify( + ChatMessageContentItem1$outboundSchema.parse(chatMessageContentItem1), + ); +} +export function chatMessageContentItem1FromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => ChatMessageContentItem1$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'ChatMessageContentItem1' from JSON`, + ); +} /** @internal */ export const ChatMessageContentItem$inboundSchema: z.ZodType< @@ -47,20 +99,18 @@ export const ChatMessageContentItem$inboundSchema: z.ZodType< ChatMessageContentItemText$inboundSchema, ChatMessageContentItemImage$inboundSchema, ChatMessageContentItemAudio$inboundSchema, - ChatMessageContentItemVideo$inboundSchema.and( - z.object({ type: z.literal("input_video") }), - ), - z.lazy(() => ChatMessageContentItemVideo$inboundSchema).and( - z.object({ type: z.literal("video_url") }), - ), + z.union([ + ChatMessageContentItemVideoLegacy$inboundSchema, + ChatMessageContentItemVideo$inboundSchema, + ]), ]); /** @internal */ export type ChatMessageContentItem$Outbound = | ChatMessageContentItemText$Outbound | ChatMessageContentItemImage$Outbound | ChatMessageContentItemAudio$Outbound - | (ChatMessageContentItemVideo$Outbound & { type: "input_video" }) - | (ChatMessageContentItemVideo$Outbound & { type: "video_url" }); + | ChatMessageContentItemVideoLegacy$Outbound + | 
ChatMessageContentItemVideo$Outbound; /** @internal */ export const ChatMessageContentItem$outboundSchema: z.ZodType< @@ -70,12 +120,10 @@ export const ChatMessageContentItem$outboundSchema: z.ZodType< ChatMessageContentItemText$outboundSchema, ChatMessageContentItemImage$outboundSchema, ChatMessageContentItemAudio$outboundSchema, - ChatMessageContentItemVideo$outboundSchema.and( - z.object({ type: z.literal("input_video") }), - ), - z.lazy(() => ChatMessageContentItemVideo$outboundSchema).and( - z.object({ type: z.literal("video_url") }), - ), + z.union([ + ChatMessageContentItemVideoLegacy$outboundSchema, + ChatMessageContentItemVideo$outboundSchema, + ]), ]); export function chatMessageContentItemToJSON( diff --git a/src/models/chatmessagecontentitemaudio.ts b/src/models/chatmessagecontentitemaudio.ts index afcdc4c1..a4694768 100644 --- a/src/models/chatmessagecontentitemaudio.ts +++ b/src/models/chatmessagecontentitemaudio.ts @@ -6,19 +6,45 @@ import * as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; import { safeParse } from "../lib/schemas.js"; +import { ClosedEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +export const ChatMessageContentItemAudioType = { + InputAudio: "input_audio", +} as const; +export type ChatMessageContentItemAudioType = ClosedEnum< + typeof ChatMessageContentItemAudioType +>; + export type ChatMessageContentItemAudioInputAudio = { + /** + * Base64 encoded audio data + */ data: string; + /** + * Audio format (e.g., wav, mp3, flac, m4a, ogg, aiff, aac, pcm16, pcm24). Supported formats vary by provider. + */ format: string; }; +/** + * Audio input content part. Supported audio formats vary by provider. 
+ */ export type ChatMessageContentItemAudio = { - type: "input_audio"; + type: ChatMessageContentItemAudioType; inputAudio: ChatMessageContentItemAudioInputAudio; }; +/** @internal */ +export const ChatMessageContentItemAudioType$inboundSchema: z.ZodEnum< + typeof ChatMessageContentItemAudioType +> = z.enum(ChatMessageContentItemAudioType); +/** @internal */ +export const ChatMessageContentItemAudioType$outboundSchema: z.ZodEnum< + typeof ChatMessageContentItemAudioType +> = ChatMessageContentItemAudioType$inboundSchema; + /** @internal */ export const ChatMessageContentItemAudioInputAudio$inboundSchema: z.ZodType< ChatMessageContentItemAudioInputAudio, @@ -67,7 +93,7 @@ export const ChatMessageContentItemAudio$inboundSchema: z.ZodType< ChatMessageContentItemAudio, unknown > = z.object({ - type: z.literal("input_audio"), + type: ChatMessageContentItemAudioType$inboundSchema, input_audio: z.lazy(() => ChatMessageContentItemAudioInputAudio$inboundSchema ), @@ -78,7 +104,7 @@ export const ChatMessageContentItemAudio$inboundSchema: z.ZodType< }); /** @internal */ export type ChatMessageContentItemAudio$Outbound = { - type: "input_audio"; + type: string; input_audio: ChatMessageContentItemAudioInputAudio$Outbound; }; @@ -87,7 +113,7 @@ export const ChatMessageContentItemAudio$outboundSchema: z.ZodType< ChatMessageContentItemAudio$Outbound, ChatMessageContentItemAudio > = z.object({ - type: z.literal("input_audio"), + type: ChatMessageContentItemAudioType$outboundSchema, inputAudio: z.lazy(() => ChatMessageContentItemAudioInputAudio$outboundSchema ), diff --git a/src/models/chatmessagecontentitemcachecontrol.ts b/src/models/chatmessagecontentitemcachecontrol.ts index c368f953..e92d5063 100644 --- a/src/models/chatmessagecontentitemcachecontrol.ts +++ b/src/models/chatmessagecontentitemcachecontrol.ts @@ -6,21 +6,40 @@ import * as z from "zod/v4"; import { safeParse } from "../lib/schemas.js"; import * as openEnums from "../types/enums.js"; -import { OpenEnum } from 
"../types/enums.js"; +import { ClosedEnum, OpenEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +export const ChatMessageContentItemCacheControlType = { + Ephemeral: "ephemeral", +} as const; +export type ChatMessageContentItemCacheControlType = ClosedEnum< + typeof ChatMessageContentItemCacheControlType +>; + export const Ttl = { Fivem: "5m", Oneh: "1h", } as const; export type Ttl = OpenEnum; +/** + * Cache control for the content part + */ export type ChatMessageContentItemCacheControl = { - type: "ephemeral"; + type: ChatMessageContentItemCacheControlType; ttl?: Ttl | undefined; }; +/** @internal */ +export const ChatMessageContentItemCacheControlType$inboundSchema: z.ZodEnum< + typeof ChatMessageContentItemCacheControlType +> = z.enum(ChatMessageContentItemCacheControlType); +/** @internal */ +export const ChatMessageContentItemCacheControlType$outboundSchema: z.ZodEnum< + typeof ChatMessageContentItemCacheControlType +> = ChatMessageContentItemCacheControlType$inboundSchema; + /** @internal */ export const Ttl$inboundSchema: z.ZodType = openEnums .inboundSchema(Ttl); @@ -33,12 +52,12 @@ export const ChatMessageContentItemCacheControl$inboundSchema: z.ZodType< ChatMessageContentItemCacheControl, unknown > = z.object({ - type: z.literal("ephemeral"), + type: ChatMessageContentItemCacheControlType$inboundSchema, ttl: Ttl$inboundSchema.optional(), }); /** @internal */ export type ChatMessageContentItemCacheControl$Outbound = { - type: "ephemeral"; + type: string; ttl?: string | undefined; }; @@ -47,7 +66,7 @@ export const ChatMessageContentItemCacheControl$outboundSchema: z.ZodType< ChatMessageContentItemCacheControl$Outbound, ChatMessageContentItemCacheControl > = z.object({ - type: z.literal("ephemeral"), + type: ChatMessageContentItemCacheControlType$outboundSchema, ttl: Ttl$outboundSchema.optional(), }); diff --git 
a/src/models/chatmessagecontentitemimage.ts b/src/models/chatmessagecontentitemimage.ts index f5d965e2..e9f3cc51 100644 --- a/src/models/chatmessagecontentitemimage.ts +++ b/src/models/chatmessagecontentitemimage.ts @@ -7,29 +7,60 @@ import * as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; import { safeParse } from "../lib/schemas.js"; import * as openEnums from "../types/enums.js"; -import { OpenEnum } from "../types/enums.js"; +import { ClosedEnum, OpenEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +export const ChatMessageContentItemImageType = { + ImageUrl: "image_url", +} as const; +export type ChatMessageContentItemImageType = ClosedEnum< + typeof ChatMessageContentItemImageType +>; + +/** + * Image detail level for vision models + */ export const ChatMessageContentItemImageDetail = { Auto: "auto", Low: "low", High: "high", } as const; +/** + * Image detail level for vision models + */ export type ChatMessageContentItemImageDetail = OpenEnum< typeof ChatMessageContentItemImageDetail >; export type ChatMessageContentItemImageImageUrl = { + /** + * URL of the image (data: URLs supported) + */ url: string; + /** + * Image detail level for vision models + */ detail?: ChatMessageContentItemImageDetail | undefined; }; +/** + * Image content part for vision models + */ export type ChatMessageContentItemImage = { - type: "image_url"; + type: ChatMessageContentItemImageType; imageUrl: ChatMessageContentItemImageImageUrl; }; +/** @internal */ +export const ChatMessageContentItemImageType$inboundSchema: z.ZodEnum< + typeof ChatMessageContentItemImageType +> = z.enum(ChatMessageContentItemImageType); +/** @internal */ +export const ChatMessageContentItemImageType$outboundSchema: z.ZodEnum< + typeof ChatMessageContentItemImageType +> = ChatMessageContentItemImageType$inboundSchema; + /** @internal */ export const 
ChatMessageContentItemImageDetail$inboundSchema: z.ZodType< ChatMessageContentItemImageDetail, @@ -89,7 +120,7 @@ export const ChatMessageContentItemImage$inboundSchema: z.ZodType< ChatMessageContentItemImage, unknown > = z.object({ - type: z.literal("image_url"), + type: ChatMessageContentItemImageType$inboundSchema, image_url: z.lazy(() => ChatMessageContentItemImageImageUrl$inboundSchema), }).transform((v) => { return remap$(v, { @@ -98,7 +129,7 @@ export const ChatMessageContentItemImage$inboundSchema: z.ZodType< }); /** @internal */ export type ChatMessageContentItemImage$Outbound = { - type: "image_url"; + type: string; image_url: ChatMessageContentItemImageImageUrl$Outbound; }; @@ -107,7 +138,7 @@ export const ChatMessageContentItemImage$outboundSchema: z.ZodType< ChatMessageContentItemImage$Outbound, ChatMessageContentItemImage > = z.object({ - type: z.literal("image_url"), + type: ChatMessageContentItemImageType$outboundSchema, imageUrl: z.lazy(() => ChatMessageContentItemImageImageUrl$outboundSchema), }).transform((v) => { return remap$(v, { diff --git a/src/models/chatmessagecontentitemtext.ts b/src/models/chatmessagecontentitemtext.ts index aa527d53..c5dd160c 100644 --- a/src/models/chatmessagecontentitemtext.ts +++ b/src/models/chatmessagecontentitemtext.ts @@ -6,6 +6,7 @@ import * as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; import { safeParse } from "../lib/schemas.js"; +import { ClosedEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { ChatMessageContentItemCacheControl, @@ -15,18 +16,40 @@ import { } from "./chatmessagecontentitemcachecontrol.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +export const ChatMessageContentItemTextType = { + Text: "text", +} as const; +export type ChatMessageContentItemTextType = ClosedEnum< + typeof ChatMessageContentItemTextType +>; + +/** + * Text content part + */ export type ChatMessageContentItemText = { - 
type: "text"; + type: ChatMessageContentItemTextType; text: string; + /** + * Cache control for the content part + */ cacheControl?: ChatMessageContentItemCacheControl | undefined; }; +/** @internal */ +export const ChatMessageContentItemTextType$inboundSchema: z.ZodEnum< + typeof ChatMessageContentItemTextType +> = z.enum(ChatMessageContentItemTextType); +/** @internal */ +export const ChatMessageContentItemTextType$outboundSchema: z.ZodEnum< + typeof ChatMessageContentItemTextType +> = ChatMessageContentItemTextType$inboundSchema; + /** @internal */ export const ChatMessageContentItemText$inboundSchema: z.ZodType< ChatMessageContentItemText, unknown > = z.object({ - type: z.literal("text"), + type: ChatMessageContentItemTextType$inboundSchema, text: z.string(), cache_control: ChatMessageContentItemCacheControl$inboundSchema.optional(), }).transform((v) => { @@ -36,7 +59,7 @@ export const ChatMessageContentItemText$inboundSchema: z.ZodType< }); /** @internal */ export type ChatMessageContentItemText$Outbound = { - type: "text"; + type: string; text: string; cache_control?: ChatMessageContentItemCacheControl$Outbound | undefined; }; @@ -46,7 +69,7 @@ export const ChatMessageContentItemText$outboundSchema: z.ZodType< ChatMessageContentItemText$Outbound, ChatMessageContentItemText > = z.object({ - type: z.literal("text"), + type: ChatMessageContentItemTextType$outboundSchema, text: z.string(), cacheControl: ChatMessageContentItemCacheControl$outboundSchema.optional(), }).transform((v) => { diff --git a/src/models/chatmessagecontentitemvideo.ts b/src/models/chatmessagecontentitemvideo.ts index 9847a917..e3f56e04 100644 --- a/src/models/chatmessagecontentitemvideo.ts +++ b/src/models/chatmessagecontentitemvideo.ts @@ -8,213 +8,55 @@ import { remap as remap$ } from "../lib/primitives.js"; import { safeParse } from "../lib/schemas.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; - 
-export type VideoUrl2 = { - url: string; -}; - -export type ChatMessageContentItemVideoVideoURL = { +import { + VideoInput, + VideoInput$inboundSchema, + VideoInput$Outbound, + VideoInput$outboundSchema, +} from "./videoinput.js"; + +/** + * Video input content part + */ +export type ChatMessageContentItemVideo = { type: "video_url"; - videoUrl: VideoUrl2; + /** + * Video input object + */ + videoUrl: VideoInput; }; -export type VideoUrl1 = { - url: string; -}; - -export type ChatMessageContentItemVideoInputVideo = { - type: "input_video"; - videoUrl: VideoUrl1; -}; - -export type ChatMessageContentItemVideo = - | ChatMessageContentItemVideoInputVideo - | ChatMessageContentItemVideoVideoURL; - -/** @internal */ -export const VideoUrl2$inboundSchema: z.ZodType = z.object({ - url: z.string(), -}); -/** @internal */ -export type VideoUrl2$Outbound = { - url: string; -}; - -/** @internal */ -export const VideoUrl2$outboundSchema: z.ZodType< - VideoUrl2$Outbound, - VideoUrl2 -> = z.object({ - url: z.string(), -}); - -export function videoUrl2ToJSON(videoUrl2: VideoUrl2): string { - return JSON.stringify(VideoUrl2$outboundSchema.parse(videoUrl2)); -} -export function videoUrl2FromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => VideoUrl2$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'VideoUrl2' from JSON`, - ); -} - /** @internal */ -export const ChatMessageContentItemVideoVideoURL$inboundSchema: z.ZodType< - ChatMessageContentItemVideoVideoURL, +export const ChatMessageContentItemVideo$inboundSchema: z.ZodType< + ChatMessageContentItemVideo, unknown > = z.object({ type: z.literal("video_url"), - video_url: z.lazy(() => VideoUrl2$inboundSchema), + video_url: VideoInput$inboundSchema, }).transform((v) => { return remap$(v, { "video_url": "videoUrl", }); }); /** @internal */ -export type ChatMessageContentItemVideoVideoURL$Outbound = { +export type ChatMessageContentItemVideo$Outbound = { type: "video_url"; - video_url: 
VideoUrl2$Outbound; + video_url: VideoInput$Outbound; }; /** @internal */ -export const ChatMessageContentItemVideoVideoURL$outboundSchema: z.ZodType< - ChatMessageContentItemVideoVideoURL$Outbound, - ChatMessageContentItemVideoVideoURL +export const ChatMessageContentItemVideo$outboundSchema: z.ZodType< + ChatMessageContentItemVideo$Outbound, + ChatMessageContentItemVideo > = z.object({ type: z.literal("video_url"), - videoUrl: z.lazy(() => VideoUrl2$outboundSchema), -}).transform((v) => { - return remap$(v, { - videoUrl: "video_url", - }); -}); - -export function chatMessageContentItemVideoVideoURLToJSON( - chatMessageContentItemVideoVideoURL: ChatMessageContentItemVideoVideoURL, -): string { - return JSON.stringify( - ChatMessageContentItemVideoVideoURL$outboundSchema.parse( - chatMessageContentItemVideoVideoURL, - ), - ); -} -export function chatMessageContentItemVideoVideoURLFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => - ChatMessageContentItemVideoVideoURL$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'ChatMessageContentItemVideoVideoURL' from JSON`, - ); -} - -/** @internal */ -export const VideoUrl1$inboundSchema: z.ZodType = z.object({ - url: z.string(), -}); -/** @internal */ -export type VideoUrl1$Outbound = { - url: string; -}; - -/** @internal */ -export const VideoUrl1$outboundSchema: z.ZodType< - VideoUrl1$Outbound, - VideoUrl1 -> = z.object({ - url: z.string(), -}); - -export function videoUrl1ToJSON(videoUrl1: VideoUrl1): string { - return JSON.stringify(VideoUrl1$outboundSchema.parse(videoUrl1)); -} -export function videoUrl1FromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => VideoUrl1$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'VideoUrl1' from JSON`, - ); -} - -/** @internal */ -export const ChatMessageContentItemVideoInputVideo$inboundSchema: z.ZodType< - ChatMessageContentItemVideoInputVideo, - unknown -> = z.object({ - type: 
z.literal("input_video"), - video_url: z.lazy(() => VideoUrl1$inboundSchema), -}).transform((v) => { - return remap$(v, { - "video_url": "videoUrl", - }); -}); -/** @internal */ -export type ChatMessageContentItemVideoInputVideo$Outbound = { - type: "input_video"; - video_url: VideoUrl1$Outbound; -}; - -/** @internal */ -export const ChatMessageContentItemVideoInputVideo$outboundSchema: z.ZodType< - ChatMessageContentItemVideoInputVideo$Outbound, - ChatMessageContentItemVideoInputVideo -> = z.object({ - type: z.literal("input_video"), - videoUrl: z.lazy(() => VideoUrl1$outboundSchema), + videoUrl: VideoInput$outboundSchema, }).transform((v) => { return remap$(v, { videoUrl: "video_url", }); }); -export function chatMessageContentItemVideoInputVideoToJSON( - chatMessageContentItemVideoInputVideo: ChatMessageContentItemVideoInputVideo, -): string { - return JSON.stringify( - ChatMessageContentItemVideoInputVideo$outboundSchema.parse( - chatMessageContentItemVideoInputVideo, - ), - ); -} -export function chatMessageContentItemVideoInputVideoFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => - ChatMessageContentItemVideoInputVideo$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'ChatMessageContentItemVideoInputVideo' from JSON`, - ); -} - -/** @internal */ -export const ChatMessageContentItemVideo$inboundSchema: z.ZodType< - ChatMessageContentItemVideo, - unknown -> = z.union([ - z.lazy(() => ChatMessageContentItemVideoInputVideo$inboundSchema), - z.lazy(() => ChatMessageContentItemVideoVideoURL$inboundSchema), -]); -/** @internal */ -export type ChatMessageContentItemVideo$Outbound = - | ChatMessageContentItemVideoInputVideo$Outbound - | ChatMessageContentItemVideoVideoURL$Outbound; - -/** @internal */ -export const ChatMessageContentItemVideo$outboundSchema: z.ZodType< - ChatMessageContentItemVideo$Outbound, - ChatMessageContentItemVideo -> = z.union([ - z.lazy(() => 
ChatMessageContentItemVideoInputVideo$outboundSchema), - z.lazy(() => ChatMessageContentItemVideoVideoURL$outboundSchema), -]); - export function chatMessageContentItemVideoToJSON( chatMessageContentItemVideo: ChatMessageContentItemVideo, ): string { diff --git a/src/models/chatmessagecontentitemvideolegacy.ts b/src/models/chatmessagecontentitemvideolegacy.ts new file mode 100644 index 00000000..59253ba5 --- /dev/null +++ b/src/models/chatmessagecontentitemvideolegacy.ts @@ -0,0 +1,79 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. + * @generated-id: 785ee29d8430 + */ + +import * as z from "zod/v4"; +import { remap as remap$ } from "../lib/primitives.js"; +import { safeParse } from "../lib/schemas.js"; +import { Result as SafeParseResult } from "../types/fp.js"; +import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +import { + VideoInput, + VideoInput$inboundSchema, + VideoInput$Outbound, + VideoInput$outboundSchema, +} from "./videoinput.js"; + +/** + * Video input content part (legacy format - deprecated) + * + * @deprecated class: This will be removed in a future release, please migrate away from it as soon as possible. 
+ */ +export type ChatMessageContentItemVideoLegacy = { + type: "input_video"; + /** + * Video input object + */ + videoUrl: VideoInput; +}; + +/** @internal */ +export const ChatMessageContentItemVideoLegacy$inboundSchema: z.ZodType< + ChatMessageContentItemVideoLegacy, + unknown +> = z.object({ + type: z.literal("input_video"), + video_url: VideoInput$inboundSchema, +}).transform((v) => { + return remap$(v, { + "video_url": "videoUrl", + }); +}); +/** @internal */ +export type ChatMessageContentItemVideoLegacy$Outbound = { + type: "input_video"; + video_url: VideoInput$Outbound; +}; + +/** @internal */ +export const ChatMessageContentItemVideoLegacy$outboundSchema: z.ZodType< + ChatMessageContentItemVideoLegacy$Outbound, + ChatMessageContentItemVideoLegacy +> = z.object({ + type: z.literal("input_video"), + videoUrl: VideoInput$outboundSchema, +}).transform((v) => { + return remap$(v, { + videoUrl: "video_url", + }); +}); + +export function chatMessageContentItemVideoLegacyToJSON( + chatMessageContentItemVideoLegacy: ChatMessageContentItemVideoLegacy, +): string { + return JSON.stringify( + ChatMessageContentItemVideoLegacy$outboundSchema.parse( + chatMessageContentItemVideoLegacy, + ), + ); +} +export function chatMessageContentItemVideoLegacyFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => ChatMessageContentItemVideoLegacy$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'ChatMessageContentItemVideoLegacy' from JSON`, + ); +} diff --git a/src/models/chatmessagetokenlogprob.ts b/src/models/chatmessagetokenlogprob.ts index f1d1922a..a07ea564 100644 --- a/src/models/chatmessagetokenlogprob.ts +++ b/src/models/chatmessagetokenlogprob.ts @@ -15,10 +15,25 @@ export type ChatMessageTokenLogprobTopLogprob = { bytes: Array | null; }; +/** + * Token log probability information + */ export type ChatMessageTokenLogprob = { + /** + * The token + */ token: string; + /** + * Log probability of the token + */ logprob: 
number; + /** + * UTF-8 bytes of the token + */ bytes: Array | null; + /** + * Top alternative tokens with probabilities + */ topLogprobs: Array; }; diff --git a/src/models/chatmessagetokenlogprobs.ts b/src/models/chatmessagetokenlogprobs.ts index d898f22a..96f62bae 100644 --- a/src/models/chatmessagetokenlogprobs.ts +++ b/src/models/chatmessagetokenlogprobs.ts @@ -12,8 +12,17 @@ import { } from "./chatmessagetokenlogprob.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +/** + * Log probabilities for the completion + */ export type ChatMessageTokenLogprobs = { + /** + * Log probabilities for content tokens + */ content: Array | null; + /** + * Log probabilities for refusal tokens + */ refusal: Array | null; }; diff --git a/src/models/chatmessagetoolcall.ts b/src/models/chatmessagetoolcall.ts index c6678412..baf5c348 100644 --- a/src/models/chatmessagetoolcall.ts +++ b/src/models/chatmessagetoolcall.ts @@ -5,20 +5,49 @@ import * as z from "zod/v4"; import { safeParse } from "../lib/schemas.js"; +import { ClosedEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +export const ChatMessageToolCallType = { + Function: "function", +} as const; +export type ChatMessageToolCallType = ClosedEnum< + typeof ChatMessageToolCallType +>; + export type ChatMessageToolCallFunction = { + /** + * Function name to call + */ name: string; + /** + * Function arguments as JSON string + */ arguments: string; }; +/** + * Tool call made by the assistant + */ export type ChatMessageToolCall = { + /** + * Tool call identifier + */ id: string; - type: "function"; + type: ChatMessageToolCallType; function: ChatMessageToolCallFunction; }; +/** @internal */ +export const ChatMessageToolCallType$inboundSchema: z.ZodEnum< + typeof ChatMessageToolCallType +> = z.enum(ChatMessageToolCallType); +/** @internal */ +export const ChatMessageToolCallType$outboundSchema: 
z.ZodEnum< + typeof ChatMessageToolCallType +> = ChatMessageToolCallType$inboundSchema; + /** @internal */ export const ChatMessageToolCallFunction$inboundSchema: z.ZodType< ChatMessageToolCallFunction, @@ -67,13 +96,13 @@ export const ChatMessageToolCall$inboundSchema: z.ZodType< unknown > = z.object({ id: z.string(), - type: z.literal("function"), + type: ChatMessageToolCallType$inboundSchema, function: z.lazy(() => ChatMessageToolCallFunction$inboundSchema), }); /** @internal */ export type ChatMessageToolCall$Outbound = { id: string; - type: "function"; + type: string; function: ChatMessageToolCallFunction$Outbound; }; @@ -83,7 +112,7 @@ export const ChatMessageToolCall$outboundSchema: z.ZodType< ChatMessageToolCall > = z.object({ id: z.string(), - type: z.literal("function"), + type: ChatMessageToolCallType$outboundSchema, function: z.lazy(() => ChatMessageToolCallFunction$outboundSchema), }); diff --git a/src/models/chatresponse.ts b/src/models/chatresponse.ts index e1e9ede1..e53973ba 100644 --- a/src/models/chatresponse.ts +++ b/src/models/chatresponse.ts @@ -6,6 +6,7 @@ import * as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; import { safeParse } from "../lib/schemas.js"; +import { ClosedEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { ChatGenerationTokenUsage, @@ -17,16 +18,47 @@ import { } from "./chatresponsechoice.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +export const ChatResponseObject = { + ChatCompletion: "chat.completion", +} as const; +export type ChatResponseObject = ClosedEnum; + +/** + * Chat completion response + */ export type ChatResponse = { + /** + * Unique completion identifier + */ id: string; + /** + * List of completion choices + */ choices: Array; + /** + * Unix timestamp of creation + */ created: number; + /** + * Model used for completion + */ model: string; - object: "chat.completion"; + object: ChatResponseObject; + /** 
+ * System fingerprint + */ systemFingerprint?: string | null | undefined; + /** + * Token usage statistics + */ usage?: ChatGenerationTokenUsage | undefined; }; +/** @internal */ +export const ChatResponseObject$inboundSchema: z.ZodEnum< + typeof ChatResponseObject +> = z.enum(ChatResponseObject); + /** @internal */ export const ChatResponse$inboundSchema: z.ZodType = z .object({ @@ -34,7 +66,7 @@ export const ChatResponse$inboundSchema: z.ZodType = z choices: z.array(ChatResponseChoice$inboundSchema), created: z.number(), model: z.string(), - object: z.literal("chat.completion"), + object: ChatResponseObject$inboundSchema, system_fingerprint: z.nullable(z.string()).optional(), usage: ChatGenerationTokenUsage$inboundSchema.optional(), }).transform((v) => { diff --git a/src/models/chatresponsechoice.ts b/src/models/chatresponsechoice.ts index 1d46bed8..67d1a419 100644 --- a/src/models/chatresponsechoice.ts +++ b/src/models/chatresponsechoice.ts @@ -11,20 +11,28 @@ import { AssistantMessage, AssistantMessage$inboundSchema, } from "./assistantmessage.js"; -import { - ChatCompletionFinishReason, - ChatCompletionFinishReason$inboundSchema, -} from "./chatcompletionfinishreason.js"; import { ChatMessageTokenLogprobs, ChatMessageTokenLogprobs$inboundSchema, } from "./chatmessagetokenlogprobs.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +/** + * Chat completion choice + */ export type ChatResponseChoice = { - finishReason: ChatCompletionFinishReason | null; + finishReason?: any | null | undefined; + /** + * Choice index + */ index: number; + /** + * Assistant message for requests and responses + */ message: AssistantMessage; + /** + * Log probabilities for the completion + */ logprobs?: ChatMessageTokenLogprobs | null | undefined; }; @@ -33,7 +41,7 @@ export const ChatResponseChoice$inboundSchema: z.ZodType< ChatResponseChoice, unknown > = z.object({ - finish_reason: z.nullable(ChatCompletionFinishReason$inboundSchema), + finish_reason: 
z.nullable(z.any()).optional(), index: z.number(), message: AssistantMessage$inboundSchema, logprobs: z.nullable(ChatMessageTokenLogprobs$inboundSchema).optional(), diff --git a/src/models/chatstreamingchoice.ts b/src/models/chatstreamingchoice.ts index 43fa1d4c..376c6663 100644 --- a/src/models/chatstreamingchoice.ts +++ b/src/models/chatstreamingchoice.ts @@ -7,10 +7,6 @@ import * as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; import { safeParse } from "../lib/schemas.js"; import { Result as SafeParseResult } from "../types/fp.js"; -import { - ChatCompletionFinishReason, - ChatCompletionFinishReason$inboundSchema, -} from "./chatcompletionfinishreason.js"; import { ChatMessageTokenLogprobs, ChatMessageTokenLogprobs$inboundSchema, @@ -21,10 +17,22 @@ import { } from "./chatstreamingmessagechunk.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +/** + * Streaming completion choice chunk + */ export type ChatStreamingChoice = { + /** + * Delta changes in streaming response + */ delta: ChatStreamingMessageChunk; - finishReason: ChatCompletionFinishReason | null; + finishReason?: any | null | undefined; + /** + * Choice index + */ index: number; + /** + * Log probabilities for the completion + */ logprobs?: ChatMessageTokenLogprobs | null | undefined; }; @@ -34,7 +42,7 @@ export const ChatStreamingChoice$inboundSchema: z.ZodType< unknown > = z.object({ delta: ChatStreamingMessageChunk$inboundSchema, - finish_reason: z.nullable(ChatCompletionFinishReason$inboundSchema), + finish_reason: z.nullable(z.any()).optional(), index: z.number(), logprobs: z.nullable(ChatMessageTokenLogprobs$inboundSchema).optional(), }).transform((v) => { diff --git a/src/models/chatstreamingmessagechunk.ts b/src/models/chatstreamingmessagechunk.ts index fb1171fa..343bf68c 100644 --- a/src/models/chatstreamingmessagechunk.ts +++ b/src/models/chatstreamingmessagechunk.ts @@ -13,22 +13,52 @@ import { ChatStreamingMessageToolCall$inboundSchema, 
} from "./chatstreamingmessagetoolcall.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; -import { Schema19, Schema19$inboundSchema } from "./schema19.js"; +import { + ReasoningDetailUnion, + ReasoningDetailUnion$inboundSchema, +} from "./reasoningdetailunion.js"; +/** + * The role of the message author + */ export const ChatStreamingMessageChunkRole = { Assistant: "assistant", } as const; +/** + * The role of the message author + */ export type ChatStreamingMessageChunkRole = ClosedEnum< typeof ChatStreamingMessageChunkRole >; +/** + * Delta changes in streaming response + */ export type ChatStreamingMessageChunk = { + /** + * The role of the message author + */ role?: ChatStreamingMessageChunkRole | undefined; + /** + * Message content delta + */ content?: string | null | undefined; + /** + * Reasoning content delta + */ reasoning?: string | null | undefined; + /** + * Refusal message delta + */ refusal?: string | null | undefined; + /** + * Tool calls delta + */ toolCalls?: Array | undefined; - reasoningDetails?: Array | undefined; + /** + * Reasoning details for extended thinking models + */ + reasoningDetails?: Array | undefined; }; /** @internal */ @@ -46,7 +76,7 @@ export const ChatStreamingMessageChunk$inboundSchema: z.ZodType< reasoning: z.nullable(z.string()).optional(), refusal: z.nullable(z.string()).optional(), tool_calls: z.array(ChatStreamingMessageToolCall$inboundSchema).optional(), - reasoning_details: z.array(Schema19$inboundSchema).optional(), + reasoning_details: z.array(ReasoningDetailUnion$inboundSchema).optional(), }).transform((v) => { return remap$(v, { "tool_calls": "toolCalls", diff --git a/src/models/chatstreamingmessagetoolcall.ts b/src/models/chatstreamingmessagetoolcall.ts index fc2c3662..bd4187bb 100644 --- a/src/models/chatstreamingmessagetoolcall.ts +++ b/src/models/chatstreamingmessagetoolcall.ts @@ -5,21 +5,64 @@ import * as z from "zod/v4"; import { safeParse } from "../lib/schemas.js"; +import { 
ClosedEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +/** + * Tool call type + */ +export const ChatStreamingMessageToolCallType = { + Function: "function", +} as const; +/** + * Tool call type + */ +export type ChatStreamingMessageToolCallType = ClosedEnum< + typeof ChatStreamingMessageToolCallType +>; + +/** + * Function call details + */ export type ChatStreamingMessageToolCallFunction = { + /** + * Function name + */ name?: string | undefined; + /** + * Function arguments as JSON string + */ arguments?: string | undefined; }; +/** + * Tool call delta for streaming responses + */ export type ChatStreamingMessageToolCall = { + /** + * Tool call index in the array + */ index: number; + /** + * Tool call identifier + */ id?: string | undefined; - type?: "function" | undefined; + /** + * Tool call type + */ + type?: ChatStreamingMessageToolCallType | undefined; + /** + * Function call details + */ function?: ChatStreamingMessageToolCallFunction | undefined; }; +/** @internal */ +export const ChatStreamingMessageToolCallType$inboundSchema: z.ZodEnum< + typeof ChatStreamingMessageToolCallType +> = z.enum(ChatStreamingMessageToolCallType); + /** @internal */ export const ChatStreamingMessageToolCallFunction$inboundSchema: z.ZodType< ChatStreamingMessageToolCallFunction, @@ -47,7 +90,7 @@ export const ChatStreamingMessageToolCall$inboundSchema: z.ZodType< > = z.object({ index: z.number(), id: z.string().optional(), - type: z.literal("function").optional(), + type: ChatStreamingMessageToolCallType$inboundSchema.optional(), function: z.lazy(() => ChatStreamingMessageToolCallFunction$inboundSchema) .optional(), }); diff --git a/src/models/chatstreamingresponsechunk.ts b/src/models/chatstreamingresponsechunk.ts index b3e9a660..9ed8b45e 100644 --- a/src/models/chatstreamingresponsechunk.ts +++ b/src/models/chatstreamingresponsechunk.ts @@ -6,6 +6,7 @@ import * 
as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; import { safeParse } from "../lib/schemas.js"; +import { ClosedEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { ChatGenerationTokenUsage, @@ -17,57 +18,95 @@ import { } from "./chatstreamingchoice.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; -export type ChatStreamingResponseChunkError = { +export const ChatStreamingResponseChunkObject = { + ChatCompletionChunk: "chat.completion.chunk", +} as const; +export type ChatStreamingResponseChunkObject = ClosedEnum< + typeof ChatStreamingResponseChunkObject +>; + +/** + * Error information + */ +export type ErrorT = { + /** + * Error message + */ message: string; + /** + * Error code + */ code: number; }; -export type ChatStreamingResponseChunkData = { +/** + * Streaming chat completion chunk + */ +export type ChatStreamingResponseChunk = { + /** + * Unique chunk identifier + */ id: string; + /** + * List of streaming chunk choices + */ choices: Array; + /** + * Unix timestamp of creation + */ created: number; + /** + * Model used for completion + */ model: string; - object: "chat.completion.chunk"; + object: ChatStreamingResponseChunkObject; + /** + * System fingerprint + */ systemFingerprint?: string | null | undefined; - error?: ChatStreamingResponseChunkError | undefined; + /** + * Error information + */ + error?: ErrorT | undefined; + /** + * Token usage statistics + */ usage?: ChatGenerationTokenUsage | undefined; }; -export type ChatStreamingResponseChunk = { - data: ChatStreamingResponseChunkData; -}; +/** @internal */ +export const ChatStreamingResponseChunkObject$inboundSchema: z.ZodEnum< + typeof ChatStreamingResponseChunkObject +> = z.enum(ChatStreamingResponseChunkObject); /** @internal */ -export const ChatStreamingResponseChunkError$inboundSchema: z.ZodType< - ChatStreamingResponseChunkError, - unknown -> = z.object({ +export const ErrorT$inboundSchema: 
z.ZodType = z.object({ message: z.string(), code: z.number(), }); -export function chatStreamingResponseChunkErrorFromJSON( +export function errorFromJSON( jsonString: string, -): SafeParseResult { +): SafeParseResult { return safeParse( jsonString, - (x) => ChatStreamingResponseChunkError$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'ChatStreamingResponseChunkError' from JSON`, + (x) => ErrorT$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'ErrorT' from JSON`, ); } /** @internal */ -export const ChatStreamingResponseChunkData$inboundSchema: z.ZodType< - ChatStreamingResponseChunkData, +export const ChatStreamingResponseChunk$inboundSchema: z.ZodType< + ChatStreamingResponseChunk, unknown > = z.object({ id: z.string(), choices: z.array(ChatStreamingChoice$inboundSchema), created: z.number(), model: z.string(), - object: z.literal("chat.completion.chunk"), + object: ChatStreamingResponseChunkObject$inboundSchema, system_fingerprint: z.nullable(z.string()).optional(), - error: z.lazy(() => ChatStreamingResponseChunkError$inboundSchema).optional(), + error: z.lazy(() => ErrorT$inboundSchema).optional(), usage: ChatGenerationTokenUsage$inboundSchema.optional(), }).transform((v) => { return remap$(v, { @@ -75,35 +114,6 @@ export const ChatStreamingResponseChunkData$inboundSchema: z.ZodType< }); }); -export function chatStreamingResponseChunkDataFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => ChatStreamingResponseChunkData$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'ChatStreamingResponseChunkData' from JSON`, - ); -} - -/** @internal */ -export const ChatStreamingResponseChunk$inboundSchema: z.ZodType< - ChatStreamingResponseChunk, - unknown -> = z.object({ - data: z.string().transform((v, ctx) => { - try { - return JSON.parse(v); - } catch (err) { - ctx.addIssue({ - input: v, - code: "custom", - message: `malformed json: ${err}`, - }); - return z.NEVER; - } - }).pipe(z.lazy(() => 
ChatStreamingResponseChunkData$inboundSchema)), -}); - export function chatStreamingResponseChunkFromJSON( jsonString: string, ): SafeParseResult { diff --git a/src/models/chatstreamoptions.ts b/src/models/chatstreamoptions.ts index aee57eed..41edb3f3 100644 --- a/src/models/chatstreamoptions.ts +++ b/src/models/chatstreamoptions.ts @@ -6,7 +6,15 @@ import * as z from "zod/v4"; import { remap as remap$ } from "../lib/primitives.js"; +/** + * Streaming configuration options + */ export type ChatStreamOptions = { + /** + * Deprecated: This field has no effect. Full usage details are always included. + * + * @deprecated field: This will be removed in a future release, please migrate away from it as soon as possible. + */ includeUsage?: boolean | undefined; }; diff --git a/src/models/debugoptions.ts b/src/models/debugoptions.ts new file mode 100644 index 00000000..3047d39c --- /dev/null +++ b/src/models/debugoptions.ts @@ -0,0 +1,38 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. + * @generated-id: 12d8bc710ea7 + */ + +import * as z from "zod/v4"; +import { remap as remap$ } from "../lib/primitives.js"; + +/** + * Debug options for inspecting request transformations (streaming only) + */ +export type DebugOptions = { + /** + * If true, includes the transformed upstream request body in a debug chunk at the start of the stream. Only works with streaming mode. 
+ */ + echoUpstreamBody?: boolean | undefined; +}; + +/** @internal */ +export type DebugOptions$Outbound = { + echo_upstream_body?: boolean | undefined; +}; + +/** @internal */ +export const DebugOptions$outboundSchema: z.ZodType< + DebugOptions$Outbound, + DebugOptions +> = z.object({ + echoUpstreamBody: z.boolean().optional(), +}).transform((v) => { + return remap$(v, { + echoUpstreamBody: "echo_upstream_body", + }); +}); + +export function debugOptionsToJSON(debugOptions: DebugOptions): string { + return JSON.stringify(DebugOptions$outboundSchema.parse(debugOptions)); +} diff --git a/src/models/developermessage.ts b/src/models/developermessage.ts index d7ccdd5d..2324f40e 100644 --- a/src/models/developermessage.ts +++ b/src/models/developermessage.ts @@ -10,13 +10,25 @@ import { ChatMessageContentItemText$outboundSchema, } from "./chatmessagecontentitemtext.js"; +/** + * Developer message content + */ export type DeveloperMessageContent = | string | Array; +/** + * Developer message + */ export type DeveloperMessage = { role: "developer"; + /** + * Developer message content + */ content: string | Array; + /** + * Optional name for the developer message + */ name?: string | undefined; }; diff --git a/src/models/errors/chaterror.ts b/src/models/errors/chaterror.ts deleted file mode 100644 index 33cb7e5a..00000000 --- a/src/models/errors/chaterror.ts +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. - * @generated-id: a3b18e48f494 - */ - -import * as z from "zod/v4"; -import * as models from "../index.js"; -import { OpenRouterError } from "./openroutererror.js"; - -export type ChatErrorData = { - error: models.ChatErrorError; -}; - -export class ChatError extends OpenRouterError { - error: models.ChatErrorError; - - /** The original data that was passed to this error instance. 
*/ - data$: ChatErrorData; - - constructor( - err: ChatErrorData, - httpMeta: { response: Response; request: Request; body: string }, - ) { - const message = err.error?.message - || `API error occurred: ${JSON.stringify(err)}`; - super(message, httpMeta); - this.data$ = err; - this.error = err.error; - - this.name = "ChatError"; - } -} - -/** @internal */ -export const ChatError$inboundSchema: z.ZodType = z.object({ - error: z.lazy(() => models.ChatErrorError$inboundSchema), - request$: z.custom(x => x instanceof Request), - response$: z.custom(x => x instanceof Response), - body$: z.string(), -}) - .transform((v) => { - return new ChatError(v, { - request: v.request$, - response: v.response$, - body: v.body$, - }); - }); diff --git a/src/models/errors/index.ts b/src/models/errors/index.ts index c5ddaff0..173a02ae 100644 --- a/src/models/errors/index.ts +++ b/src/models/errors/index.ts @@ -5,7 +5,6 @@ export * from "./badgatewayresponseerror.js"; export * from "./badrequestresponseerror.js"; -export * from "./chaterror.js"; export * from "./edgenetworktimeoutresponseerror.js"; export * from "./forbiddenresponseerror.js"; export * from "./httpclienterrors.js"; diff --git a/src/models/index.ts b/src/models/index.ts index 47bb140b..62a6e792 100644 --- a/src/models/index.ts +++ b/src/models/index.ts @@ -5,10 +5,10 @@ export * from "./activityitem.js"; export * from "./assistantmessage.js"; +export * from "./assistantmessageimages.js"; export * from "./badgatewayresponseerrordata.js"; export * from "./badrequestresponseerrordata.js"; export * from "./chatcompletionfinishreason.js"; -export * from "./chaterror.js"; export * from "./chatgenerationparams.js"; export * from "./chatgenerationtokenusage.js"; export * from "./chatmessagecontentitem.js"; @@ -17,6 +17,7 @@ export * from "./chatmessagecontentitemcachecontrol.js"; export * from "./chatmessagecontentitemimage.js"; export * from "./chatmessagecontentitemtext.js"; export * from "./chatmessagecontentitemvideo.js"; 
+export * from "./chatmessagecontentitemvideolegacy.js"; export * from "./chatmessagetokenlogprob.js"; export * from "./chatmessagetokenlogprobs.js"; export * from "./chatmessagetoolcall.js"; @@ -29,6 +30,7 @@ export * from "./chatstreamingresponsechunk.js"; export * from "./chatstreamoptions.js"; export * from "./createchargerequest.js"; export * from "./datacollection.js"; +export * from "./debugoptions.js"; export * from "./defaultparameters.js"; export * from "./developermessage.js"; export * from "./edgenetworktimeoutresponseerrordata.js"; @@ -109,17 +111,23 @@ export * from "./provideroverloadedresponseerrordata.js"; export * from "./providerpreferences.js"; export * from "./providersort.js"; export * from "./providersortconfig.js"; -export * from "./providersortunion.js"; export * from "./publicendpoint.js"; export * from "./publicpricing.js"; export * from "./quantization.js"; +export * from "./reasoningdetailencrypted.js"; +export * from "./reasoningdetailsummary.js"; +export * from "./reasoningdetailtext.js"; +export * from "./reasoningdetailunion.js"; export * from "./reasoningsummarytext.js"; export * from "./reasoningsummaryverbosity.js"; export * from "./reasoningtextcontent.js"; export * from "./requesttimeoutresponseerrordata.js"; +export * from "./responseformatjsonobject.js"; export * from "./responseformatjsonschema.js"; +export * from "./responseformattext.js"; export * from "./responseformattextconfig.js"; export * from "./responseformattextgrammar.js"; +export * from "./responseformattextpython.js"; export * from "./responseinputaudio.js"; export * from "./responseinputfile.js"; export * from "./responseinputimage.js"; @@ -127,7 +135,6 @@ export * from "./responseinputtext.js"; export * from "./responseinputvideo.js"; export * from "./responseoutputtext.js"; export * from "./responseserrorfield.js"; -export * from "./responsesformatjsonobject.js"; export * from "./responsesformattext.js"; export * from 
"./responsesformattextjsonschemaconfig.js"; export * from "./responsesimagegenerationcall.js"; @@ -141,15 +148,11 @@ export * from "./responsessearchcontextsize.js"; export * from "./responseswebsearchcalloutput.js"; export * from "./responseswebsearchuserlocation.js"; export * from "./responsetextconfig.js"; -export * from "./schema10.js"; -export * from "./schema14.js"; -export * from "./schema17.js"; -export * from "./schema19.js"; -export * from "./schema5.js"; export * from "./security.js"; export * from "./serviceunavailableresponseerrordata.js"; export * from "./systemmessage.js"; export * from "./toolcallstatus.js"; +export * from "./toolchoiceoption.js"; export * from "./tooldefinitionjson.js"; export * from "./toolresponsemessage.js"; export * from "./toomanyrequestsresponseerrordata.js"; @@ -158,6 +161,7 @@ export * from "./unauthorizedresponseerrordata.js"; export * from "./unprocessableentityresponseerrordata.js"; export * from "./urlcitation.js"; export * from "./usermessage.js"; +export * from "./videoinput.js"; export * from "./websearchengine.js"; export * from "./websearchpreviewtooluserlocation.js"; export * from "./websearchstatus.js"; diff --git a/src/models/jsonschemaconfig.ts b/src/models/jsonschemaconfig.ts index 4ea4757a..6ea4e14b 100644 --- a/src/models/jsonschemaconfig.ts +++ b/src/models/jsonschemaconfig.ts @@ -5,10 +5,25 @@ import * as z from "zod/v4"; +/** + * JSON Schema configuration object + */ export type JSONSchemaConfig = { + /** + * Schema name (a-z, A-Z, 0-9, underscores, dashes, max 64 chars) + */ name: string; + /** + * Schema description for the model + */ description?: string | undefined; - schema?: { [k: string]: any } | undefined; + /** + * JSON Schema object + */ + schema?: { [k: string]: any | null } | undefined; + /** + * Enable strict schema adherence + */ strict?: boolean | null | undefined; }; @@ -16,7 +31,7 @@ export type JSONSchemaConfig = { export type JSONSchemaConfig$Outbound = { name: string; description?: 
string | undefined; - schema?: { [k: string]: any } | undefined; + schema?: { [k: string]: any | null } | undefined; strict?: boolean | null | undefined; }; @@ -27,7 +42,7 @@ export const JSONSchemaConfig$outboundSchema: z.ZodType< > = z.object({ name: z.string(), description: z.string().optional(), - schema: z.record(z.string(), z.any()).optional(), + schema: z.record(z.string(), z.nullable(z.any())).optional(), strict: z.nullable(z.boolean()).optional(), }); diff --git a/src/models/message.ts b/src/models/message.ts index ab7c5ef5..70fe60e3 100644 --- a/src/models/message.ts +++ b/src/models/message.ts @@ -30,11 +30,14 @@ import { UserMessage$outboundSchema, } from "./usermessage.js"; +/** + * Chat completion message with role-based discrimination + */ export type Message = | SystemMessage | UserMessage | DeveloperMessage - | AssistantMessage + | (AssistantMessage & { role: "assistant" }) | ToolResponseMessage; /** @internal */ @@ -42,7 +45,7 @@ export type Message$Outbound = | SystemMessage$Outbound | UserMessage$Outbound | DeveloperMessage$Outbound - | AssistantMessage$Outbound + | (AssistantMessage$Outbound & { role: "assistant" }) | ToolResponseMessage$Outbound; /** @internal */ @@ -51,7 +54,9 @@ export const Message$outboundSchema: z.ZodType = z SystemMessage$outboundSchema, UserMessage$outboundSchema, DeveloperMessage$outboundSchema, - AssistantMessage$outboundSchema, + AssistantMessage$outboundSchema.and( + z.object({ role: z.literal("assistant") }), + ), ToolResponseMessage$outboundSchema, ]); diff --git a/src/models/modelscountresponse.ts b/src/models/modelscountresponse.ts index cb6c47ea..950a3f92 100644 --- a/src/models/modelscountresponse.ts +++ b/src/models/modelscountresponse.ts @@ -11,7 +11,7 @@ import { SDKValidationError } from "./errors/sdkvalidationerror.js"; /** * Model count data */ -export type ModelsCountResponseData = { +export type Data = { /** * Total number of available models */ @@ -25,24 +25,21 @@ export type ModelsCountResponse = { 
/** * Model count data */ - data: ModelsCountResponseData; + data: Data; }; /** @internal */ -export const ModelsCountResponseData$inboundSchema: z.ZodType< - ModelsCountResponseData, - unknown -> = z.object({ +export const Data$inboundSchema: z.ZodType = z.object({ count: z.number(), }); -export function modelsCountResponseDataFromJSON( +export function dataFromJSON( jsonString: string, -): SafeParseResult { +): SafeParseResult { return safeParse( jsonString, - (x) => ModelsCountResponseData$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'ModelsCountResponseData' from JSON`, + (x) => Data$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'Data' from JSON`, ); } @@ -51,7 +48,7 @@ export const ModelsCountResponse$inboundSchema: z.ZodType< ModelsCountResponse, unknown > = z.object({ - data: z.lazy(() => ModelsCountResponseData$inboundSchema), + data: z.lazy(() => Data$inboundSchema), }); export function modelsCountResponseFromJSON( diff --git a/src/models/namedtoolchoice.ts b/src/models/namedtoolchoice.ts index 1603f2c8..a65a4e55 100644 --- a/src/models/namedtoolchoice.ts +++ b/src/models/namedtoolchoice.ts @@ -4,16 +4,33 @@ */ import * as z from "zod/v4"; +import { ClosedEnum } from "../types/enums.js"; + +export const NamedToolChoiceType = { + Function: "function", +} as const; +export type NamedToolChoiceType = ClosedEnum; export type NamedToolChoiceFunction = { + /** + * Function name to call + */ name: string; }; +/** + * Named tool choice for specific function + */ export type NamedToolChoice = { - type: "function"; + type: NamedToolChoiceType; function: NamedToolChoiceFunction; }; +/** @internal */ +export const NamedToolChoiceType$outboundSchema: z.ZodEnum< + typeof NamedToolChoiceType +> = z.enum(NamedToolChoiceType); + /** @internal */ export type NamedToolChoiceFunction$Outbound = { name: string; @@ -37,7 +54,7 @@ export function namedToolChoiceFunctionToJSON( /** @internal */ export type NamedToolChoice$Outbound = { - type: "function"; + 
type: string; function: NamedToolChoiceFunction$Outbound; }; @@ -46,7 +63,7 @@ export const NamedToolChoice$outboundSchema: z.ZodType< NamedToolChoice$Outbound, NamedToolChoice > = z.object({ - type: z.literal("function"), + type: NamedToolChoiceType$outboundSchema, function: z.lazy(() => NamedToolChoiceFunction$outboundSchema), }); diff --git a/src/models/openairesponsesreasoningconfig.ts b/src/models/openairesponsesreasoningconfig.ts index eeea2c05..df560cf5 100644 --- a/src/models/openairesponsesreasoningconfig.ts +++ b/src/models/openairesponsesreasoningconfig.ts @@ -18,7 +18,7 @@ import { export type OpenAIResponsesReasoningConfig = { effort?: OpenAIResponsesReasoningEffort | null | undefined; - summary?: ReasoningSummaryVerbosity | undefined; + summary?: ReasoningSummaryVerbosity | null | undefined; }; /** @internal */ @@ -27,7 +27,7 @@ export const OpenAIResponsesReasoningConfig$inboundSchema: z.ZodType< unknown > = z.object({ effort: z.nullable(OpenAIResponsesReasoningEffort$inboundSchema).optional(), - summary: ReasoningSummaryVerbosity$inboundSchema.optional(), + summary: z.nullable(ReasoningSummaryVerbosity$inboundSchema).optional(), }); export function openAIResponsesReasoningConfigFromJSON( diff --git a/src/models/openresponsesnonstreamingresponse.ts b/src/models/openresponsesnonstreamingresponse.ts index b6c8c858..9843d593 100644 --- a/src/models/openresponsesnonstreamingresponse.ts +++ b/src/models/openresponsesnonstreamingresponse.ts @@ -74,10 +74,12 @@ import { ResponseTextConfig$inboundSchema, } from "./responsetextconfig.js"; -export const ObjectT = { +export const OpenResponsesNonStreamingResponseObject = { Response: "response", } as const; -export type ObjectT = ClosedEnum; +export type OpenResponsesNonStreamingResponseObject = ClosedEnum< + typeof OpenResponsesNonStreamingResponseObject +>; /** * Function tool definition @@ -102,7 +104,7 @@ export type OpenResponsesNonStreamingResponseToolUnion = */ export type 
OpenResponsesNonStreamingResponse = { id: string; - object: ObjectT; + object: OpenResponsesNonStreamingResponseObject; createdAt: number; model: string; status: OpenAIResponsesResponseStatus; @@ -156,7 +158,9 @@ export type OpenResponsesNonStreamingResponse = { }; /** @internal */ -export const ObjectT$inboundSchema: z.ZodEnum = z.enum(ObjectT); +export const OpenResponsesNonStreamingResponseObject$inboundSchema: z.ZodEnum< + typeof OpenResponsesNonStreamingResponseObject +> = z.enum(OpenResponsesNonStreamingResponseObject); /** @internal */ export const OpenResponsesNonStreamingResponseToolFunction$inboundSchema: @@ -216,7 +220,7 @@ export const OpenResponsesNonStreamingResponse$inboundSchema: z.ZodType< unknown > = z.object({ id: z.string(), - object: ObjectT$inboundSchema, + object: OpenResponsesNonStreamingResponseObject$inboundSchema, created_at: z.number(), model: z.string(), status: OpenAIResponsesResponseStatus$inboundSchema, diff --git a/src/models/openresponsesreasoningconfig.ts b/src/models/openresponsesreasoningconfig.ts index 6c2046fa..a6fe99f7 100644 --- a/src/models/openresponsesreasoningconfig.ts +++ b/src/models/openresponsesreasoningconfig.ts @@ -19,7 +19,7 @@ import { */ export type OpenResponsesReasoningConfig = { effort?: OpenAIResponsesReasoningEffort | null | undefined; - summary?: ReasoningSummaryVerbosity | undefined; + summary?: ReasoningSummaryVerbosity | null | undefined; maxTokens?: number | null | undefined; enabled?: boolean | null | undefined; }; @@ -27,7 +27,7 @@ export type OpenResponsesReasoningConfig = { /** @internal */ export type OpenResponsesReasoningConfig$Outbound = { effort?: string | null | undefined; - summary?: string | undefined; + summary?: string | null | undefined; max_tokens?: number | null | undefined; enabled?: boolean | null | undefined; }; @@ -38,7 +38,7 @@ export const OpenResponsesReasoningConfig$outboundSchema: z.ZodType< OpenResponsesReasoningConfig > = z.object({ effort: 
z.nullable(OpenAIResponsesReasoningEffort$outboundSchema).optional(), - summary: ReasoningSummaryVerbosity$outboundSchema.optional(), + summary: z.nullable(ReasoningSummaryVerbosity$outboundSchema).optional(), maxTokens: z.nullable(z.number()).optional(), enabled: z.nullable(z.boolean()).optional(), }).transform((v) => { diff --git a/src/models/openresponsesrequest.ts b/src/models/openresponsesrequest.ts index dae7dabb..2c89e3fb 100644 --- a/src/models/openresponsesrequest.ts +++ b/src/models/openresponsesrequest.ts @@ -151,7 +151,7 @@ export type OpenResponsesRequestMaxPrice = { /** * When multiple model providers are available, optionally indicate your routing preference. */ -export type Provider = { +export type OpenResponsesRequestProvider = { /** * Whether to allow backup providers to serve requests * @@ -215,7 +215,7 @@ export type Provider = { preferredMaxLatency?: PreferredMaxLatency | null | undefined; }; -export type PluginResponseHealing = { +export type OpenResponsesRequestPluginResponseHealing = { id: "response-healing"; /** * Set to false to disable the response-healing plugin for this request. Defaults to true. @@ -223,7 +223,7 @@ export type PluginResponseHealing = { enabled?: boolean | undefined; }; -export type PluginFileParser = { +export type OpenResponsesRequestPluginFileParser = { id: "file-parser"; /** * Set to false to disable the file-parser plugin for this request. Defaults to true. @@ -235,7 +235,7 @@ export type PluginFileParser = { pdf?: PDFParserOptions | undefined; }; -export type PluginWeb = { +export type OpenResponsesRequestPluginWeb = { id: "web"; /** * Set to false to disable the web-search plugin for this request. Defaults to true. 
@@ -249,11 +249,11 @@ export type PluginWeb = { engine?: WebSearchEngine | undefined; }; -export type PluginModeration = { +export type OpenResponsesRequestPluginModeration = { id: "moderation"; }; -export type PluginAutoRouter = { +export type OpenResponsesRequestPluginAutoRouter = { id: "auto-router"; /** * Set to false to disable the auto-router plugin for this request. Defaults to true. @@ -265,12 +265,24 @@ export type PluginAutoRouter = { allowedModels?: Array | undefined; }; -export type Plugin = - | PluginAutoRouter - | PluginModeration - | PluginWeb - | PluginFileParser - | PluginResponseHealing; +export type OpenResponsesRequestPluginUnion = + | OpenResponsesRequestPluginAutoRouter + | OpenResponsesRequestPluginModeration + | OpenResponsesRequestPluginWeb + | OpenResponsesRequestPluginFileParser + | OpenResponsesRequestPluginResponseHealing; + +/** + * Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. + */ +export type OpenResponsesRequestTrace = { + traceId?: string | undefined; + traceName?: string | undefined; + spanName?: string | undefined; + generationName?: string | undefined; + parentSpanId?: string | undefined; + additionalProperties?: { [k: string]: any | null } | undefined; +}; /** * Request schema for Responses endpoint @@ -335,17 +347,17 @@ export type OpenResponsesRequest = { /** * When multiple model providers are available, optionally indicate your routing preference. */ - provider?: Provider | null | undefined; + provider?: OpenResponsesRequestProvider | null | undefined; /** * Plugins you want to enable for this request, including their settings. 
*/ plugins?: | Array< - | PluginAutoRouter - | PluginModeration - | PluginWeb - | PluginFileParser - | PluginResponseHealing + | OpenResponsesRequestPluginAutoRouter + | OpenResponsesRequestPluginModeration + | OpenResponsesRequestPluginWeb + | OpenResponsesRequestPluginFileParser + | OpenResponsesRequestPluginResponseHealing > | undefined; /** @@ -356,6 +368,10 @@ export type OpenResponsesRequest = { * A unique identifier for grouping related requests (e.g., a conversation or agent workflow) for observability. If provided in both the request body and the x-session-id header, the body value takes precedence. Maximum of 128 characters. */ sessionId?: string | undefined; + /** + * Metadata for observability and tracing. Known keys (trace_id, trace_name, span_name, generation_name, parent_span_id) have special handling. Additional keys are passed through as custom metadata to configured broadcast destinations. + */ + trace?: OpenResponsesRequestTrace | undefined; }; /** @internal */ @@ -554,7 +570,7 @@ export function openResponsesRequestMaxPriceToJSON( } /** @internal */ -export type Provider$Outbound = { +export type OpenResponsesRequestProvider$Outbound = { allow_fallbacks?: boolean | null | undefined; require_parameters?: boolean | null | undefined; data_collection?: string | null | undefined; @@ -571,102 +587,114 @@ export type Provider$Outbound = { }; /** @internal */ -export const Provider$outboundSchema: z.ZodType = z - .object({ - allowFallbacks: z.nullable(z.boolean()).optional(), - requireParameters: z.nullable(z.boolean()).optional(), - dataCollection: z.nullable(DataCollection$outboundSchema).optional(), - zdr: z.nullable(z.boolean()).optional(), - enforceDistillableText: z.nullable(z.boolean()).optional(), - order: z.nullable( - z.array(z.union([ProviderName$outboundSchema, z.string()])), - ).optional(), - only: z.nullable( - z.array(z.union([ProviderName$outboundSchema, z.string()])), - ).optional(), - ignore: z.nullable( - 
z.array(z.union([ProviderName$outboundSchema, z.string()])), - ).optional(), - quantizations: z.nullable(z.array(Quantization$outboundSchema)).optional(), - sort: z.nullable( - z.union([ - ProviderSort$outboundSchema, - ProviderSortConfig$outboundSchema, - z.any(), - ]), - ).optional(), - maxPrice: z.lazy(() => OpenResponsesRequestMaxPrice$outboundSchema) - .optional(), - preferredMinThroughput: z.nullable(PreferredMinThroughput$outboundSchema) - .optional(), - preferredMaxLatency: z.nullable(PreferredMaxLatency$outboundSchema) - .optional(), - }).transform((v) => { - return remap$(v, { - allowFallbacks: "allow_fallbacks", - requireParameters: "require_parameters", - dataCollection: "data_collection", - enforceDistillableText: "enforce_distillable_text", - maxPrice: "max_price", - preferredMinThroughput: "preferred_min_throughput", - preferredMaxLatency: "preferred_max_latency", - }); +export const OpenResponsesRequestProvider$outboundSchema: z.ZodType< + OpenResponsesRequestProvider$Outbound, + OpenResponsesRequestProvider +> = z.object({ + allowFallbacks: z.nullable(z.boolean()).optional(), + requireParameters: z.nullable(z.boolean()).optional(), + dataCollection: z.nullable(DataCollection$outboundSchema).optional(), + zdr: z.nullable(z.boolean()).optional(), + enforceDistillableText: z.nullable(z.boolean()).optional(), + order: z.nullable(z.array(z.union([ProviderName$outboundSchema, z.string()]))) + .optional(), + only: z.nullable(z.array(z.union([ProviderName$outboundSchema, z.string()]))) + .optional(), + ignore: z.nullable( + z.array(z.union([ProviderName$outboundSchema, z.string()])), + ).optional(), + quantizations: z.nullable(z.array(Quantization$outboundSchema)).optional(), + sort: z.nullable( + z.union([ + ProviderSort$outboundSchema, + ProviderSortConfig$outboundSchema, + z.any(), + ]), + ).optional(), + maxPrice: z.lazy(() => OpenResponsesRequestMaxPrice$outboundSchema) + .optional(), + preferredMinThroughput: 
z.nullable(PreferredMinThroughput$outboundSchema) + .optional(), + preferredMaxLatency: z.nullable(PreferredMaxLatency$outboundSchema) + .optional(), +}).transform((v) => { + return remap$(v, { + allowFallbacks: "allow_fallbacks", + requireParameters: "require_parameters", + dataCollection: "data_collection", + enforceDistillableText: "enforce_distillable_text", + maxPrice: "max_price", + preferredMinThroughput: "preferred_min_throughput", + preferredMaxLatency: "preferred_max_latency", }); +}); -export function providerToJSON(provider: Provider): string { - return JSON.stringify(Provider$outboundSchema.parse(provider)); +export function openResponsesRequestProviderToJSON( + openResponsesRequestProvider: OpenResponsesRequestProvider, +): string { + return JSON.stringify( + OpenResponsesRequestProvider$outboundSchema.parse( + openResponsesRequestProvider, + ), + ); } /** @internal */ -export type PluginResponseHealing$Outbound = { +export type OpenResponsesRequestPluginResponseHealing$Outbound = { id: "response-healing"; enabled?: boolean | undefined; }; /** @internal */ -export const PluginResponseHealing$outboundSchema: z.ZodType< - PluginResponseHealing$Outbound, - PluginResponseHealing -> = z.object({ - id: z.literal("response-healing"), - enabled: z.boolean().optional(), -}); +export const OpenResponsesRequestPluginResponseHealing$outboundSchema: + z.ZodType< + OpenResponsesRequestPluginResponseHealing$Outbound, + OpenResponsesRequestPluginResponseHealing + > = z.object({ + id: z.literal("response-healing"), + enabled: z.boolean().optional(), + }); -export function pluginResponseHealingToJSON( - pluginResponseHealing: PluginResponseHealing, +export function openResponsesRequestPluginResponseHealingToJSON( + openResponsesRequestPluginResponseHealing: + OpenResponsesRequestPluginResponseHealing, ): string { return JSON.stringify( - PluginResponseHealing$outboundSchema.parse(pluginResponseHealing), + OpenResponsesRequestPluginResponseHealing$outboundSchema.parse( 
+ openResponsesRequestPluginResponseHealing, + ), ); } /** @internal */ -export type PluginFileParser$Outbound = { +export type OpenResponsesRequestPluginFileParser$Outbound = { id: "file-parser"; enabled?: boolean | undefined; pdf?: PDFParserOptions$Outbound | undefined; }; /** @internal */ -export const PluginFileParser$outboundSchema: z.ZodType< - PluginFileParser$Outbound, - PluginFileParser +export const OpenResponsesRequestPluginFileParser$outboundSchema: z.ZodType< + OpenResponsesRequestPluginFileParser$Outbound, + OpenResponsesRequestPluginFileParser > = z.object({ id: z.literal("file-parser"), enabled: z.boolean().optional(), pdf: PDFParserOptions$outboundSchema.optional(), }); -export function pluginFileParserToJSON( - pluginFileParser: PluginFileParser, +export function openResponsesRequestPluginFileParserToJSON( + openResponsesRequestPluginFileParser: OpenResponsesRequestPluginFileParser, ): string { return JSON.stringify( - PluginFileParser$outboundSchema.parse(pluginFileParser), + OpenResponsesRequestPluginFileParser$outboundSchema.parse( + openResponsesRequestPluginFileParser, + ), ); } /** @internal */ -export type PluginWeb$Outbound = { +export type OpenResponsesRequestPluginWeb$Outbound = { id: "web"; enabled?: boolean | undefined; max_results?: number | undefined; @@ -675,9 +703,9 @@ export type PluginWeb$Outbound = { }; /** @internal */ -export const PluginWeb$outboundSchema: z.ZodType< - PluginWeb$Outbound, - PluginWeb +export const OpenResponsesRequestPluginWeb$outboundSchema: z.ZodType< + OpenResponsesRequestPluginWeb$Outbound, + OpenResponsesRequestPluginWeb > = z.object({ id: z.literal("web"), enabled: z.boolean().optional(), @@ -691,42 +719,50 @@ export const PluginWeb$outboundSchema: z.ZodType< }); }); -export function pluginWebToJSON(pluginWeb: PluginWeb): string { - return JSON.stringify(PluginWeb$outboundSchema.parse(pluginWeb)); +export function openResponsesRequestPluginWebToJSON( + openResponsesRequestPluginWeb: 
OpenResponsesRequestPluginWeb, +): string { + return JSON.stringify( + OpenResponsesRequestPluginWeb$outboundSchema.parse( + openResponsesRequestPluginWeb, + ), + ); } /** @internal */ -export type PluginModeration$Outbound = { +export type OpenResponsesRequestPluginModeration$Outbound = { id: "moderation"; }; /** @internal */ -export const PluginModeration$outboundSchema: z.ZodType< - PluginModeration$Outbound, - PluginModeration +export const OpenResponsesRequestPluginModeration$outboundSchema: z.ZodType< + OpenResponsesRequestPluginModeration$Outbound, + OpenResponsesRequestPluginModeration > = z.object({ id: z.literal("moderation"), }); -export function pluginModerationToJSON( - pluginModeration: PluginModeration, +export function openResponsesRequestPluginModerationToJSON( + openResponsesRequestPluginModeration: OpenResponsesRequestPluginModeration, ): string { return JSON.stringify( - PluginModeration$outboundSchema.parse(pluginModeration), + OpenResponsesRequestPluginModeration$outboundSchema.parse( + openResponsesRequestPluginModeration, + ), ); } /** @internal */ -export type PluginAutoRouter$Outbound = { +export type OpenResponsesRequestPluginAutoRouter$Outbound = { id: "auto-router"; enabled?: boolean | undefined; allowed_models?: Array | undefined; }; /** @internal */ -export const PluginAutoRouter$outboundSchema: z.ZodType< - PluginAutoRouter$Outbound, - PluginAutoRouter +export const OpenResponsesRequestPluginAutoRouter$outboundSchema: z.ZodType< + OpenResponsesRequestPluginAutoRouter$Outbound, + OpenResponsesRequestPluginAutoRouter > = z.object({ id: z.literal("auto-router"), enabled: z.boolean().optional(), @@ -737,34 +773,87 @@ export const PluginAutoRouter$outboundSchema: z.ZodType< }); }); -export function pluginAutoRouterToJSON( - pluginAutoRouter: PluginAutoRouter, +export function openResponsesRequestPluginAutoRouterToJSON( + openResponsesRequestPluginAutoRouter: OpenResponsesRequestPluginAutoRouter, ): string { return JSON.stringify( - 
PluginAutoRouter$outboundSchema.parse(pluginAutoRouter), + OpenResponsesRequestPluginAutoRouter$outboundSchema.parse( + openResponsesRequestPluginAutoRouter, + ), ); } /** @internal */ -export type Plugin$Outbound = - | PluginAutoRouter$Outbound - | PluginModeration$Outbound - | PluginWeb$Outbound - | PluginFileParser$Outbound - | PluginResponseHealing$Outbound; +export type OpenResponsesRequestPluginUnion$Outbound = + | OpenResponsesRequestPluginAutoRouter$Outbound + | OpenResponsesRequestPluginModeration$Outbound + | OpenResponsesRequestPluginWeb$Outbound + | OpenResponsesRequestPluginFileParser$Outbound + | OpenResponsesRequestPluginResponseHealing$Outbound; /** @internal */ -export const Plugin$outboundSchema: z.ZodType = z - .union([ - z.lazy(() => PluginAutoRouter$outboundSchema), - z.lazy(() => PluginModeration$outboundSchema), - z.lazy(() => PluginWeb$outboundSchema), - z.lazy(() => PluginFileParser$outboundSchema), - z.lazy(() => PluginResponseHealing$outboundSchema), - ]); +export const OpenResponsesRequestPluginUnion$outboundSchema: z.ZodType< + OpenResponsesRequestPluginUnion$Outbound, + OpenResponsesRequestPluginUnion +> = z.union([ + z.lazy(() => OpenResponsesRequestPluginAutoRouter$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginModeration$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginWeb$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginFileParser$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginResponseHealing$outboundSchema), +]); -export function pluginToJSON(plugin: Plugin): string { - return JSON.stringify(Plugin$outboundSchema.parse(plugin)); +export function openResponsesRequestPluginUnionToJSON( + openResponsesRequestPluginUnion: OpenResponsesRequestPluginUnion, +): string { + return JSON.stringify( + OpenResponsesRequestPluginUnion$outboundSchema.parse( + openResponsesRequestPluginUnion, + ), + ); +} + +/** @internal */ +export type OpenResponsesRequestTrace$Outbound = { + trace_id?: string | 
undefined; + trace_name?: string | undefined; + span_name?: string | undefined; + generation_name?: string | undefined; + parent_span_id?: string | undefined; + [additionalProperties: string]: unknown; +}; + +/** @internal */ +export const OpenResponsesRequestTrace$outboundSchema: z.ZodType< + OpenResponsesRequestTrace$Outbound, + OpenResponsesRequestTrace +> = z.object({ + traceId: z.string().optional(), + traceName: z.string().optional(), + spanName: z.string().optional(), + generationName: z.string().optional(), + parentSpanId: z.string().optional(), + additionalProperties: z.record(z.string(), z.nullable(z.any())).optional(), +}).transform((v) => { + return { + ...v.additionalProperties, + ...remap$(v, { + traceId: "trace_id", + traceName: "trace_name", + spanName: "span_name", + generationName: "generation_name", + parentSpanId: "parent_span_id", + additionalProperties: null, + }), + }; +}); + +export function openResponsesRequestTraceToJSON( + openResponsesRequestTrace: OpenResponsesRequestTrace, +): string { + return JSON.stringify( + OpenResponsesRequestTrace$outboundSchema.parse(openResponsesRequestTrace), + ); } /** @internal */ @@ -807,18 +896,19 @@ export type OpenResponsesRequest$Outbound = { service_tier: string; truncation?: string | null | undefined; stream: boolean; - provider?: Provider$Outbound | null | undefined; + provider?: OpenResponsesRequestProvider$Outbound | null | undefined; plugins?: | Array< - | PluginAutoRouter$Outbound - | PluginModeration$Outbound - | PluginWeb$Outbound - | PluginFileParser$Outbound - | PluginResponseHealing$Outbound + | OpenResponsesRequestPluginAutoRouter$Outbound + | OpenResponsesRequestPluginModeration$Outbound + | OpenResponsesRequestPluginWeb$Outbound + | OpenResponsesRequestPluginFileParser$Outbound + | OpenResponsesRequestPluginResponseHealing$Outbound > | undefined; user?: string | undefined; session_id?: string | undefined; + trace?: OpenResponsesRequestTrace$Outbound | undefined; }; /** @internal */ @@ 
-866,18 +956,21 @@ export const OpenResponsesRequest$outboundSchema: z.ZodType< serviceTier: ServiceTier$outboundSchema.default("auto"), truncation: z.nullable(Truncation$outboundSchema).optional(), stream: z.boolean().default(false), - provider: z.nullable(z.lazy(() => Provider$outboundSchema)).optional(), + provider: z.nullable( + z.lazy(() => OpenResponsesRequestProvider$outboundSchema), + ).optional(), plugins: z.array( z.union([ - z.lazy(() => PluginAutoRouter$outboundSchema), - z.lazy(() => PluginModeration$outboundSchema), - z.lazy(() => PluginWeb$outboundSchema), - z.lazy(() => PluginFileParser$outboundSchema), - z.lazy(() => PluginResponseHealing$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginAutoRouter$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginModeration$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginWeb$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginFileParser$outboundSchema), + z.lazy(() => OpenResponsesRequestPluginResponseHealing$outboundSchema), ]), ).optional(), user: z.string().optional(), sessionId: z.string().optional(), + trace: z.lazy(() => OpenResponsesRequestTrace$outboundSchema).optional(), }).transform((v) => { return remap$(v, { toolChoice: "tool_choice", diff --git a/src/models/operations/createauthkeyscode.ts b/src/models/operations/createauthkeyscode.ts index d12a0b23..8979e7af 100644 --- a/src/models/operations/createauthkeyscode.ts +++ b/src/models/operations/createauthkeyscode.ts @@ -41,6 +41,19 @@ export type CreateAuthKeysCodeCodeChallengeMethod = OpenEnum< typeof CreateAuthKeysCodeCodeChallengeMethod >; +/** + * Optional credit limit reset interval. When set, the credit limit resets on this interval. + */ +export const UsageLimitType = { + Daily: "daily", + Weekly: "weekly", + Monthly: "monthly", +} as const; +/** + * Optional credit limit reset interval. When set, the credit limit resets on this interval. 
+ */ +export type UsageLimitType = OpenEnum; + export type CreateAuthKeysCodeRequestBody = { /** * The callback URL to redirect to after authorization. Note, only https URLs on ports 443 and 3000 are allowed. @@ -62,6 +75,14 @@ export type CreateAuthKeysCodeRequestBody = { * Optional expiration time for the API key to be created */ expiresAt?: Date | null | undefined; + /** + * Optional custom label for the API key. Defaults to the app name if not provided. + */ + keyLabel?: string | undefined; + /** + * Optional credit limit reset interval. When set, the credit limit resets on this interval. + */ + usageLimitType?: UsageLimitType | undefined; }; export type CreateAuthKeysCodeRequest = { @@ -115,6 +136,10 @@ export const CreateAuthKeysCodeCodeChallengeMethod$outboundSchema: z.ZodType< CreateAuthKeysCodeCodeChallengeMethod > = openEnums.outboundSchema(CreateAuthKeysCodeCodeChallengeMethod); +/** @internal */ +export const UsageLimitType$outboundSchema: z.ZodType = + openEnums.outboundSchema(UsageLimitType); + /** @internal */ export type CreateAuthKeysCodeRequestBody$Outbound = { callback_url: string; @@ -122,6 +147,8 @@ export type CreateAuthKeysCodeRequestBody$Outbound = { code_challenge_method?: string | undefined; limit?: number | undefined; expires_at?: string | null | undefined; + key_label?: string | undefined; + usage_limit_type?: string | undefined; }; /** @internal */ @@ -135,12 +162,16 @@ export const CreateAuthKeysCodeRequestBody$outboundSchema: z.ZodType< .optional(), limit: z.number().optional(), expiresAt: z.nullable(z.date().transform(v => v.toISOString())).optional(), + keyLabel: z.string().optional(), + usageLimitType: UsageLimitType$outboundSchema.optional(), }).transform((v) => { return remap$(v, { callbackUrl: "callback_url", codeChallenge: "code_challenge", codeChallengeMethod: "code_challenge_method", expiresAt: "expires_at", + keyLabel: "key_label", + usageLimitType: "usage_limit_type", }); }); diff --git 
a/src/models/operations/getgeneration.ts b/src/models/operations/getgeneration.ts index f32739a9..98798dd9 100644 --- a/src/models/operations/getgeneration.ts +++ b/src/models/operations/getgeneration.ts @@ -120,6 +120,7 @@ export const ProviderName = { Inceptron: "Inceptron", InferenceNet: "InferenceNet", Infermatic: "Infermatic", + IoNet: "Io Net", Inflection: "Inflection", Liquid: "Liquid", Mara: "Mara", diff --git a/src/models/operations/sendchatcompletionrequest.ts b/src/models/operations/sendchatcompletionrequest.ts index ada1e885..957b070c 100644 --- a/src/models/operations/sendchatcompletionrequest.ts +++ b/src/models/operations/sendchatcompletionrequest.ts @@ -41,15 +41,22 @@ export type SendChatCompletionRequestRequest = { * @remarks */ xTitle?: string | undefined; + chatGenerationParams: models.ChatGenerationParams; +}; + +/** + * Successful chat completion response + */ +export type SendChatCompletionRequestResponseBody = { /** - * Chat completion request parameters + * Streaming chat completion chunk */ - chatGenerationParams: models.ChatGenerationParams; + data: models.ChatStreamingResponseChunk; }; export type SendChatCompletionRequestResponse = | models.ChatResponse - | EventStream; + | EventStream; /** @internal */ export type SendChatCompletionRequestRequest$Outbound = { @@ -84,6 +91,36 @@ export function sendChatCompletionRequestRequestToJSON( ); } +/** @internal */ +export const SendChatCompletionRequestResponseBody$inboundSchema: z.ZodType< + SendChatCompletionRequestResponseBody, + unknown +> = z.object({ + data: z.string().transform((v, ctx) => { + try { + return JSON.parse(v); + } catch (err) { + ctx.addIssue({ + input: v, + code: "custom", + message: `malformed json: ${err}`, + }); + return z.NEVER; + } + }).pipe(models.ChatStreamingResponseChunk$inboundSchema), +}); + +export function sendChatCompletionRequestResponseBodyFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => + 
SendChatCompletionRequestResponseBody$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'SendChatCompletionRequestResponseBody' from JSON`, + ); +} + /** @internal */ export const SendChatCompletionRequestResponse$inboundSchema: z.ZodType< SendChatCompletionRequestResponse, @@ -95,8 +132,9 @@ export const SendChatCompletionRequestResponse$inboundSchema: z.ZodType< return new EventStream(stream, rawEvent => { if (rawEvent.data === "[DONE]") return { done: true }; return { - value: models.ChatStreamingResponseChunk$inboundSchema.parse(rawEvent) - ?.data, + value: z.lazy(() => + SendChatCompletionRequestResponseBody$inboundSchema + ).parse(rawEvent)?.data, }; }); }), diff --git a/src/models/providername.ts b/src/models/providername.ts index 41aa063b..eca31616 100644 --- a/src/models/providername.ts +++ b/src/models/providername.ts @@ -43,6 +43,7 @@ export const ProviderName = { Inceptron: "Inceptron", InferenceNet: "InferenceNet", Infermatic: "Infermatic", + IoNet: "Io Net", Inflection: "Inflection", Liquid: "Liquid", Mara: "Mara", diff --git a/src/models/providerpreferences.ts b/src/models/providerpreferences.ts index fab25d32..92c73410 100644 --- a/src/models/providerpreferences.ts +++ b/src/models/providerpreferences.ts @@ -22,7 +22,6 @@ import { PreferredMinThroughput$outboundSchema, } from "./preferredminthroughput.js"; import { ProviderName, ProviderName$outboundSchema } from "./providername.js"; -import { ProviderSort, ProviderSort$outboundSchema } from "./providersort.js"; import { Quantization, Quantization$outboundSchema } from "./quantization.js"; export type ProviderPreferencesOrder = ProviderName | string; @@ -31,42 +30,77 @@ export type ProviderPreferencesOnly = ProviderName | string; export type ProviderPreferencesIgnore = ProviderName | string; -export const SortEnum = { +export const ProviderPreferencesSortEnum = { Price: "price", Throughput: "throughput", Latency: "latency", } as const; -export type SortEnum = OpenEnum; +export type 
ProviderPreferencesSortEnum = OpenEnum< + typeof ProviderPreferencesSortEnum +>; + +export const ProviderPreferencesProviderSortConfigEnum = { + Price: "price", + Throughput: "throughput", + Latency: "latency", +} as const; +export type ProviderPreferencesProviderSortConfigEnum = ClosedEnum< + typeof ProviderPreferencesProviderSortConfigEnum +>; -export const ProviderSortConfigEnum = { +/** + * The provider sorting strategy (price, throughput, latency) + */ +export const ProviderPreferencesBy = { Price: "price", Throughput: "throughput", Latency: "latency", } as const; -export type ProviderSortConfigEnum = ClosedEnum; +/** + * The provider sorting strategy (price, throughput, latency) + */ +export type ProviderPreferencesBy = OpenEnum; +/** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. + */ export const ProviderPreferencesPartition = { Model: "model", None: "none", } as const; +/** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. + */ export type ProviderPreferencesPartition = OpenEnum< typeof ProviderPreferencesPartition >; export type ProviderPreferencesProviderSortConfig = { - by?: ProviderSort | null | undefined; + /** + * The provider sorting strategy (price, throughput, latency) + */ + by?: ProviderPreferencesBy | null | undefined; + /** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. 
+ */ partition?: ProviderPreferencesPartition | null | undefined; }; -export type ProviderSortConfigUnion = +export type ProviderPreferencesProviderSortConfigUnion = | ProviderPreferencesProviderSortConfig - | ProviderSortConfigEnum; + | ProviderPreferencesProviderSortConfigEnum; +/** + * The provider sorting strategy (price, throughput, latency) + */ export const ProviderPreferencesProviderSort = { Price: "price", Throughput: "throughput", Latency: "latency", } as const; +/** + * The provider sorting strategy (price, throughput, latency) + */ export type ProviderPreferencesProviderSort = OpenEnum< typeof ProviderPreferencesProviderSort >; @@ -77,8 +111,8 @@ export type ProviderPreferencesProviderSort = OpenEnum< export type ProviderPreferencesSortUnion = | ProviderPreferencesProviderSort | ProviderPreferencesProviderSortConfig - | ProviderSortConfigEnum - | SortEnum; + | ProviderPreferencesProviderSortConfigEnum + | ProviderPreferencesSortEnum; /** * The object specifying the maximum price you want to pay for this request. USD price per million tokens, for prompt and completion. 
@@ -146,8 +180,8 @@ export type ProviderPreferences = { sort?: | ProviderPreferencesProviderSort | ProviderPreferencesProviderSortConfig - | ProviderSortConfigEnum - | SortEnum + | ProviderPreferencesProviderSortConfigEnum + | ProviderPreferencesSortEnum | null | undefined; /** @@ -216,13 +250,22 @@ export function providerPreferencesIgnoreToJSON( } /** @internal */ -export const SortEnum$outboundSchema: z.ZodType = openEnums - .outboundSchema(SortEnum); +export const ProviderPreferencesSortEnum$outboundSchema: z.ZodType< + string, + ProviderPreferencesSortEnum +> = openEnums.outboundSchema(ProviderPreferencesSortEnum); + +/** @internal */ +export const ProviderPreferencesProviderSortConfigEnum$outboundSchema: + z.ZodEnum = z.enum( + ProviderPreferencesProviderSortConfigEnum, + ); /** @internal */ -export const ProviderSortConfigEnum$outboundSchema: z.ZodEnum< - typeof ProviderSortConfigEnum -> = z.enum(ProviderSortConfigEnum); +export const ProviderPreferencesBy$outboundSchema: z.ZodType< + string, + ProviderPreferencesBy +> = openEnums.outboundSchema(ProviderPreferencesBy); /** @internal */ export const ProviderPreferencesPartition$outboundSchema: z.ZodType< @@ -241,7 +284,7 @@ export const ProviderPreferencesProviderSortConfig$outboundSchema: z.ZodType< ProviderPreferencesProviderSortConfig$Outbound, ProviderPreferencesProviderSortConfig > = z.object({ - by: z.nullable(ProviderSort$outboundSchema).optional(), + by: z.nullable(ProviderPreferencesBy$outboundSchema).optional(), partition: z.nullable(ProviderPreferencesPartition$outboundSchema).optional(), }); @@ -256,24 +299,28 @@ export function providerPreferencesProviderSortConfigToJSON( } /** @internal */ -export type ProviderSortConfigUnion$Outbound = +export type ProviderPreferencesProviderSortConfigUnion$Outbound = | ProviderPreferencesProviderSortConfig$Outbound | string; /** @internal */ -export const ProviderSortConfigUnion$outboundSchema: z.ZodType< - ProviderSortConfigUnion$Outbound, - 
ProviderSortConfigUnion -> = z.union([ - z.lazy(() => ProviderPreferencesProviderSortConfig$outboundSchema), - ProviderSortConfigEnum$outboundSchema, -]); +export const ProviderPreferencesProviderSortConfigUnion$outboundSchema: + z.ZodType< + ProviderPreferencesProviderSortConfigUnion$Outbound, + ProviderPreferencesProviderSortConfigUnion + > = z.union([ + z.lazy(() => ProviderPreferencesProviderSortConfig$outboundSchema), + ProviderPreferencesProviderSortConfigEnum$outboundSchema, + ]); -export function providerSortConfigUnionToJSON( - providerSortConfigUnion: ProviderSortConfigUnion, +export function providerPreferencesProviderSortConfigUnionToJSON( + providerPreferencesProviderSortConfigUnion: + ProviderPreferencesProviderSortConfigUnion, ): string { return JSON.stringify( - ProviderSortConfigUnion$outboundSchema.parse(providerSortConfigUnion), + ProviderPreferencesProviderSortConfigUnion$outboundSchema.parse( + providerPreferencesProviderSortConfigUnion, + ), ); } @@ -298,9 +345,9 @@ export const ProviderPreferencesSortUnion$outboundSchema: z.ZodType< ProviderPreferencesProviderSort$outboundSchema, z.union([ z.lazy(() => ProviderPreferencesProviderSortConfig$outboundSchema), - ProviderSortConfigEnum$outboundSchema, + ProviderPreferencesProviderSortConfigEnum$outboundSchema, ]), - SortEnum$outboundSchema, + ProviderPreferencesSortEnum$outboundSchema, ]); export function providerPreferencesSortUnionToJSON( @@ -390,9 +437,9 @@ export const ProviderPreferences$outboundSchema: z.ZodType< ProviderPreferencesProviderSort$outboundSchema, z.union([ z.lazy(() => ProviderPreferencesProviderSortConfig$outboundSchema), - ProviderSortConfigEnum$outboundSchema, + ProviderPreferencesProviderSortConfigEnum$outboundSchema, ]), - SortEnum$outboundSchema, + ProviderPreferencesSortEnum$outboundSchema, ]), ).optional(), maxPrice: z.lazy(() => ProviderPreferencesMaxPrice$outboundSchema).optional(), diff --git a/src/models/providersort.ts b/src/models/providersort.ts index 
2d28a616..b0cd31cc 100644 --- a/src/models/providersort.ts +++ b/src/models/providersort.ts @@ -7,11 +7,17 @@ import * as z from "zod/v4"; import * as openEnums from "../types/enums.js"; import { OpenEnum } from "../types/enums.js"; +/** + * The provider sorting strategy (price, throughput, latency) + */ export const ProviderSort = { Price: "price", Throughput: "throughput", Latency: "latency", } as const; +/** + * The provider sorting strategy (price, throughput, latency) + */ export type ProviderSort = OpenEnum; /** @internal */ diff --git a/src/models/providersortconfig.ts b/src/models/providersortconfig.ts index 56f70b24..473ed35e 100644 --- a/src/models/providersortconfig.ts +++ b/src/models/providersortconfig.ts @@ -6,19 +6,50 @@ import * as z from "zod/v4"; import * as openEnums from "../types/enums.js"; import { OpenEnum } from "../types/enums.js"; -import { ProviderSort, ProviderSort$outboundSchema } from "./providersort.js"; +/** + * The provider sorting strategy (price, throughput, latency) + */ +export const By = { + Price: "price", + Throughput: "throughput", + Latency: "latency", +} as const; +/** + * The provider sorting strategy (price, throughput, latency) + */ +export type By = OpenEnum; + +/** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. + */ export const Partition = { Model: "model", None: "none", } as const; +/** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. 
+ */ export type Partition = OpenEnum; +/** + * The provider sorting strategy (price, throughput, latency) + */ export type ProviderSortConfig = { - by?: ProviderSort | null | undefined; + /** + * The provider sorting strategy (price, throughput, latency) + */ + by?: By | null | undefined; + /** + * Partitioning strategy for sorting: "model" (default) groups endpoints by model before sorting (fallback models remain fallbacks), "none" sorts all endpoints together regardless of model. + */ partition?: Partition | null | undefined; }; +/** @internal */ +export const By$outboundSchema: z.ZodType = openEnums + .outboundSchema(By); + /** @internal */ export const Partition$outboundSchema: z.ZodType = openEnums .outboundSchema(Partition); @@ -34,7 +65,7 @@ export const ProviderSortConfig$outboundSchema: z.ZodType< ProviderSortConfig$Outbound, ProviderSortConfig > = z.object({ - by: z.nullable(ProviderSort$outboundSchema).optional(), + by: z.nullable(By$outboundSchema).optional(), partition: z.nullable(Partition$outboundSchema).optional(), }); diff --git a/src/models/providersortunion.ts b/src/models/providersortunion.ts deleted file mode 100644 index ab212a4c..00000000 --- a/src/models/providersortunion.ts +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
- * @generated-id: 7a2d23baab80 - */ - -import * as z from "zod/v4"; -import { ProviderSort, ProviderSort$outboundSchema } from "./providersort.js"; -import { - ProviderSortConfig, - ProviderSortConfig$Outbound, - ProviderSortConfig$outboundSchema, -} from "./providersortconfig.js"; - -export type ProviderSortUnion = ProviderSort | ProviderSortConfig; - -/** @internal */ -export type ProviderSortUnion$Outbound = string | ProviderSortConfig$Outbound; - -/** @internal */ -export const ProviderSortUnion$outboundSchema: z.ZodType< - ProviderSortUnion$Outbound, - ProviderSortUnion -> = z.union([ProviderSort$outboundSchema, ProviderSortConfig$outboundSchema]); - -export function providerSortUnionToJSON( - providerSortUnion: ProviderSortUnion, -): string { - return JSON.stringify( - ProviderSortUnion$outboundSchema.parse(providerSortUnion), - ); -} diff --git a/src/models/reasoningdetailencrypted.ts b/src/models/reasoningdetailencrypted.ts new file mode 100644 index 00000000..8f8217ed --- /dev/null +++ b/src/models/reasoningdetailencrypted.ts @@ -0,0 +1,94 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
+ * @generated-id: 2064e7820eab + */ + +import * as z from "zod/v4"; +import { safeParse } from "../lib/schemas.js"; +import * as openEnums from "../types/enums.js"; +import { OpenEnum } from "../types/enums.js"; +import { Result as SafeParseResult } from "../types/fp.js"; +import { SDKValidationError } from "./errors/sdkvalidationerror.js"; + +export const ReasoningDetailEncryptedFormat = { + Unknown: "unknown", + OpenaiResponsesV1: "openai-responses-v1", + AzureOpenaiResponsesV1: "azure-openai-responses-v1", + XaiResponsesV1: "xai-responses-v1", + AnthropicClaudeV1: "anthropic-claude-v1", + GoogleGeminiV1: "google-gemini-v1", +} as const; +export type ReasoningDetailEncryptedFormat = OpenEnum< + typeof ReasoningDetailEncryptedFormat +>; + +/** + * Reasoning detail encrypted schema + */ +export type ReasoningDetailEncrypted = { + type: "reasoning.encrypted"; + data: string; + id?: string | null | undefined; + format?: ReasoningDetailEncryptedFormat | null | undefined; + index?: number | undefined; +}; + +/** @internal */ +export const ReasoningDetailEncryptedFormat$inboundSchema: z.ZodType< + ReasoningDetailEncryptedFormat, + unknown +> = openEnums.inboundSchema(ReasoningDetailEncryptedFormat); +/** @internal */ +export const ReasoningDetailEncryptedFormat$outboundSchema: z.ZodType< + string, + ReasoningDetailEncryptedFormat +> = openEnums.outboundSchema(ReasoningDetailEncryptedFormat); + +/** @internal */ +export const ReasoningDetailEncrypted$inboundSchema: z.ZodType< + ReasoningDetailEncrypted, + unknown +> = z.object({ + type: z.literal("reasoning.encrypted"), + data: z.string(), + id: z.nullable(z.string()).optional(), + format: z.nullable(ReasoningDetailEncryptedFormat$inboundSchema).optional(), + index: z.number().optional(), +}); +/** @internal */ +export type ReasoningDetailEncrypted$Outbound = { + type: "reasoning.encrypted"; + data: string; + id?: string | null | undefined; + format?: string | null | undefined; + index?: number | undefined; +}; + +/** 
@internal */ +export const ReasoningDetailEncrypted$outboundSchema: z.ZodType< + ReasoningDetailEncrypted$Outbound, + ReasoningDetailEncrypted +> = z.object({ + type: z.literal("reasoning.encrypted"), + data: z.string(), + id: z.nullable(z.string()).optional(), + format: z.nullable(ReasoningDetailEncryptedFormat$outboundSchema).optional(), + index: z.number().optional(), +}); + +export function reasoningDetailEncryptedToJSON( + reasoningDetailEncrypted: ReasoningDetailEncrypted, +): string { + return JSON.stringify( + ReasoningDetailEncrypted$outboundSchema.parse(reasoningDetailEncrypted), + ); +} +export function reasoningDetailEncryptedFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => ReasoningDetailEncrypted$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'ReasoningDetailEncrypted' from JSON`, + ); +} diff --git a/src/models/reasoningdetailsummary.ts b/src/models/reasoningdetailsummary.ts new file mode 100644 index 00000000..4370c07c --- /dev/null +++ b/src/models/reasoningdetailsummary.ts @@ -0,0 +1,94 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
+ * @generated-id: 08e5d256b8c9 + */ + +import * as z from "zod/v4"; +import { safeParse } from "../lib/schemas.js"; +import * as openEnums from "../types/enums.js"; +import { OpenEnum } from "../types/enums.js"; +import { Result as SafeParseResult } from "../types/fp.js"; +import { SDKValidationError } from "./errors/sdkvalidationerror.js"; + +export const ReasoningDetailSummaryFormat = { + Unknown: "unknown", + OpenaiResponsesV1: "openai-responses-v1", + AzureOpenaiResponsesV1: "azure-openai-responses-v1", + XaiResponsesV1: "xai-responses-v1", + AnthropicClaudeV1: "anthropic-claude-v1", + GoogleGeminiV1: "google-gemini-v1", +} as const; +export type ReasoningDetailSummaryFormat = OpenEnum< + typeof ReasoningDetailSummaryFormat +>; + +/** + * Reasoning detail summary schema + */ +export type ReasoningDetailSummary = { + type: "reasoning.summary"; + summary: string; + id?: string | null | undefined; + format?: ReasoningDetailSummaryFormat | null | undefined; + index?: number | undefined; +}; + +/** @internal */ +export const ReasoningDetailSummaryFormat$inboundSchema: z.ZodType< + ReasoningDetailSummaryFormat, + unknown +> = openEnums.inboundSchema(ReasoningDetailSummaryFormat); +/** @internal */ +export const ReasoningDetailSummaryFormat$outboundSchema: z.ZodType< + string, + ReasoningDetailSummaryFormat +> = openEnums.outboundSchema(ReasoningDetailSummaryFormat); + +/** @internal */ +export const ReasoningDetailSummary$inboundSchema: z.ZodType< + ReasoningDetailSummary, + unknown +> = z.object({ + type: z.literal("reasoning.summary"), + summary: z.string(), + id: z.nullable(z.string()).optional(), + format: z.nullable(ReasoningDetailSummaryFormat$inboundSchema).optional(), + index: z.number().optional(), +}); +/** @internal */ +export type ReasoningDetailSummary$Outbound = { + type: "reasoning.summary"; + summary: string; + id?: string | null | undefined; + format?: string | null | undefined; + index?: number | undefined; +}; + +/** @internal */ +export const 
ReasoningDetailSummary$outboundSchema: z.ZodType< + ReasoningDetailSummary$Outbound, + ReasoningDetailSummary +> = z.object({ + type: z.literal("reasoning.summary"), + summary: z.string(), + id: z.nullable(z.string()).optional(), + format: z.nullable(ReasoningDetailSummaryFormat$outboundSchema).optional(), + index: z.number().optional(), +}); + +export function reasoningDetailSummaryToJSON( + reasoningDetailSummary: ReasoningDetailSummary, +): string { + return JSON.stringify( + ReasoningDetailSummary$outboundSchema.parse(reasoningDetailSummary), + ); +} +export function reasoningDetailSummaryFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => ReasoningDetailSummary$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'ReasoningDetailSummary' from JSON`, + ); +} diff --git a/src/models/reasoningdetailtext.ts b/src/models/reasoningdetailtext.ts new file mode 100644 index 00000000..4735b882 --- /dev/null +++ b/src/models/reasoningdetailtext.ts @@ -0,0 +1,98 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
+ * @generated-id: 8b6bd4c64a66 + */ + +import * as z from "zod/v4"; +import { safeParse } from "../lib/schemas.js"; +import * as openEnums from "../types/enums.js"; +import { OpenEnum } from "../types/enums.js"; +import { Result as SafeParseResult } from "../types/fp.js"; +import { SDKValidationError } from "./errors/sdkvalidationerror.js"; + +export const ReasoningDetailTextFormat = { + Unknown: "unknown", + OpenaiResponsesV1: "openai-responses-v1", + AzureOpenaiResponsesV1: "azure-openai-responses-v1", + XaiResponsesV1: "xai-responses-v1", + AnthropicClaudeV1: "anthropic-claude-v1", + GoogleGeminiV1: "google-gemini-v1", +} as const; +export type ReasoningDetailTextFormat = OpenEnum< + typeof ReasoningDetailTextFormat +>; + +/** + * Reasoning detail text schema + */ +export type ReasoningDetailText = { + type: "reasoning.text"; + text?: string | null | undefined; + signature?: string | null | undefined; + id?: string | null | undefined; + format?: ReasoningDetailTextFormat | null | undefined; + index?: number | undefined; +}; + +/** @internal */ +export const ReasoningDetailTextFormat$inboundSchema: z.ZodType< + ReasoningDetailTextFormat, + unknown +> = openEnums.inboundSchema(ReasoningDetailTextFormat); +/** @internal */ +export const ReasoningDetailTextFormat$outboundSchema: z.ZodType< + string, + ReasoningDetailTextFormat +> = openEnums.outboundSchema(ReasoningDetailTextFormat); + +/** @internal */ +export const ReasoningDetailText$inboundSchema: z.ZodType< + ReasoningDetailText, + unknown +> = z.object({ + type: z.literal("reasoning.text"), + text: z.nullable(z.string()).optional(), + signature: z.nullable(z.string()).optional(), + id: z.nullable(z.string()).optional(), + format: z.nullable(ReasoningDetailTextFormat$inboundSchema).optional(), + index: z.number().optional(), +}); +/** @internal */ +export type ReasoningDetailText$Outbound = { + type: "reasoning.text"; + text?: string | null | undefined; + signature?: string | null | undefined; + id?: string | 
null | undefined; + format?: string | null | undefined; + index?: number | undefined; +}; + +/** @internal */ +export const ReasoningDetailText$outboundSchema: z.ZodType< + ReasoningDetailText$Outbound, + ReasoningDetailText +> = z.object({ + type: z.literal("reasoning.text"), + text: z.nullable(z.string()).optional(), + signature: z.nullable(z.string()).optional(), + id: z.nullable(z.string()).optional(), + format: z.nullable(ReasoningDetailTextFormat$outboundSchema).optional(), + index: z.number().optional(), +}); + +export function reasoningDetailTextToJSON( + reasoningDetailText: ReasoningDetailText, +): string { + return JSON.stringify( + ReasoningDetailText$outboundSchema.parse(reasoningDetailText), + ); +} +export function reasoningDetailTextFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => ReasoningDetailText$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'ReasoningDetailText' from JSON`, + ); +} diff --git a/src/models/reasoningdetailunion.ts b/src/models/reasoningdetailunion.ts new file mode 100644 index 00000000..08cb14ea --- /dev/null +++ b/src/models/reasoningdetailunion.ts @@ -0,0 +1,77 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
+ * @generated-id: a224fd99cdb5 + */ + +import * as z from "zod/v4"; +import { safeParse } from "../lib/schemas.js"; +import { Result as SafeParseResult } from "../types/fp.js"; +import { SDKValidationError } from "./errors/sdkvalidationerror.js"; +import { + ReasoningDetailEncrypted, + ReasoningDetailEncrypted$inboundSchema, + ReasoningDetailEncrypted$Outbound, + ReasoningDetailEncrypted$outboundSchema, +} from "./reasoningdetailencrypted.js"; +import { + ReasoningDetailSummary, + ReasoningDetailSummary$inboundSchema, + ReasoningDetailSummary$Outbound, + ReasoningDetailSummary$outboundSchema, +} from "./reasoningdetailsummary.js"; +import { + ReasoningDetailText, + ReasoningDetailText$inboundSchema, + ReasoningDetailText$Outbound, + ReasoningDetailText$outboundSchema, +} from "./reasoningdetailtext.js"; + +/** + * Reasoning detail union schema + */ +export type ReasoningDetailUnion = + | ReasoningDetailSummary + | ReasoningDetailEncrypted + | ReasoningDetailText; + +/** @internal */ +export const ReasoningDetailUnion$inboundSchema: z.ZodType< + ReasoningDetailUnion, + unknown +> = z.union([ + ReasoningDetailSummary$inboundSchema, + ReasoningDetailEncrypted$inboundSchema, + ReasoningDetailText$inboundSchema, +]); +/** @internal */ +export type ReasoningDetailUnion$Outbound = + | ReasoningDetailSummary$Outbound + | ReasoningDetailEncrypted$Outbound + | ReasoningDetailText$Outbound; + +/** @internal */ +export const ReasoningDetailUnion$outboundSchema: z.ZodType< + ReasoningDetailUnion$Outbound, + ReasoningDetailUnion +> = z.union([ + ReasoningDetailSummary$outboundSchema, + ReasoningDetailEncrypted$outboundSchema, + ReasoningDetailText$outboundSchema, +]); + +export function reasoningDetailUnionToJSON( + reasoningDetailUnion: ReasoningDetailUnion, +): string { + return JSON.stringify( + ReasoningDetailUnion$outboundSchema.parse(reasoningDetailUnion), + ); +} +export function reasoningDetailUnionFromJSON( + jsonString: string, +): SafeParseResult { + return 
safeParse( + jsonString, + (x) => ReasoningDetailUnion$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'ReasoningDetailUnion' from JSON`, + ); +} diff --git a/src/models/responseformatjsonobject.ts b/src/models/responseformatjsonobject.ts new file mode 100644 index 00000000..57c3bfa2 --- /dev/null +++ b/src/models/responseformatjsonobject.ts @@ -0,0 +1,53 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. + * @generated-id: bb0b83e33de9 + */ + +import * as z from "zod/v4"; +import { safeParse } from "../lib/schemas.js"; +import { Result as SafeParseResult } from "../types/fp.js"; +import { SDKValidationError } from "./errors/sdkvalidationerror.js"; + +/** + * JSON object response format + */ +export type ResponseFormatJSONObject = { + type: "json_object"; +}; + +/** @internal */ +export const ResponseFormatJSONObject$inboundSchema: z.ZodType< + ResponseFormatJSONObject, + unknown +> = z.object({ + type: z.literal("json_object"), +}); +/** @internal */ +export type ResponseFormatJSONObject$Outbound = { + type: "json_object"; +}; + +/** @internal */ +export const ResponseFormatJSONObject$outboundSchema: z.ZodType< + ResponseFormatJSONObject$Outbound, + ResponseFormatJSONObject +> = z.object({ + type: z.literal("json_object"), +}); + +export function responseFormatJSONObjectToJSON( + responseFormatJSONObject: ResponseFormatJSONObject, +): string { + return JSON.stringify( + ResponseFormatJSONObject$outboundSchema.parse(responseFormatJSONObject), + ); +} +export function responseFormatJSONObjectFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => ResponseFormatJSONObject$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'ResponseFormatJSONObject' from JSON`, + ); +} diff --git a/src/models/responseformatjsonschema.ts b/src/models/responseformatjsonschema.ts index cb2913fd..1caafd9f 100644 --- a/src/models/responseformatjsonschema.ts +++ b/src/models/responseformatjsonschema.ts @@ -11,8 
+11,14 @@ import { JSONSchemaConfig$outboundSchema, } from "./jsonschemaconfig.js"; +/** + * JSON Schema response format for structured outputs + */ export type ResponseFormatJSONSchema = { type: "json_schema"; + /** + * JSON Schema configuration object + */ jsonSchema: JSONSchemaConfig; }; diff --git a/src/models/responseformattext.ts b/src/models/responseformattext.ts new file mode 100644 index 00000000..671ca989 --- /dev/null +++ b/src/models/responseformattext.ts @@ -0,0 +1,34 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. + * @generated-id: 9dcf902dc91a + */ + +import * as z from "zod/v4"; + +/** + * Default text response format + */ +export type ResponseFormatText = { + type: "text"; +}; + +/** @internal */ +export type ResponseFormatText$Outbound = { + type: "text"; +}; + +/** @internal */ +export const ResponseFormatText$outboundSchema: z.ZodType< + ResponseFormatText$Outbound, + ResponseFormatText +> = z.object({ + type: z.literal("text"), +}); + +export function responseFormatTextToJSON( + responseFormatText: ResponseFormatText, +): string { + return JSON.stringify( + ResponseFormatText$outboundSchema.parse(responseFormatText), + ); +} diff --git a/src/models/responseformattextconfig.ts b/src/models/responseformattextconfig.ts index 09480e22..645f81b5 100644 --- a/src/models/responseformattextconfig.ts +++ b/src/models/responseformattextconfig.ts @@ -8,11 +8,11 @@ import { safeParse } from "../lib/schemas.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; import { - ResponsesFormatJSONObject, - ResponsesFormatJSONObject$inboundSchema, - ResponsesFormatJSONObject$Outbound, - ResponsesFormatJSONObject$outboundSchema, -} from "./responsesformatjsonobject.js"; + ResponseFormatJSONObject, + ResponseFormatJSONObject$inboundSchema, + ResponseFormatJSONObject$Outbound, + ResponseFormatJSONObject$outboundSchema, +} from "./responseformatjsonobject.js"; 
import { ResponsesFormatText, ResponsesFormatText$inboundSchema, @@ -31,7 +31,7 @@ import { */ export type ResponseFormatTextConfig = | ResponsesFormatText - | ResponsesFormatJSONObject + | ResponseFormatJSONObject | ResponsesFormatTextJSONSchemaConfig; /** @internal */ @@ -40,13 +40,13 @@ export const ResponseFormatTextConfig$inboundSchema: z.ZodType< unknown > = z.union([ ResponsesFormatText$inboundSchema, - ResponsesFormatJSONObject$inboundSchema, + ResponseFormatJSONObject$inboundSchema, ResponsesFormatTextJSONSchemaConfig$inboundSchema, ]); /** @internal */ export type ResponseFormatTextConfig$Outbound = | ResponsesFormatText$Outbound - | ResponsesFormatJSONObject$Outbound + | ResponseFormatJSONObject$Outbound | ResponsesFormatTextJSONSchemaConfig$Outbound; /** @internal */ @@ -55,7 +55,7 @@ export const ResponseFormatTextConfig$outboundSchema: z.ZodType< ResponseFormatTextConfig > = z.union([ ResponsesFormatText$outboundSchema, - ResponsesFormatJSONObject$outboundSchema, + ResponseFormatJSONObject$outboundSchema, ResponsesFormatTextJSONSchemaConfig$outboundSchema, ]); diff --git a/src/models/responseformattextgrammar.ts b/src/models/responseformattextgrammar.ts index 407bdc01..6e16981f 100644 --- a/src/models/responseformattextgrammar.ts +++ b/src/models/responseformattextgrammar.ts @@ -5,8 +5,14 @@ import * as z from "zod/v4"; +/** + * Custom grammar response format + */ export type ResponseFormatTextGrammar = { type: "grammar"; + /** + * Custom grammar for text generation + */ grammar: string; }; diff --git a/src/models/responseformattextpython.ts b/src/models/responseformattextpython.ts new file mode 100644 index 00000000..a274cd1b --- /dev/null +++ b/src/models/responseformattextpython.ts @@ -0,0 +1,34 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
+ * @generated-id: 186678f4d07c + */ + +import * as z from "zod/v4"; + +/** + * Python code response format + */ +export type ResponseFormatTextPython = { + type: "python"; +}; + +/** @internal */ +export type ResponseFormatTextPython$Outbound = { + type: "python"; +}; + +/** @internal */ +export const ResponseFormatTextPython$outboundSchema: z.ZodType< + ResponseFormatTextPython$Outbound, + ResponseFormatTextPython +> = z.object({ + type: z.literal("python"), +}); + +export function responseFormatTextPythonToJSON( + responseFormatTextPython: ResponseFormatTextPython, +): string { + return JSON.stringify( + ResponseFormatTextPython$outboundSchema.parse(responseFormatTextPython), + ); +} diff --git a/src/models/responseserrorfield.ts b/src/models/responseserrorfield.ts index 9672f19f..584f66cd 100644 --- a/src/models/responseserrorfield.ts +++ b/src/models/responseserrorfield.ts @@ -10,7 +10,7 @@ import { OpenEnum } from "../types/enums.js"; import { Result as SafeParseResult } from "../types/fp.js"; import { SDKValidationError } from "./errors/sdkvalidationerror.js"; -export const CodeEnum = { +export const Code = { ServerError: "server_error", RateLimitExceeded: "rate_limit_exceeded", InvalidPrompt: "invalid_prompt", @@ -30,26 +30,26 @@ export const CodeEnum = { FailedToDownloadImage: "failed_to_download_image", ImageFileNotFound: "image_file_not_found", } as const; -export type CodeEnum = OpenEnum; +export type Code = OpenEnum; /** * Error information returned from the API */ export type ResponsesErrorField = { - code: CodeEnum; + code: Code; message: string; }; /** @internal */ -export const CodeEnum$inboundSchema: z.ZodType = openEnums - .inboundSchema(CodeEnum); +export const Code$inboundSchema: z.ZodType = openEnums + .inboundSchema(Code); /** @internal */ export const ResponsesErrorField$inboundSchema: z.ZodType< ResponsesErrorField, unknown > = z.object({ - code: CodeEnum$inboundSchema, + code: Code$inboundSchema, message: z.string(), }); diff --git 
a/src/models/responsesformatjsonobject.ts b/src/models/responsesformatjsonobject.ts deleted file mode 100644 index 8da5d102..00000000 --- a/src/models/responsesformatjsonobject.ts +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. - * @generated-id: cd67052e598c - */ - -import * as z from "zod/v4"; -import { safeParse } from "../lib/schemas.js"; -import { Result as SafeParseResult } from "../types/fp.js"; -import { SDKValidationError } from "./errors/sdkvalidationerror.js"; - -/** - * JSON object response format - */ -export type ResponsesFormatJSONObject = { - type: "json_object"; -}; - -/** @internal */ -export const ResponsesFormatJSONObject$inboundSchema: z.ZodType< - ResponsesFormatJSONObject, - unknown -> = z.object({ - type: z.literal("json_object"), -}); -/** @internal */ -export type ResponsesFormatJSONObject$Outbound = { - type: "json_object"; -}; - -/** @internal */ -export const ResponsesFormatJSONObject$outboundSchema: z.ZodType< - ResponsesFormatJSONObject$Outbound, - ResponsesFormatJSONObject -> = z.object({ - type: z.literal("json_object"), -}); - -export function responsesFormatJSONObjectToJSON( - responsesFormatJSONObject: ResponsesFormatJSONObject, -): string { - return JSON.stringify( - ResponsesFormatJSONObject$outboundSchema.parse(responsesFormatJSONObject), - ); -} -export function responsesFormatJSONObjectFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => ResponsesFormatJSONObject$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'ResponsesFormatJSONObject' from JSON`, - ); -} diff --git a/src/models/schema10.ts b/src/models/schema10.ts deleted file mode 100644 index 50f1a6bf..00000000 --- a/src/models/schema10.ts +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
- * @generated-id: 3a361cbac0f8 - */ - -import * as z from "zod/v4"; -import { - Schema14, - Schema14$Outbound, - Schema14$outboundSchema, -} from "./schema14.js"; - -export type Prompt = number | string | any; - -export type Completion = number | string | any; - -export type Schema10 = { - prompt?: number | string | any | undefined; - completion?: number | string | any | undefined; - image?: Schema14 | undefined; - audio?: Schema14 | undefined; - request?: Schema14 | undefined; -}; - -/** @internal */ -export type Prompt$Outbound = number | string | any; - -/** @internal */ -export const Prompt$outboundSchema: z.ZodType = z - .union([z.number(), z.string(), z.any()]); - -export function promptToJSON(prompt: Prompt): string { - return JSON.stringify(Prompt$outboundSchema.parse(prompt)); -} - -/** @internal */ -export type Completion$Outbound = number | string | any; - -/** @internal */ -export const Completion$outboundSchema: z.ZodType< - Completion$Outbound, - Completion -> = z.union([z.number(), z.string(), z.any()]); - -export function completionToJSON(completion: Completion): string { - return JSON.stringify(Completion$outboundSchema.parse(completion)); -} - -/** @internal */ -export type Schema10$Outbound = { - prompt?: number | string | any | undefined; - completion?: number | string | any | undefined; - image?: Schema14$Outbound | undefined; - audio?: Schema14$Outbound | undefined; - request?: Schema14$Outbound | undefined; -}; - -/** @internal */ -export const Schema10$outboundSchema: z.ZodType = z - .object({ - prompt: z.union([z.number(), z.string(), z.any()]).optional(), - completion: z.union([z.number(), z.string(), z.any()]).optional(), - image: Schema14$outboundSchema.optional(), - audio: Schema14$outboundSchema.optional(), - request: Schema14$outboundSchema.optional(), - }); - -export function schema10ToJSON(schema10: Schema10): string { - return JSON.stringify(Schema10$outboundSchema.parse(schema10)); -} diff --git a/src/models/schema14.ts 
b/src/models/schema14.ts deleted file mode 100644 index f75275f2..00000000 --- a/src/models/schema14.ts +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. - * @generated-id: 197e0ddbcace - */ - -import * as z from "zod/v4"; - -export type Schema14 = number | string | any; - -/** @internal */ -export type Schema14$Outbound = number | string | any; - -/** @internal */ -export const Schema14$outboundSchema: z.ZodType = z - .union([z.number(), z.string(), z.any()]); - -export function schema14ToJSON(schema14: Schema14): string { - return JSON.stringify(Schema14$outboundSchema.parse(schema14)); -} diff --git a/src/models/schema17.ts b/src/models/schema17.ts deleted file mode 100644 index d410ec3d..00000000 --- a/src/models/schema17.ts +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. - * @generated-id: f882e1dc50da - */ - -import * as z from "zod/v4"; -import { remap as remap$ } from "../lib/primitives.js"; -import * as openEnums from "../types/enums.js"; -import { OpenEnum } from "../types/enums.js"; - -export type Schema17ResponseHealing = { - id: "response-healing"; - enabled?: boolean | undefined; -}; - -export const PdfEngine = { - MistralOcr: "mistral-ocr", - PdfText: "pdf-text", - Native: "native", -} as const; -export type PdfEngine = OpenEnum; - -export type Pdf = { - engine?: PdfEngine | undefined; -}; - -export type Schema17FileParser = { - id: "file-parser"; - enabled?: boolean | undefined; - pdf?: Pdf | undefined; -}; - -export const Engine = { - Native: "native", - Exa: "exa", -} as const; -export type Engine = OpenEnum; - -export type Schema17Web = { - id: "web"; - enabled?: boolean | undefined; - maxResults?: number | undefined; - searchPrompt?: string | undefined; - engine?: Engine | undefined; -}; - -export type Schema17Moderation = { - id: "moderation"; -}; - -export type Schema17AutoRouter = { - id: "auto-router"; - enabled?: boolean | 
undefined; - allowedModels?: Array | undefined; -}; - -export type Schema17 = - | Schema17AutoRouter - | Schema17Moderation - | Schema17Web - | Schema17FileParser - | Schema17ResponseHealing; - -/** @internal */ -export type Schema17ResponseHealing$Outbound = { - id: "response-healing"; - enabled?: boolean | undefined; -}; - -/** @internal */ -export const Schema17ResponseHealing$outboundSchema: z.ZodType< - Schema17ResponseHealing$Outbound, - Schema17ResponseHealing -> = z.object({ - id: z.literal("response-healing"), - enabled: z.boolean().optional(), -}); - -export function schema17ResponseHealingToJSON( - schema17ResponseHealing: Schema17ResponseHealing, -): string { - return JSON.stringify( - Schema17ResponseHealing$outboundSchema.parse(schema17ResponseHealing), - ); -} - -/** @internal */ -export const PdfEngine$outboundSchema: z.ZodType = openEnums - .outboundSchema(PdfEngine); - -/** @internal */ -export type Pdf$Outbound = { - engine?: string | undefined; -}; - -/** @internal */ -export const Pdf$outboundSchema: z.ZodType = z.object({ - engine: PdfEngine$outboundSchema.optional(), -}); - -export function pdfToJSON(pdf: Pdf): string { - return JSON.stringify(Pdf$outboundSchema.parse(pdf)); -} - -/** @internal */ -export type Schema17FileParser$Outbound = { - id: "file-parser"; - enabled?: boolean | undefined; - pdf?: Pdf$Outbound | undefined; -}; - -/** @internal */ -export const Schema17FileParser$outboundSchema: z.ZodType< - Schema17FileParser$Outbound, - Schema17FileParser -> = z.object({ - id: z.literal("file-parser"), - enabled: z.boolean().optional(), - pdf: z.lazy(() => Pdf$outboundSchema).optional(), -}); - -export function schema17FileParserToJSON( - schema17FileParser: Schema17FileParser, -): string { - return JSON.stringify( - Schema17FileParser$outboundSchema.parse(schema17FileParser), - ); -} - -/** @internal */ -export const Engine$outboundSchema: z.ZodType = openEnums - .outboundSchema(Engine); - -/** @internal */ -export type 
Schema17Web$Outbound = { - id: "web"; - enabled?: boolean | undefined; - max_results?: number | undefined; - search_prompt?: string | undefined; - engine?: string | undefined; -}; - -/** @internal */ -export const Schema17Web$outboundSchema: z.ZodType< - Schema17Web$Outbound, - Schema17Web -> = z.object({ - id: z.literal("web"), - enabled: z.boolean().optional(), - maxResults: z.number().optional(), - searchPrompt: z.string().optional(), - engine: Engine$outboundSchema.optional(), -}).transform((v) => { - return remap$(v, { - maxResults: "max_results", - searchPrompt: "search_prompt", - }); -}); - -export function schema17WebToJSON(schema17Web: Schema17Web): string { - return JSON.stringify(Schema17Web$outboundSchema.parse(schema17Web)); -} - -/** @internal */ -export type Schema17Moderation$Outbound = { - id: "moderation"; -}; - -/** @internal */ -export const Schema17Moderation$outboundSchema: z.ZodType< - Schema17Moderation$Outbound, - Schema17Moderation -> = z.object({ - id: z.literal("moderation"), -}); - -export function schema17ModerationToJSON( - schema17Moderation: Schema17Moderation, -): string { - return JSON.stringify( - Schema17Moderation$outboundSchema.parse(schema17Moderation), - ); -} - -/** @internal */ -export type Schema17AutoRouter$Outbound = { - id: "auto-router"; - enabled?: boolean | undefined; - allowed_models?: Array | undefined; -}; - -/** @internal */ -export const Schema17AutoRouter$outboundSchema: z.ZodType< - Schema17AutoRouter$Outbound, - Schema17AutoRouter -> = z.object({ - id: z.literal("auto-router"), - enabled: z.boolean().optional(), - allowedModels: z.array(z.string()).optional(), -}).transform((v) => { - return remap$(v, { - allowedModels: "allowed_models", - }); -}); - -export function schema17AutoRouterToJSON( - schema17AutoRouter: Schema17AutoRouter, -): string { - return JSON.stringify( - Schema17AutoRouter$outboundSchema.parse(schema17AutoRouter), - ); -} - -/** @internal */ -export type Schema17$Outbound = - | 
Schema17AutoRouter$Outbound - | Schema17Moderation$Outbound - | Schema17Web$Outbound - | Schema17FileParser$Outbound - | Schema17ResponseHealing$Outbound; - -/** @internal */ -export const Schema17$outboundSchema: z.ZodType = z - .union([ - z.lazy(() => Schema17AutoRouter$outboundSchema), - z.lazy(() => Schema17Moderation$outboundSchema), - z.lazy(() => Schema17Web$outboundSchema), - z.lazy(() => Schema17FileParser$outboundSchema), - z.lazy(() => Schema17ResponseHealing$outboundSchema), - ]); - -export function schema17ToJSON(schema17: Schema17): string { - return JSON.stringify(Schema17$outboundSchema.parse(schema17)); -} diff --git a/src/models/schema19.ts b/src/models/schema19.ts deleted file mode 100644 index 15e01a96..00000000 --- a/src/models/schema19.ts +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. - * @generated-id: 81bc92589adb - */ - -import * as z from "zod/v4"; -import { safeParse } from "../lib/schemas.js"; -import * as openEnums from "../types/enums.js"; -import { OpenEnum } from "../types/enums.js"; -import { Result as SafeParseResult } from "../types/fp.js"; -import { SDKValidationError } from "./errors/sdkvalidationerror.js"; - -export const Schema21 = { - Unknown: "unknown", - OpenaiResponsesV1: "openai-responses-v1", - AzureOpenaiResponsesV1: "azure-openai-responses-v1", - XaiResponsesV1: "xai-responses-v1", - AnthropicClaudeV1: "anthropic-claude-v1", - GoogleGeminiV1: "google-gemini-v1", -} as const; -export type Schema21 = OpenEnum; - -export type Schema19ReasoningText = { - type: "reasoning.text"; - text?: string | null | undefined; - signature?: string | null | undefined; - id?: string | null | undefined; - format?: Schema21 | null | undefined; - index?: number | undefined; -}; - -export type Schema19ReasoningEncrypted = { - type: "reasoning.encrypted"; - data: string; - id?: string | null | undefined; - format?: Schema21 | null | undefined; - index?: number | undefined; -}; - 
-export type Schema19ReasoningSummary = { - type: "reasoning.summary"; - summary: string; - id?: string | null | undefined; - format?: Schema21 | null | undefined; - index?: number | undefined; -}; - -export type Schema19 = - | Schema19ReasoningSummary - | Schema19ReasoningEncrypted - | Schema19ReasoningText; - -/** @internal */ -export const Schema21$inboundSchema: z.ZodType = openEnums - .inboundSchema(Schema21); -/** @internal */ -export const Schema21$outboundSchema: z.ZodType = openEnums - .outboundSchema(Schema21); - -/** @internal */ -export const Schema19ReasoningText$inboundSchema: z.ZodType< - Schema19ReasoningText, - unknown -> = z.object({ - type: z.literal("reasoning.text"), - text: z.nullable(z.string()).optional(), - signature: z.nullable(z.string()).optional(), - id: z.nullable(z.string()).optional(), - format: z.nullable(Schema21$inboundSchema).optional(), - index: z.number().optional(), -}); -/** @internal */ -export type Schema19ReasoningText$Outbound = { - type: "reasoning.text"; - text?: string | null | undefined; - signature?: string | null | undefined; - id?: string | null | undefined; - format?: string | null | undefined; - index?: number | undefined; -}; - -/** @internal */ -export const Schema19ReasoningText$outboundSchema: z.ZodType< - Schema19ReasoningText$Outbound, - Schema19ReasoningText -> = z.object({ - type: z.literal("reasoning.text"), - text: z.nullable(z.string()).optional(), - signature: z.nullable(z.string()).optional(), - id: z.nullable(z.string()).optional(), - format: z.nullable(Schema21$outboundSchema).optional(), - index: z.number().optional(), -}); - -export function schema19ReasoningTextToJSON( - schema19ReasoningText: Schema19ReasoningText, -): string { - return JSON.stringify( - Schema19ReasoningText$outboundSchema.parse(schema19ReasoningText), - ); -} -export function schema19ReasoningTextFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => 
Schema19ReasoningText$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'Schema19ReasoningText' from JSON`, - ); -} - -/** @internal */ -export const Schema19ReasoningEncrypted$inboundSchema: z.ZodType< - Schema19ReasoningEncrypted, - unknown -> = z.object({ - type: z.literal("reasoning.encrypted"), - data: z.string(), - id: z.nullable(z.string()).optional(), - format: z.nullable(Schema21$inboundSchema).optional(), - index: z.number().optional(), -}); -/** @internal */ -export type Schema19ReasoningEncrypted$Outbound = { - type: "reasoning.encrypted"; - data: string; - id?: string | null | undefined; - format?: string | null | undefined; - index?: number | undefined; -}; - -/** @internal */ -export const Schema19ReasoningEncrypted$outboundSchema: z.ZodType< - Schema19ReasoningEncrypted$Outbound, - Schema19ReasoningEncrypted -> = z.object({ - type: z.literal("reasoning.encrypted"), - data: z.string(), - id: z.nullable(z.string()).optional(), - format: z.nullable(Schema21$outboundSchema).optional(), - index: z.number().optional(), -}); - -export function schema19ReasoningEncryptedToJSON( - schema19ReasoningEncrypted: Schema19ReasoningEncrypted, -): string { - return JSON.stringify( - Schema19ReasoningEncrypted$outboundSchema.parse(schema19ReasoningEncrypted), - ); -} -export function schema19ReasoningEncryptedFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => Schema19ReasoningEncrypted$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'Schema19ReasoningEncrypted' from JSON`, - ); -} - -/** @internal */ -export const Schema19ReasoningSummary$inboundSchema: z.ZodType< - Schema19ReasoningSummary, - unknown -> = z.object({ - type: z.literal("reasoning.summary"), - summary: z.string(), - id: z.nullable(z.string()).optional(), - format: z.nullable(Schema21$inboundSchema).optional(), - index: z.number().optional(), -}); -/** @internal */ -export type Schema19ReasoningSummary$Outbound = { - type: "reasoning.summary"; - 
summary: string; - id?: string | null | undefined; - format?: string | null | undefined; - index?: number | undefined; -}; - -/** @internal */ -export const Schema19ReasoningSummary$outboundSchema: z.ZodType< - Schema19ReasoningSummary$Outbound, - Schema19ReasoningSummary -> = z.object({ - type: z.literal("reasoning.summary"), - summary: z.string(), - id: z.nullable(z.string()).optional(), - format: z.nullable(Schema21$outboundSchema).optional(), - index: z.number().optional(), -}); - -export function schema19ReasoningSummaryToJSON( - schema19ReasoningSummary: Schema19ReasoningSummary, -): string { - return JSON.stringify( - Schema19ReasoningSummary$outboundSchema.parse(schema19ReasoningSummary), - ); -} -export function schema19ReasoningSummaryFromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => Schema19ReasoningSummary$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'Schema19ReasoningSummary' from JSON`, - ); -} - -/** @internal */ -export const Schema19$inboundSchema: z.ZodType = z.union([ - z.lazy(() => Schema19ReasoningSummary$inboundSchema), - z.lazy(() => Schema19ReasoningEncrypted$inboundSchema), - z.lazy(() => Schema19ReasoningText$inboundSchema), -]); -/** @internal */ -export type Schema19$Outbound = - | Schema19ReasoningSummary$Outbound - | Schema19ReasoningEncrypted$Outbound - | Schema19ReasoningText$Outbound; - -/** @internal */ -export const Schema19$outboundSchema: z.ZodType = z - .union([ - z.lazy(() => Schema19ReasoningSummary$outboundSchema), - z.lazy(() => Schema19ReasoningEncrypted$outboundSchema), - z.lazy(() => Schema19ReasoningText$outboundSchema), - ]); - -export function schema19ToJSON(schema19: Schema19): string { - return JSON.stringify(Schema19$outboundSchema.parse(schema19)); -} -export function schema19FromJSON( - jsonString: string, -): SafeParseResult { - return safeParse( - jsonString, - (x) => Schema19$inboundSchema.parse(JSON.parse(x)), - `Failed to parse 'Schema19' from JSON`, - 
); -} diff --git a/src/models/schema5.ts b/src/models/schema5.ts deleted file mode 100644 index 6b140f2a..00000000 --- a/src/models/schema5.ts +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. - * @generated-id: ceb3f15ca425 - */ - -import * as z from "zod/v4"; -import * as openEnums from "../types/enums.js"; -import { OpenEnum } from "../types/enums.js"; - -export const Schema5Enum = { - Ai21: "AI21", - AionLabs: "AionLabs", - Alibaba: "Alibaba", - Ambient: "Ambient", - AmazonBedrock: "Amazon Bedrock", - AmazonNova: "Amazon Nova", - Anthropic: "Anthropic", - ArceeAI: "Arcee AI", - AtlasCloud: "AtlasCloud", - Avian: "Avian", - Azure: "Azure", - BaseTen: "BaseTen", - BytePlus: "BytePlus", - BlackForestLabs: "Black Forest Labs", - Cerebras: "Cerebras", - Chutes: "Chutes", - Cirrascale: "Cirrascale", - Clarifai: "Clarifai", - Cloudflare: "Cloudflare", - Cohere: "Cohere", - Crusoe: "Crusoe", - DeepInfra: "DeepInfra", - DeepSeek: "DeepSeek", - Featherless: "Featherless", - Fireworks: "Fireworks", - Friendli: "Friendli", - GMICloud: "GMICloud", - Google: "Google", - GoogleAIStudio: "Google AI Studio", - Groq: "Groq", - Hyperbolic: "Hyperbolic", - Inception: "Inception", - Inceptron: "Inceptron", - InferenceNet: "InferenceNet", - Infermatic: "Infermatic", - Inflection: "Inflection", - Liquid: "Liquid", - Mara: "Mara", - Mancer2: "Mancer 2", - Minimax: "Minimax", - ModelRun: "ModelRun", - Mistral: "Mistral", - Modular: "Modular", - MoonshotAI: "Moonshot AI", - Morph: "Morph", - NCompass: "NCompass", - Nebius: "Nebius", - NextBit: "NextBit", - Novita: "Novita", - Nvidia: "Nvidia", - OpenAI: "OpenAI", - OpenInference: "OpenInference", - Parasail: "Parasail", - Perplexity: "Perplexity", - Phala: "Phala", - Relace: "Relace", - SambaNova: "SambaNova", - Seed: "Seed", - SiliconFlow: "SiliconFlow", - Sourceful: "Sourceful", - StepFun: "StepFun", - Stealth: "Stealth", - StreamLake: "StreamLake", - Switchpoint: "Switchpoint", 
- Together: "Together", - Upstage: "Upstage", - Venice: "Venice", - WandB: "WandB", - Xiaomi: "Xiaomi", - XAI: "xAI", - ZAi: "Z.AI", - FakeProvider: "FakeProvider", -} as const; -export type Schema5Enum = OpenEnum; - -export type Schema5 = Schema5Enum | string; - -/** @internal */ -export const Schema5Enum$outboundSchema: z.ZodType = - openEnums.outboundSchema(Schema5Enum); - -/** @internal */ -export type Schema5$Outbound = string | string; - -/** @internal */ -export const Schema5$outboundSchema: z.ZodType = z - .union([Schema5Enum$outboundSchema, z.string()]); - -export function schema5ToJSON(schema5: Schema5): string { - return JSON.stringify(Schema5$outboundSchema.parse(schema5)); -} diff --git a/src/models/systemmessage.ts b/src/models/systemmessage.ts index 68f3666f..de89ccd3 100644 --- a/src/models/systemmessage.ts +++ b/src/models/systemmessage.ts @@ -10,11 +10,23 @@ import { ChatMessageContentItemText$outboundSchema, } from "./chatmessagecontentitemtext.js"; +/** + * System message content + */ export type SystemMessageContent = string | Array; +/** + * System message for setting behavior + */ export type SystemMessage = { role: "system"; + /** + * System message content + */ content: string | Array; + /** + * Optional name for the system message + */ name?: string | undefined; }; diff --git a/src/models/toolchoiceoption.ts b/src/models/toolchoiceoption.ts new file mode 100644 index 00000000..521db1d5 --- /dev/null +++ b/src/models/toolchoiceoption.ts @@ -0,0 +1,79 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. 
+ * @generated-id: d767ca0ded82 + */ + +import * as z from "zod/v4"; +import { ClosedEnum } from "../types/enums.js"; +import { + NamedToolChoice, + NamedToolChoice$Outbound, + NamedToolChoice$outboundSchema, +} from "./namedtoolchoice.js"; + +export const ToolChoiceOptionRequired = { + Required: "required", +} as const; +export type ToolChoiceOptionRequired = ClosedEnum< + typeof ToolChoiceOptionRequired +>; + +export const ToolChoiceOptionAuto = { + Auto: "auto", +} as const; +export type ToolChoiceOptionAuto = ClosedEnum; + +export const ToolChoiceOptionNone = { + None: "none", +} as const; +export type ToolChoiceOptionNone = ClosedEnum; + +/** + * Tool choice configuration + */ +export type ToolChoiceOption = + | NamedToolChoice + | ToolChoiceOptionNone + | ToolChoiceOptionAuto + | ToolChoiceOptionRequired; + +/** @internal */ +export const ToolChoiceOptionRequired$outboundSchema: z.ZodEnum< + typeof ToolChoiceOptionRequired +> = z.enum(ToolChoiceOptionRequired); + +/** @internal */ +export const ToolChoiceOptionAuto$outboundSchema: z.ZodEnum< + typeof ToolChoiceOptionAuto +> = z.enum(ToolChoiceOptionAuto); + +/** @internal */ +export const ToolChoiceOptionNone$outboundSchema: z.ZodEnum< + typeof ToolChoiceOptionNone +> = z.enum(ToolChoiceOptionNone); + +/** @internal */ +export type ToolChoiceOption$Outbound = + | NamedToolChoice$Outbound + | string + | string + | string; + +/** @internal */ +export const ToolChoiceOption$outboundSchema: z.ZodType< + ToolChoiceOption$Outbound, + ToolChoiceOption +> = z.union([ + NamedToolChoice$outboundSchema, + ToolChoiceOptionNone$outboundSchema, + ToolChoiceOptionAuto$outboundSchema, + ToolChoiceOptionRequired$outboundSchema, +]); + +export function toolChoiceOptionToJSON( + toolChoiceOption: ToolChoiceOption, +): string { + return JSON.stringify( + ToolChoiceOption$outboundSchema.parse(toolChoiceOption), + ); +} diff --git a/src/models/tooldefinitionjson.ts b/src/models/tooldefinitionjson.ts index 8456579f..9393ec42 100644 
--- a/src/models/tooldefinitionjson.ts +++ b/src/models/tooldefinitionjson.ts @@ -4,24 +4,66 @@ */ import * as z from "zod/v4"; +import { remap as remap$ } from "../lib/primitives.js"; +import { ClosedEnum } from "../types/enums.js"; +import { + ChatMessageContentItemCacheControl, + ChatMessageContentItemCacheControl$Outbound, + ChatMessageContentItemCacheControl$outboundSchema, +} from "./chatmessagecontentitemcachecontrol.js"; +export const ToolDefinitionJsonType = { + Function: "function", +} as const; +export type ToolDefinitionJsonType = ClosedEnum; + +/** + * Function definition for tool calling + */ export type ToolDefinitionJsonFunction = { + /** + * Function name (a-z, A-Z, 0-9, underscores, dashes, max 64 chars) + */ name: string; + /** + * Function description for the model + */ description?: string | undefined; - parameters?: { [k: string]: any } | undefined; + /** + * Function parameters as JSON Schema object + */ + parameters?: { [k: string]: any | null } | undefined; + /** + * Enable strict schema adherence + */ strict?: boolean | null | undefined; }; +/** + * Tool definition for function calling + */ export type ToolDefinitionJson = { - type: "function"; + type: ToolDefinitionJsonType; + /** + * Function definition for tool calling + */ function: ToolDefinitionJsonFunction; + /** + * Cache control for the content part + */ + cacheControl?: ChatMessageContentItemCacheControl | undefined; }; +/** @internal */ +export const ToolDefinitionJsonType$outboundSchema: z.ZodEnum< + typeof ToolDefinitionJsonType +> = z.enum(ToolDefinitionJsonType); + /** @internal */ export type ToolDefinitionJsonFunction$Outbound = { name: string; description?: string | undefined; - parameters?: { [k: string]: any } | undefined; + parameters?: { [k: string]: any | null } | undefined; strict?: boolean | null | undefined; }; @@ -32,7 +74,7 @@ export const ToolDefinitionJsonFunction$outboundSchema: z.ZodType< > = z.object({ name: z.string(), description: z.string().optional(), - 
parameters: z.record(z.string(), z.any()).optional(), + parameters: z.record(z.string(), z.nullable(z.any())).optional(), strict: z.nullable(z.boolean()).optional(), }); @@ -46,8 +88,9 @@ export function toolDefinitionJsonFunctionToJSON( /** @internal */ export type ToolDefinitionJson$Outbound = { - type: "function"; + type: string; function: ToolDefinitionJsonFunction$Outbound; + cache_control?: ChatMessageContentItemCacheControl$Outbound | undefined; }; /** @internal */ @@ -55,8 +98,13 @@ export const ToolDefinitionJson$outboundSchema: z.ZodType< ToolDefinitionJson$Outbound, ToolDefinitionJson > = z.object({ - type: z.literal("function"), + type: ToolDefinitionJsonType$outboundSchema, function: z.lazy(() => ToolDefinitionJsonFunction$outboundSchema), + cacheControl: ChatMessageContentItemCacheControl$outboundSchema.optional(), +}).transform((v) => { + return remap$(v, { + cacheControl: "cache_control", + }); }); export function toolDefinitionJsonToJSON( diff --git a/src/models/toolresponsemessage.ts b/src/models/toolresponsemessage.ts index 1cadd683..319df667 100644 --- a/src/models/toolresponsemessage.ts +++ b/src/models/toolresponsemessage.ts @@ -11,11 +11,23 @@ import { ChatMessageContentItem$outboundSchema, } from "./chatmessagecontentitem.js"; +/** + * Tool response content + */ export type ToolResponseMessageContent = string | Array; +/** + * Tool response message + */ export type ToolResponseMessage = { role: "tool"; + /** + * Tool response content + */ content: string | Array; + /** + * ID of the assistant message tool call this message responds to + */ toolCallId: string; }; diff --git a/src/models/usermessage.ts b/src/models/usermessage.ts index ec761e7f..8234718a 100644 --- a/src/models/usermessage.ts +++ b/src/models/usermessage.ts @@ -10,11 +10,23 @@ import { ChatMessageContentItem$outboundSchema, } from "./chatmessagecontentitem.js"; +/** + * User message content + */ export type UserMessageContent = string | Array; +/** + * User message + */ export 
type UserMessage = { role: "user"; + /** + * User message content + */ content: string | Array; + /** + * Optional name for the user + */ name?: string | undefined; }; diff --git a/src/models/videoinput.ts b/src/models/videoinput.ts new file mode 100644 index 00000000..4489c8ae --- /dev/null +++ b/src/models/videoinput.ts @@ -0,0 +1,50 @@ +/* + * Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT. + * @generated-id: c9307d9d04a6 + */ + +import * as z from "zod/v4"; +import { safeParse } from "../lib/schemas.js"; +import { Result as SafeParseResult } from "../types/fp.js"; +import { SDKValidationError } from "./errors/sdkvalidationerror.js"; + +/** + * Video input object + */ +export type VideoInput = { + /** + * URL of the video (data: URLs supported) + */ + url: string; +}; + +/** @internal */ +export const VideoInput$inboundSchema: z.ZodType = z + .object({ + url: z.string(), + }); +/** @internal */ +export type VideoInput$Outbound = { + url: string; +}; + +/** @internal */ +export const VideoInput$outboundSchema: z.ZodType< + VideoInput$Outbound, + VideoInput +> = z.object({ + url: z.string(), +}); + +export function videoInputToJSON(videoInput: VideoInput): string { + return JSON.stringify(VideoInput$outboundSchema.parse(videoInput)); +} +export function videoInputFromJSON( + jsonString: string, +): SafeParseResult { + return safeParse( + jsonString, + (x) => VideoInput$inboundSchema.parse(JSON.parse(x)), + `Failed to parse 'VideoInput' from JSON`, + ); +} diff --git a/src/sdk/chat.ts b/src/sdk/chat.ts index 9020f613..e330dfa1 100644 --- a/src/sdk/chat.ts +++ b/src/sdk/chat.ts @@ -28,7 +28,7 @@ export class Chat extends ClientSDK { chatGenerationParams: { stream: true }; }, options?: RequestOptions, - ): Promise>; + ): Promise>; async send( request: operations.SendChatCompletionRequestRequest, options?: RequestOptions, diff --git a/src/sdk/sdk.ts b/src/sdk/sdk.ts index 6769c2df..1d5e1183 100644 --- a/src/sdk/sdk.ts +++ b/src/sdk/sdk.ts @@ -39,6 
+39,11 @@ export class OpenRouter extends ClientSDK { return (this._analytics ??= new Analytics(this._options)); } + private _chat?: Chat; + get chat(): Chat { + return (this._chat ??= new Chat(this._options)); + } + private _credits?: Credits; get credits(): Credits { return (this._credits ??= new Credits(this._options)); @@ -84,11 +89,6 @@ export class OpenRouter extends ClientSDK { return (this._oAuth ??= new OAuth(this._options)); } - private _chat?: Chat; - get chat(): Chat { - return (this._chat ??= new Chat(this._options)); - } - // #region sdk-class-body callModel( request: CallModelInput,