-- url = "http://192.168.1.164:1234", -- optional: default value is ollama url http://127.0.0.1:11434
282
-
-- -- api_key = "OpenAI_API_KEY", -- optional: if your endpoint is authenticated
283
-
-- chat_url = "/v1/chat/completions", -- optional: default value, override if different
284
-
-- models_endpoint = "/v1/models", -- optional: attaches to the end of the URL to form the endpoint to retrieve models
285
-
-- },
286
-
-- schema = {
287
-
-- model = {
288
-
-- default = "zeta", -- define llm model to be used
289
-
-- },
290
-
-- temperature = {
291
-
-- order = 2,
292
-
-- mapping = "parameters",
293
-
-- type = "number",
294
-
-- optional = true,
295
-
-- default = 0.4,
296
-
-- desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
297
-
-- validate = function(n)
298
-
-- return n >= 0 and n <= 2, "Must be between 0 and 2"
299
-
-- end,
300
-
-- },
301
-
-- max_completion_tokens = {
302
-
-- order = 3,
303
-
-- mapping = "parameters",
304
-
-- type = "integer",
305
-
-- optional = true,
306
-
-- default = nil,
307
-
-- desc = "An upper bound for the number of tokens that can be generated for a completion.",
308
-
-- validate = function(n)
309
-
-- return n > 0, "Must be greater than 0"
310
-
-- end,
311
-
-- },
312
-
-- stop = {
313
-
-- order = 4,
314
-
-- mapping = "parameters",
315
-
-- type = "string",
316
-
-- optional = true,
317
-
-- default = nil,
318
-
-- desc = "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.",
319
-
-- validate = function(s)
320
-
-- return s:len() > 0, "Cannot be an empty string"
321
-
-- end,
322
-
-- },
323
-
-- logit_bias = {
324
-
-- order = 5,
325
-
-- mapping = "parameters",
326
-
-- type = "map",
327
-
-- optional = true,
328
-
-- default = nil,
329
-
-- desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
330
-
-- subtype_key = {
331
-
-- type = "integer",
332
-
-- },
333
-
-- subtype = {
334
-
-- type = "integer",
335
-
-- validate = function(n)
336
-
-- return n >= -100 and n <= 100, "Must be between -100 and 100"
-- -- url = "http://192.168.1.164:1234", -- optional: default value is ollama url http://127.0.0.1:11434
307
+
-- -- -- api_key = "OpenAI_API_KEY", -- optional: if your endpoint is authenticated
308
+
-- -- chat_url = "/v1/chat/completions", -- optional: default value, override if different
309
+
-- -- models_endpoint = "/v1/models", -- optional: attaches to the end of the URL to form the endpoint to retrieve models
310
+
-- -- },
311
+
-- -- schema = {
312
+
-- -- model = {
313
+
-- -- default = "zeta", -- define llm model to be used
314
+
-- -- },
315
+
-- -- temperature = {
316
+
-- -- order = 2,
317
+
-- -- mapping = "parameters",
318
+
-- -- type = "number",
319
+
-- -- optional = true,
320
+
-- -- default = 0.4,
321
+
-- -- desc = "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.",
322
+
-- -- validate = function(n)
323
+
-- -- return n >= 0 and n <= 2, "Must be between 0 and 2"
324
+
-- -- end,
325
+
-- -- },
326
+
-- -- max_completion_tokens = {
327
+
-- -- order = 3,
328
+
-- -- mapping = "parameters",
329
+
-- -- type = "integer",
330
+
-- -- optional = true,
331
+
-- -- default = nil,
332
+
-- -- desc = "An upper bound for the number of tokens that can be generated for a completion.",
333
+
-- -- validate = function(n)
334
+
-- -- return n > 0, "Must be greater than 0"
335
+
-- -- end,
336
+
-- -- },
337
+
-- -- stop = {
338
+
-- -- order = 4,
339
+
-- -- mapping = "parameters",
340
+
-- -- type = "string",
341
+
-- -- optional = true,
342
+
-- -- default = nil,
343
+
-- -- desc = "Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.",
344
+
-- -- validate = function(s)
345
+
-- -- return s:len() > 0, "Cannot be an empty string"
346
+
-- -- end,
347
+
-- -- },
348
+
-- -- logit_bias = {
349
+
-- -- order = 5,
350
+
-- -- mapping = "parameters",
351
+
-- -- type = "map",
352
+
-- -- optional = true,
353
+
-- -- default = nil,
354
+
-- -- desc = "Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID) to an associated bias value from -100 to 100. Use https://platform.openai.com/tokenizer to find token IDs.",
355
+
-- -- subtype_key = {
356
+
-- -- type = "integer",
357
+
-- -- },
358
+
-- -- subtype = {
359
+
-- -- type = "integer",
360
+
-- -- validate = function(n)
361
+
-- -- return n >= -100 and n <= 100, "Must be between -100 and 100"
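For context, the hunk above re-comments an example OpenAI-compatible adapter configuration by adding a second `--` prefix to each line. The sketch below shows roughly what that block configures once uncommented. It is a minimal sketch, assuming a codecompanion.nvim-style setup; the plugin name, the extend("openai_compatible", ...) call, the env wrapper, and the local_llm adapter name are assumptions inferred from the field names in the diff, not something this commit confirms.

-- Minimal sketch, assuming a codecompanion.nvim-style "openai_compatible"
-- adapter; the plugin name, extend() call, and "local_llm" key are
-- assumptions, not part of the commit.
require("codecompanion").setup({
  adapters = {
    local_llm = function() -- "local_llm" is a hypothetical adapter name
      return require("codecompanion.adapters").extend("openai_compatible", {
        env = {
          url = "http://192.168.1.164:1234", -- defaults to the Ollama URL http://127.0.0.1:11434
          -- api_key = "OpenAI_API_KEY",     -- only if the endpoint is authenticated
          chat_url = "/v1/chat/completions", -- appended to url for chat requests
          models_endpoint = "/v1/models",    -- appended to url to list available models
        },
        schema = {
          model = {
            default = "zeta", -- the model name the endpoint serves
          },
          temperature = {
            order = 2,
            mapping = "parameters", -- merged into the request parameters
            type = "number",
            optional = true,
            default = 0.4,
            validate = function(n)
              -- each validator returns ok, error_message
              return n >= 0 and n <= 2, "Must be between 0 and 2"
            end,
          },
          -- max_completion_tokens, stop, and logit_bias follow the same shape;
          -- logit_bias maps token IDs to a bias in [-100, 100], e.g.
          -- { ["15339"] = -100 } to suppress a token (the ID is illustrative).
        },
      })
    end,
  },
})

The validate functions return a boolean plus an error message, so an out-of-range value such as temperature = 2.5 would be rejected with "Must be between 0 and 2" before a request is built.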