diff --git a/README.md b/README.md index 30bddc6..6460655 100644 --- a/README.md +++ b/README.md @@ -76,6 +76,7 @@ The configuration is described in this [doc](https://icystudio.github.io/TeleGPT ```json { "openaiAPIKey": "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", + "openaiGptModel": "gpt-3.5-turbo", "botToken": "8888888888:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "adminUsernames": ["cyandev"], "conversationLimit": 30, diff --git a/config.example.json b/config.example.json index 4662d05..8e207f8 100644 --- a/config.example.json +++ b/config.example.json @@ -1,5 +1,6 @@ { "openaiAPIKey": "sk-************************************************", + "openaiGptModel": "gpt-3.5-turbo", "botToken": "8888888888:***********************************", "conversationLimit": 16, "databasePath": "/telegpt/data/telegpt.sqlite", @@ -10,4 +11,4 @@ "i18n": { "resetPrompt": "Your conversation has been reset." } -} \ No newline at end of file +} diff --git a/src/config.rs b/src/config.rs index 4d663cf..badae5c 100644 --- a/src/config.rs +++ b/src/config.rs @@ -61,6 +61,12 @@ pub struct Config { #[serde(rename = "botToken")] pub telegram_bot_token: String, + /// The OpenAI model you want to use in chat. + /// Defaults to "gpt-3.5-turbo". + /// JSON key: `openaiGptModel` + #[serde(default = "default_openai_gpt_model", rename = "openaiGptModel")] + pub openai_gpt_model: String, + /// A timeout in seconds for waiting for the OpenAI server response. /// JSON key: `openaiAPITimeout` #[serde(default = "default_openai_api_timeout", rename = "openaiAPITimeout")] @@ -155,6 +161,7 @@ define_defaults! 
{ stream_throttle_interval: u64 = 500, conversation_limit: u64 = 20, renders_markdown: bool = false, + openai_gpt_model: String = "gpt-3.5-turbo".to_owned(), } define_defaults!(I18nStrings { diff --git a/src/modules/openai.rs b/src/modules/openai.rs index 43c1b0c..1618ca6 100644 --- a/src/modules/openai.rs +++ b/src/modules/openai.rs @@ -30,7 +30,7 @@ impl OpenAIClient { ) -> Result { let client = &self.client; let req = CreateChatCompletionRequestArgs::default() - .model("gpt-3.5-turbo") + .model(self.config.openai_gpt_model.clone()) .temperature(0.6) .max_tokens(self.config.max_tokens.unwrap_or(4096)) .messages(msgs)