Skip to content

Commit 721eb75

Browse files
committed
feat(readme): add GitHub Copilot provider docs and config defaults
- document COPILOT_TOKEN env var and add Copilot instructions and example
- mention copilot in provider list and README features
- add copilot provider default config and reasoning_effort docs in config.lua
- send reasoning_effort in copilot provider API payload and clarify openai handling
1 parent 3d2e906 commit 721eb75

4 files changed

Lines changed: 36 additions & 4 deletions

File tree

README.md

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ perfect commit message.
1515

1616
## Features
1717

18-
- 🤖 Automatically generates commit messages using Gemini, OpenAI, or Anthropic APIs
18+
- 🤖 Automatically generates commit messages using Gemini, OpenAI, Anthropic, or GitHub Copilot APIs
1919
when you run `git commit -v`
2020
- 🎯 Works from terminal or within Neovim (using vim-fugitive)
2121
- 🤝 Non-intrusive - if you start typing, AI suggestions are added as comments instead
@@ -93,6 +93,12 @@ export OPENAI_API_KEY="your-api-key-here"
9393

9494
**For GitHub Copilot:**
9595

96+
```bash
97+
export COPILOT_TOKEN="your-github-copilot-token-here"
98+
```
99+
100+
**For Anthropic:**
101+
96102
```bash
97103
export ANTHROPIC_API_KEY="your-api-key-here"
98104
```
@@ -110,7 +116,7 @@ require("ai_commit_msg").setup({
110116
-- Enable/disable the plugin
111117
enabled = true,
112118

113-
-- AI provider to use ("gemini", "openai", or "anthropic")
119+
-- AI provider to use ("gemini", "openai", "anthropic", or "copilot")
114120
provider = "gemini",
115121

116122
-- Whether to prompt for push after commit
@@ -194,6 +200,21 @@ require("ai_commit_msg").setup({
194200

195201
196202

203+
### Switch to GitHub Copilot
204+
205+
```lua
206+
require("ai_commit_msg").setup({
207+
provider = "copilot",
208+
providers = {
209+
copilot = {
210+
model = "gpt-5-mini",
211+
},
212+
},
213+
})
214+
```
215+
216+
### Switch to Anthropic Claude
217+
197218
```lua
198219
require("ai_commit_msg").setup({
199220
provider = "anthropic",
@@ -320,6 +341,7 @@ git config --global core.editor nvim
320341
- Gemini: Set `GEMINI_API_KEY` environment variable (default, best value)
321342
- OpenAI: Set `OPENAI_API_KEY` environment variable
322343
- Anthropic: Set `ANTHROPIC_API_KEY` environment variable
344+
- GitHub Copilot: Set `COPILOT_TOKEN` environment variable
323345
- Git
324346
- curl (for making API requests)
325347

lua/ai_commit_msg/config.lua

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,11 @@ local DEFAULT_SYSTEM_PROMPT = require("ai_commit_msg.prompts").DEFAULT_SYSTEM_PR
1010
---@field max_tokens number|nil Maximum tokens in the response
1111
---@field prompt string Prompt to send to the AI
1212
---@field system_prompt string System prompt that defines the AI's role and behavior
13-
---@field reasoning_effort string|nil Reasoning effort for models that support it ("minimal", "medium", "high")
13+
---@field reasoning_effort string|nil Reasoning effort for models that support it. Valid values:
14+
--- - "minimal": Fastest and cheapest. Least amount of "thinking". Best for simple, high-volume tasks like formatting or basic Q&A.
15+
--- - "low": Balance between speed and quality. Good for standard tasks like summarization.
16+
--- - "medium": Default-quality balance for creative and professional work.
17+
--- - "high": Most expensive and thorough; model performs deep, step-by-step reasoning (best for complex problems and debugging).
1418
---@field pricing table|nil Pricing information for cost calculation. Supports:
1519
--- - Flat table: { input_per_million, output_per_million } (backwards compatible)
1620
--- - Map keyed by model: { ["model-name"] = { input_per_million, output_per_million }, default = { ... } }
@@ -107,6 +111,7 @@ M.default = {
107111
max_tokens = 10000,
108112
prompt = DEFAULT_PROMPT,
109113
system_prompt = DEFAULT_SYSTEM_PROMPT,
114+
reasoning_effort = "minimal",
110115
pricing = {},
111116
},
112117
},

lua/ai_commit_msg/providers/copilot.lua

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,11 @@ function M.call_api(config, diff, callback)
4444
payload_data.temperature = config.temperature
4545
end
4646

47+
-- Add reasoning effort for GitHub Copilot models if configured
48+
if config.reasoning_effort then
49+
payload_data.reasoning_effort = config.reasoning_effort
50+
end
51+
4752
local payload = vim.json.encode(payload_data)
4853

4954
local curl_args = {

lua/ai_commit_msg/providers/openai.lua

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ function M.call_api(config, diff, callback)
5353
max_completion_tokens = config.max_tokens,
5454
}
5555

56-
-- Only add reasoning_effort for supported models
56+
-- Only add reasoning_effort for models that support it
5757
if config.reasoning_effort and model_supports_reasoning_effort(config.model) then
5858
payload_data.reasoning_effort = config.reasoning_effort
5959
end

0 commit comments

Comments
 (0)