-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathconfig.lua
More file actions
122 lines (116 loc) · 4.39 KB
/
config.lua
File metadata and controls
122 lines (116 loc) · 4.39 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
local M = {}

-- Shared defaults reused by every provider entry in M.default below.
-- The user prompt is just the raw diff; the system prompt carries the
-- actual instructions and lives in the prompts module.
local prompts = require("ai_commit_msg.prompts")
local DEFAULT_SYSTEM_PROMPT = prompts.DEFAULT_SYSTEM_PROMPT
local DEFAULT_PROMPT = [[{diff}]]

-- Braille spinner frames shown while a request is in flight
-- (used when `spinner = true` in the config).
M.DEFAULT_SPINNER_FRAMES = { "⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏" }
---@class ProviderConfig
---@field model string Model to use for this provider
---@field temperature number|nil Temperature for the model (0.0 to 1.0)
---@field max_tokens number|nil Maximum tokens in the response
---@field prompt string Prompt to send to the AI
---@field system_prompt string System prompt that defines the AI's role and behavior
---@field reasoning_effort string|nil Reasoning effort for models that support it. Valid values:
--- - "none": No extended reasoning (as used by the default Gemini configuration below).
--- - "minimal": Fastest and cheapest. Least amount of "thinking". Best for simple, high-volume tasks like formatting or basic Q&A.
--- - "low": Balance between speed and quality. Good for standard tasks like summarization.
--- - "medium": Default-quality balance for creative and professional work.
--- - "high": Most expensive and thorough; the model performs deep, step-by-step reasoning (best for complex problems and debugging).
---@field pricing table|nil Pricing information for cost calculation. Supports:
--- - Flat table: { input_per_million, output_per_million } (backwards compatible)
--- - Map keyed by model: { ["model-name"] = { input_per_million, output_per_million }, default = { ... } }
---@class AiCommitMsgConfig
---@field enabled boolean Whether to enable the plugin
---@field provider string AI provider to use ("openai", "anthropic", or "gemini")
---@field providers table<string, ProviderConfig> Provider-specific configurations
---@field auto_push_prompt boolean Whether to prompt for push after commit
---@field pull_before_push { enabled: boolean, args: string[] } Whether and how to run `git pull` before pushing
---@field spinner string[]|boolean Array of spinner frames to animate, true for default frames, or false to disable spinner
---@field notifications boolean Whether to show notifications
---@field context_lines number Number of surrounding lines to include in git diff
---@field keymaps table<string, string|false> Keymaps for commit buffer
---@field cost_display string|false Cost display format ("compact", "verbose", or false to disable)
---@type AiCommitMsgConfig
M.default = {
  -- General behaviour
  enabled = true,
  provider = "gemini",
  notifications = true,
  spinner = true,
  context_lines = 10,
  cost_display = "compact", -- "compact", "verbose", or false

  -- Git push workflow
  auto_push_prompt = true,
  pull_before_push = {
    enabled = true,
    args = { "--rebase", "--autostash" },
  },

  -- Commit-buffer keymaps (set a mapping to false to disable it)
  keymaps = {
    quit = "q",
  },

  -- Per-provider settings; the `provider` field above selects which
  -- entry is used. Pricing tables are keyed by model name, in USD per
  -- million tokens, and feed the cost display.
  providers = {
    openai = {
      model = "gpt-5-mini",
      temperature = 0.3,
      max_tokens = nil,
      reasoning_effort = "minimal",
      prompt = DEFAULT_PROMPT,
      system_prompt = DEFAULT_SYSTEM_PROMPT,
      -- Add further models here to get cost estimates for them.
      pricing = {
        ["gpt-5-nano"] = { input_per_million = 0.05, output_per_million = 0.4 },
        ["gpt-5-mini"] = { input_per_million = 0.25, output_per_million = 2.00 },
        ["gpt-4.1-mini"] = { input_per_million = 0.80, output_per_million = 3.20 },
        ["gpt-4.1-nano"] = { input_per_million = 0.20, output_per_million = 0.80 },
      },
    },
    anthropic = {
      model = "claude-3-5-haiku-20241022",
      temperature = 0.3,
      max_tokens = nil,
      prompt = DEFAULT_PROMPT,
      system_prompt = DEFAULT_SYSTEM_PROMPT,
      pricing = {
        ["claude-3-5-haiku-20241022"] = { input_per_million = 0.80, output_per_million = 4.00 },
      },
    },
    gemini = {
      model = "gemini-2.5-flash-lite",
      temperature = 0.3,
      max_tokens = nil,
      reasoning_effort = "none",
      prompt = DEFAULT_PROMPT,
      system_prompt = DEFAULT_SYSTEM_PROMPT,
      pricing = {
        ["gemini-2.5-flash-lite"] = { input_per_million = 0.10, output_per_million = 0.40 },
        ["gemini-2.5-flash"] = { input_per_million = 0.30, output_per_million = 2.50 },
      },
    },
    copilot = {
      model = "gpt-4.1",
      max_tokens = nil,
      prompt = DEFAULT_PROMPT,
      system_prompt = DEFAULT_SYSTEM_PROMPT,
      pricing = {}, -- no per-model pricing configured for this provider
    },
  },
}

return M