Skip to content
Merged
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "ghcc"
version = "0.1.0"
version = "0.2.0"
edition = "2024"
rust-version = "1.85"
description = "Fast conventional commit messages using GitHub Copilot"
Expand Down
13 changes: 8 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,15 +1,18 @@
# ghcc

ghcc is a CLI tool for generating conventional commit messages with GitHub Copilot.
Fast CLI tool for generating conventional commit messages with GitHub Copilot.

## Features

- **Conventional commits** - Generates conventional commit style messages ([spec](https://www.conventionalcommits.org/))
- **Fast** - Single command, no prompts required
- **Commit styles** - Single-line, detailed with bullets, or let the AI decide
- **Commit styles** - Single-line, detailed body to provide context, or let the AI decide
- **Git hook integration** - Pre-fills your editor so you can check and edit before committing
- **Uses GitHub Copilot** - Just log in with your existing account. No API keys needed.
- **Model selection** - Choose from your enabled Copilot models (default: `gpt-5-mini`)
- **Model selection** - Choose from your enabled Copilot models (default: `gpt-4.1`)

> [!TIP]
> `gpt-4.1` (default) seems to perform best of the free models. For a bump in quality, any of the non-Codex GPT-5 models (`gpt-5`, `gpt-5.1`, `gpt-5.2`) performs well but will consume premium requests.

## Quickstart

Expand Down Expand Up @@ -56,10 +59,10 @@ ghcc is a CLI tool for generating conventional commit messages with GitHub Copil

```sh
ghcc # Single-line commit message (default)
ghcc -d # Detailed: subject + bullet-point body
ghcc -d # Detailed: subject + paragraph body to provide context
ghcc -a # Auto: AI decides format based on complexity
ghcc status # Check authentication status
ghcc models # List and select models (default: gpt-5-mini)
ghcc models # List and select models (default: gpt-4.1)
ghcc hook install # Install git hook for current repo
ghcc hook uninstall
```
Expand Down
21 changes: 17 additions & 4 deletions src/auth.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ pub struct CopilotAuth {
pub machine_id: Option<String>,
#[serde(default)]
pub max_prompt_tokens: Option<u64>,
#[serde(default)]
pub supported_endpoints: Option<Vec<String>>,
}

#[derive(Debug, Deserialize)]
Expand Down Expand Up @@ -73,7 +75,7 @@ pub enum AuthError {
impl std::fmt::Display for AuthError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AuthError::NotFound => write!(f, "Credentials not found"),
AuthError::NotFound => write!(f, "Credentials not found. Try running 'ghcc login'"),
AuthError::Network(msg) => write!(f, "Network error: {}", msg),
AuthError::Parse(msg) => write!(f, "Parse error: {}", msg),
AuthError::FileSystem(msg) => write!(f, "File system error: {}", msg),
Expand Down Expand Up @@ -272,7 +274,8 @@ pub fn login() -> Result<CopilotAuth, AuthError> {
api_endpoint: copilot_resp.endpoints.get("api").cloned(),
model: None,
machine_id: Some(Uuid::new_v4().to_string()),
max_prompt_tokens: None, // Set when user selects a model
max_prompt_tokens: None, // Set when user selects a model
supported_endpoints: None, // Set when user selects a model
};

Ok(auth)
Expand All @@ -284,6 +287,7 @@ pub fn refresh_token(
current_model: Option<String>,
current_machine_id: Option<String>,
current_max_prompt_tokens: Option<u64>,
current_supported_endpoints: Option<Vec<String>>,
) -> Result<CopilotAuth, AuthError> {
let agent = create_agent();

Expand All @@ -308,6 +312,7 @@ pub fn refresh_token(
model: current_model,
machine_id: current_machine_id,
max_prompt_tokens: current_max_prompt_tokens,
supported_endpoints: current_supported_endpoints,
})
}

Expand All @@ -316,11 +321,18 @@ pub fn get_valid_auth() -> Result<CopilotAuth, AuthError> {
let mut auth = read_auth()?;

if is_expired(&auth) || auth.api_endpoint.is_none() {
// preserve current model, machine_id, and max_prompt_tokens during refresh
// preserve current model, machine_id, max_prompt_tokens, and supported_endpoints during refresh
let model = auth.model.clone();
let machine_id = auth.machine_id.clone();
let max_prompt_tokens = auth.max_prompt_tokens;
auth = refresh_token(&auth.refresh, model, machine_id, max_prompt_tokens)?;
let supported_endpoints = auth.supported_endpoints.clone();
auth = refresh_token(
&auth.refresh,
model,
machine_id,
max_prompt_tokens,
supported_endpoints,
)?;
save_auth(&auth)?;
}

Expand Down Expand Up @@ -367,6 +379,7 @@ mod tests {
model: None,
machine_id: None,
max_prompt_tokens: None,
supported_endpoints: None,
}
}

Expand Down
Loading