Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 36 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ The response includes both structured JSON and Markdown for interviewer-friendly

Pipeline:

`extract_text -> parse_sections -> extract_signals -> generate_questions -> rate_difficulty -> format_output`
`extract_text -> parse_sections -> extract_signals -> generate_questions -> rate_difficulty -> validate_questions -> format_output`

## Quick Start (Local)

Expand All @@ -45,6 +45,14 @@ uv sync --all-packages
uv run uvicorn app.main:app --reload
```

1. Configure LLM provider (for production-quality generation):

```bash
cp .env.example .env
```

Then set `INTERVIEWGRAPH_LLM_PROVIDER`, `INTERVIEWGRAPH_LLM_MODEL`, and the matching API key.

1. Open API docs:

- `http://127.0.0.1:8000/docs`
Expand All @@ -53,6 +61,7 @@ uv run uvicorn app.main:app --reload

- `POST /api/v1/interview-questions` for text input
- `POST /api/v1/interview-questions/upload` for PDF upload (multipart/form-data)
- Response includes `generation_mode` (`llm` or `fallback`)

Example JSON payload:

Expand All @@ -62,6 +71,32 @@ Example JSON payload:
}
```

## Production LLM Setup

- Runtime variables:
- `INTERVIEWGRAPH_LLM_PROVIDER`: `openai` or `anthropic`
- `INTERVIEWGRAPH_LLM_MODEL`: model id (for example `gpt-4o-mini`)
- `INTERVIEWGRAPH_LLM_TEMPERATURE`: float value
- Required key by provider:
- `openai` -> `OPENAI_API_KEY`
- `anthropic` -> `ANTHROPIC_API_KEY`

If the provider configuration or its API key is missing, generation falls back to deterministic question templates.

## Staging Quality Check

Run a smoke quality check using a sample resume:

```bash
PYTHONPATH=. uv run python scripts/staging_quality_check.py --resume-file docs/samples/staging_resume.txt
```

For local environments without API keys, use fallback mode:

```bash
PYTHONPATH=. uv run python scripts/staging_quality_check.py --allow-fallback
```

## Container Usage

Build image:
Expand Down
25 changes: 22 additions & 3 deletions app/main.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from __future__ import annotations

from io import BytesIO
from pathlib import Path
from typing import Annotated

from fastapi import FastAPI, File, HTTPException, UploadFile
Expand All @@ -9,6 +10,22 @@

from casts.resume_ingestor.graph import resume_ingestor_graph


def _load_runtime_env() -> None:
"""Loads local env files for API runtime when available."""

try:
from dotenv import load_dotenv
except Exception:
return

project_root = Path(__file__).resolve().parents[1]
load_dotenv(project_root / ".env", override=False)
load_dotenv(project_root / ".env.local", override=False)


_load_runtime_env()

app = FastAPI(title="InterviewGraph API", version="0.1.0")


Expand All @@ -21,6 +38,7 @@ class GenerateResponse(BaseModel):
questions: list[dict[str, object]]
markdown: str
errors: list[dict[str, object]]
generation_mode: str = "fallback"


@app.get("/health")
Expand All @@ -43,14 +61,14 @@ def generate_interview_questions(payload: GenerateRequest) -> GenerateResponse:
questions=result.get("questions", []),
markdown=result.get("markdown", ""),
errors=result.get("errors", []),
generation_mode=str(result.get("generation_mode", "fallback")),
)


@app.post("/api/v1/interview-questions/upload", response_model=GenerateResponse)
async def generate_from_pdf(file: Annotated[UploadFile, File(...)]) -> GenerateResponse:
if file.content_type != "application/pdf" and not file.filename.lower().endswith(
".pdf"
):
filename = file.filename or ""
if file.content_type != "application/pdf" and not filename.lower().endswith(".pdf"):
raise HTTPException(status_code=400, detail="Only PDF uploads are supported.")

content = await file.read()
Expand All @@ -77,4 +95,5 @@ async def generate_from_pdf(file: Annotated[UploadFile, File(...)]) -> GenerateR
questions=result.get("questions", []),
markdown=result.get("markdown", ""),
errors=result.get("errors", []),
generation_mode=str(result.get("generation_mode", "fallback")),
)
20 changes: 16 additions & 4 deletions casts/resume_ingestor/modules/nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ def execute(self, state):
"questions": [],
"markdown": "",
"errors": [],
"generation_mode": "fallback",
}

if isinstance(resume_path, str) and resume_path.strip():
Expand All @@ -73,6 +74,7 @@ def execute(self, state):
retryable=False,
)
],
"generation_mode": "fallback",
}

try:
Expand All @@ -97,6 +99,7 @@ def execute(self, state):
retryable=True,
)
],
"generation_mode": "fallback",
}

if not loaded_text:
Expand All @@ -119,6 +122,7 @@ def execute(self, state):
retryable=True,
)
],
"generation_mode": "fallback",
}

return {
Expand All @@ -133,6 +137,7 @@ def execute(self, state):
"questions": [],
"markdown": "",
"errors": [],
"generation_mode": "fallback",
}

return {
Expand All @@ -154,6 +159,7 @@ def execute(self, state):
retryable=False,
)
],
"generation_mode": "fallback",
}


Expand Down Expand Up @@ -369,7 +375,7 @@ class GenerateQuestionsNode(BaseNode):
def execute(self, state):
existing_errors = list(state.get("errors", []))
if existing_errors:
return {"questions": []}
return {"questions": [], "generation_mode": "fallback"}

sections = state.get("sections")
signals = state.get("signals")
Expand Down Expand Up @@ -403,14 +409,14 @@ def execute(self, state):
},
)
if llm_questions is not None:
return {"questions": llm_questions}
return {"questions": llm_questions, "generation_mode": "llm"}

prompts = self._build_prompt_seeds(skills, projects, keywords, evidence)
questions = [
self._make_question(index=idx + 1, seed=seed)
for idx, seed in enumerate(prompts[:15])
]
return {"questions": questions}
return {"questions": questions, "generation_mode": "fallback"}

def _generate_questions_with_llm(
self,
Expand Down Expand Up @@ -902,6 +908,7 @@ class FormatOutputNode(BaseNode):
def execute(self, state):
questions = state.get("questions")
errors = state.get("errors")
generation_mode = state.get("generation_mode", "fallback")

if not isinstance(errors, list):
errors = []
Expand All @@ -910,6 +917,7 @@ def execute(self, state):
return {
"questions": [],
"markdown": "",
"generation_mode": "fallback",
"errors": errors
+ [
_error_item(
Expand All @@ -922,7 +930,11 @@ def execute(self, state):
}

markdown = self._render_markdown(questions)
return {"questions": questions, "markdown": markdown}
return {
"questions": questions,
"markdown": markdown,
"generation_mode": str(generation_mode),
}

def _render_markdown(self, questions: list[object]) -> str:
lines: list[str] = ["# Interview Questions", ""]
Expand Down
2 changes: 2 additions & 0 deletions casts/resume_ingestor/modules/state.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ class OutputState(TypedDict):
questions: list[InterviewQuestion]
markdown: str
errors: list[ErrorItem]
generation_mode: str


class State(MessagesState):
Expand All @@ -73,3 +74,4 @@ class State(MessagesState):
questions: list[InterviewQuestion]
markdown: str
errors: list[ErrorItem]
generation_mode: str
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ dependencies = [
"langchain-openai>=0.3.0",
"langgraph>=1.0.0",
"pypdf>=6.0.0",
"python-dotenv>=1.1.1",
"python-multipart>=0.0.20",
"uvicorn>=0.35.0",
]
Expand Down
7 changes: 7 additions & 0 deletions scripts/staging_quality_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ def _has_provider_key() -> bool:
def _quality_report(result: dict[str, object]) -> dict[str, object]:
questions = result.get("questions", [])
errors = result.get("errors", [])
generation_mode = str(result.get("generation_mode", "fallback"))

if not isinstance(questions, list):
questions = []
Expand All @@ -65,6 +66,7 @@ def _quality_report(result: dict[str, object]) -> dict[str, object]:

unique_categories = sorted(set(categories))
return {
"generation_mode": generation_mode,
"question_count": len(questions),
"unique_categories": unique_categories,
"category_count": len(unique_categories),
Expand Down Expand Up @@ -112,12 +114,17 @@ def main() -> int:
question_count = _as_int(report.get("question_count"), 0)
category_count = _as_int(report.get("category_count"), 0)

generation_mode = str(report.get("generation_mode", "fallback"))

if question_count != 15:
return 1

min_categories = 3 if llm_ready else 2
if category_count < min_categories:
return 1

if llm_ready and generation_mode != "llm":
return 1
return 0


Expand Down
1 change: 1 addition & 0 deletions tests/api_tests/test_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ def test_generate_interview_questions_from_text() -> None:
assert len(payload["questions"]) == 15
assert "# Interview Questions" in payload["markdown"]
assert payload["errors"] == []
assert payload["generation_mode"] in {"llm", "fallback"}


def test_generate_interview_questions_requires_input() -> None:
Expand Down
2 changes: 2 additions & 0 deletions tests/cast_tests/resume_ingestor_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ def test_graph_extracts_text_from_resume_text() -> None:
assert all(1 <= q["difficulty"] <= 5 for q in result["questions"])
assert "# Interview Questions" in result["markdown"]
assert result["errors"] == []
assert result["generation_mode"] in {"llm", "fallback"}


def test_graph_returns_error_when_input_missing() -> None:
Expand All @@ -28,6 +29,7 @@ def test_graph_returns_error_when_input_missing() -> None:
assert result["markdown"] == "# Interview Questions"
assert len(result["errors"]) == 1
assert result["errors"][0]["code"] == "MISSING_INPUT"
assert result["generation_mode"] == "fallback"


def test_graph_pipeline_completes_with_sectioned_resume_text() -> None:
Expand Down
6 changes: 5 additions & 1 deletion tests/node_tests/test_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,11 @@ def test_extract_signals_node_returns_error_without_sections() -> None:
assert result["errors"][0]["code"] == "MISSING_SECTIONS"


def test_generate_questions_node_creates_15_structured_items() -> None:
def test_generate_questions_node_creates_15_structured_items(monkeypatch) -> None:
monkeypatch.setattr(
"casts.resume_ingestor.modules.nodes.get_generation_model",
lambda: None,
)
node = GenerateQuestionsNode()
result = node(
{
Expand Down
2 changes: 2 additions & 0 deletions uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading