-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodels.py
More file actions
97 lines (80 loc) · 2.8 KB
/
models.py
File metadata and controls
97 lines (80 loc) · 2.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
"""
Data models for the Agentic Interview System.
This module contains pure data classes with no business logic.
All models use dataclasses for clean, typed data structures.
"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class Question:
    """A single interview question together with its grading criteria.

    Attributes:
        id: Unique identifier for the question.
        text: The actual question text presented to the candidate.
        competency: Area being tested (e.g., "Python", "System Design").
        difficulty: Difficulty level (e.g., "Easy", "Medium", "Hard").
        keypoints: Ground-truth concepts that should appear in a good answer.
    """

    id: int
    text: str
    competency: str
    difficulty: str
    keypoints: list[str]
@dataclass
class KeypointCoverage:
    """Records whether one expected keypoint showed up in the candidate's answer.

    Attributes:
        keypoint: The keypoint text being evaluated.
        covered: True if this keypoint was found in the answer.
        evidence: Excerpt from the answer demonstrating coverage, when available.
    """

    keypoint: str
    covered: bool
    # No evidence is recorded unless the evaluator extracts one.
    evidence: Optional[str] = None
@dataclass
class EvaluationResult:
    """Outcome of grading one candidate answer.

    Attributes:
        question_id: ID of the question that was answered.
        raw_answer: The candidate's submitted answer text.
        score_0_100: Numeric score in the range 0-100.
        mastery_label: Qualitative assessment ("strong", "mixed", "weak").
        keypoints_coverage: Per-keypoint coverage details for the answer.
        short_feedback: Brief feedback explaining the score.
        suggested_followup: Suggested follow-up question, when one applies.
        error: Error message if the evaluation itself failed.
        error_details: Structured error metadata (e.g., code, fields).
    """

    # Required grading payload.
    question_id: int
    raw_answer: str
    score_0_100: int
    mastery_label: str
    keypoints_coverage: list[KeypointCoverage]
    short_feedback: str
    # Optional extras, absent unless the evaluator supplies them.
    suggested_followup: Optional[str] = None
    error: Optional[str] = None
    error_details: Optional[dict] = None
@dataclass
class InterviewState:
    """Mutable progress record for an interview in flight.

    Attributes:
        current_index: Index of the current question (0-based).
        finished: Whether the interview is complete.
        evaluations: Map of question_id to EvaluationResult for answered
            questions.
    """

    current_index: int = 0
    finished: bool = False
    # default_factory gives each instance its own dict (never a shared default).
    evaluations: dict[int, EvaluationResult] = field(default_factory=dict)
# Names exported via `from models import *` — the module's public API.
__all__ = ["Question", "KeypointCoverage", "EvaluationResult", "InterviewState"]