Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions backend/app/conversations/feedback/routes/routes.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from app.context_vars import session_id_ctx_var, user_id_ctx_var, client_id_ctx_var
from app.conversations.feedback.services.errors import InvalidOptionError, InvalidQuestionError, QuestionsFileError
from app.conversations.feedback.services.service import IUserFeedbackService, UserFeedbackService, NewFeedbackSpec
from app.conversations.feedback.services.types import QuestionsConfig
from app.errors.constants import NO_PERMISSION_FOR_SESSION
from app.errors.errors import UnauthorizedSessionAccessError
from app.metrics.services.get_metrics_service import get_metrics_service
Expand Down Expand Up @@ -72,6 +73,32 @@ def add_user_feedback_routes(users_router: APIRouter, auth: Authentication):
"""
router = APIRouter(prefix="/feedback", tags=["users-feedback"])

@router.get("/questions",
status_code=HTTPStatus.OK,
response_model=QuestionsConfig,
responses={
HTTPStatus.INTERNAL_SERVER_ERROR: {"model": HTTPErrorResponse}
},
name="get questions configuration",
description="Get the questions configuration for the feedback form"
)
async def _get_questions_config(
user_feedback_service: IUserFeedbackService = Depends(_get_user_feedback_service)
) -> QuestionsConfig:
"""
Get the questions configuration for the feedback form.

:param user_feedback_service: Service for managing user feedback
:return: The questions configuration
:raises HTTPException: If there's an error loading the questions
"""
try:
questions_data = await user_feedback_service.get_questions_config()
return questions_data
except QuestionsFileError as e:
logger.exception(e)
raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail="Failed to load questions configuration")

@router.patch("",
status_code=HTTPStatus.OK,
response_model=FeedbackResponse,
Expand Down
59 changes: 57 additions & 2 deletions backend/app/conversations/feedback/routes/test_routes.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,11 @@
from fastapi import FastAPI, APIRouter
from fastapi.testclient import TestClient

from app.conversations.feedback.services.errors import InvalidQuestionError, InvalidOptionError
from app.conversations.feedback.services.errors import InvalidQuestionError, InvalidOptionError, QuestionsFileError
from app.conversations.feedback.services.service import IUserFeedbackService
from app.conversations.feedback.services.types import NewFeedbackSpec, NewFeedbackVersionSpec, NewFeedbackItemSpec, \
Feedback, \
Version, FeedbackItem, Answer
Version, FeedbackItem, Answer, QuestionsConfig
from app.users.auth import UserInfo
from app.users.get_user_preferences_repository import get_user_preferences_repository
from app.users.repositories import IUserPreferenceRepository
Expand Down Expand Up @@ -84,6 +84,9 @@ async def upsert_user_feedback(self, user_id: str, session_id: int, feedback: Fe
    async def get_answered_questions(self, user_id: str) -> list[int]:
        # Stub for the mocked service: raises unless a test replaces it with a mock.
        raise NotImplementedError()

    async def get_questions_config(self) -> QuestionsConfig:
        # Stub for the mocked service: individual tests override this with an AsyncMock.
        raise NotImplementedError()

mocked_feedback_service = MockedFeedbackService()

# Mock the user preferences repository
Expand Down Expand Up @@ -351,3 +354,55 @@ async def test_upsert_feedback_payload_too_large(self, authenticated_client_with
assert "payload" in response.json()["detail"].lower()
# AND the service's upsert_user_feedback method was not called
_upsert_spy.assert_not_called()

@pytest.mark.asyncio
async def test_get_questions_config_success(self, authenticated_client_with_mocks: TestClientWithMocks):
client, mocked_service, _, _ = authenticated_client_with_mocks
# GIVEN a session ID for which the questions configuration is requested
given_session_id = 123

# GIVEN a valid questions configuration
given_config: QuestionsConfig = {
"test_question": {
"question_text": "Test question",
"description": "Test description",
"comment_placeholder": "Test placeholder",
"type": "yes_no",
"show_comments_on": "yes"
}
}
mocked_service.get_questions_config = AsyncMock(return_value=given_config)

# WHEN getting the questions configuration
response = client.get(f"/conversations/{given_session_id}/feedback/questions",)

# THEN the response should be OK
assert response.status_code == HTTPStatus.OK

# AND the response should contain the questions configuration
actual_config = response.json()
assert actual_config == given_config

# AND the service's get_questions_config method was called
mocked_service.get_questions_config.assert_called_once()

@pytest.mark.asyncio
async def test_get_questions_config_error(self, authenticated_client_with_mocks: TestClientWithMocks):
client, mocked_service, _, _ = authenticated_client_with_mocks
# GIVEN a session ID for which the questions configuration is requested
given_session_id = 123

# GIVEN the service raises a QuestionsFileError
mocked_service.get_questions_config = AsyncMock(side_effect=QuestionsFileError("Test error"))

# WHEN getting the questions configuration
response = client.get(f"/conversations/{given_session_id}/feedback/questions",)

# THEN the response should be INTERNAL_SERVER_ERROR
assert response.status_code == HTTPStatus.INTERNAL_SERVER_ERROR

# AND the response should contain the error message
assert response.json()["detail"] == "Failed to load questions configuration"

# AND the service's get_questions_config method was called
mocked_service.get_questions_config.assert_called_once()
46 changes: 37 additions & 9 deletions backend/app/conversations/feedback/services/questions-en.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,17 +2,29 @@
"interaction_ease": {
"question_text": "How easy or difficult was it to interact with Compass and understand its responses?",
"description": "This question is used to measure the Customer Effort Score (CES). The response is on a scale from 1 (Very difficult) to 5 (Very easy).",
"comment_placeholder": "Please explain your rating and share any suggestions for improving the ease of interaction with Compass."
"comment_placeholder": "Please explain your rating and share any suggestions for improving the ease of interaction with Compass.",
"low_rating_label": "Very difficult",
"high_rating_label": "Very easy",
"type": "rating",
"max_rating": 5,
"display_rating": true
},
"clarity_of_skills": {
"question_text": "Did Compass help you gain a clearer understanding of your skills? If not, why?",
"description": "This question aims to measure Perceived Usefulness (PU). The user can answer TRUE (indicating Compass helped) or FALSE (indicating Compass did not help), with an optional text field for additional comments.",
"comment_placeholder": "Please share why Compass did not help you gain a clearer understanding of your skills."
"comment_placeholder": "Please share why Compass did not help you gain a clearer understanding of your skills.",
"type": "yes_no",
"show_comments_on": "no"
},
"satisfaction_with_compass": {
"question_text": "How satisfied are you with Compass?",
"description": "This question is used to measure the Customer Satisfaction Score (CSAT). The response is on a scale from 1 (Very dissatisfied) to 5 (Very satisfied).",
"comment_placeholder": null
"comment_placeholder": null,
"type": "rating",
"max_rating": 5,
"display_rating": true,
"low_rating_label": "Very dissatisfied",
"high_rating_label": "Very satisfied"
},
"work_experience_accuracy": {
"question_text": "Were there any aspects of your work experience information identified by Compass that were inaccurate?",
Expand All @@ -25,31 +37,47 @@
"other": "Other"
},
"description": "This question to identify specific inaccuracies in the work experience information. The response is an array of selected option keys, with optional additional comments.",
"comment_placeholder": "Please provide more details about the inaccuracies in your work experience information identified by Compass."
"comment_placeholder": "Please provide more details about the inaccuracies in your work experience information identified by Compass.",
"type": "checkbox",
"low_rating_label": "Inaccurate",
"high_rating_label": "Very accurate"
},
"incorrect_skills": {
"question_text": "Are there any skills that Compass incorrectly identified for you?",
"description": "This question aims to indentify incorrectly identified skills The user can answer TRUE (indicating incorrect skills) or FALSE (indicating no incorrect skills). In case of TRUE, the user can provide additional comments.",
"comment_placeholder": "Please list any skills that Compass incorrectly identified for you."
"comment_placeholder": "Please list any skills that Compass incorrectly identified for you.",
"type": "yes_no",
"show_comments_on": "yes"
},
"missing_skills": {
"question_text": "Are there any skills you have that Compass missed and did not identify?",
"description": "This question aims to identify missing skills. Responses should be TRUE (indicating missing skills) or FALSE (indicating no missing skills). In case of TRUE, the user can provide additional comments.",
"comment_placeholder": "Please list any skills you have that Compass did not identify."
"comment_placeholder": "Please list any skills you have that Compass did not identify.",
"type": "yes_no",
"show_comments_on": "yes"
},
"perceived_bias": {
"question_text": "Did you feel that Compass treated you unfairly by making assumptions about your background, language, or other personal characteristics? If so, please describe your experience.",
"description": "This question aims to identify perceived bias. Responses should be TRUE (indicating bias) or FALSE (indicating no bias), with an optional text field for additional comments.",
"comment_placeholder": "Please share more details about your experience. Include specific examples of when you felt Compass treated you unfairly or made assumptions about your background, language, or other personal characteristics."
"comment_placeholder": "Please share more details about your experience. Include specific examples of when you felt Compass treated you unfairly or made assumptions about your background, language, or other personal characteristics.",
"type": "yes_no",
"show_comments_on": "yes"
},
"recommendation": {
"question_text": "How likely are you to recommend Compass to other job seekers?",
"description": "This question is used to measure the Net Promoter Score (NPS). The response is on a scale from 1 (Not at all likely) to 5 (Extremely likely).",
"comment_placeholder": null
"comment_placeholder": null,
"type": "rating",
"max_rating": 5,
"display_rating": true,
"low_rating_label": "Unlikely",
"high_rating_label": "Likely"
},
"additional_feedback": {
"question_text": "Please share any additional feedback or suggestions you have for improving Compass.",
"description": "Used to collect any additional feedback or suggestions for improving Compass from the user. The response is in the form of a text.",
"comment_placeholder": "We'd love to hear your thoughts! Please share any additional feedback or suggestions you have for improving Compass."
"comment_placeholder": "We'd love to hear your thoughts! Please share any additional feedback or suggestions you have for improving Compass.",
"type": "rating",
"display_rating": false
}
}
48 changes: 45 additions & 3 deletions backend/app/conversations/feedback/services/service.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,15 @@
import json
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, cast
from typing import Any, Dict
from pathlib import Path

from app.conversations.feedback.repository import IUserFeedbackRepository
from app.app_config import get_application_config
from app.metrics.services.service import IMetricsService
from app.metrics.types import FeedbackProvidedEvent, FeedbackRatingValueEvent, FeedbackTypeLiteral
from .types import Feedback, NewFeedbackSpec, FeedbackItem, Version, AnsweredQuestions
from .types import Feedback, NewFeedbackSpec, FeedbackItem, Version, AnsweredQuestions, QuestionsConfig, Question, YesNoQuestion, RatingQuestion, \
CheckboxQuestion
from .errors import (
InvalidQuestionError,
QuestionsFileError
Expand Down Expand Up @@ -91,6 +92,11 @@ def calculate_ces_value(answer: int) -> int:


async def load_questions() -> Dict[str, Any]:
"""
Load questions from the JSON file.
:return: Dictionary containing questions data
:raises QuestionsFileError: If there's an error loading the questions file
"""
global questions_cache
if not questions_cache:
questions_file = Path(__file__).parent / "questions-en.json"
Expand Down Expand Up @@ -134,6 +140,16 @@ async def get_answered_questions(self, user_id: str) -> AnsweredQuestions:
"""
raise NotImplementedError()

    @abstractmethod
    async def get_questions_config(self) -> QuestionsConfig:
        """
        Get the questions configuration for the feedback form.

        :return: The questions configuration, keyed by question id
        :raises QuestionsFileError: If there's an error loading the questions file
        """
        raise NotImplementedError()


class UserFeedbackService(IUserFeedbackService):
"""
Expand All @@ -145,6 +161,32 @@ def __init__(self, user_feedback_repository: IUserFeedbackRepository, metrics_se
self._user_feedback_repository: IUserFeedbackRepository = user_feedback_repository
self._metrics_service: IMetricsService = metrics_service

async def get_questions_config(self) -> QuestionsConfig:
"""
Get the questions configuration for the feedback form.

:return: The questions configuration
:raises QuestionsFileError: If there's an error loading the questions file
"""
questions_data = await load_questions()
if not questions_data:
raise QuestionsFileError("No questions data available")

# Convert the raw questions data to the proper Pydantic models
config: QuestionsConfig = {}
for question_id, question_data in questions_data.items():
question_type = question_data.get("type")
if question_type == "yes_no":
config[question_id] = YesNoQuestion(**question_data)
elif question_type == "rating":
config[question_id] = RatingQuestion(**question_data)
elif question_type == "checkbox":
config[question_id] = CheckboxQuestion(**question_data)
else:
raise QuestionsFileError(f"Invalid question type '{question_type}' for question '{question_id}'")

return config

async def upsert_user_feedback(self, user_id: str, session_id: int, feedback_spec: NewFeedbackSpec) -> Feedback:
questions_data = await load_questions()
if not questions_data:
Expand Down Expand Up @@ -225,7 +267,7 @@ async def _capture_metrics(self, user_id: str, session_id: int, feedback_items:
logger.error(f"Rating value {item.answer.rating_numeric} for question {item.question_id} is out of range (1-5)")
continue
feedback_type: FeedbackTypeLiteral
match item.question_id:
match item.question_id: # TODO: check unreachable
case "recommendation":
feedback_type = "NPS"
value = calculate_nps_value(item.answer.rating_numeric)
Expand Down
Loading