Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 13 additions & 14 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,17 +1,16 @@
# .env.example file
# This file provides examples of environment variables needed for the project.
# For actual use, you will need to copy this file to create an .env file and enter the actual values.
# InterviewGraph environment example
# Copy this file to .env and replace placeholder values.

# Langsmith Project Tracking
# LangSmith is a tool for monitoring and debugging
LANGSMITH_PROJECT=act... # Project name to be used by LangSmith.
LANGSMITH_API_KEY=lsv2... # LangSmith API key (must be replaced with real key)
# Optional observability
LANGSMITH_PROJECT=interviewgraph
LANGSMITH_API_KEY=

# Depending on the configuration you choose, you will need the following environment variables.
# LLM generation runtime config
# Supported providers in this repo: openai, anthropic
INTERVIEWGRAPH_LLM_PROVIDER=openai
INTERVIEWGRAPH_LLM_MODEL=gpt-4o-mini
INTERVIEWGRAPH_LLM_TEMPERATURE=0.2

## LLM API Keys:
# OpenAI API Key - required to use the GPT model.
# You can get it from the OpenAI website (https://platform.openai.com/).
OPENAI_API_KEY=sk...

# Others...
# Provider API keys (set the one matching provider)
OPENAI_API_KEY=
ANTHROPIC_API_KEY=
51 changes: 43 additions & 8 deletions casts/resume_ingestor/modules/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,48 @@
- Embedding Models: https://docs.langchain.com/oss/python/integrations/text_embedding
"""

# from langchain.chat_models import init_chat_model
# from langchain_openai import ChatOpenAI
from __future__ import annotations

import os
from typing import Any

def get_sample_model():
    """Deprecated placeholder kept for backward compatibility.

    Superseded by ``get_generation_model()``; no model is configured here.

    Returns:
        None, always.
    """
    # Explicit return instead of a bare `pass` + commented-out code:
    # callers receive None either way, but the contract is now visible.
    return None

def get_generation_model() -> Any | None:
    """Build a LangChain chat model from environment configuration.

    Environment variables (all optional):
    - INTERVIEWGRAPH_LLM_PROVIDER (default: openai)
    - INTERVIEWGRAPH_LLM_MODEL (default: gpt-4o-mini)
    - INTERVIEWGRAPH_LLM_TEMPERATURE (default: 0.2)

    Returns:
        A configured chat model, or None when the provider has no API key
        set or model construction fails for any reason (best-effort).
    """

    def _env(name: str, fallback: str) -> str:
        # All config values are read the same way: env var, default, strip.
        return os.getenv(name, fallback).strip()

    provider = _env("INTERVIEWGRAPH_LLM_PROVIDER", "openai").lower()

    # Bail out early when the matching API key is absent.
    if not _has_provider_credentials(provider):
        return None

    # Malformed temperature values fall back to the documented default.
    try:
        temperature = float(_env("INTERVIEWGRAPH_LLM_TEMPERATURE", "0.2"))
    except ValueError:
        temperature = 0.2

    # Import lazily so environments without langchain still load this module.
    try:
        from langchain.chat_models import init_chat_model
    except Exception:
        return None

    try:
        return init_chat_model(
            model=_env("INTERVIEWGRAPH_LLM_MODEL", "gpt-4o-mini"),
            model_provider=provider,
            temperature=temperature,
        )
    except Exception:
        # Best-effort contract: any construction failure yields None.
        return None


def _has_provider_credentials(provider: str) -> bool:
    """Return True when a non-blank API key is set for *provider*."""
    key_vars = {
        "openai": "OPENAI_API_KEY",
        "anthropic": "ANTHROPIC_API_KEY",
    }
    env_var = key_vars.get(provider)
    if env_var is None:
        # Unsupported provider: no credential can satisfy it.
        return False
    return bool(os.getenv(env_var, "").strip())
Loading
Loading