Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
"image": "mcr.microsoft.com/devcontainers/python:3",
"features": {
"ghcr.io/devcontainers/features/python:1": {},
"ghcr.io/gvatsal60/dev-container-features/pre-commit": {},
"ghcr.io/gvatsal60/dev-container-features/pre-commit:1": {}
},
"postCreateCommand": "bash ./scripts/devcontainer_start.sh",
"customizations": {
Expand Down
28 changes: 28 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1 +1,29 @@
# The AI model to use, e.g., "openai:gpt-4.1"
# check for available models at https://ai.pydantic.dev/models/
AI_MODEL=your-model-name-here

# To use OpenAI's API, you need to set your API key in the environment variable OPENAI_API_KEY.
OPENAI_API_KEY=your-api-key-here

# To use Anthropic's API, you need to set your API key in the environment variable ANTHROPIC_API_KEY.
ANTHROPIC_API_KEY=your-anthropic-api-key-here

# To use Gemini's API, you need to set your API key in the environment variable GEMINI_API_KEY.
GEMINI_API_KEY=your-gemini-api-key-here

# To use Google's API, you need to set your API key in the environment variable GOOGLE_API_KEY.
GOOGLE_API_KEY=your-google-api-key-here

# AWS credentials for Amazon Bedrock (not a single API key)
AWS_ACCESS_KEY_ID=your-aws-access-key-id-here
AWS_SECRET_ACCESS_KEY=your-aws-secret-access-key-here
AWS_DEFAULT_REGION=your-aws-region-here

# To use Cohere's API, you need to set your API key in the environment variable CO_API_KEY.
CO_API_KEY=your-cohere-api-key-here

# To use Groq's API, you need to set your API key in the environment variable GROQ_API_KEY.
GROQ_API_KEY=your-groq-api-key-here

# To use Mistral's API, you need to set your API key in the environment variable MISTRAL_API_KEY.
MISTRAL_API_KEY=your-mistral-api-key-here
3 changes: 3 additions & 0 deletions .github/workflows/test-script.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@ on:
branches:
- main

env:
OPENAI_API_KEY: "my-test-key"

jobs:
build:

Expand Down
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,4 @@ boto3
google-cloud
google-api-core
cachetools
pydantic-ai
45 changes: 7 additions & 38 deletions struct_module/file_item.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,16 +4,13 @@
import shutil
import logging
import time
from openai import OpenAI
from dotenv import load_dotenv
from struct_module.template_renderer import TemplateRenderer
from struct_module.content_fetcher import ContentFetcher
from struct_module.model_wrapper import ModelWrapper

load_dotenv()

openai_api_key = os.getenv("OPENAI_API_KEY")
openai_model = os.getenv("OPENAI_MODEL")

class FileItem:
def __init__(self, properties):
self.logger = logging.getLogger(__name__)
Expand All @@ -32,11 +29,9 @@ def __init__(self, properties):

self.system_prompt = properties.get("system_prompt") or properties.get("global_system_prompt")
self.user_prompt = properties.get("user_prompt")
self.openai_client = None
self.mappings = properties.get("mappings", {})

if openai_api_key:
self._configure_openai()
self.model_wrapper = ModelWrapper(self.logger)

self.template_renderer = TemplateRenderer(
self.config_variables,
Expand All @@ -45,55 +40,29 @@ def __init__(self, properties):
self.mappings
)

def _configure_openai(self):
self.openai_client = OpenAI(api_key=openai_api_key)
if not openai_model:
self.logger.debug("OpenAI model not found. Using default model.")
self.openai_model = "gpt-4.1"
else:
self.logger.debug(f"Using OpenAI model: {openai_model}")
self.openai_model = openai_model

def _get_file_directory(self):
return os.path.dirname(self.name)

def process_prompt(self, dry_run=False, existing_content=None):
if self.user_prompt:
if not self.openai_client or not openai_api_key:
self.logger.warning("Skipping processing prompt as OpenAI API key is not set.")
return

if not self.system_prompt:
system_prompt = "You are a software developer working on a project. You need to create a file with the following content:"
else:
system_prompt = self.system_prompt

# If existing_content is provided, append it to the user prompt
user_prompt = self.user_prompt
if existing_content:
user_prompt += f"\n\nCurrent file content (if any):\n```\n{existing_content}\n```\n\nPlease modify existing content so that it meets the new requirements. Your output should be plain text, without any code blocks or formatting. Do not include any explanations or comments. Just provide the final content of the file."

self.logger.debug(f"Using system prompt: {system_prompt}")
self.logger.debug(f"Using user prompt: {user_prompt}")

if dry_run:
self.logger.info("[DRY RUN] Would generate content using OpenAI API.")
self.content = "[DRY RUN] Generating content using OpenAI"
return

if self.openai_client and openai_api_key:
completion = self.openai_client.chat.completions.create(
model=self.openai_model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt}
]
)

self.content = completion.choices[0].message.content
else:
self.content = "OpenAI API key not found. Skipping content generation."
self.logger.warning("Skipping processing prompt as OpenAI API key is not set.")
self.content = self.model_wrapper.generate_content(
system_prompt,
user_prompt,
dry_run=dry_run
)
self.logger.debug(f"Generated content: \n\n{self.content}")

def fetch_content(self):
Expand Down
32 changes: 32 additions & 0 deletions struct_module/model_wrapper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
import os
import logging
from dotenv import load_dotenv
from pydantic_ai import Agent

load_dotenv()

class ModelWrapper:
  """
  Wraps model logic using a pydantic-ai Agent, allowing use of multiple LLM providers.

  The model is selected via the AI_MODEL environment variable (e.g.
  "openai:gpt-4.1"); that is also the fallback default.
  """
  def __init__(self, logger=None):
    # Fall back to a module-level logger when none is supplied.
    self.logger = logger or logging.getLogger(__name__)
    self.model_name = os.getenv("AI_MODEL") or "openai:gpt-4.1"
    # Default agent (no system prompt); kept as an attribute for backward
    # compatibility — generate_content builds a prompt-specific agent.
    self.agent = Agent(model=self.model_name)
    self.logger.debug(f"Configured Agent with model: {self.model_name}")

  def generate_content(self, system_prompt, user_prompt, dry_run=False):
    """
    Generate file content from the given prompts.

    Returns the model output string, a dry-run placeholder, or an error
    message string if generation fails (this method never raises).
    """
    if not self.agent:
      self.logger.warning("No agent configured. Skipping content generation.")
      return "No agent configured. Skipping content generation."
    if dry_run:
      self.logger.info("[DRY RUN] Would generate content using AI agent.")
      return "[DRY RUN] Generating content using AI agent"
    prompt = f"{user_prompt}"
    try:
      # BUG FIX: assigning a string to Agent.system_prompt after construction
      # has no effect on run_sync — in pydantic-ai, `system_prompt` on an
      # existing Agent is a decorator for registering dynamic system-prompt
      # functions, so the previous code silently dropped the system prompt.
      # Build an agent with the system prompt wired in at construction time.
      if system_prompt:
        agent = Agent(model=self.model_name, system_prompt=system_prompt)
      else:
        agent = self.agent
      result = agent.run_sync(prompt)
      return result.output
    except Exception as e:
      # Best-effort contract: callers receive a message string, not an exception.
      self.logger.error(f"AI agent generation failed: {e}")
      return f"AI agent generation failed: {e}"
Loading