From de0eef539caf42016471a71a97fddc2e222597ad Mon Sep 17 00:00:00 2001
From: Mohammad Amin
Date: Tue, 30 Sep 2025 14:59:46 +0330
Subject: [PATCH 1/2] refactor: streamline RAG query execution in Hivemind agent by replacing deprecated components with new QueryDataSources class and enhancing error handling

---
 requirements.txt                     |  4 +--
 tasks/hivemind/agent.py              | 44 +++++++++-------------------
 tasks/hivemind/query_data_sources.py | 11 +++++--
 3 files changed, 24 insertions(+), 35 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index c820d88..17e42bf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,4 @@ tc-temporal-backend==1.1.4
 transformers[torch]==4.49.0
 nest-asyncio==1.6.0
 openai==1.93.0
-tc-hivemind-backend==1.4.3
-langchain==0.3.26
-langchain-openai==0.3.27
+tc-hivemind-backend==1.4.3
\ No newline at end of file
diff --git a/tasks/hivemind/agent.py b/tasks/hivemind/agent.py
index ada6f57..6828b43 100644
--- a/tasks/hivemind/agent.py
+++ b/tasks/hivemind/agent.py
@@ -1,18 +1,16 @@
 import logging
+import asyncio
 from crewai import Agent, Crew, Task
 from crewai.crews.crew_output import CrewOutput
 from crewai.flow.flow import Flow, listen, start, router
 from crewai.llm import LLM
 from tasks.hivemind.classify_question import ClassifyQuestion
-from tasks.hivemind.query_data_sources import make_rag_tool
+from tasks.hivemind.query_data_sources import QueryDataSources
 from pydantic import BaseModel
 from crewai.tools import tool
 from openai import OpenAI
 from typing import Optional
 from tasks.mongo_persistence import MongoPersistence
-from langchain_openai import ChatOpenAI
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain.agents import AgentExecutor, create_openai_functions_agent
 
 
 class AgenticFlowState(BaseModel):
@@ -156,35 +154,21 @@ def detect_question_type(self) -> str:
 
     @router("rag")
     def do_rag_query(self) -> str:
-        llm = ChatOpenAI(model="gpt-4o-mini-2024-07-18")
-        rag_tool = make_rag_tool(self.enable_answer_skipping, self.community_id, self.workflow_id)
-        tools = [rag_tool]
-
-        SYSTEM_INSTRUCTIONS = """\
-        You are a helpful assistant.
-        """
-
-        prompt = ChatPromptTemplate.from_messages(
-            [
-                ("system", SYSTEM_INSTRUCTIONS),
-                MessagesPlaceholder("chat_history", optional=True),
-                ("human", "{input}"),
-                MessagesPlaceholder("agent_scratchpad"),
-            ]
+        query_data_sources = QueryDataSources(
+            community_id=self.community_id,
+            enable_answer_skipping=self.enable_answer_skipping,
+            workflow_id=self.workflow_id,
         )
-        agent = create_openai_functions_agent(llm, tools, prompt)
 
-        # Run the agent
-        agent_executor = AgentExecutor(
-            agent=agent,
-            tools=tools,
-            verbose=True,
-            return_intermediate_steps=False,
-            max_iterations=3,
-        )
+        try:
+            answer = asyncio.run(query_data_sources.query(self.state.user_query))
+            if answer is None:
+                answer = "NONE"
+        except Exception as e:
+            logging.error(f"RAG query execution failed: {e}")
+            answer = "NONE"
 
-        result = agent_executor.invoke({"input": self.state.user_query})
-        self.state.last_answer = result["output"]
+        self.state.last_answer = answer
         self.state.retry_count += 1
         return "stop"
 
diff --git a/tasks/hivemind/query_data_sources.py b/tasks/hivemind/query_data_sources.py
index 5367eb0..96f1358 100644
--- a/tasks/hivemind/query_data_sources.py
+++ b/tasks/hivemind/query_data_sources.py
@@ -5,7 +5,6 @@
 import nest_asyncio
 from dotenv import load_dotenv
 from typing import Optional, Callable
-from langchain.tools import tool
 from tc_temporal_backend.client import TemporalClient
 from tc_temporal_backend.schema.hivemind import HivemindQueryPayload
 from temporalio.common import RetryPolicy
@@ -96,7 +95,15 @@ def make_rag_tool(enable_answer_skipping: bool, community_id: str, workflow_id:
     Returns:
         Callable: The RAG pipeline tool.
     """
-    @tool(return_direct=True)
+    try:
+        from langchain.tools import tool as lc_tool  # type: ignore
+    except Exception:
+        # Fallback no-op decorator if LangChain is not installed/required
+        def lc_tool(*_args, **_kwargs):
+            def decorator(func):
+                return func
+            return decorator
+    @lc_tool(return_direct=True)
     def get_rag_answer(query: str) -> str:
         """
         Get the answer from the RAG pipeline

From bb52bd3bedb1d0e21e8cb79e5b81a06a2b3b911b Mon Sep 17 00:00:00 2001
From: Mohammad Amin
Date: Tue, 30 Sep 2025 15:56:08 +0330
Subject: [PATCH 2/2] fix: remove CodeClimate usage from CI workflows

---
 .github/workflows/production.yml    | 2 +-
 .github/workflows/start.staging.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/production.yml b/.github/workflows/production.yml
index 39d3f14..8469487 100644
--- a/.github/workflows/production.yml
+++ b/.github/workflows/production.yml
@@ -9,6 +9,6 @@ on:
 
 jobs:
   ci:
-    uses: TogetherCrew/operations/.github/workflows/ci.yml@main
+    uses: TogetherCrew/operations/.github/workflows/ci2.yml@main
     secrets:
       CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
diff --git a/.github/workflows/start.staging.yml b/.github/workflows/start.staging.yml
index a53de6e..dcb51ed 100644
--- a/.github/workflows/start.staging.yml
+++ b/.github/workflows/start.staging.yml
@@ -6,6 +6,6 @@ on: pull_request
 
 jobs:
   ci:
-    uses: TogetherCrew/operations/.github/workflows/ci.yml@main
+    uses: TogetherCrew/operations/.github/workflows/ci2.yml@main
     secrets:
       CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }}
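
Reviewer note: the refactor replaces the LangChain AgentExecutor path with a direct call into QueryDataSources. A minimal sketch of the resulting call pattern, assuming only the constructor arguments and async query() method shown in the diff (the ids below are placeholder values, not real configuration):

    import asyncio
    import logging

    from tasks.hivemind.query_data_sources import QueryDataSources

    def answer_user_query(user_query: str) -> str:
        # Build the data-source wrapper the same way agent.py now does.
        query_data_sources = QueryDataSources(
            community_id="example-community-id",  # placeholder
            enable_answer_skipping=True,
            workflow_id="example-workflow-id",  # placeholder
        )
        try:
            # query() is async, so drive it to completion from synchronous code.
            answer = asyncio.run(query_data_sources.query(user_query))
            # A None result is normalized to the "NONE" sentinel, mirroring do_rag_query.
            return answer if answer is not None else "NONE"
        except Exception as exc:
            # Any failure falls back to the sentinel instead of crashing the flow.
            logging.error(f"RAG query execution failed: {exc}")
            return "NONE"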