"""Search and execute example: LLM-driven tool discovery and execution.

There are two ways to give tools to an LLM:

1. ``toolset.openai()`` — fetches ALL tools and converts them to OpenAI format.
Token cost scales with the number of tools in your catalog.

2. ``toolset.openai(mode="search_and_execute")`` — returns just 2 tools
(tool_search + tool_execute). The LLM discovers and runs tools on-demand,
keeping token usage constant regardless of catalog size.

This example demonstrates approach 2 with two patterns:
- Raw client (OpenAI): manual agent loop with ``toolset.execute()``
- LangChain: framework handles tool execution automatically

Prerequisites:
- STACKONE_API_KEY environment variable
- STACKONE_ACCOUNT_ID environment variable
- OPENAI_API_KEY environment variable

Run with:
uv run python examples/agent_tool_search.py
"""

from __future__ import annotations

import json
import os

try:
from dotenv import load_dotenv

load_dotenv()
except ModuleNotFoundError:
pass

from stackone_ai import StackOneToolSet


def example_openai() -> None:
    """Raw client: OpenAI.

    Shows: init toolset -> get OpenAI tools -> manual agent loop with toolset.execute().
    """
    banner = "=" * 60
    print(banner)
    print("Example 1: Raw client (OpenAI) — manual execution")
    print(banner)
    print()

    try:
        from openai import OpenAI
    except ImportError:
        print("Skipped: pip install openai")
        print()
        return

    if not os.getenv("OPENAI_API_KEY"):
        print("Skipped: Set OPENAI_API_KEY to run this example.")
        print()
        return

    # 1. Init toolset — execute config is only supplied when an account id exists
    acct = os.getenv("STACKONE_ACCOUNT_ID")
    exec_cfg = {"account_ids": [acct]} if acct else None
    toolset = StackOneToolSet(
        account_id=acct,
        search={"method": "semantic", "top_k": 3},
        execute=exec_cfg,
    )

    # 2. Get tools in OpenAI format (just tool_search + tool_execute)
    tool_specs = toolset.openai(mode="search_and_execute")

    # 3. Create the OpenAI client and run a bounded agent loop
    client = OpenAI()
    system_prompt = (
        "You are a helpful scheduling assistant. Use tool_search to find relevant tools, "
        "then tool_execute to run them. Always read the parameter schemas from tool_search "
        "results carefully. If a tool needs a user URI, first search for and call a "
        '"get current user" tool to obtain it. If a tool execution fails, try different '
        "parameters or a different tool."
    )
    convo: list[dict] = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": "List my upcoming Calendly events for the next week."},
    ]

    for _ in range(10):
        completion = client.chat.completions.create(
            model="gpt-5.4",
            messages=convo,
            tools=tool_specs,
            tool_choice="auto",
        )

        message = completion.choices[0].message

        # 4. No tool calls means the model has produced its final answer
        if not message.tool_calls:
            print(f"Answer: {message.content}")
            break

        # 5. Execute each requested tool ourselves and feed the results back
        convo.append(message.model_dump(exclude_none=True))
        for call in message.tool_calls:
            print(f"  -> {call.function.name}({call.function.arguments})")
            outcome = toolset.execute(call.function.name, call.function.arguments)
            convo.append(
                {
                    "role": "tool",
                    "tool_call_id": call.id,
                    "content": json.dumps(outcome),
                }
            )

    print()


def example_langchain() -> None:
    """Framework: LangChain with auto-execution.

    Shows: init toolset -> get LangChain tools -> bind to model -> framework executes tools.
    No toolset.execute() needed — the framework calls _run() on tools automatically.
    """
    print("=" * 60)
    print("Example 2: LangChain — framework handles execution")
    print("=" * 60)
    print()

    try:
        from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
        from langchain_openai import ChatOpenAI
    except ImportError:
        print("Skipped: pip install langchain-openai")
        print()
        return

    if not os.getenv("OPENAI_API_KEY"):
        print("Skipped: Set OPENAI_API_KEY to run this example.")
        print()
        return

    # 1. Init toolset
    account_id = os.getenv("STACKONE_ACCOUNT_ID")
    toolset = StackOneToolSet(
        account_id=account_id,
        search={"method": "semantic", "top_k": 3},
        execute={"account_ids": [account_id]} if account_id else None,
    )

    # 2. Get tools in LangChain format and bind to model
    langchain_tools = toolset.langchain(mode="search_and_execute")
    tools_by_name = {tool.name: tool for tool in langchain_tools}
    model = ChatOpenAI(model="gpt-5.4").bind_tools(langchain_tools)

    # 3. Run agent loop (bounded at 10 steps to avoid runaway tool loops)
    messages = [
        SystemMessage(
            content=(
                "You are a helpful scheduling assistant. Use tool_search to find relevant tools, "
                "then tool_execute to run them. Always read the parameter schemas from tool_search "
                "results carefully. If a tool needs a user URI, first search for and call a "
                '"get current user" tool to obtain it. If a tool execution fails, try different '
                "parameters or a different tool."
            ),
        ),
        HumanMessage(content="List my upcoming Calendly events for the next week."),
    ]

    for _step in range(10):
        response: AIMessage = model.invoke(messages)

        # 4. If no tool calls, print final answer and stop.
        # ChatOpenAI returns plain-string content, so response.content is the answer text.
        if not response.tool_calls:
            print(f"Answer: {response.content}")
            break

        # 5. Framework-compatible execution — invoke LangChain tools directly
        messages.append(response)
        for tool_call in response.tool_calls:
            print(f"  -> {tool_call['name']}({json.dumps(tool_call['args'])})")
            tool = tools_by_name[tool_call["name"]]
            result = tool.invoke(tool_call["args"])
            messages.append(ToolMessage(content=json.dumps(result), tool_call_id=tool_call["id"]))

    print()


def main() -> None:
    """Run all examples.

    Requires STACKONE_API_KEY; each example additionally skips itself when its
    own optional dependencies or OPENAI_API_KEY are missing.
    """
    api_key = os.getenv("STACKONE_API_KEY")
    if not api_key:
        print("Set STACKONE_API_KEY to run these examples.")
        return

    example_openai()
    example_langchain()


if __name__ == "__main__":
    main()
2 changes: 1 addition & 1 deletion examples/crewai_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def crewai_integration():
goal=f"What is the employee with the id {employee_id}?",
backstory="With over 10 years of experience in HR and employee management, "
"you excel at finding patterns in complex datasets.",
llm="gpt-4o-mini",
llm="gpt-5.4",
tools=langchain_tools,
max_iter=2,
)
Expand Down
2 changes: 1 addition & 1 deletion examples/langchain_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ def langchain_integration() -> None:
assert hasattr(tool, "args_schema"), "Expected tool to have args_schema"

# Create model with tools
model = ChatOpenAI(model="gpt-4o-mini")
model = ChatOpenAI(model="gpt-5.4")
model_with_tools = model.bind_tools(langchain_tools)

result = model_with_tools.invoke(f"Can you get me information about employee with ID: {employee_id}?")
Expand Down
4 changes: 2 additions & 2 deletions examples/openai_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ def openai_integration() -> None:
]

response = client.chat.completions.create(
model="gpt-4o-mini",
model="gpt-5.4",
messages=messages,
tools=openai_tools,
tool_choice="auto",
Expand Down Expand Up @@ -81,7 +81,7 @@ def openai_integration() -> None:

# Verify the final response
final_response = client.chat.completions.create(
model="gpt-4o-mini",
model="gpt-5.4",
messages=messages,
tools=openai_tools,
tool_choice="auto",
Expand Down
4 changes: 2 additions & 2 deletions examples/search_tool_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ def example_with_openai():

# Create a chat completion with discovered tools
response = client.chat.completions.create(
model="gpt-4",
model="gpt-5.4",
messages=[
{
"role": "system",
Expand Down Expand Up @@ -246,7 +246,7 @@ def example_with_langchain():
print(f" - {tool.name}: {tool.description}")

# Create LangChain agent
llm = ChatOpenAI(model="gpt-4", temperature=0)
llm = ChatOpenAI(model="gpt-5.4", temperature=0)

prompt = ChatPromptTemplate.from_messages(
[
Expand Down
14 changes: 7 additions & 7 deletions examples/semantic_search_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ def example_search_action_names():
# Show the limited results
print(f"Top {len(results_limited)} matches from the full catalog:")
for r in results_limited:
print(f" [{r.similarity_score:.2f}] {r.action_name} ({r.connector_key})")
print(f" [{r.similarity_score:.2f}] {r.id}")
print(f" {r.description}")
print()

Expand All @@ -143,7 +143,7 @@ def example_search_action_names():
filtered = toolset.search_action_names(query, account_ids=_account_ids, top_k=5)
print(f" Filtered to {len(filtered)} matches (only your connectors):")
for r in filtered:
print(f" [{r.similarity_score:.2f}] {r.action_name} ({r.connector_key})")
print(f" [{r.similarity_score:.2f}] {r.id}")
else:
print("Tip: Set STACKONE_ACCOUNT_ID to see results filtered to your linked connectors.")

Expand Down Expand Up @@ -197,7 +197,7 @@ def example_search_tools_with_connector():
print("=" * 60)
print()

toolset = StackOneToolSet()
toolset = StackOneToolSet(search={})

query = "book a meeting"
connector = "calendly"
Expand Down Expand Up @@ -230,7 +230,7 @@ def example_search_tool_agent_loop():
print("=" * 60)
print()

toolset = StackOneToolSet()
toolset = StackOneToolSet(search={})

print("Step 1: Fetching tools from your linked accounts via MCP...")
all_tools = toolset.fetch_tools(account_ids=_account_ids)
Expand Down Expand Up @@ -281,7 +281,7 @@ def example_openai_agent_loop():

if openai_key:
client = OpenAI()
model = "gpt-4o-mini"
model = "gpt-5.4"
provider = "OpenAI"
elif google_key:
client = OpenAI(
Expand All @@ -298,7 +298,7 @@ def example_openai_agent_loop():
print(f"Using {provider} ({model})")
print()

toolset = StackOneToolSet()
toolset = StackOneToolSet(search={})

query = "list upcoming events"
print(f'Step 1: Discovering tools for "{query}" via semantic search...')
Expand Down Expand Up @@ -358,7 +358,7 @@ def example_langchain_semantic():
print()
return

toolset = StackOneToolSet()
toolset = StackOneToolSet(search={})

query = "remove a user from the team"
print(f'Step 1: Searching for "{query}" via semantic search...')
Expand Down
3 changes: 2 additions & 1 deletion stackone_ai/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,13 @@
SemanticSearchResponse,
SemanticSearchResult,
)
from stackone_ai.toolset import SearchConfig, SearchMode, SearchTool, StackOneToolSet
from stackone_ai.toolset import ExecuteToolsConfig, SearchConfig, SearchMode, SearchTool, StackOneToolSet

__all__ = [
"StackOneToolSet",
"StackOneTool",
"Tools",
"ExecuteToolsConfig",
"SearchConfig",
"SearchMode",
"SearchTool",
Expand Down
16 changes: 14 additions & 2 deletions stackone_ai/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -414,21 +414,33 @@ def to_langchain(self) -> BaseTool:

for name, details in self.parameters.properties.items():
python_type: type = str # Default to str
is_nullable = False
if isinstance(details, dict):
type_str = details.get("type", "string")
is_nullable = details.get("nullable", False)
if type_str == "number":
python_type = float
elif type_str == "integer":
python_type = int
elif type_str == "boolean":
python_type = bool
elif type_str == "object":
python_type = dict
elif type_str == "array":
python_type = list

field = Field(description=details.get("description", ""))
if is_nullable:
field = Field(default=None, description=details.get("description", ""))
else:
field = Field(description=details.get("description", ""))
else:
field = Field(description="")

schema_props[name] = field
annotations[name] = python_type
if is_nullable:
annotations[name] = python_type | None
else:
annotations[name] = python_type

# Create the schema class with proper annotations
schema_class = type(
Expand Down
Loading