Skip to content

Commit a5e5723

Browse files
feat(search-tools): LLM-driven search and execute and new API (#151)
* add LLM-driven tool_search and tool_execute
* Fix CI
* Fix CI and lint issues
* PR suggestion from bots
* Address the PR comments
* Remove toolset.get_meta_tools() and update to new API
* Update the API
* Change tools to Sequence to fix CI
* Remove all references to the meta tools
* Fix doc strings
* Adopt changes from the new API
* Update doc strings
* Doc update
1 parent 78ac0cb commit a5e5723

12 files changed

+1238
-196
lines changed

examples/agent_tool_search.py

Lines changed: 202 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,202 @@
1+
"""Search and execute example: LLM-driven tool discovery and execution.
2+
3+
There are two ways to give tools to an LLM:
4+
5+
1. ``toolset.openai()`` — fetches ALL tools and converts them to OpenAI format.
6+
Token cost scales with the number of tools in your catalog.
7+
8+
2. ``toolset.openai(mode="search_and_execute")`` — returns just 2 tools
9+
(tool_search + tool_execute). The LLM discovers and runs tools on-demand,
10+
keeping token usage constant regardless of catalog size.
11+
12+
This example demonstrates approach 2 with two patterns:
13+
- Raw client (OpenAI): manual agent loop with ``toolset.execute()``
14+
- LangChain: framework handles tool execution automatically
15+
16+
Prerequisites:
17+
- STACKONE_API_KEY environment variable
18+
- STACKONE_ACCOUNT_ID environment variable
19+
- OPENAI_API_KEY environment variable
20+
21+
Run with:
22+
uv run python examples/agent_tool_search.py
23+
"""
24+
25+
from __future__ import annotations
26+
27+
import json
28+
import os
29+
30+
try:
31+
from dotenv import load_dotenv
32+
33+
load_dotenv()
34+
except ModuleNotFoundError:
35+
pass
36+
37+
from stackone_ai import StackOneToolSet
38+
39+
40+
def example_openai() -> None:
41+
"""Raw client: OpenAI.
42+
43+
Shows: init toolset -> get OpenAI tools -> manual agent loop with toolset.execute().
44+
"""
45+
print("=" * 60)
46+
print("Example 1: Raw client (OpenAI) — manual execution")
47+
print("=" * 60)
48+
print()
49+
50+
try:
51+
from openai import OpenAI
52+
except ImportError:
53+
print("Skipped: pip install openai")
54+
print()
55+
return
56+
57+
if not os.getenv("OPENAI_API_KEY"):
58+
print("Skipped: Set OPENAI_API_KEY to run this example.")
59+
print()
60+
return
61+
62+
# 1. Init toolset
63+
account_id = os.getenv("STACKONE_ACCOUNT_ID")
64+
toolset = StackOneToolSet(
65+
account_id=account_id,
66+
search={"method": "semantic", "top_k": 3},
67+
execute={"account_ids": [account_id]} if account_id else None,
68+
)
69+
70+
# 2. Get tools in OpenAI format
71+
openai_tools = toolset.openai(mode="search_and_execute")
72+
73+
# 3. Create OpenAI client and run agent loop
74+
client = OpenAI()
75+
messages: list[dict] = [
76+
{
77+
"role": "system",
78+
"content": (
79+
"You are a helpful scheduling assistant. Use tool_search to find relevant tools, "
80+
"then tool_execute to run them. Always read the parameter schemas from tool_search "
81+
"results carefully. If a tool needs a user URI, first search for and call a "
82+
'"get current user" tool to obtain it. If a tool execution fails, try different '
83+
"parameters or a different tool."
84+
),
85+
},
86+
{"role": "user", "content": "List my upcoming Calendly events for the next week."},
87+
]
88+
89+
for _step in range(10):
90+
response = client.chat.completions.create(
91+
model="gpt-5.4",
92+
messages=messages,
93+
tools=openai_tools,
94+
tool_choice="auto",
95+
)
96+
97+
choice = response.choices[0]
98+
99+
# 4. If no tool calls, print final answer and stop
100+
if not choice.message.tool_calls:
101+
print(f"Answer: {choice.message.content}")
102+
break
103+
104+
# 5. Execute tool calls manually and feed results back
105+
messages.append(choice.message.model_dump(exclude_none=True))
106+
for tool_call in choice.message.tool_calls:
107+
print(f" -> {tool_call.function.name}({tool_call.function.arguments})")
108+
result = toolset.execute(tool_call.function.name, tool_call.function.arguments)
109+
messages.append(
110+
{
111+
"role": "tool",
112+
"tool_call_id": tool_call.id,
113+
"content": json.dumps(result),
114+
}
115+
)
116+
117+
print()
118+
119+
120+
def example_langchain() -> None:
121+
"""Framework: LangChain with auto-execution.
122+
123+
Shows: init toolset -> get LangChain tools -> bind to model -> framework executes tools.
124+
No toolset.execute() needed — the framework calls _run() on tools automatically.
125+
"""
126+
print("=" * 60)
127+
print("Example 2: LangChain — framework handles execution")
128+
print("=" * 60)
129+
print()
130+
131+
try:
132+
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
133+
from langchain_openai import ChatOpenAI
134+
except ImportError:
135+
print("Skipped: pip install langchain-openai")
136+
print()
137+
return
138+
139+
if not os.getenv("OPENAI_API_KEY"):
140+
print("Skipped: Set OPENAI_API_KEY to run this example.")
141+
print()
142+
return
143+
144+
# 1. Init toolset
145+
account_id = os.getenv("STACKONE_ACCOUNT_ID")
146+
toolset = StackOneToolSet(
147+
account_id=account_id,
148+
search={"method": "semantic", "top_k": 3},
149+
execute={"account_ids": [account_id]} if account_id else None,
150+
)
151+
152+
# 2. Get tools in LangChain format and bind to model
153+
langchain_tools = toolset.langchain(mode="search_and_execute")
154+
tools_by_name = {tool.name: tool for tool in langchain_tools}
155+
model = ChatOpenAI(model="gpt-5.4").bind_tools(langchain_tools)
156+
157+
# 3. Run agent loop
158+
messages = [
159+
SystemMessage(
160+
content=(
161+
"You are a helpful scheduling assistant. Use tool_search to find relevant tools, "
162+
"then tool_execute to run them. Always read the parameter schemas from tool_search "
163+
"results carefully. If a tool needs a user URI, first search for and call a "
164+
'"get current user" tool to obtain it. If a tool execution fails, try different '
165+
"parameters or a different tool."
166+
),
167+
),
168+
HumanMessage(content="List my upcoming Calendly events for the next week."),
169+
]
170+
171+
for _step in range(10):
172+
response: AIMessage = model.invoke(messages)
173+
174+
# 4. If no tool calls, print final answer and stop
175+
if not response.tool_calls:
176+
print(f"Answer: {response.content}")
177+
break
178+
179+
# 5. Framework-compatible execution — invoke LangChain tools directly
180+
messages.append(response)
181+
for tool_call in response.tool_calls:
182+
print(f" -> {tool_call['name']}({json.dumps(tool_call['args'])})")
183+
tool = tools_by_name[tool_call["name"]]
184+
result = tool.invoke(tool_call["args"])
185+
messages.append(ToolMessage(content=json.dumps(result), tool_call_id=tool_call["id"]))
186+
187+
print()
188+
189+
190+
def main() -> None:
191+
"""Run all examples."""
192+
api_key = os.getenv("STACKONE_API_KEY")
193+
if not api_key:
194+
print("Set STACKONE_API_KEY to run these examples.")
195+
return
196+
197+
example_openai()
198+
example_langchain()
199+
200+
201+
if __name__ == "__main__":
202+
main()

examples/crewai_integration.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ def crewai_integration():
3434
goal=f"What is the employee with the id {employee_id}?",
3535
backstory="With over 10 years of experience in HR and employee management, "
3636
"you excel at finding patterns in complex datasets.",
37-
llm="gpt-4o-mini",
37+
llm="gpt-5.4",
3838
tools=langchain_tools,
3939
max_iter=2,
4040
)

examples/langchain_integration.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ def langchain_integration() -> None:
3333
assert hasattr(tool, "args_schema"), "Expected tool to have args_schema"
3434

3535
# Create model with tools
36-
model = ChatOpenAI(model="gpt-4o-mini")
36+
model = ChatOpenAI(model="gpt-5.4")
3737
model_with_tools = model.bind_tools(langchain_tools)
3838

3939
result = model_with_tools.invoke(f"Can you get me information about employee with ID: {employee_id}?")

examples/openai_integration.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ def openai_integration() -> None:
5353
]
5454

5555
response = client.chat.completions.create(
56-
model="gpt-4o-mini",
56+
model="gpt-5.4",
5757
messages=messages,
5858
tools=openai_tools,
5959
tool_choice="auto",
@@ -81,7 +81,7 @@ def openai_integration() -> None:
8181

8282
# Verify the final response
8383
final_response = client.chat.completions.create(
84-
model="gpt-4o-mini",
84+
model="gpt-5.4",
8585
messages=messages,
8686
tools=openai_tools,
8787
tool_choice="auto",

examples/search_tool_example.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,7 @@ def example_with_openai():
198198

199199
# Create a chat completion with discovered tools
200200
response = client.chat.completions.create(
201-
model="gpt-4",
201+
model="gpt-5.4",
202202
messages=[
203203
{
204204
"role": "system",
@@ -246,7 +246,7 @@ def example_with_langchain():
246246
print(f" - {tool.name}: {tool.description}")
247247

248248
# Create LangChain agent
249-
llm = ChatOpenAI(model="gpt-4", temperature=0)
249+
llm = ChatOpenAI(model="gpt-5.4", temperature=0)
250250

251251
prompt = ChatPromptTemplate.from_messages(
252252
[

examples/semantic_search_example.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ def example_search_action_names():
132132
# Show the limited results
133133
print(f"Top {len(results_limited)} matches from the full catalog:")
134134
for r in results_limited:
135-
print(f" [{r.similarity_score:.2f}] {r.action_name} ({r.connector_key})")
135+
print(f" [{r.similarity_score:.2f}] {r.id}")
136136
print(f" {r.description}")
137137
print()
138138

@@ -143,7 +143,7 @@ def example_search_action_names():
143143
filtered = toolset.search_action_names(query, account_ids=_account_ids, top_k=5)
144144
print(f" Filtered to {len(filtered)} matches (only your connectors):")
145145
for r in filtered:
146-
print(f" [{r.similarity_score:.2f}] {r.action_name} ({r.connector_key})")
146+
print(f" [{r.similarity_score:.2f}] {r.id}")
147147
else:
148148
print("Tip: Set STACKONE_ACCOUNT_ID to see results filtered to your linked connectors.")
149149

@@ -197,7 +197,7 @@ def example_search_tools_with_connector():
197197
print("=" * 60)
198198
print()
199199

200-
toolset = StackOneToolSet()
200+
toolset = StackOneToolSet(search={})
201201

202202
query = "book a meeting"
203203
connector = "calendly"
@@ -230,7 +230,7 @@ def example_search_tool_agent_loop():
230230
print("=" * 60)
231231
print()
232232

233-
toolset = StackOneToolSet()
233+
toolset = StackOneToolSet(search={})
234234

235235
print("Step 1: Fetching tools from your linked accounts via MCP...")
236236
all_tools = toolset.fetch_tools(account_ids=_account_ids)
@@ -281,7 +281,7 @@ def example_openai_agent_loop():
281281

282282
if openai_key:
283283
client = OpenAI()
284-
model = "gpt-4o-mini"
284+
model = "gpt-5.4"
285285
provider = "OpenAI"
286286
elif google_key:
287287
client = OpenAI(
@@ -298,7 +298,7 @@ def example_openai_agent_loop():
298298
print(f"Using {provider} ({model})")
299299
print()
300300

301-
toolset = StackOneToolSet()
301+
toolset = StackOneToolSet(search={})
302302

303303
query = "list upcoming events"
304304
print(f'Step 1: Discovering tools for "{query}" via semantic search...')
@@ -358,7 +358,7 @@ def example_langchain_semantic():
358358
print()
359359
return
360360

361-
toolset = StackOneToolSet()
361+
toolset = StackOneToolSet(search={})
362362

363363
query = "remove a user from the team"
364364
print(f'Step 1: Searching for "{query}" via semantic search...')

stackone_ai/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,13 @@
77
SemanticSearchResponse,
88
SemanticSearchResult,
99
)
10-
from stackone_ai.toolset import SearchConfig, SearchMode, SearchTool, StackOneToolSet
10+
from stackone_ai.toolset import ExecuteToolsConfig, SearchConfig, SearchMode, SearchTool, StackOneToolSet
1111

1212
__all__ = [
1313
"StackOneToolSet",
1414
"StackOneTool",
1515
"Tools",
16+
"ExecuteToolsConfig",
1617
"SearchConfig",
1718
"SearchMode",
1819
"SearchTool",

stackone_ai/models.py

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -414,21 +414,33 @@ def to_langchain(self) -> BaseTool:
414414

415415
for name, details in self.parameters.properties.items():
416416
python_type: type = str # Default to str
417+
is_nullable = False
417418
if isinstance(details, dict):
418419
type_str = details.get("type", "string")
420+
is_nullable = details.get("nullable", False)
419421
if type_str == "number":
420422
python_type = float
421423
elif type_str == "integer":
422424
python_type = int
423425
elif type_str == "boolean":
424426
python_type = bool
427+
elif type_str == "object":
428+
python_type = dict
429+
elif type_str == "array":
430+
python_type = list
425431

426-
field = Field(description=details.get("description", ""))
432+
if is_nullable:
433+
field = Field(default=None, description=details.get("description", ""))
434+
else:
435+
field = Field(description=details.get("description", ""))
427436
else:
428437
field = Field(description="")
429438

430439
schema_props[name] = field
431-
annotations[name] = python_type
440+
if is_nullable:
441+
annotations[name] = python_type | None
442+
else:
443+
annotations[name] = python_type
432444

433445
# Create the schema class with proper annotations
434446
schema_class = type(

0 commit comments

Comments (0)