From c6b2e3f3e8d31f8e63839b7b8c5830782207e5bc Mon Sep 17 00:00:00 2001 From: Shashi-Stackone Date: Thu, 2 Apr 2026 14:05:50 +0100 Subject: [PATCH 1/8] include available connectors in search/execute tool descriptions --- stackone_ai/toolset.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/stackone_ai/toolset.py b/stackone_ai/toolset.py index c13fc73..3f3744b 100644 --- a/stackone_ai/toolset.py +++ b/stackone_ai/toolset.py @@ -205,13 +205,14 @@ def execute( return {"error": f"Invalid input: {exc}", "tool_name": tool_name} -def _create_search_tool(api_key: str) -> _SearchTool: +def _create_search_tool(api_key: str, connectors: str = "") -> _SearchTool: name = "tool_search" + connector_line = f" Available connectors: {connectors}." if connectors else "" description = ( "Search for available tools by describing what you need. " "Returns matching tool names, descriptions, and parameter schemas. " "Use the returned parameter schemas to know exactly what to pass " - "when calling tool_execute." + f"when calling tool_execute.{connector_line}" ) parameters = ToolParameters( type="object", @@ -259,14 +260,14 @@ def _create_search_tool(api_key: str) -> _SearchTool: return tool -def _create_execute_tool(api_key: str) -> _ExecuteTool: +def _create_execute_tool(api_key: str, connectors: str = "") -> _ExecuteTool: name = "tool_execute" + connector_line = f" Available connectors: {connectors}." if connectors else "" description = ( "Execute a tool by name with the given parameters. " "Use tool_search first to find available tools. " "The parameters field must match the parameter schema returned " - "by tool_search. Pass parameters as a nested object matching " - "the schema structure." + f"by tool_search. 
Pass parameters as a nested object matching the schema structure.{connector_line}" ) parameters = ToolParameters( type="object", @@ -645,10 +646,20 @@ def _build_tools(self, account_ids: list[str] | None = None) -> Tools: if account_ids: self._account_ids = account_ids - search_tool = _create_search_tool(self.api_key) + # Discover available connectors for dynamic descriptions + connectors_str = "" + try: + all_tools = self.fetch_tools(account_ids=self._account_ids) + connectors = sorted(all_tools.get_connectors()) + if connectors: + connectors_str = ", ".join(connectors) + except Exception: + logger.debug("Could not discover connectors for tool descriptions") + + search_tool = _create_search_tool(self.api_key, connectors=connectors_str) search_tool._toolset = self - execute_tool = _create_execute_tool(self.api_key) + execute_tool = _create_execute_tool(self.api_key, connectors=connectors_str) execute_tool._toolset = self return Tools([search_tool, execute_tool]) From 5300e1d366d27253a110fa28b414ea6a175bcaac Mon Sep 17 00:00:00 2001 From: Shashi-Stackone Date: Tue, 7 Apr 2026 10:11:10 +0100 Subject: [PATCH 2/8] Fix the timeout and account ID issue in the SDK for the toolset.execute path --- stackone_ai/models.py | 3 ++- stackone_ai/toolset.py | 18 +++++++++++++++--- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/stackone_ai/models.py b/stackone_ai/models.py index f58d91b..26db896 100644 --- a/stackone_ai/models.py +++ b/stackone_ai/models.py @@ -65,6 +65,7 @@ class ExecuteConfig(BaseModel): parameter_locations: dict[str, ParameterLocation] = Field( default_factory=dict, description="Maps parameter names to their location in the request" ) + timeout: float = Field(default=60.0, description="Request timeout in seconds") class ToolParameters(BaseModel): @@ -249,7 +250,7 @@ def execute( if query_params: request_kwargs["params"] = query_params - response = httpx.request(**request_kwargs) + response = httpx.request(**request_kwargs, 
timeout=self._execute_config.timeout) response_status = response.status_code response.raise_for_status() diff --git a/stackone_ai/toolset.py b/stackone_ai/toolset.py index 3f3744b..d69c54e 100644 --- a/stackone_ai/toolset.py +++ b/stackone_ai/toolset.py @@ -59,7 +59,7 @@ class SearchConfig(TypedDict, total=False): class ExecuteToolsConfig(TypedDict, total=False): """Execution configuration for the StackOneToolSet constructor. - Controls default account scoping for tool execution. + Controls default account scoping and timeout for tool execution. When set to ``None`` (default), no account scoping is applied. When provided, ``account_ids`` flow through to ``openai(mode="search_and_execute")`` @@ -69,6 +69,10 @@ class ExecuteToolsConfig(TypedDict, total=False): account_ids: list[str] """Account IDs to scope tool discovery and execution.""" + timeout: float + """Request timeout in seconds. Default: 60. Can also be set as a top-level + constructor param which takes precedence.""" + _SEARCH_DEFAULT: SearchConfig = {"method": "auto"} @@ -416,6 +420,7 @@ def __init__( api_key: str, base_url: str, account_id: str | None, + timeout: float = 60.0, ) -> None: execute_config = ExecuteConfig( method="POST", @@ -424,6 +429,7 @@ def __init__( headers={}, body_type="json", parameter_locations=dict(_RPC_PARAMETER_LOCATIONS), + timeout=timeout, ) super().__init__( description=description, @@ -556,6 +562,7 @@ def __init__( base_url: str | None = None, search: SearchConfig | None = None, execute: ExecuteToolsConfig | None = None, + timeout: float = 60.0, ) -> None: """Initialize StackOne tools with authentication @@ -571,7 +578,9 @@ def __init__( Per-call options always override these defaults. execute: Execution configuration. Controls default account scoping for tool execution. Pass ``{"account_ids": ["acc-1"]}`` to scope - meta tools to specific accounts. + tools to specific accounts. + timeout: Request timeout in seconds for tool execution HTTP calls. + Default: 60. 
Increase for slow providers (e.g. Workday). Raises: ToolsetConfigError: If no API key is provided or found in environment @@ -585,10 +594,12 @@ def __init__( self.api_key: str = api_key_value self.account_id = account_id self.base_url = base_url or DEFAULT_BASE_URL - self._account_ids: list[str] = [] + self._account_ids: list[str] = execute.get("account_ids", []) if execute else [] self._semantic_client: SemanticSearchClient | None = None self._search_config: SearchConfig | None = search self._execute_config: ExecuteToolsConfig | None = execute + execute_timeout = execute.get("timeout", 60.0) if execute else 60.0 + self._timeout: float = timeout if timeout != 60.0 else execute_timeout self._tools_cache: Tools | None = None def set_accounts(self, account_ids: list[str]) -> StackOneToolSet: @@ -1203,6 +1214,7 @@ def _create_rpc_tool(self, tool_def: _McpToolDefinition, account_id: str | None) api_key=self.api_key, base_url=self.base_url, account_id=account_id, + timeout=self._timeout, ) def _normalize_schema_properties(self, schema: dict[str, Any]) -> dict[str, Any]: From b628ff68bdd6ccb8a5f5e70827cb06524de0e28f Mon Sep 17 00:00:00 2001 From: Shashi-Stackone Date: Tue, 7 Apr 2026 11:20:25 +0100 Subject: [PATCH 3/8] Update old examples to new SDK API --- examples/search_tool_example.py | 151 +++++++++++++++++----------- examples/semantic_search_example.py | 1 - 2 files changed, 90 insertions(+), 62 deletions(-) diff --git a/examples/search_tool_example.py b/examples/search_tool_example.py index 9ec048d..a220d83 100644 --- a/examples/search_tool_example.py +++ b/examples/search_tool_example.py @@ -16,6 +16,7 @@ ``` """ +import json import os from stackone_ai import StackOneToolSet @@ -36,7 +37,7 @@ def example_search_tool_basic(): print("Example 1: Dynamic tool discovery\n") # Initialize StackOne toolset - toolset = StackOneToolSet() + toolset = StackOneToolSet(search={}) # Get all available tools using MCP-backed fetch_tools() all_tools = 
toolset.fetch_tools(account_ids=_account_ids) @@ -106,7 +107,7 @@ def example_search_modes(): # Auto (default) — tries semantic, falls back to local print('Default: StackOneToolSet() uses search="auto" (semantic with local fallback)') - toolset_auto = StackOneToolSet() + toolset_auto = StackOneToolSet(search={}) tools_auto = toolset_auto.search_tools(query, account_ids=_account_ids, top_k=5) print(f" Found {len(tools_auto)} tools:") for tool in tools_auto: @@ -146,33 +147,60 @@ def example_search_tool_with_execution(): """Example of discovering and executing tools dynamically""" print("Example 4: Dynamic tool execution\n") - # Initialize toolset - toolset = StackOneToolSet() - - # Get all tools using MCP-backed fetch_tools() - all_tools = toolset.fetch_tools(account_ids=_account_ids) + try: + from openai import OpenAI + except ImportError: + print("OpenAI not installed: pip install openai") + print() + return - if not all_tools: - print("No tools found. Check your linked accounts.") + if not os.getenv("OPENAI_API_KEY"): + print("Skipped: Set OPENAI_API_KEY to run this example.") + print() return - search_tool = toolset.get_search_tool() + toolset = StackOneToolSet(search={}) # Step 1: Search for relevant tools - tools = search_tool("list all employees", top_k=1, account_ids=_account_ids) + search_tool = toolset.get_search_tool() + tools = search_tool("list all employees", top_k=3, account_ids=_account_ids) - if tools: - best_tool = tools[0] - print(f"Best matching tool: {best_tool.name}") - print(f"Description: {best_tool.description}") + if not tools: + print("No matching tools found.") + print() + return - # Step 2: Execute the found tool directly - try: - print(f"\nExecuting {best_tool.name}...") - result = best_tool(limit=5) - print(f"Execution result: {result}") - except Exception as e: - print(f"Execution failed (expected in example): {e}") + print(f"Found {len(tools)} tools:") + for t in tools: + print(f" - {t.name}") + + # Step 2: Let the LLM pick the right 
tool and params + openai_tools = tools.to_openai() + client = OpenAI() + messages: list[dict] = [ + {"role": "user", "content": "List all employees. Use the available tools."}, + ] + + for _step in range(5): + response = client.chat.completions.create( + model="gpt-5.4", messages=messages, tools=openai_tools + ) + choice = response.choices[0] + + if not choice.message.tool_calls: + print(f"\nAnswer: {choice.message.content}") + break + + messages.append(choice.message.model_dump(exclude_none=True)) + for tc in choice.message.tool_calls: + print(f" -> {tc.function.name}({tc.function.arguments[:80]})") + tool = tools.get_tool(tc.function.name) + if tool: + try: + result = tool.execute(json.loads(tc.function.arguments)) + except Exception as e: + result = {"error": str(e)} + messages.append({"role": "tool", "tool_call_id": tc.id, "content": json.dumps(result)}) print() @@ -188,7 +216,7 @@ def example_with_openai(): client = OpenAI() # Initialize StackOne toolset - toolset = StackOneToolSet() + toolset = StackOneToolSet(search={}) # Search for BambooHR employee tools tools = toolset.search_tools("manage employees", account_ids=_account_ids, top_k=5) @@ -230,48 +258,49 @@ def example_with_langchain(): print("Example 6: Using tools with LangChain\n") try: - from langchain.agents import AgentExecutor, create_tool_calling_agent - from langchain_core.prompts import ChatPromptTemplate + from langchain_core.messages import HumanMessage, ToolMessage from langchain_openai import ChatOpenAI - - # Initialize StackOne toolset - toolset = StackOneToolSet() - - # Get tools and convert to LangChain format using MCP-backed fetch_tools() - tools = toolset.search_tools("list employees", account_ids=_account_ids, top_k=5) - langchain_tools = list(tools.to_langchain()) - - print(f"Available tools for LangChain: {len(langchain_tools)}") - for tool in langchain_tools: - print(f" - {tool.name}: {tool.description}") - - # Create LangChain agent - llm = ChatOpenAI(model="gpt-5.4", 
temperature=0) - - prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are an HR assistant. Use the available tools to help the user.", - ), - ("human", "{input}"), - ("placeholder", "{agent_scratchpad}"), - ] - ) - - agent = create_tool_calling_agent(llm, langchain_tools, prompt) - agent_executor = AgentExecutor(agent=agent, tools=langchain_tools, verbose=True) - - # Run the agent - result = agent_executor.invoke({"input": "Find tools that can list employee data"}) - - print(f"\nAgent result: {result['output']}") - except ImportError as e: print(f"LangChain dependencies not installed: {e}") print("Install with: pip install langchain-openai") - except Exception as e: - print(f"LangChain example failed: {e}") + print() + return + + if not os.getenv("OPENAI_API_KEY"): + print("Skipped: Set OPENAI_API_KEY to run this example.") + print() + return + + toolset = StackOneToolSet(search={}) + + # Search for tools and convert to LangChain format + tools = toolset.search_tools("list employees", account_ids=_account_ids, top_k=5) + langchain_tools = list(tools.to_langchain()) + + print(f"Available tools: {len(langchain_tools)}") + for tool in langchain_tools: + print(f" - {tool.name}") + + # Bind tools to model and run + model = ChatOpenAI(model="gpt-5.4").bind_tools(langchain_tools) + tools_by_name = {t.name: t for t in langchain_tools} + messages = [HumanMessage(content="What employee tools do I have access to?")] + + for _ in range(5): + response = model.invoke(messages) + if not response.tool_calls: + print(f"\nAnswer: {response.content}") + break + + messages.append(response) + for tc in response.tool_calls: + print(f" -> {tc['name']}({json.dumps(tc['args'])[:80]})") + tool = tools_by_name[tc["name"]] + try: + result = tool.invoke(tc["args"]) + except Exception as e: + result = {"error": str(e)} + messages.append(ToolMessage(content=json.dumps(result), tool_call_id=tc["id"])) print() diff --git a/examples/semantic_search_example.py 
b/examples/semantic_search_example.py index b3a8f2b..1631ad2 100644 --- a/examples/semantic_search_example.py +++ b/examples/semantic_search_example.py @@ -133,7 +133,6 @@ def example_search_action_names(): print(f"Top {len(results_limited)} matches from the full catalog:") for r in results_limited: print(f" [{r.similarity_score:.2f}] {r.id}") - print(f" {r.description}") print() # Show filtering effect when account_ids are available From 8454734619978242c94478194a0554c6bc944637 Mon Sep 17 00:00:00 2001 From: Shashi-Stackone Date: Tue, 7 Apr 2026 11:25:41 +0100 Subject: [PATCH 4/8] Fix the CI issue --- examples/search_tool_example.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/examples/search_tool_example.py b/examples/search_tool_example.py index a220d83..d585249 100644 --- a/examples/search_tool_example.py +++ b/examples/search_tool_example.py @@ -182,9 +182,7 @@ def example_search_tool_with_execution(): ] for _step in range(5): - response = client.chat.completions.create( - model="gpt-5.4", messages=messages, tools=openai_tools - ) + response = client.chat.completions.create(model="gpt-5.4", messages=messages, tools=openai_tools) choice = response.choices[0] if not choice.message.tool_calls: From 649b139e62707f51b771e5980cdbc11b5573eeff Mon Sep 17 00:00:00 2001 From: Shashi-Stackone Date: Tue, 7 Apr 2026 12:08:19 +0100 Subject: [PATCH 5/8] Address copilot review about the timeout and trim the search tool example --- examples/search_tool_example.py | 347 +++----------------------------- stackone_ai/toolset.py | 9 +- 2 files changed, 36 insertions(+), 320 deletions(-) diff --git a/examples/search_tool_example.py b/examples/search_tool_example.py index d585249..2618202 100644 --- a/examples/search_tool_example.py +++ b/examples/search_tool_example.py @@ -1,25 +1,15 @@ -#!/usr/bin/env python -""" -Example demonstrating dynamic tool discovery using search_tool. 
- -The search tool allows AI agents to discover relevant tools based on natural language -queries without hardcoding tool names. +"""Search tool patterns: callable wrapper and config overrides. -Prerequisites: -- STACKONE_API_KEY environment variable set -- STACKONE_ACCOUNT_ID environment variable set (comma-separated for multiple) -- At least one linked account in StackOne (this example uses BambooHR) +For semantic search basics, see semantic_search_example.py. +For full agent execution, see agent_tool_search.py. -This example is runnable with the following command: -```bash -uv run examples/search_tool_example.py -``` +Run with: + uv run python examples/search_tool_example.py """ -import json -import os +from __future__ import annotations -from stackone_ai import StackOneToolSet +import os try: from dotenv import load_dotenv @@ -28,314 +18,39 @@ except ModuleNotFoundError: pass -# Read account IDs from environment — supports comma-separated values -_account_ids = [aid.strip() for aid in os.getenv("STACKONE_ACCOUNT_ID", "").split(",") if aid.strip()] - - -def example_search_tool_basic(): - """Basic example of using the search tool for tool discovery""" - print("Example 1: Dynamic tool discovery\n") - - # Initialize StackOne toolset - toolset = StackOneToolSet(search={}) - - # Get all available tools using MCP-backed fetch_tools() - all_tools = toolset.fetch_tools(account_ids=_account_ids) - print(f"Total tools available: {len(all_tools)}") - - if not all_tools: - print("No tools found. 
Check your linked accounts.") - return - - # Get a search tool for dynamic discovery - search_tool = toolset.get_search_tool() - - # Search for employee management tools — returns a Tools collection - tools = search_tool("manage employees create update list", top_k=5, account_ids=_account_ids) - - print(f"Found {len(tools)} relevant tools:") - for tool in tools: - print(f" - {tool.name}: {tool.description}") - - print() - - -def example_search_modes(): - """Comparing semantic vs local search modes. - - Search config can be set at the constructor level or overridden per call: - - Constructor: StackOneToolSet(search={"method": "semantic"}) - - Per-call: toolset.search_tools(query, search="local") - - The search method controls which backend search_tools() uses: - - "semantic": cloud-based semantic vector search (higher accuracy for natural language) - - "local": local BM25+TF-IDF hybrid search (no network call to semantic API) - - "auto" (default): tries semantic first, falls back to local on failure - """ - print("Example 2: Semantic vs local search modes\n") - - query = "manage employee time off" - - # Constructor-level config — semantic search as the default for this toolset - print('Constructor config: StackOneToolSet(search={"method": "semantic"})') - toolset_semantic = StackOneToolSet(search={"method": "semantic"}) - try: - tools_semantic = toolset_semantic.search_tools(query, account_ids=_account_ids, top_k=5) - print(f" Found {len(tools_semantic)} tools:") - for tool in tools_semantic: - print(f" - {tool.name}") - except Exception as e: - print(f" Semantic search unavailable: {e}") - print() - - # Constructor-level config — local search (no network call to semantic API) - print('Constructor config: StackOneToolSet(search={"method": "local"})') - toolset_local = StackOneToolSet(search={"method": "local"}) - tools_local = toolset_local.search_tools(query, account_ids=_account_ids, top_k=5) - print(f" Found {len(tools_local)} tools:") - for tool in tools_local: 
- print(f" - {tool.name}") - print() - - # Per-call override — constructor defaults can be overridden on each call - print("Per-call override: constructor uses semantic, but this call uses local") - tools_override = toolset_semantic.search_tools(query, account_ids=_account_ids, top_k=5, search="local") - print(f" Found {len(tools_override)} tools:") - for tool in tools_override: - print(f" - {tool.name}") - print() - - # Auto (default) — tries semantic, falls back to local - print('Default: StackOneToolSet() uses search="auto" (semantic with local fallback)') - toolset_auto = StackOneToolSet(search={}) - tools_auto = toolset_auto.search_tools(query, account_ids=_account_ids, top_k=5) - print(f" Found {len(tools_auto)} tools:") - for tool in tools_auto: - print(f" - {tool.name}") - print() - - -def example_top_k_config(): - """Configuring top_k at the constructor level vs per-call. - - Constructor-level top_k applies to all search_tools() and search_action_names() - calls. Per-call top_k overrides the constructor default for that single call. 
- """ - print("Example 3: top_k at constructor vs per-call\n") - - # Constructor-level top_k — all calls default to returning 3 results - toolset = StackOneToolSet(search={"top_k": 3}) - - query = "manage employee records" - print(f'Constructor top_k=3: searching for "{query}"') - tools_default = toolset.search_tools(query, account_ids=_account_ids) - print(f" Got {len(tools_default)} tools (constructor default)") - for tool in tools_default: - print(f" - {tool.name}") - print() - - # Per-call override — this single call returns up to 10 results - print("Per-call top_k=10: overriding constructor default") - tools_override = toolset.search_tools(query, account_ids=_account_ids, top_k=10) - print(f" Got {len(tools_override)} tools (per-call override)") - for tool in tools_override: - print(f" - {tool.name}") - print() - - -def example_search_tool_with_execution(): - """Example of discovering and executing tools dynamically""" - print("Example 4: Dynamic tool execution\n") - - try: - from openai import OpenAI - except ImportError: - print("OpenAI not installed: pip install openai") - print() - return - - if not os.getenv("OPENAI_API_KEY"): - print("Skipped: Set OPENAI_API_KEY to run this example.") - print() - return - - toolset = StackOneToolSet(search={}) - - # Step 1: Search for relevant tools - search_tool = toolset.get_search_tool() - tools = search_tool("list all employees", top_k=3, account_ids=_account_ids) - - if not tools: - print("No matching tools found.") - print() - return - - print(f"Found {len(tools)} tools:") - for t in tools: - print(f" - {t.name}") - - # Step 2: Let the LLM pick the right tool and params - openai_tools = tools.to_openai() - client = OpenAI() - messages: list[dict] = [ - {"role": "user", "content": "List all employees. 
Use the available tools."}, - ] - - for _step in range(5): - response = client.chat.completions.create(model="gpt-5.4", messages=messages, tools=openai_tools) - choice = response.choices[0] - - if not choice.message.tool_calls: - print(f"\nAnswer: {choice.message.content}") - break - - messages.append(choice.message.model_dump(exclude_none=True)) - for tc in choice.message.tool_calls: - print(f" -> {tc.function.name}({tc.function.arguments[:80]})") - tool = tools.get_tool(tc.function.name) - if tool: - try: - result = tool.execute(json.loads(tc.function.arguments)) - except Exception as e: - result = {"error": str(e)} - messages.append({"role": "tool", "tool_call_id": tc.id, "content": json.dumps(result)}) - - print() - - -def example_with_openai(): - """Example of using search tool with OpenAI""" - print("Example 5: Using search tool with OpenAI\n") - - try: - from openai import OpenAI - - # Initialize OpenAI client - client = OpenAI() - - # Initialize StackOne toolset - toolset = StackOneToolSet(search={}) - - # Search for BambooHR employee tools - tools = toolset.search_tools("manage employees", account_ids=_account_ids, top_k=5) - - # Convert to OpenAI format - openai_tools = tools.to_openai() - - # Create a chat completion with discovered tools - response = client.chat.completions.create( - model="gpt-5.4", - messages=[ - { - "role": "system", - "content": "You are an HR assistant with access to employee management tools.", - }, - {"role": "user", "content": "Can you help me find tools for managing employee records?"}, - ], - tools=openai_tools, - tool_choice="auto", - ) - - print("OpenAI Response:", response.choices[0].message.content) - - if response.choices[0].message.tool_calls: - print("\nTool calls made:") - for tool_call in response.choices[0].message.tool_calls: - print(f" - {tool_call.function.name}") - - except ImportError: - print("OpenAI library not installed. 
Install with: pip install openai") - except Exception as e: - print(f"OpenAI example failed: {e}") - - print() - - -def example_with_langchain(): - """Example of using tools with LangChain""" - print("Example 6: Using tools with LangChain\n") - - try: - from langchain_core.messages import HumanMessage, ToolMessage - from langchain_openai import ChatOpenAI - except ImportError as e: - print(f"LangChain dependencies not installed: {e}") - print("Install with: pip install langchain-openai") - print() - return - - if not os.getenv("OPENAI_API_KEY"): - print("Skipped: Set OPENAI_API_KEY to run this example.") - print() - return - - toolset = StackOneToolSet(search={}) - - # Search for tools and convert to LangChain format - tools = toolset.search_tools("list employees", account_ids=_account_ids, top_k=5) - langchain_tools = list(tools.to_langchain()) - - print(f"Available tools: {len(langchain_tools)}") - for tool in langchain_tools: - print(f" - {tool.name}") - - # Bind tools to model and run - model = ChatOpenAI(model="gpt-5.4").bind_tools(langchain_tools) - tools_by_name = {t.name: t for t in langchain_tools} - messages = [HumanMessage(content="What employee tools do I have access to?")] +from stackone_ai import StackOneToolSet - for _ in range(5): - response = model.invoke(messages) - if not response.tool_calls: - print(f"\nAnswer: {response.content}") - break +account_id = os.getenv("STACKONE_ACCOUNT_ID", "") +_account_ids = [a.strip() for a in account_id.split(",") if a.strip()] if account_id else [] - messages.append(response) - for tc in response.tool_calls: - print(f" -> {tc['name']}({json.dumps(tc['args'])[:80]})") - tool = tools_by_name[tc["name"]] - try: - result = tool.invoke(tc["args"]) - except Exception as e: - result = {"error": str(e)} - messages.append(ToolMessage(content=json.dumps(result), tool_call_id=tc["id"])) - print() +# --- Example 1: get_search_tool() callable --- +print("=== get_search_tool() callable ===\n") +toolset = 
StackOneToolSet(search={}) +search_tool = toolset.get_search_tool() -def main(): - """Run all examples""" - print("=" * 60) - print("StackOne AI SDK - Search Tool Examples") - print("=" * 60) - print() +queries = ["cancel an event", "list employees", "send a message"] +for query in queries: + tools = search_tool(query, top_k=3, account_ids=_account_ids) + names = [t.name for t in tools] + print(f' "{query}" -> {", ".join(names) or "(none)"}') - if not os.getenv("STACKONE_API_KEY"): - print("Set STACKONE_API_KEY to run these examples.") - return - if not _account_ids: - print("Set STACKONE_ACCOUNT_ID to run these examples.") - print("(Comma-separated for multiple accounts)") - return +# --- Example 2: Constructor top_k vs per-call override --- +print("\n=== Constructor top_k vs per-call override ===\n") - # Basic examples that work without external APIs - example_search_tool_basic() - example_search_modes() - example_top_k_config() - example_search_tool_with_execution() +toolset_3 = StackOneToolSet(search={"top_k": 3}) +toolset_10 = StackOneToolSet(search={"top_k": 10}) - # Examples that require OpenAI API - if os.getenv("OPENAI_API_KEY"): - example_with_openai() - example_with_langchain() - else: - print("Set OPENAI_API_KEY to run OpenAI and LangChain examples\n") +query = "manage employee records" - print("=" * 60) - print("Examples completed!") - print("=" * 60) +tools_3 = toolset_3.search_tools(query, account_ids=_account_ids) +print(f"Constructor top_k=3: got {len(tools_3)} tools") +tools_10 = toolset_10.search_tools(query, account_ids=_account_ids) +print(f"Constructor top_k=10: got {len(tools_10)} tools") -if __name__ == "__main__": - main() +# Per-call override: constructor says 3 but this call says 10 +tools_override = toolset_3.search_tools(query, top_k=10, account_ids=_account_ids) +print(f"Per-call top_k=10 (overrides constructor 3): got {len(tools_override)} tools") diff --git a/stackone_ai/toolset.py b/stackone_ai/toolset.py index d69c54e..bc24037 
100644 --- a/stackone_ai/toolset.py +++ b/stackone_ai/toolset.py @@ -562,7 +562,7 @@ def __init__( base_url: str | None = None, search: SearchConfig | None = None, execute: ExecuteToolsConfig | None = None, - timeout: float = 60.0, + timeout: float | None = None, ) -> None: """Initialize StackOne tools with authentication @@ -580,7 +580,8 @@ def __init__( for tool execution. Pass ``{"account_ids": ["acc-1"]}`` to scope tools to specific accounts. timeout: Request timeout in seconds for tool execution HTTP calls. - Default: 60. Increase for slow providers (e.g. Workday). + Default: 60. Takes precedence over ``execute.timeout`` if set. + Increase for slow providers (e.g. Workday). Raises: ToolsetConfigError: If no API key is provided or found in environment @@ -598,8 +599,8 @@ def __init__( self._semantic_client: SemanticSearchClient | None = None self._search_config: SearchConfig | None = search self._execute_config: ExecuteToolsConfig | None = execute - execute_timeout = execute.get("timeout", 60.0) if execute else 60.0 - self._timeout: float = timeout if timeout != 60.0 else execute_timeout + execute_timeout = execute.get("timeout") if execute else None + self._timeout: float = timeout if timeout is not None else (execute_timeout or 60.0) self._tools_cache: Tools | None = None def set_accounts(self, account_ids: list[str]) -> StackOneToolSet: From 8aa36567350b1935dd272524440c08da57a36ce7 Mon Sep 17 00:00:00 2001 From: Shashi-Stackone Date: Tue, 7 Apr 2026 12:45:45 +0100 Subject: [PATCH 6/8] Add working Workday Example --- examples/workday_integration.py | 74 +++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 examples/workday_integration.py diff --git a/examples/workday_integration.py b/examples/workday_integration.py new file mode 100644 index 0000000..ae38ef9 --- /dev/null +++ b/examples/workday_integration.py @@ -0,0 +1,74 @@ +"""Workday integration: timeout and account scoping for slow providers. 
+ +Workday can take 10-15s to respond. This example shows how to configure +timeout and account_ids through the execute config. + +Run with: + uv run python examples/workday_integration.py +""" + +from __future__ import annotations + +import json +import os + +try: + from dotenv import load_dotenv + + load_dotenv() +except ModuleNotFoundError: + pass + +from openai import OpenAI + +from stackone_ai import StackOneToolSet + +account_id = os.getenv("STACKONE_ACCOUNT_ID", "") + +# Timeout and account_ids both live in the execute config +toolset = StackOneToolSet( + search={"method": "auto", "top_k": 5}, + execute={"account_ids": [account_id], "timeout": 120}, +) +client = OpenAI() + + +def run_agent(messages: list[dict], tools: list[dict], max_steps: int = 10) -> None: + """Simple agent loop: call LLM, execute tools, repeat.""" + for _ in range(max_steps): + response = client.chat.completions.create(model="gpt-5.4", messages=messages, tools=tools) + choice = response.choices[0] + + if not choice.message.tool_calls: + print(f"Answer: {choice.message.content}") + return + + messages.append(choice.message.model_dump(exclude_none=True)) + for tc in choice.message.tool_calls: + print(f" -> {tc.function.name}({tc.function.arguments[:80]})") + tool = toolset.execute(tc.function.name, tc.function.arguments) + messages.append({"role": "tool", "tool_call_id": tc.id, "content": json.dumps(tool)}) + + +# --- Example 1: Search and execute mode --- +# LLM gets tool_search + tool_execute, discovers tools autonomously +print("=== Search and execute mode ===\n") +run_agent( + messages=[ + {"role": "system", "content": "Use tool_search to find tools, then tool_execute to run them."}, + {"role": "user", "content": "List the first 5 employees."}, + ], + tools=toolset.openai(mode="search_and_execute"), +) + +# --- Example 2: Normal mode --- +# Fetch specific tools upfront, pass to LLM +print("\n=== Normal mode ===\n") +tools = toolset.fetch_tools(actions=["workday_*_employee*"]) +if 
len(tools) == 0: + print("No Workday tools found for this account.") +else: + run_agent( + messages=[{"role": "user", "content": "List the first 5 employees."}], + tools=tools.to_openai(), + ) From e0ca81ce999aec8319ab4b8fe8094604332dcff0 Mon Sep 17 00:00:00 2001 From: Shashi-Stackone Date: Tue, 7 Apr 2026 12:55:24 +0100 Subject: [PATCH 7/8] Add workday example in the test_examples --- examples/search_tool_example.py | 50 +++++++++-------- examples/test_examples.py | 1 + examples/workday_integration.py | 96 +++++++++++++++++---------------- 3 files changed, 77 insertions(+), 70 deletions(-) diff --git a/examples/search_tool_example.py b/examples/search_tool_example.py index 2618202..0ba286f 100644 --- a/examples/search_tool_example.py +++ b/examples/search_tool_example.py @@ -20,37 +20,41 @@ from stackone_ai import StackOneToolSet -account_id = os.getenv("STACKONE_ACCOUNT_ID", "") -_account_ids = [a.strip() for a in account_id.split(",") if a.strip()] if account_id else [] +def main() -> None: + account_id = os.getenv("STACKONE_ACCOUNT_ID", "") + _account_ids = [a.strip() for a in account_id.split(",") if a.strip()] if account_id else [] -# --- Example 1: get_search_tool() callable --- -print("=== get_search_tool() callable ===\n") + # --- Example 1: get_search_tool() callable --- + print("=== get_search_tool() callable ===\n") -toolset = StackOneToolSet(search={}) -search_tool = toolset.get_search_tool() + toolset = StackOneToolSet(search={}) + search_tool = toolset.get_search_tool() -queries = ["cancel an event", "list employees", "send a message"] -for query in queries: - tools = search_tool(query, top_k=3, account_ids=_account_ids) - names = [t.name for t in tools] - print(f' "{query}" -> {", ".join(names) or "(none)"}') + queries = ["cancel an event", "list employees", "send a message"] + for query in queries: + tools = search_tool(query, top_k=3, account_ids=_account_ids) + names = [t.name for t in tools] + print(f' "{query}" -> {", ".join(names) or 
"(none)"}') + # --- Example 2: Constructor top_k vs per-call override --- + print("\n=== Constructor top_k vs per-call override ===\n") -# --- Example 2: Constructor top_k vs per-call override --- -print("\n=== Constructor top_k vs per-call override ===\n") + toolset_3 = StackOneToolSet(search={"top_k": 3}) + toolset_10 = StackOneToolSet(search={"top_k": 10}) -toolset_3 = StackOneToolSet(search={"top_k": 3}) -toolset_10 = StackOneToolSet(search={"top_k": 10}) + query = "manage employee records" -query = "manage employee records" + tools_3 = toolset_3.search_tools(query, account_ids=_account_ids) + print(f"Constructor top_k=3: got {len(tools_3)} tools") -tools_3 = toolset_3.search_tools(query, account_ids=_account_ids) -print(f"Constructor top_k=3: got {len(tools_3)} tools") + tools_10 = toolset_10.search_tools(query, account_ids=_account_ids) + print(f"Constructor top_k=10: got {len(tools_10)} tools") -tools_10 = toolset_10.search_tools(query, account_ids=_account_ids) -print(f"Constructor top_k=10: got {len(tools_10)} tools") + # Per-call override: constructor says 3 but this call says 10 + tools_override = toolset_3.search_tools(query, top_k=10, account_ids=_account_ids) + print(f"Per-call top_k=10 (overrides constructor 3): got {len(tools_override)} tools") -# Per-call override: constructor says 3 but this call says 10 -tools_override = toolset_3.search_tools(query, top_k=10, account_ids=_account_ids) -print(f"Per-call top_k=10 (overrides constructor 3): got {len(tools_override)} tools") + +if __name__ == "__main__": + main() diff --git a/examples/test_examples.py b/examples/test_examples.py index 8a4c899..4a966d1 100644 --- a/examples/test_examples.py +++ b/examples/test_examples.py @@ -33,6 +33,7 @@ def get_example_files() -> list[str]: "search_tool_example.py": ["mcp"], "semantic_search_example.py": ["mcp"], "mcp_server.py": ["mcp"], + "workday_integration.py": ["openai", "mcp"], } diff --git a/examples/workday_integration.py b/examples/workday_integration.py 
index ae38ef9..8cba251 100644 --- a/examples/workday_integration.py +++ b/examples/workday_integration.py @@ -23,52 +23,54 @@ from stackone_ai import StackOneToolSet -account_id = os.getenv("STACKONE_ACCOUNT_ID", "") - -# Timeout and account_ids both live in the execute config -toolset = StackOneToolSet( - search={"method": "auto", "top_k": 5}, - execute={"account_ids": [account_id], "timeout": 120}, -) -client = OpenAI() - - -def run_agent(messages: list[dict], tools: list[dict], max_steps: int = 10) -> None: - """Simple agent loop: call LLM, execute tools, repeat.""" - for _ in range(max_steps): - response = client.chat.completions.create(model="gpt-5.4", messages=messages, tools=tools) - choice = response.choices[0] - - if not choice.message.tool_calls: - print(f"Answer: {choice.message.content}") - return - - messages.append(choice.message.model_dump(exclude_none=True)) - for tc in choice.message.tool_calls: - print(f" -> {tc.function.name}({tc.function.arguments[:80]})") - tool = toolset.execute(tc.function.name, tc.function.arguments) - messages.append({"role": "tool", "tool_call_id": tc.id, "content": json.dumps(tool)}) - - -# --- Example 1: Search and execute mode --- -# LLM gets tool_search + tool_execute, discovers tools autonomously -print("=== Search and execute mode ===\n") -run_agent( - messages=[ - {"role": "system", "content": "Use tool_search to find tools, then tool_execute to run them."}, - {"role": "user", "content": "List the first 5 employees."}, - ], - tools=toolset.openai(mode="search_and_execute"), -) - -# --- Example 2: Normal mode --- -# Fetch specific tools upfront, pass to LLM -print("\n=== Normal mode ===\n") -tools = toolset.fetch_tools(actions=["workday_*_employee*"]) -if len(tools) == 0: - print("No Workday tools found for this account.") -else: + +def main() -> None: + account_id = os.getenv("STACKONE_ACCOUNT_ID", "") + + # Timeout and account_ids both live in the execute config + toolset = StackOneToolSet( + search={"method": 
"auto", "top_k": 5}, + execute={"account_ids": [account_id], "timeout": 120}, + ) + client = OpenAI() + + def run_agent(messages: list[dict], tools: list[dict], max_steps: int = 10) -> None: + """Simple agent loop: call LLM, execute tools, repeat.""" + for _ in range(max_steps): + response = client.chat.completions.create(model="gpt-5.4", messages=messages, tools=tools) + choice = response.choices[0] + + if not choice.message.tool_calls: + print(f"Answer: {choice.message.content}") + return + + messages.append(choice.message.model_dump(exclude_none=True)) + for tc in choice.message.tool_calls: + print(f" -> {tc.function.name}({tc.function.arguments[:80]})") + result = toolset.execute(tc.function.name, tc.function.arguments) + messages.append({"role": "tool", "tool_call_id": tc.id, "content": json.dumps(result)}) + + # --- Example 1: Search and execute mode --- + print("=== Search and execute mode ===\n") run_agent( - messages=[{"role": "user", "content": "List the first 5 employees."}], - tools=tools.to_openai(), + messages=[ + {"role": "system", "content": "Use tool_search to find tools, then tool_execute to run them."}, + {"role": "user", "content": "List the first 5 employees."}, + ], + tools=toolset.openai(mode="search_and_execute"), ) + + # --- Example 2: Normal mode --- + print("\n=== Normal mode ===\n") + tools = toolset.fetch_tools(actions=["workday_*_employee*"]) + if len(tools) == 0: + print("No Workday tools found for this account.") + else: + run_agent( + messages=[{"role": "user", "content": "List the first 5 employees."}], + tools=tools.to_openai(), + ) + + +if __name__ == "__main__": + main() From 04b8bd0e9726fd99bc5bf3e8462b1b2500189260 Mon Sep 17 00:00:00 2001 From: Shashi-Stackone Date: Tue, 7 Apr 2026 14:43:05 +0100 Subject: [PATCH 8/8] Add the STACKONE_API_KEY too in example --- examples/search_tool_example.py | 30 ++++++++++++++++++------------ examples/workday_integration.py | 21 ++++++++++++++++++--- 2 files changed, 36 insertions(+), 15 
deletions(-) diff --git a/examples/search_tool_example.py b/examples/search_tool_example.py index 0ba286f..21f8882 100644 --- a/examples/search_tool_example.py +++ b/examples/search_tool_example.py @@ -3,6 +3,10 @@ For semantic search basics, see semantic_search_example.py. For full agent execution, see agent_tool_search.py. +Prerequisites: + - STACKONE_API_KEY environment variable + - STACKONE_ACCOUNT_ID environment variable + Run with: uv run python examples/search_tool_example.py """ @@ -22,37 +26,39 @@ def main() -> None: - account_id = os.getenv("STACKONE_ACCOUNT_ID", "") - _account_ids = [a.strip() for a in account_id.split(",") if a.strip()] if account_id else [] + api_key = os.getenv("STACKONE_API_KEY") + account_id = os.getenv("STACKONE_ACCOUNT_ID") + + if not api_key: + print("Set STACKONE_API_KEY to run this example.") + return + if not account_id: + print("Set STACKONE_ACCOUNT_ID to run this example.") + return # --- Example 1: get_search_tool() callable --- print("=== get_search_tool() callable ===\n") - toolset = StackOneToolSet(search={}) + toolset = StackOneToolSet(api_key=api_key, account_id=account_id, search={}) search_tool = toolset.get_search_tool() queries = ["cancel an event", "list employees", "send a message"] for query in queries: - tools = search_tool(query, top_k=3, account_ids=_account_ids) + tools = search_tool(query, top_k=3) names = [t.name for t in tools] print(f' "{query}" -> {", ".join(names) or "(none)"}') # --- Example 2: Constructor top_k vs per-call override --- print("\n=== Constructor top_k vs per-call override ===\n") - toolset_3 = StackOneToolSet(search={"top_k": 3}) - toolset_10 = StackOneToolSet(search={"top_k": 10}) + toolset_3 = StackOneToolSet(api_key=api_key, account_id=account_id, search={"top_k": 3}) query = "manage employee records" - tools_3 = toolset_3.search_tools(query, account_ids=_account_ids) + tools_3 = toolset_3.search_tools(query) print(f"Constructor top_k=3: got {len(tools_3)} tools") - tools_10 = 
toolset_10.search_tools(query, account_ids=_account_ids) - print(f"Constructor top_k=10: got {len(tools_10)} tools") - - # Per-call override: constructor says 3 but this call says 10 - tools_override = toolset_3.search_tools(query, top_k=10, account_ids=_account_ids) + tools_override = toolset_3.search_tools(query, top_k=10) print(f"Per-call top_k=10 (overrides constructor 3): got {len(tools_override)} tools") diff --git a/examples/workday_integration.py b/examples/workday_integration.py index 8cba251..8f89361 100644 --- a/examples/workday_integration.py +++ b/examples/workday_integration.py @@ -3,6 +3,11 @@ Workday can take 10-15s to respond. This example shows how to configure timeout and account_ids through the execute config. +Prerequisites: + - STACKONE_API_KEY environment variable + - STACKONE_ACCOUNT_ID environment variable (a Workday-connected account) + - OPENAI_API_KEY environment variable + Run with: uv run python examples/workday_integration.py """ @@ -25,12 +30,22 @@ def main() -> None: - account_id = os.getenv("STACKONE_ACCOUNT_ID", "") + api_key = os.getenv("STACKONE_API_KEY") + account_id = os.getenv("STACKONE_ACCOUNT_ID") + + if not api_key: + print("Set STACKONE_API_KEY to run this example.") + return + if not account_id: + print("Set STACKONE_ACCOUNT_ID to run this example.") + return - # Timeout and account_ids both live in the execute config + # Timeout for slow providers, account_id for scoping toolset = StackOneToolSet( + api_key=api_key, + account_id=account_id, search={"method": "auto", "top_k": 5}, - execute={"account_ids": [account_id], "timeout": 120}, + timeout=120, ) client = OpenAI()