From fe8ffc71b0e2d95802c96ef3426a8ca2510c95c6 Mon Sep 17 00:00:00 2001 From: Tomas Weiss Date: Wed, 21 Jan 2026 18:33:23 +0100 Subject: [PATCH 1/3] feat: basic agentstack overview skill --- .claude/skills/agentstack-overview/SKILL.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .claude/skills/agentstack-overview/SKILL.md diff --git a/.claude/skills/agentstack-overview/SKILL.md b/.claude/skills/agentstack-overview/SKILL.md new file mode 100644 index 0000000000..e14edc7ef9 --- /dev/null +++ b/.claude/skills/agentstack-overview/SKILL.md @@ -0,0 +1,16 @@ +--- +name: agentstack-overview +description: Provides basic insight into what the main goal of Agent Stack is, how it's structured, and how it can be used. +--- + +## Agent Stack Goal + +Agent Stack is a platform that provides infrastructure for developing and running AI agents. Agent builders can wrap their agent code with the Agent Stack SDK. The SDK creates a thin HTTP wrapper that exposes the [A2A - Agent to Agent](https://a2a-protocol.org/latest/specification) protocol, which is enhanced with custom [extensions](https://a2a-protocol.org/latest/topics/extensions/). + +The exposed HTTP server is then registered with the AgentStack server. The Agent Stack UI then provides a GUI (or CLI) to run the agent. + +## Agent Stack Personas + +### Agent Builder + +Typically a Python developer who either has an existing agent implemented in any framework (e.g., BeeAI, LangGraph, CrewAI) or is building an agent from scratch. This person wants to focus on building the agent, not on the interface, integration, or authentication. Agent Stack provides a quick and functional UI for their agents that they can use for local development, testing, or sharing with others. 
From 5011215d2d4ea872097e66dc1ad67a7334d7be13 Mon Sep 17 00:00:00 2001 From: Tomas Weiss Date: Wed, 21 Jan 2026 18:34:55 +0100 Subject: [PATCH 2/3] chore: basic skills for agentstack SDK Signed-off-by: Tomas Weiss --- .../agentstack-client-sdk-essentials/SKILL.MD | 245 ++++++++++++++++++ 1 file changed, 245 insertions(+) create mode 100644 .claude/skills/agentstack-client-sdk-essentials/SKILL.MD diff --git a/.claude/skills/agentstack-client-sdk-essentials/SKILL.MD b/.claude/skills/agentstack-client-sdk-essentials/SKILL.MD new file mode 100644 index 0000000000..7a55ae72ed --- /dev/null +++ b/.claude/skills/agentstack-client-sdk-essentials/SKILL.MD @@ -0,0 +1,245 @@ +--- +name: agentstack-client-sdk-essentials +description: Basic implementation guidelines how to work with Agent Stack client SDK +--- + +Custom GUI integration with Agent Stack requires dependency on both [A2A](https://a2a-protocol.org/latest/specification) as well as [Agent Stack](https://agentstack.beeai.dev/llms.txt). + +You need to install both: + +```bash +pnpm add @a2a-js/sdk agentstack-sdk +``` + +## Create A2A Client + +Agent Stack SDK is extending [A2A - Agent to Agent](https://a2a-protocol.org/latest/specification) protocol. The first step is creating proper instance of A2A client. + +Which can be done as follows: + +```ts +import { ClientFactory } from '@a2a-js/sdk/client'; + +// UUID of the agent +const agentId = '548cd604-ce87-4ca4-b988-68357ca4cc40' + +// Agent Stack runs on http://localhost:8334/ by default +const agentstackUrl = 'http://localhost:8334' + +// Agent URL passed to A2A client must ALWAYS be absolute +const agentUrl = `${agentstackUrl}/api/v1/a2a/${agentId}/agent-card.json`; + +const factory = new ClientFactory(); +const a2aClient = await factory.createFromUrl(agentUrl); +``` + +## Get list of available agents + +You need to know the agent UUID in advance. The easiest way to obtain it is via Agent Stack API client which is a wrapper around Agent Stack server API. 
+ +Agents are also called Providers. + +```ts +import { buildApiClient } from 'agentstack-sdk'; + +const agentstack = buildApiClient({ baseUrl: 'http://localhost:8334' }); + +// There are some filter options to query providers +const providers = await agentstack.listProviders({ + query: {} +}) + +if (providers.ok) { + // You will get paginated result + providers.data.items.forEach(provider => { + console.log(`The agent is named ${provider.agent_card.name} with id ${provider.id}`) + }) +} +``` + +## Multi turn (chat) conversation with the agent + +Using A2A client to achieve multi turn (Chat) conversation with agent is relatively simple. + +```ts +import type { + Message, + TaskArtifactUpdateEvent, + TaskStatusUpdateEvent, +} from '@a2a-js/sdk'; + +const conversationHistory: Message[] = [] + +// This identifies the session - conversation - context. Basically all the communication is correlated by the context id +// So it has to be used throughout the whole conversation +const contextId = crypto.randomUUID() + +const clientPrompt = 'This is the initial message from user' + +const clientMessage: Message = { + messageId: crypto.randomUUID(), + role: 'user', + parts: [{ kind: 'text', text: clientPrompt }], + kind: 'message', + contextId, + metadata: {} +}; + +const messageStream = client.sendMessageStream({ message: clientMessage }); + +// Ensures that the event coming from the stream is TaskStatusUpdateEvent +function isStatusUpdate(event: unknown): event is TaskStatusUpdateEvent { + return ( + typeof event === 'object' && + event !== null && + 'kind' in event && + (event as { kind: string }).kind === 'status-update' + ); +} + +let agentReply = '' +for await (const event of messageStream) { + // Agent Stack SDK uses TaskStatusUpdateEvent to emit + if (isStatusUpdate(event) && event.status?.message?.parts) { + for (const part of event.status.message.parts) { + if (part.kind === 'text') { + // Each part contains either whole string produced by agent + // or a token + 
agentReply += part.text + } + } + } +} + +// When stream is terminated, agent stopped generation +console.log(`Client prompted ${clientPrompt} and agent responded with ${agentReply}) +``` + +## Agent Stack Extensions + +Agent Stack is extending the A2A with extensions. They are advertised via Agent Card in A2A protocol and then supplied via Message metadata. + +Agents declare demands via agent card extensions. Client fulfills demands using dependency injection: + +1. Fetch agent card from `/.well-known/agent-card.json` +2. Figure out the demands in the Agent Card +3. Fulfill the demands in the client +4. Assemble and send the fulfillment via Message metadata. + + +### Generate Context Token to access Agent Stack API via Agent + +Most of the extensions might require communication with the Agent Stack API. For example Agent Stack provides OpenAI compatible endpoints for LLM inference that can be used by the agents. The agent then usually needs three values: model, url of the endpoint and API key. 
+ +Let's see how you can obtain the token: + +```ts +import { buildApiClient } from 'agentstack-sdk'; + +const agentstack = buildApiClient({ baseUrl: 'http://localhost:8334' }); + +// UUID of the agent +const agentId = '548cd604-ce87-4ca4-b988-68357ca4cc40' + +// Create working context (session) for the agent +const context = await api.createContext(agentId); + + +// Create token with scope of given permissions +const { token } = await api.createContextToken({ + contextId: context.id, + + // Global Permissions are permissions to global resources + globalPermissions: { llm: ['*'], a2a_proxy: ['*'] }, + + // Context Permissions are scoped to the specified context (session) - eg agent will be + // able to work with files only inside this conversation + contextPermissions: { + files: ['*'], + vector_stores: ['*'], + context_data: ['*'], + }, +}); + +// It is absolutely vital to use the contextID as generated id from Agent Stack +const clientMessage: Message = { + messageId: crypto.randomUUID(), + role: 'user', + parts: [{ kind: 'text', text: 'Text' }], + kind: 'message', + contextId, + metadata: {} +}; + +``` + +Generating the token needs to happen with existing Context in the Agent Stack, it's absolutely VITAL for you to use the context id in A2A messages. + +Having the context token is crucial for any other extension resolution. 
+ +### Example of how fulfill LLM extension + +```ts +import { + buildApiClient, + handleAgentCard, + buildLLMExtensionFulfillmentResolver, + type Fulfillments, +} from 'agentstack-sdk'; + +const agentCard = await a2aClient.getAgentCard(); + +// Extract demands from A2A Agent card +const { resolveMetadata, demands } = handleAgentCard(agentCard); + +// Build fulfillments +const fulfillments: Fulfillments = { + + // You always have to provide context token + getContextToken: () => token, + + llm: async (demand) => { + return { + llm_fulfillments: Object.entries(demand.llm_demands).reduce((memo, [demandKey]) => { + return { + ...memo, + // This can be resolved however you want + [demandKey]: { + api_model: 'gpt5', + api_base: 'http://openai-endpoint', + api_key: 'API_KEY' + } + } + + }, {}) + } + } +}; + +// Resolve the metadata to be added to each message +const metadata = await resolveMetadata(fulfillments); + +const message: Message = { + messageId: crypto.randomUUID(), + role: 'user', + parts: [{ kind: 'text', text: 'Message content' }], + kind: 'message', + contextId, + + // Add metadata to the message + metadata, +}; + +const stream = a2aClient.sendMessageStream({ message }); + +``` + +The Agent Stack provides LLM OpenAI compatible service that can be easily used and automatically resolved. 
+ +```ts +import { buildLLMExtensionFulfillmentResolver } from 'agentstack-sdk'; + +if (demands.llmDemands) { + fulfillments.llm = buildLLMExtensionFulfillmentResolver(agentstackApi, contextToken); +} +``` \ No newline at end of file From 8c72b0d7aa918036c2371c1198a873aac05658be Mon Sep 17 00:00:00 2001 From: Tomas Weiss Date: Wed, 21 Jan 2026 20:02:38 +0100 Subject: [PATCH 3/3] chore: improving the skills Signed-off-by: Tomas Weiss --- .../agentstack-client-sdk-essentials/SKILL.MD | 160 ++++++++---------- 1 file changed, 66 insertions(+), 94 deletions(-) diff --git a/.claude/skills/agentstack-client-sdk-essentials/SKILL.MD b/.claude/skills/agentstack-client-sdk-essentials/SKILL.MD index 7a55ae72ed..5eeed13890 100644 --- a/.claude/skills/agentstack-client-sdk-essentials/SKILL.MD +++ b/.claude/skills/agentstack-client-sdk-essentials/SKILL.MD @@ -11,28 +11,6 @@ You need to install both: pnpm add @a2a-js/sdk agentstack-sdk ``` -## Create A2A Client - -Agent Stack SDK is extending [A2A - Agent to Agent](https://a2a-protocol.org/latest/specification) protocol. The first step is creating proper instance of A2A client. - -Which can be done as follows: - -```ts -import { ClientFactory } from '@a2a-js/sdk/client'; - -// UUID of the agent -const agentId = '548cd604-ce87-4ca4-b988-68357ca4cc40' - -// Agent Stack runs on http://localhost:8334/ by default -const agentstackUrl = 'http://localhost:8334' - -// Agent URL passed to A2A client must ALWAYS be absolute -const agentUrl = `${agentstackUrl}/api/v1/a2a/${agentId}/agent-card.json`; - -const factory = new ClientFactory(); -const a2aClient = await factory.createFromUrl(agentUrl); -``` - ## Get list of available agents You need to know the agent UUID in advance. The easiest way to obtain it is via Agent Stack API client which is a wrapper around Agent Stack server API. 
@@ -44,19 +22,79 @@ import { buildApiClient } from 'agentstack-sdk'; const agentstack = buildApiClient({ baseUrl: 'http://localhost:8334' }); -// There are some filter options to query providers const providers = await agentstack.listProviders({ query: {} }) if (providers.ok) { - // You will get paginated result providers.data.items.forEach(provider => { console.log(`The agent is named ${provider.agent_card.name} with id ${provider.id}`) }) } ``` +## Create Context and Token + +Before creating A2A client, you must create a context (session) in Agent Stack. The context ID is used to correlate all communication throughout the conversation. It is also used for A2A client to communicate with the agents. + +```ts +import { buildApiClient } from 'agentstack-sdk'; + +const agentstack = buildApiClient({ baseUrl: 'http://localhost:8334' }); + +const agentId = '548cd604-ce87-4ca4-b988-68357ca4cc40' + +const context = await agentstack.createContext(agentId); + +const { token } = await agentstack.createContextToken({ + contextId: context.id, + + globalPermissions: { llm: ['*'], a2a_proxy: ['*'] }, + + contextPermissions: { + files: ['*'], + vector_stores: ['*'], + context_data: ['*'], + }, +}); + +const contextId = context.id; +``` + +The `contextId` from Agent Stack MUST be used in all A2A messages. Do not generate your own UUID. + +## Create A2A Client + +Agent Stack SDK extends [A2A - Agent to Agent](https://a2a-protocol.org/latest/specification) protocol. + +You need to create the client with a caveat that we need to provide fetch implemneation that packs in context token as authorization header. 
Agent Stack SDK exposes a helper to build this fetch implementation: `createAuthenticatedFetch`.
Basically all the communication is correlated by the context id -// So it has to be used throughout the whole conversation -const contextId = crypto.randomUUID() - const clientPrompt = 'This is the initial message from user' const clientMessage: Message = { @@ -85,9 +119,8 @@ const clientMessage: Message = { metadata: {} }; -const messageStream = client.sendMessageStream({ message: clientMessage }); +const messageStream = a2aClient.sendMessageStream({ message: clientMessage }); -// Ensures that the event coming from the stream is TaskStatusUpdateEvent function isStatusUpdate(event: unknown): event is TaskStatusUpdateEvent { return ( typeof event === 'object' && @@ -99,25 +132,21 @@ function isStatusUpdate(event: unknown): event is TaskStatusUpdateEvent { let agentReply = '' for await (const event of messageStream) { - // Agent Stack SDK uses TaskStatusUpdateEvent to emit if (isStatusUpdate(event) && event.status?.message?.parts) { for (const part of event.status.message.parts) { if (part.kind === 'text') { - // Each part contains either whole string produced by agent - // or a token agentReply += part.text } } } } -// When stream is terminated, agent stopped generation console.log(`Client prompted ${clientPrompt} and agent responded with ${agentReply}) ``` ## Agent Stack Extensions -Agent Stack is extending the A2A with extensions. They are advertised via Agent Card in A2A protocol and then supplied via Message metadata. +Agent Stack extends A2A with extensions. They are advertised via Agent Card and supplied via Message metadata. Agents declare demands via agent card extensions. Client fulfills demands using dependency injection: @@ -126,57 +155,6 @@ Agents declare demands via agent card extensions. Client fulfills demands using 3. Fulfill the demands in the client 4. Assemble and send the fulfillment via Message metadata. 
- -### Generate Context Token to access Agent Stack API via Agent - -Most of the extensions might require communication with the Agent Stack API. For example Agent Stack provides OpenAI compatible endpoints for LLM inference that can be used by the agents. The agent then usually needs three values: model, url of the endpoint and API key. - -Let's see how you can obtain the token: - -```ts -import { buildApiClient } from 'agentstack-sdk'; - -const agentstack = buildApiClient({ baseUrl: 'http://localhost:8334' }); - -// UUID of the agent -const agentId = '548cd604-ce87-4ca4-b988-68357ca4cc40' - -// Create working context (session) for the agent -const context = await api.createContext(agentId); - - -// Create token with scope of given permissions -const { token } = await api.createContextToken({ - contextId: context.id, - - // Global Permissions are permissions to global resources - globalPermissions: { llm: ['*'], a2a_proxy: ['*'] }, - - // Context Permissions are scoped to the specified context (session) - eg agent will be - // able to work with files only inside this conversation - contextPermissions: { - files: ['*'], - vector_stores: ['*'], - context_data: ['*'], - }, -}); - -// It is absolutely vital to use the contextID as generated id from Agent Stack -const clientMessage: Message = { - messageId: crypto.randomUUID(), - role: 'user', - parts: [{ kind: 'text', text: 'Text' }], - kind: 'message', - contextId, - metadata: {} -}; - -``` - -Generating the token needs to happen with existing Context in the Agent Stack, it's absolutely VITAL for you to use the context id in A2A messages. - -Having the context token is crucial for any other extension resolution. 
- ### Example of how fulfill LLM extension ```ts @@ -189,13 +167,10 @@ import { const agentCard = await a2aClient.getAgentCard(); -// Extract demands from A2A Agent card const { resolveMetadata, demands } = handleAgentCard(agentCard); -// Build fulfillments const fulfillments: Fulfillments = { - // You always have to provide context token getContextToken: () => token, llm: async (demand) => { @@ -203,7 +178,6 @@ const fulfillments: Fulfillments = { llm_fulfillments: Object.entries(demand.llm_demands).reduce((memo, [demandKey]) => { return { ...memo, - // This can be resolved however you want [demandKey]: { api_model: 'gpt5', api_base: 'http://openai-endpoint', @@ -216,7 +190,6 @@ const fulfillments: Fulfillments = { } }; -// Resolve the metadata to be added to each message const metadata = await resolveMetadata(fulfillments); const message: Message = { @@ -226,7 +199,6 @@ const message: Message = { kind: 'message', contextId, - // Add metadata to the message metadata, }; @@ -240,6 +212,6 @@ The Agent Stack provides LLM OpenAI compatible service that can be easily used a import { buildLLMExtensionFulfillmentResolver } from 'agentstack-sdk'; if (demands.llmDemands) { - fulfillments.llm = buildLLMExtensionFulfillmentResolver(agentstackApi, contextToken); + fulfillments.llm = buildLLMExtensionFulfillmentResolver(agentstack, token); } -``` \ No newline at end of file +```