Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 24 additions & 0 deletions .speakeasy/in.openapi.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7909,6 +7909,30 @@ components:
type: number
description: Video input tokens
description: Detailed prompt token usage
cost:
type: number
nullable: true
description: Cost of the completion in USD
is_byok:
type: boolean
description: Whether a request was made using a Bring Your Own Key configuration
cost_details:
type: object
nullable: true
properties:
upstream_inference_cost:
type: number
nullable: true
description: Total cost charged by the upstream provider
upstream_inference_prompt_cost:
type: number
nullable: true
description: Upstream prompt (input) token cost
upstream_inference_completions_cost:
type: number
nullable: true
description: Upstream completions (output) token cost
description: Detailed cost breakdown
required:
- completion_tokens
- prompt_tokens
Expand Down
53 changes: 53 additions & 0 deletions src/models/chatgenerationtokenusage.ts
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,24 @@ export type PromptTokensDetails = {
videoTokens?: number | undefined;
};

/**
 * Detailed cost breakdown
 *
 * Mirrors the API's snake_case `cost_details` object; field names are
 * remapped to camelCase by the corresponding inbound schema.
 * All fields are optional and nullable — absent when the provider does
 * not report upstream costs. Values are presumably USD, matching the
 * top-level `cost` field — TODO confirm against the API spec.
 */
export type ChatGenerationCostDetails = {
  /**
   * Total cost charged by the upstream provider
   */
  upstreamInferenceCost?: number | null | undefined;
  /**
   * Upstream prompt (input) token cost
   */
  upstreamInferencePromptCost?: number | null | undefined;
  /**
   * Upstream completions (output) token cost
   */
  upstreamInferenceCompletionsCost?: number | null | undefined;
};

/**
* Token usage statistics
*/
Expand All @@ -77,6 +95,18 @@ export type ChatGenerationTokenUsage = {
* Detailed prompt token usage
*/
promptTokensDetails?: PromptTokensDetails | null | undefined;
/**
* Cost of the completion in USD
*/
cost?: number | null | undefined;
/**
* Whether a request was made using a Bring Your Own Key configuration
*/
isByok?: boolean | undefined;
/**
* Detailed cost breakdown
*/
costDetails?: ChatGenerationCostDetails | null | undefined;
};

/** @internal */
Expand Down Expand Up @@ -135,6 +165,22 @@ export function promptTokensDetailsFromJSON(
);
}

/** @internal */
// Parses the wire-format (snake_case) cost_details object and remaps its
// keys to the camelCase ChatGenerationCostDetails shape.
// NOTE: zod v3's ZodType is generic over <Output, Def extends ZodTypeDef,
// Input>; the second parameter must be z.ZodTypeDef (the original passed
// `unknown` there, which violates the constraint). This also matches the
// sibling ChatGenerationTokenUsage$inboundSchema declaration pattern.
export const ChatGenerationCostDetails$inboundSchema: z.ZodType<
  ChatGenerationCostDetails,
  z.ZodTypeDef,
  unknown
> = z.object({
  upstream_inference_cost: z.nullable(z.number()).optional(),
  upstream_inference_prompt_cost: z.nullable(z.number()).optional(),
  upstream_inference_completions_cost: z.nullable(z.number()).optional(),
}).transform((v) => {
  // remap$ renames wire keys to their camelCase model equivalents.
  return remap$(v, {
    "upstream_inference_cost": "upstreamInferenceCost",
    "upstream_inference_prompt_cost": "upstreamInferencePromptCost",
    "upstream_inference_completions_cost": "upstreamInferenceCompletionsCost",
  });
});

/** @internal */
export const ChatGenerationTokenUsage$inboundSchema: z.ZodType<
ChatGenerationTokenUsage,
Expand All @@ -149,13 +195,20 @@ export const ChatGenerationTokenUsage$inboundSchema: z.ZodType<
prompt_tokens_details: z.nullable(
z.lazy(() => PromptTokensDetails$inboundSchema),
).optional(),
cost: z.nullable(z.number()).optional(),
is_byok: z.boolean().optional(),
cost_details: z.nullable(
z.lazy(() => ChatGenerationCostDetails$inboundSchema),
).optional(),
}).transform((v) => {
return remap$(v, {
"completion_tokens": "completionTokens",
"prompt_tokens": "promptTokens",
"total_tokens": "totalTokens",
"completion_tokens_details": "completionTokensDetails",
"prompt_tokens_details": "promptTokensDetails",
"is_byok": "isByok",
"cost_details": "costDetails",
});
});

Expand Down