Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion core/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
"prepublishOnly": "npm run build:bundle"
},
"dependencies": {
"@google/genai": "^1.37.0",
"@google/genai": "^1.39.0",
"@modelcontextprotocol/sdk": "^1.26.0",
"google-auth-library": "^10.3.0",
"lodash-es": "^4.17.23",
Expand Down
2 changes: 1 addition & 1 deletion core/src/agents/content_processor_utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -292,7 +292,7 @@ function mergeFunctionResponseEvents(events: Event[]): Event {
* Rearrange the async functionResponse events in the history.
*/
function rearrangeEventsForLatestFunctionResponse(events: Event[]): Event[] {
if (events.length === 0) {
if (events.length < 2) {
return events;
}

Expand Down
9 changes: 4 additions & 5 deletions core/src/agents/llm_agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1627,11 +1627,10 @@ export class LlmAgent extends BaseAgent {
yield event;
}

if (!lastEvent || isFinalResponse(lastEvent)) {
break;
}
if (lastEvent.partial) {
logger.warn('The last event is partial, which is not expected.');
if (!lastEvent || isFinalResponse(lastEvent) || lastEvent.partial) {
if (lastEvent?.partial) {
logger.warn('The last event is partial, which is not expected.');
}
break;
}
}
Expand Down
18 changes: 18 additions & 0 deletions core/src/features/index.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/

/**
 * Names of feature flags used to gate in-development functionality.
 */
export enum FeatureName {
  PROGRESSIVE_SSE_STREAMING = 'PROGRESSIVE_SSE_STREAMING',
}

// Features currently rolled out; remove an entry to disable a feature
// without deleting its gated code paths.
const ENABLED_FEATURES: ReadonlySet<FeatureName> = new Set([
  FeatureName.PROGRESSIVE_SSE_STREAMING,
]);

/**
 * Returns whether the given feature is currently enabled.
 *
 * @param featureName The feature flag to check.
 * @returns `true` if the feature is rolled out; `false` for any
 *     unknown or disabled feature.
 */
export function isFeatureEnabled(featureName: FeatureName): boolean {
  return ENABLED_FEATURES.has(featureName);
}
79 changes: 11 additions & 68 deletions core/src/models/google_llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,10 @@
* SPDX-License-Identifier: Apache-2.0
*/

import {
Blob,
createPartFromText,
FileData,
FinishReason,
GenerateContentResponse,
GoogleGenAI,
Part,
} from '@google/genai';
import {Blob, createPartFromText, FileData, GoogleGenAI} from '@google/genai';

import {logger} from '../utils/logger.js';
import {StreamingResponseAggregator} from '../utils/streaming_utils.js';
import {GoogleLLMVariant} from '../utils/variant_utils.js';

import {BaseLlm} from './base_llm.js';
Expand Down Expand Up @@ -172,72 +165,22 @@ export class Gemini extends BaseLlm {
}

if (stream) {
const streamResult = await this.apiClient.models.generateContentStream({
const responses = await this.apiClient.models.generateContentStream({
model: llmRequest.model ?? this.model,
contents: llmRequest.contents,
config: llmRequest.config,
});
let thoughtText = '';
let text = '';
let usageMetadata;
let lastResponse: GenerateContentResponse | undefined;

// TODO - b/425992518: verify the type of streaming response is correct.
for await (const response of streamResult) {
lastResponse = response;
const llmResponse = createLlmResponse(response);
usageMetadata = llmResponse.usageMetadata;
const firstPart = llmResponse.content?.parts?.[0];
// Accumulates the text and thought text from the first part.
if (firstPart?.text) {
if ('thought' in firstPart && firstPart.thought) {
thoughtText += firstPart.text;
} else {
text += firstPart.text;
}
llmResponse.partial = true;
} else if (
(thoughtText || text) &&
(!firstPart || !firstPart.inlineData)
) {
// Flushes the data if there's no more text.
const parts: Part[] = [];
if (thoughtText) {
parts.push({text: thoughtText, thought: true});
}
if (text) {
parts.push(createPartFromText(text));
}
yield {
content: {
role: 'model',
parts,
},
usageMetadata: llmResponse.usageMetadata,
};
thoughtText = '';
text = '';
const aggregator = new StreamingResponseAggregator();
for await (const response of responses) {
for await (const llmResponse of aggregator.processResponse(response)) {
yield llmResponse;
}
yield llmResponse;
}
if (
(text || thoughtText) &&
lastResponse?.candidates?.[0]?.finishReason === FinishReason.STOP
) {
const parts: Part[] = [];
if (thoughtText) {
parts.push({text: thoughtText, thought: true} as Part);
}
if (text) {
parts.push({text: text});
}
yield {
content: {
role: 'model',
parts,
},
usageMetadata,
};

const closeResult = aggregator.close();
if (closeResult) {
yield closeResult;
}
} else {
const response = await this.apiClient.models.generateContent({
Expand Down
Loading
Loading