-rw-r--r--  packages/core/src/core/geminiChat.ts               86
-rw-r--r--  packages/core/src/core/loggingContentGenerator.ts  114
2 files changed, 111 insertions, 89 deletions
diff --git a/packages/core/src/core/geminiChat.ts b/packages/core/src/core/geminiChat.ts
index 27b34d87..cef82d23 100644
--- a/packages/core/src/core/geminiChat.ts
+++ b/packages/core/src/core/geminiChat.ts
@@ -14,15 +14,12 @@ import {
SendMessageParameters,
createUserContent,
Part,
- GenerateContentResponseUsageMetadata,
Tool,
} from '@google/genai';
import { retryWithBackoff } from '../utils/retry.js';
import { isFunctionResponse } from '../utils/messageInspectors.js';
import { ContentGenerator, AuthType } from './contentGenerator.js';
import { Config } from '../config/config.js';
-import { logApiResponse, logApiError } from '../telemetry/loggers.js';
-import { ApiErrorEvent, ApiResponseEvent } from '../telemetry/types.js';
import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js';
import { hasCycleInSchema } from '../tools/tools.js';
import { StructuredError } from './turn.js';
@@ -131,46 +128,6 @@ export class GeminiChat {
validateHistory(history);
}
- private async _logApiResponse(
- durationMs: number,
- prompt_id: string,
- usageMetadata?: GenerateContentResponseUsageMetadata,
- responseText?: string,
- ): Promise<void> {
- logApiResponse(
- this.config,
- new ApiResponseEvent(
- this.config.getModel(),
- durationMs,
- prompt_id,
- this.config.getContentGeneratorConfig()?.authType,
- usageMetadata,
- responseText,
- ),
- );
- }
-
- private _logApiError(
- durationMs: number,
- error: unknown,
- prompt_id: string,
- ): void {
- const errorMessage = error instanceof Error ? error.message : String(error);
- const errorType = error instanceof Error ? error.name : 'unknown';
-
- logApiError(
- this.config,
- new ApiErrorEvent(
- this.config.getModel(),
- errorMessage,
- durationMs,
- prompt_id,
- this.config.getContentGeneratorConfig()?.authType,
- errorType,
- ),
- );
- }
-
/**
* Handles falling back to Flash model when persistent 429 errors occur for OAuth users.
* Uses a fallback handler if provided by the config; otherwise, returns null.
@@ -249,7 +206,6 @@ export class GeminiChat {
const userContent = createUserContent(params.message);
const requestContents = this.getHistory(true).concat(userContent);
- const startTime = Date.now();
let response: GenerateContentResponse;
try {
@@ -290,13 +246,6 @@ export class GeminiChat {
await this.handleFlashFallback(authType, error),
authType: this.config.getContentGeneratorConfig()?.authType,
});
- const durationMs = Date.now() - startTime;
- await this._logApiResponse(
- durationMs,
- prompt_id,
- response.usageMetadata,
- JSON.stringify(response),
- );
this.sendPromise = (async () => {
const outputContent = response.candidates?.[0]?.content;
@@ -324,8 +273,6 @@ export class GeminiChat {
});
return response;
} catch (error) {
- const durationMs = Date.now() - startTime;
- this._logApiError(durationMs, error, prompt_id);
this.sendPromise = Promise.resolve();
throw error;
}
@@ -361,8 +308,6 @@ export class GeminiChat {
const userContent = createUserContent(params.message);
const requestContents = this.getHistory(true).concat(userContent);
- const startTime = Date.now();
-
try {
const apiCall = () => {
const modelToUse = this.config.getModel();
@@ -413,16 +358,9 @@ export class GeminiChat {
.then(() => undefined)
.catch(() => undefined);
- const result = this.processStreamResponse(
- streamResponse,
- userContent,
- startTime,
- prompt_id,
- );
+ const result = this.processStreamResponse(streamResponse, userContent);
return result;
} catch (error) {
- const durationMs = Date.now() - startTime;
- this._logApiError(durationMs, error, prompt_id);
this.sendPromise = Promise.resolve();
throw error;
}
@@ -483,17 +421,6 @@ export class GeminiChat {
this.generationConfig.tools = tools;
}
- getFinalUsageMetadata(
- chunks: GenerateContentResponse[],
- ): GenerateContentResponseUsageMetadata | undefined {
- const lastChunkWithMetadata = chunks
- .slice()
- .reverse()
- .find((chunk) => chunk.usageMetadata);
-
- return lastChunkWithMetadata?.usageMetadata;
- }
-
async maybeIncludeSchemaDepthContext(error: StructuredError): Promise<void> {
// Check for potentially problematic cyclic tools with cyclic schemas
// and include a recommendation to remove potentially problematic tools.
@@ -525,8 +452,6 @@ export class GeminiChat {
private async *processStreamResponse(
streamResponse: AsyncGenerator<GenerateContentResponse>,
inputContent: Content,
- startTime: number,
- prompt_id: string,
) {
const outputContent: Content[] = [];
const chunks: GenerateContentResponse[] = [];
@@ -549,25 +474,16 @@ export class GeminiChat {
}
} catch (error) {
errorOccurred = true;
- const durationMs = Date.now() - startTime;
- this._logApiError(durationMs, error, prompt_id);
throw error;
}
if (!errorOccurred) {
- const durationMs = Date.now() - startTime;
const allParts: Part[] = [];
for (const content of outputContent) {
if (content.parts) {
allParts.push(...content.parts);
}
}
- await this._logApiResponse(
- durationMs,
- prompt_id,
- this.getFinalUsageMetadata(chunks),
- JSON.stringify(chunks),
- );
}
this.recordHistory(inputContent, outputContent);
}
diff --git a/packages/core/src/core/loggingContentGenerator.ts b/packages/core/src/core/loggingContentGenerator.ts
index c9069bab..305b19a4 100644
--- a/packages/core/src/core/loggingContentGenerator.ts
+++ b/packages/core/src/core/loggingContentGenerator.ts
@@ -11,11 +11,20 @@ import {
EmbedContentParameters,
EmbedContentResponse,
GenerateContentParameters,
+ GenerateContentResponseUsageMetadata,
GenerateContentResponse,
} from '@google/genai';
-import { ApiRequestEvent } from '../telemetry/types.js';
+import {
+ ApiRequestEvent,
+ ApiResponseEvent,
+ ApiErrorEvent,
+} from '../telemetry/types.js';
import { Config } from '../config/config.js';
-import { logApiRequest } from '../telemetry/loggers.js';
+import {
+ logApiError,
+ logApiRequest,
+ logApiResponse,
+} from '../telemetry/loggers.js';
import { ContentGenerator } from './contentGenerator.js';
import { toContents } from '../code_assist/converter.js';
@@ -40,20 +49,117 @@ export class LoggingContentGenerator implements ContentGenerator {
);
}
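+ // Emits an ApiResponseEvent (model, duration, prompt id, auth type, usage metadata) to the telemetry logger.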
+ private _logApiResponse(
+ durationMs: number,
+ prompt_id: string,
+ usageMetadata?: GenerateContentResponseUsageMetadata,
+ responseText?: string,
+ ): void {
+ logApiResponse(
+ this.config,
+ new ApiResponseEvent(
+ this.config.getModel(),
+ durationMs,
+ prompt_id,
+ this.config.getContentGeneratorConfig()?.authType,
+ usageMetadata,
+ responseText,
+ ),
+ );
+ }
+
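+ // Emits an ApiErrorEvent with the error message and error type alongside the usual request metadata.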
+ private _logApiError(
+ durationMs: number,
+ error: unknown,
+ prompt_id: string,
+ ): void {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ const errorType = error instanceof Error ? error.name : 'unknown';
+
+ logApiError(
+ this.config,
+ new ApiErrorEvent(
+ this.config.getModel(),
+ errorMessage,
+ durationMs,
+ prompt_id,
+ this.config.getContentGeneratorConfig()?.authType,
+ errorType,
+ ),
+ );
+ }
+
async generateContent(
req: GenerateContentParameters,
userPromptId: string,
): Promise<GenerateContentResponse> {
+ const startTime = Date.now();
this.logApiRequest(toContents(req.contents), req.model, userPromptId);
- return this.wrapped.generateContent(req, userPromptId);
+ try {
+ const response = await this.wrapped.generateContent(req, userPromptId);
+ const durationMs = Date.now() - startTime;
+ this._logApiResponse(
+ durationMs,
+ userPromptId,
+ response.usageMetadata,
+ JSON.stringify(response),
+ );
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ this._logApiError(durationMs, error, userPromptId);
+ throw error;
+ }
}
async generateContentStream(
req: GenerateContentParameters,
userPromptId: string,
): Promise<AsyncGenerator<GenerateContentResponse>> {
+ const startTime = Date.now();
this.logApiRequest(toContents(req.contents), req.model, userPromptId);
- return this.wrapped.generateContentStream(req, userPromptId);
+
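+ // Failures while opening the stream are logged here; failures during iteration are handled by loggingStreamWrapper.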
+ let stream: AsyncGenerator<GenerateContentResponse>;
+ try {
+ stream = await this.wrapped.generateContentStream(req, userPromptId);
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ this._logApiError(durationMs, error, userPromptId);
+ throw error;
+ }
+
+ return this.loggingStreamWrapper(stream, startTime, userPromptId);
+ }
+
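+ // Re-yields each chunk while deferring telemetry until the stream completes, so the logged duration and usage metadata cover the entire response.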
+ private async *loggingStreamWrapper(
+ stream: AsyncGenerator<GenerateContentResponse>,
+ startTime: number,
+ userPromptId: string,
+ ): AsyncGenerator<GenerateContentResponse> {
+ let lastResponse: GenerateContentResponse | undefined;
+ let lastUsageMetadata: GenerateContentResponseUsageMetadata | undefined;
+ try {
+ for await (const response of stream) {
+ lastResponse = response;
+ if (response.usageMetadata) {
+ lastUsageMetadata = response.usageMetadata;
+ }
+ yield response;
+ }
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ this._logApiError(durationMs, error, userPromptId);
+ throw error;
+ }
+ const durationMs = Date.now() - startTime;
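+ // Only log a response event if the stream produced at least one chunk.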
+ if (lastResponse) {
+ this._logApiResponse(
+ durationMs,
+ userPromptId,
+ lastUsageMetadata,
+ JSON.stringify(lastResponse),
+ );
+ }
}
async countTokens(req: CountTokensParameters): Promise<CountTokensResponse> {