| field | value | date |
|---|---|---|
| author | Jerop Kipruto <[email protected]> | 2025-06-13 03:44:17 -0400 |
| committer | GitHub <[email protected]> | 2025-06-13 03:44:17 -0400 |
| commit | b20c8389f3f483f3972c254ec97fff4004b7c75f | |
| tree | 2afe57f8f7232e3ba7c9dde4cdfd3ee510bb994a /packages/core/src/telemetry/loggers.ts | |
| parent | 8bb6eca91548330f03feeedfa36372edf8aca1c6 | |
Handle telemetry in non-interactive mode (#1002)
Changes:
- Ensure proper shutdown in non-interactive mode
- Ensure the initial user prompt is logged in non-interactive mode
- Improve telemetry for streaming: usage metadata is now combined across response chunks, and the input token count is reported alongside the other token counts in the response (see the sketch after this list)
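To illustrate the streaming change, here is a hypothetical caller sketch (the real call site lives outside `loggers.ts`; `collectStreamUsage` is an illustrative name): buffer chunks as they arrive, then fold their usage metadata into a single object once the stream completes, so input tokens are reported with the response rather than the request.

```typescript
import { GenerateContentResponse } from '@google/genai';
import { combinedUsageMetadata } from './loggers.js';

// Hypothetical sketch: buffer the streamed chunks, then fold their
// usageMetadata into a single object once the stream completes.
async function collectStreamUsage(
  stream: AsyncIterable<GenerateContentResponse>,
): Promise<void> {
  const chunks: GenerateContentResponse[] = [];
  for await (const chunk of stream) {
    chunks.push(chunk); // forward/display the chunk as usual, then keep it
  }
  const usage = combinedUsageMetadata(chunks);
  // promptTokenCount (the input count) now travels with the response,
  // alongside candidates/cached/thoughts/tool/total counts.
  console.log(usage.promptTokenCount, usage.totalTokenCount);
}
```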
To test:
- Follow instructions in https://github.com/google-gemini/gemini-cli/blob/main/docs/core/telemetry.md#google-cloud
- Run CLI in non-interactive mode and observe logs/metrics in GCP Logs Explorer and Metrics Explorer
#750
Diffstat (limited to 'packages/core/src/telemetry/loggers.ts')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | packages/core/src/telemetry/loggers.ts | 60 |
1 file changed, 52 insertions, 8 deletions
```diff
diff --git a/packages/core/src/telemetry/loggers.ts b/packages/core/src/telemetry/loggers.ts
index 66f584e7..f6896def 100644
--- a/packages/core/src/telemetry/loggers.ts
+++ b/packages/core/src/telemetry/loggers.ts
@@ -31,6 +31,10 @@ import {
 } from './metrics.js';
 import { isTelemetrySdkInitialized } from './sdk.js';
 import { ToolConfirmationOutcome } from '../index.js';
+import {
+  GenerateContentResponse,
+  GenerateContentResponseUsageMetadata,
+} from '@google/genai';
 
 const shouldLogUserPrompts = (config: Config): boolean =>
   config.getTelemetryLogUserPromptsEnabled() ?? false;
@@ -119,7 +123,7 @@ export function logUserPrompt(
   const logger = logs.getLogger(SERVICE_NAME);
   const logRecord: LogRecord = {
-    body: `User prompt. Length: ${event.prompt_length}`,
+    body: `User prompt. Length: ${event.prompt_length}.`,
     attributes,
   };
   logger.emit(logRecord);
 }
@@ -176,16 +180,10 @@ export function logApiRequest(
   };
   const logger = logs.getLogger(SERVICE_NAME);
   const logRecord: LogRecord = {
-    body: `API request to ${event.model}. Tokens: ${event.input_token_count}.`,
+    body: `API request to ${event.model}.`,
     attributes,
   };
   logger.emit(logRecord);
-  recordTokenUsageMetrics(
-    config,
-    event.model,
-    event.input_token_count,
-    'input',
-  );
 }
 
 export function logApiError(
@@ -261,6 +259,12 @@ export function logApiResponse(
   recordTokenUsageMetrics(
     config,
     event.model,
+    event.input_token_count,
+    'input',
+  );
+  recordTokenUsageMetrics(
+    config,
+    event.model,
     event.output_token_count,
     'output',
   );
@@ -278,3 +282,43 @@ export function logApiResponse(
   );
   recordTokenUsageMetrics(config, event.model, event.tool_token_count, 'tool');
 }
+
+export function combinedUsageMetadata(
+  chunks: GenerateContentResponse[],
+): GenerateContentResponseUsageMetadata {
+  const metadataKeys: Array<keyof GenerateContentResponseUsageMetadata> = [
+    'promptTokenCount',
+    'candidatesTokenCount',
+    'cachedContentTokenCount',
+    'thoughtsTokenCount',
+    'toolUsePromptTokenCount',
+    'totalTokenCount',
+  ];
+
+  const totals: Record<keyof GenerateContentResponseUsageMetadata, number> = {
+    promptTokenCount: 0,
+    candidatesTokenCount: 0,
+    cachedContentTokenCount: 0,
+    thoughtsTokenCount: 0,
+    toolUsePromptTokenCount: 0,
+    totalTokenCount: 0,
+    cacheTokensDetails: 0,
+    candidatesTokensDetails: 0,
+    promptTokensDetails: 0,
+    toolUsePromptTokensDetails: 0,
+    trafficType: 0,
+  };
+
+  for (const chunk of chunks) {
+    if (chunk.usageMetadata) {
+      for (const key of metadataKeys) {
+        const chunkValue = chunk.usageMetadata[key];
+        if (typeof chunkValue === 'number') {
+          totals[key] += chunkValue;
+        }
+      }
+    }
+  }
+
+  return totals as unknown as GenerateContentResponseUsageMetadata;
+}
```
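A quick sanity check of the new helper's behavior (mock chunks with illustrative values; the cast stands in for fully populated `GenerateContentResponse` instances): each numeric count is summed across chunks, and chunks without `usageMetadata` are skipped.

```typescript
import { GenerateContentResponse } from '@google/genai';
import { combinedUsageMetadata } from './loggers.js';

// Mock chunks with illustrative values; the cast stands in for fully
// populated GenerateContentResponse instances.
const chunks = [
  { usageMetadata: { promptTokenCount: 12, candidatesTokenCount: 5, totalTokenCount: 17 } },
  { usageMetadata: { candidatesTokenCount: 9, totalTokenCount: 9 } },
  {}, // a chunk without usageMetadata contributes nothing
] as unknown as GenerateContentResponse[];

const combined = combinedUsageMetadata(chunks);
// combined.promptTokenCount === 12 (only the first chunk carries it)
// combined.candidatesTokenCount === 5 + 9 === 14
// combined.totalTokenCount === 17 + 9 === 26
```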
