Diffstat (limited to 'packages/core')
-rw-r--r--  packages/core/package.json                   |   8
-rw-r--r--  packages/core/src/config/config.test.ts      |  52
-rw-r--r--  packages/core/src/config/config.ts           |  27
-rw-r--r--  packages/core/src/core/client.ts             | 141
-rw-r--r--  packages/core/src/core/coreToolScheduler.ts  |  72
-rw-r--r--  packages/core/src/index.ts                   |   3
-rw-r--r--  packages/core/src/telemetry/constants.ts     |  24
-rw-r--r--  packages/core/src/telemetry/index.ts         |  31
-rw-r--r--  packages/core/src/telemetry/loggers.ts       | 191
-rw-r--r--  packages/core/src/telemetry/metrics.ts       | 145
-rw-r--r--  packages/core/src/telemetry/sdk.ts           | 128
-rw-r--r--  packages/core/src/telemetry/types.ts         |  73
12 files changed, 872 insertions, 23 deletions
diff --git a/packages/core/package.json b/packages/core/package.json
index ff8e36fc..28dbdb50 100644
--- a/packages/core/package.json
+++ b/packages/core/package.json
@@ -28,7 +28,13 @@
"fast-glob": "^3.3.3",
"minimatch": "^10.0.0",
"shell-quote": "^1.8.2",
- "strip-ansi": "^7.1.0"
+ "strip-ansi": "^7.1.0",
+ "@opentelemetry/api": "^1.9.0",
+ "@opentelemetry/sdk-node": "^0.52.0",
+ "@opentelemetry/exporter-trace-otlp-grpc": "^0.52.0",
+ "@opentelemetry/exporter-logs-otlp-grpc": "^0.52.0",
+ "@opentelemetry/exporter-metrics-otlp-grpc": "^0.52.0",
+ "@opentelemetry/instrumentation-http": "^0.52.0"
},
"devDependencies": {
"@types/diff": "^7.0.2",
diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts
index 85ec9541..9cacdbbb 100644
--- a/packages/core/src/config/config.test.ts
+++ b/packages/core/src/config/config.test.ts
@@ -48,6 +48,7 @@ describe('Server Config (config.ts)', () => {
const FULL_CONTEXT = false;
const USER_AGENT = 'ServerTestAgent/1.0';
const USER_MEMORY = 'Test User Memory';
+ const TELEMETRY = false;
const baseParams: ConfigParameters = {
apiKey: API_KEY,
model: MODEL,
@@ -58,6 +59,7 @@ describe('Server Config (config.ts)', () => {
fullContext: FULL_CONTEXT,
userAgent: USER_AGENT,
userMemory: USER_MEMORY,
+ telemetry: TELEMETRY,
};
beforeEach(() => {
@@ -161,6 +163,56 @@ describe('Server Config (config.ts)', () => {
expect(config.getFileFilteringAllowBuildArtifacts()).toBe(true);
});
+ it('Config constructor should set telemetry to true when provided as true', () => {
+ const paramsWithTelemetry: ConfigParameters = {
+ ...baseParams,
+ telemetry: true,
+ };
+ const config = new Config(paramsWithTelemetry);
+ expect(config.getTelemetryEnabled()).toBe(true);
+ });
+
+ it('Config constructor should set telemetry to false when provided as false', () => {
+ const paramsWithTelemetry: ConfigParameters = {
+ ...baseParams,
+ telemetry: false,
+ };
+ const config = new Config(paramsWithTelemetry);
+ expect(config.getTelemetryEnabled()).toBe(false);
+ });
+
+ it('Config constructor should default telemetry to default value if not provided', () => {
+ const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
+ delete paramsWithoutTelemetry.telemetry;
+ const config = new Config(paramsWithoutTelemetry);
+ expect(config.getTelemetryEnabled()).toBe(TELEMETRY);
+ });
+
+ it('createServerConfig should pass telemetry to Config constructor when true', () => {
+ const paramsWithTelemetry: ConfigParameters = {
+ ...baseParams,
+ telemetry: true,
+ };
+ const config = createServerConfig(paramsWithTelemetry);
+ expect(config.getTelemetryEnabled()).toBe(true);
+ });
+
+ it('createServerConfig should pass telemetry to Config constructor when false', () => {
+ const paramsWithTelemetry: ConfigParameters = {
+ ...baseParams,
+ telemetry: false,
+ };
+ const config = createServerConfig(paramsWithTelemetry);
+ expect(config.getTelemetryEnabled()).toBe(false);
+ });
+
+ it('createServerConfig should default telemetry (to false via Config constructor) if omitted', () => {
+ const paramsWithoutTelemetry: ConfigParameters = { ...baseParams };
+ delete paramsWithoutTelemetry.telemetry;
+ const config = createServerConfig(paramsWithoutTelemetry);
+ expect(config.getTelemetryEnabled()).toBe(TELEMETRY);
+ });
+
it('should have a getFileService method that returns FileDiscoveryService', async () => {
const config = new Config(baseParams);
const fileService = await config.getFileService();
diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts
index 92a929cc..bd744093 100644
--- a/packages/core/src/config/config.ts
+++ b/packages/core/src/config/config.ts
@@ -24,6 +24,7 @@ import { WebSearchTool } from '../tools/web-search.js';
import { GeminiClient } from '../core/client.js';
import { GEMINI_CONFIG_DIR as GEMINI_DIR } from '../tools/memoryTool.js';
import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
+import { initializeTelemetry } from '../telemetry/index.js';
export enum ApprovalMode {
DEFAULT = 'default',
@@ -72,6 +73,8 @@ export interface ConfigParameters {
contextFileName?: string;
geminiIgnorePatterns?: string[];
accessibility?: AccessibilitySettings;
+ telemetry?: boolean;
+ telemetryLogUserPromptsEnabled?: boolean;
fileFilteringRespectGitIgnore?: boolean;
fileFilteringAllowBuildArtifacts?: boolean;
}
@@ -97,6 +100,9 @@ export class Config {
private readonly vertexai: boolean | undefined;
private readonly showMemoryUsage: boolean;
private readonly accessibility: AccessibilitySettings;
+ private readonly telemetry: boolean;
+ private readonly telemetryLogUserPromptsEnabled: boolean;
+ private readonly telemetryOtlpEndpoint: string;
private readonly geminiClient: GeminiClient;
private readonly geminiIgnorePatterns: string[] = [];
private readonly fileFilteringRespectGitIgnore: boolean;
@@ -123,6 +129,11 @@ export class Config {
this.vertexai = params.vertexai;
this.showMemoryUsage = params.showMemoryUsage ?? false;
this.accessibility = params.accessibility ?? {};
+ this.telemetry = params.telemetry ?? false;
+ this.telemetryLogUserPromptsEnabled =
+ params.telemetryLogUserPromptsEnabled ?? true;
+ this.telemetryOtlpEndpoint =
+ process.env.OTEL_EXPORTER_OTLP_ENDPOINT ?? 'http://localhost:4317';
this.fileFilteringRespectGitIgnore =
params.fileFilteringRespectGitIgnore ?? true;
this.fileFilteringAllowBuildArtifacts =
@@ -137,6 +148,10 @@ export class Config {
this.toolRegistry = createToolRegistry(this);
this.geminiClient = new GeminiClient(this);
+
+ if (this.telemetry) {
+ initializeTelemetry(this);
+ }
}
getApiKey(): string {
@@ -230,6 +245,18 @@ export class Config {
return this.accessibility;
}
+ getTelemetryEnabled(): boolean {
+ return this.telemetry;
+ }
+
+ getTelemetryLogUserPromptsEnabled(): boolean {
+ return this.telemetryLogUserPromptsEnabled;
+ }
+
+ getTelemetryOtlpEndpoint(): string {
+ return this.telemetryOtlpEndpoint;
+ }
+
getGeminiClient(): GeminiClient {
return this.geminiClient;
}
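
Editor's note: a minimal usage sketch (not part of the commit) of the new telemetry options, assuming the ConfigParameters fields visible above; any required fields not shown in this diff are omitted and cast away for brevity.

import { Config, type ConfigParameters } from './config.js';

// Only the telemetry-related fields are new in this commit; the rest mirror the
// baseParams used in config.test.ts above. Fields not visible in the diff are
// assumed and omitted here, hence the type assertion.
const params = {
  apiKey: 'test-key',
  model: 'gemini-2.0-flash',
  targetDir: '/tmp',
  userAgent: 'ExampleAgent/1.0',
  telemetry: true,                       // triggers initializeTelemetry() in the constructor
  telemetryLogUserPromptsEnabled: false, // keep raw prompt text out of telemetry
} as ConfigParameters;

const config = new Config(params);
// Resolves to OTEL_EXPORTER_OTLP_ENDPOINT if set, otherwise http://localhost:4317.
console.log(config.getTelemetryOtlpEndpoint());
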
diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts
index d795f1d2..bce2c5e4 100644
--- a/packages/core/src/core/client.ts
+++ b/packages/core/src/core/client.ts
@@ -27,6 +27,11 @@ import { GeminiChat } from './geminiChat.js';
import { retryWithBackoff } from '../utils/retry.js';
import { getErrorMessage } from '../utils/errors.js';
import { tokenLimit } from './tokenLimits.js';
+import {
+ logApiRequest,
+ logApiResponse,
+ logApiError,
+} from '../telemetry/index.js';
export class GeminiClient {
private chat: Promise<GeminiChat>;
@@ -192,6 +197,74 @@ export class GeminiClient {
}
}
+ private _logApiRequest(model: string, inputTokenCount: number): void {
+ logApiRequest({
+ model,
+ prompt_token_count: inputTokenCount,
+ duration_ms: 0, // Duration is not known at request time
+ });
+ }
+
+ private _logApiResponse(
+ model: string,
+ durationMs: number,
+ attempt: number,
+ response: GenerateContentResponse,
+ ): void {
+ const promptFeedback = response.promptFeedback;
+ const finishReason = response.candidates?.[0]?.finishReason;
+ let responseError;
+ if (promptFeedback?.blockReason) {
+ responseError = `Blocked: ${promptFeedback.blockReason}${promptFeedback.blockReasonMessage ? ' - ' + promptFeedback.blockReasonMessage : ''}`;
+ } else if (
+ finishReason &&
+ !['STOP', 'MAX_TOKENS', 'UNSPECIFIED'].includes(finishReason)
+ ) {
+ responseError = `Finished with reason: ${finishReason}`;
+ }
+
+ logApiResponse({
+ model,
+ duration_ms: durationMs,
+ attempt,
+ status_code: undefined,
+ error: responseError,
+ });
+ }
+
+ private _logApiError(
+ model: string,
+ error: unknown,
+ durationMs: number,
+ attempt: number,
+ isAbort: boolean = false,
+ ): void {
+ let statusCode: number | string | undefined;
+ let errorMessage = getErrorMessage(error);
+
+ if (isAbort) {
+ errorMessage = 'Request aborted by user';
+      statusCode = 'ABORTED'; // Custom status for user-aborted requests
+ } else if (typeof error === 'object' && error !== null) {
+ if ('status' in error) {
+ statusCode = (error as { status: number | string }).status;
+ } else if ('code' in error) {
+ statusCode = (error as { code: number | string }).code;
+ } else if ('httpStatusCode' in error) {
+ statusCode = (error as { httpStatusCode: number | string })
+ .httpStatusCode;
+ }
+ }
+
+ logApiError({
+ model,
+ error: errorMessage,
+ status_code: statusCode,
+ duration_ms: durationMs,
+ attempt,
+ });
+ }
+
async generateJson(
contents: Content[],
schema: SchemaUnion,
@@ -199,6 +272,8 @@ export class GeminiClient {
model: string = 'gemini-2.0-flash',
config: GenerateContentConfig = {},
): Promise<Record<string, unknown>> {
+ const attempt = 1;
+ const startTime = Date.now();
try {
const userMemory = this.config.getUserMemory();
const systemInstruction = getCoreSystemPrompt(userMemory);
@@ -208,6 +283,22 @@ export class GeminiClient {
...config,
};
+ let inputTokenCount = 0;
+ try {
+ const { totalTokens } = await this.client.models.countTokens({
+ model,
+ contents,
+ });
+ inputTokenCount = totalTokens || 0;
+ } catch (_e) {
+ console.warn(
+ `Failed to count tokens for model ${model}. Proceeding with inputTokenCount = 0. Error: ${getErrorMessage(_e)}`,
+ );
+ inputTokenCount = 0;
+ }
+
+ this._logApiRequest(model, inputTokenCount);
+
const apiCall = () =>
this.client.models.generateContent({
model,
@@ -221,6 +312,7 @@ export class GeminiClient {
});
const result = await retryWithBackoff(apiCall);
+ const durationMs = Date.now() - startTime;
const text = getResponseText(result);
if (!text) {
@@ -233,10 +325,13 @@ export class GeminiClient {
contents,
'generateJson-empty-response',
);
+ this._logApiError(model, error, durationMs, attempt);
throw error;
}
try {
- return JSON.parse(text);
+ const parsedJson = JSON.parse(text);
+ this._logApiResponse(model, durationMs, attempt, result);
+ return parsedJson;
} catch (parseError) {
await reportError(
parseError,
@@ -247,13 +342,15 @@ export class GeminiClient {
},
'generateJson-parse',
);
+ this._logApiError(model, parseError, durationMs, attempt);
throw new Error(
- `Failed to parse API response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`,
+ `Failed to parse API response as JSON: ${getErrorMessage(parseError)}`,
);
}
} catch (error) {
+ const durationMs = Date.now() - startTime;
if (abortSignal.aborted) {
- // Regular cancellation error, fail normally
+ this._logApiError(model, error, durationMs, attempt, true);
throw error;
}
@@ -264,15 +361,17 @@ export class GeminiClient {
) {
throw error;
}
+ this._logApiError(model, error, durationMs, attempt);
+
await reportError(
error,
'Error generating JSON content via API.',
contents,
'generateJson-api',
);
- const message =
- error instanceof Error ? error.message : 'Unknown API error.';
- throw new Error(`Failed to generate JSON content: ${message}`);
+ throw new Error(
+ `Failed to generate JSON content: ${getErrorMessage(error)}`,
+ );
}
}
@@ -286,6 +385,8 @@ export class GeminiClient {
...this.generateContentConfig,
...generationConfig,
};
+ const attempt = 1;
+ const startTime = Date.now();
try {
const userMemory = this.config.getUserMemory();
@@ -297,6 +398,22 @@ export class GeminiClient {
systemInstruction,
};
+ let inputTokenCount = 0;
+ try {
+ const { totalTokens } = await this.client.models.countTokens({
+ model: modelToUse,
+ contents,
+ });
+ inputTokenCount = totalTokens || 0;
+ } catch (_e) {
+ console.warn(
+ `Failed to count tokens for model ${modelToUse}. Proceeding with inputTokenCount = 0. Error: ${getErrorMessage(_e)}`,
+ );
+ inputTokenCount = 0;
+ }
+
+ this._logApiRequest(modelToUse, inputTokenCount);
+
const apiCall = () =>
this.client.models.generateContent({
model: modelToUse,
@@ -305,12 +422,18 @@ export class GeminiClient {
});
const result = await retryWithBackoff(apiCall);
+ const durationMs = Date.now() - startTime;
+ this._logApiResponse(modelToUse, durationMs, attempt, result);
return result;
- } catch (error) {
+ } catch (error: unknown) {
+ const durationMs = Date.now() - startTime;
if (abortSignal.aborted) {
+ this._logApiError(modelToUse, error, durationMs, attempt, true);
throw error;
}
+ this._logApiError(modelToUse, error, durationMs, attempt);
+
await reportError(
error,
`Error generating content via API with model ${modelToUse}.`,
@@ -320,10 +443,8 @@ export class GeminiClient {
},
'generateContent-api',
);
- const message =
- error instanceof Error ? error.message : 'Unknown API error.';
throw new Error(
- `Failed to generate content with model ${modelToUse}: ${message}`,
+ `Failed to generate content with model ${modelToUse}: ${getErrorMessage(error)}`,
);
}
}
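
Editor's note: for clarity, the status-code probing done by _logApiError above, restated as a standalone helper (a sketch, not part of the commit). SDK errors may surface an HTTP status under status, code, or httpStatusCode, and anything else is logged without a status code.

function extractStatusCode(error: unknown): number | string | undefined {
  if (typeof error === 'object' && error !== null) {
    // Probe the property names commonly used by the SDK and HTTP layers.
    if ('status' in error) return (error as { status: number | string }).status;
    if ('code' in error) return (error as { code: number | string }).code;
    if ('httpStatusCode' in error) {
      return (error as { httpStatusCode: number | string }).httpStatusCode;
    }
  }
  return undefined;
}

// Usage: extractStatusCode({ status: 429 }) yields 429; extractStatusCode('oops') yields undefined.
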
diff --git a/packages/core/src/core/coreToolScheduler.ts b/packages/core/src/core/coreToolScheduler.ts
index f82676f1..9bc75335 100644
--- a/packages/core/src/core/coreToolScheduler.ts
+++ b/packages/core/src/core/coreToolScheduler.ts
@@ -21,18 +21,21 @@ export type ValidatingToolCall = {
status: 'validating';
request: ToolCallRequestInfo;
tool: Tool;
+ startTime?: number;
};
export type ScheduledToolCall = {
status: 'scheduled';
request: ToolCallRequestInfo;
tool: Tool;
+ startTime?: number;
};
export type ErroredToolCall = {
status: 'error';
request: ToolCallRequestInfo;
response: ToolCallResponseInfo;
+ durationMs?: number;
};
export type SuccessfulToolCall = {
@@ -40,6 +43,7 @@ export type SuccessfulToolCall = {
request: ToolCallRequestInfo;
tool: Tool;
response: ToolCallResponseInfo;
+ durationMs?: number;
};
export type ExecutingToolCall = {
@@ -47,6 +51,7 @@ export type ExecutingToolCall = {
request: ToolCallRequestInfo;
tool: Tool;
liveOutput?: string;
+ startTime?: number;
};
export type CancelledToolCall = {
@@ -54,6 +59,7 @@ export type CancelledToolCall = {
request: ToolCallRequestInfo;
response: ToolCallResponseInfo;
tool: Tool;
+ durationMs?: number;
};
export type WaitingToolCall = {
@@ -61,6 +67,7 @@ export type WaitingToolCall = {
request: ToolCallRequestInfo;
tool: Tool;
confirmationDetails: ToolCallConfirmationDetails;
+ startTime?: number;
};
export type Status = ToolCall['status'];
@@ -246,40 +253,69 @@ export class CoreToolScheduler {
this.toolCalls = this.toolCalls.map((currentCall) => {
if (
currentCall.request.callId !== targetCallId ||
- currentCall.status === 'error'
+ currentCall.status === 'success' ||
+ currentCall.status === 'error' ||
+ currentCall.status === 'cancelled'
) {
return currentCall;
}
- const callWithToolContext = currentCall as ToolCall & { tool: Tool };
+ // currentCall is a non-terminal state here and should have startTime and tool.
+ const existingStartTime = currentCall.startTime;
+ const toolInstance = (
+ currentCall as
+ | ValidatingToolCall
+ | ScheduledToolCall
+ | ExecutingToolCall
+ | WaitingToolCall
+ ).tool;
switch (newStatus) {
- case 'success':
+ case 'success': {
+ const durationMs = existingStartTime
+ ? Date.now() - existingStartTime
+ : undefined;
return {
- ...callWithToolContext,
+ request: currentCall.request,
+ tool: toolInstance,
status: 'success',
response: auxiliaryData as ToolCallResponseInfo,
+ durationMs,
} as SuccessfulToolCall;
- case 'error':
+ }
+ case 'error': {
+ const durationMs = existingStartTime
+ ? Date.now() - existingStartTime
+ : undefined;
return {
request: currentCall.request,
status: 'error',
response: auxiliaryData as ToolCallResponseInfo,
+ durationMs,
} as ErroredToolCall;
+ }
case 'awaiting_approval':
return {
- ...callWithToolContext,
+ request: currentCall.request,
+ tool: toolInstance,
status: 'awaiting_approval',
confirmationDetails: auxiliaryData as ToolCallConfirmationDetails,
+ startTime: existingStartTime,
} as WaitingToolCall;
case 'scheduled':
return {
- ...callWithToolContext,
+ request: currentCall.request,
+ tool: toolInstance,
status: 'scheduled',
+ startTime: existingStartTime,
} as ScheduledToolCall;
- case 'cancelled':
+ case 'cancelled': {
+ const durationMs = existingStartTime
+ ? Date.now() - existingStartTime
+ : undefined;
return {
- ...callWithToolContext,
+ request: currentCall.request,
+ tool: toolInstance,
status: 'cancelled',
response: {
callId: currentCall.request.callId,
@@ -295,16 +331,22 @@ export class CoreToolScheduler {
resultDisplay: undefined,
error: undefined,
},
+ durationMs,
} as CancelledToolCall;
+ }
case 'validating':
return {
- ...(currentCall as ValidatingToolCall),
+ request: currentCall.request,
+ tool: toolInstance,
status: 'validating',
+ startTime: existingStartTime,
} as ValidatingToolCall;
case 'executing':
return {
- ...callWithToolContext,
+ request: currentCall.request,
+ tool: toolInstance,
status: 'executing',
+ startTime: existingStartTime,
} as ExecutingToolCall;
default: {
const exhaustiveCheck: never = newStatus;
@@ -345,9 +387,15 @@ export class CoreToolScheduler {
reqInfo,
new Error(`Tool "${reqInfo.name}" not found in registry.`),
),
+ durationMs: 0,
};
}
- return { status: 'validating', request: reqInfo, tool: toolInstance };
+ return {
+ status: 'validating',
+ request: reqInfo,
+ tool: toolInstance,
+ startTime: Date.now(),
+ };
},
);
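
Editor's note: the timing model added above, condensed into a sketch (not part of the commit). Non-terminal states ('validating', 'scheduled', 'awaiting_approval', 'executing') carry startTime forward; terminal transitions ('success', 'error', 'cancelled') convert it into durationMs.

// startTime is recorded once when the call enters 'validating'; terminal states
// report the elapsed wall-clock time, or undefined if no startTime was captured.
function durationFor(startTime?: number): number | undefined {
  return startTime !== undefined ? Date.now() - startTime : undefined;
}

This durationMs is what ToolCallEvent.duration_ms and recordToolCallMetrics ultimately receive.
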
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index b221b525..3efb1318 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -44,3 +44,6 @@ export * from './tools/memoryTool.js';
export * from './tools/shell.js';
export * from './tools/web-search.js';
export * from './tools/read-many-files.js';
+
+// Export telemetry functions
+export * from './telemetry/index.js';
diff --git a/packages/core/src/telemetry/constants.ts b/packages/core/src/telemetry/constants.ts
new file mode 100644
index 00000000..67d5b38b
--- /dev/null
+++ b/packages/core/src/telemetry/constants.ts
@@ -0,0 +1,24 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { randomUUID } from 'crypto';
+
+export const SERVICE_NAME = 'gemini-code';
+export const sessionId = randomUUID();
+
+export const EVENT_USER_PROMPT = 'gemini_code.user_prompt';
+export const EVENT_TOOL_CALL = 'gemini_code.tool_call';
+export const EVENT_API_REQUEST = 'gemini_code.api_request';
+export const EVENT_API_ERROR = 'gemini_code.api_error';
+export const EVENT_API_RESPONSE = 'gemini_code.api_response';
+export const EVENT_CLI_CONFIG = 'gemini_code.config';
+
+export const METRIC_TOOL_CALL_COUNT = 'gemini_code.tool.call.count';
+export const METRIC_TOOL_CALL_LATENCY = 'gemini_code.tool.call.latency';
+export const METRIC_API_REQUEST_COUNT = 'gemini_code.api.request.count';
+export const METRIC_API_REQUEST_LATENCY = 'gemini_code.api.request.latency';
+export const METRIC_TOKEN_INPUT_COUNT = 'gemini_code.token.input.count';
+export const METRIC_SESSION_COUNT = 'gemini_code.session.count';
diff --git a/packages/core/src/telemetry/index.ts b/packages/core/src/telemetry/index.ts
new file mode 100644
index 00000000..7b2ab0e7
--- /dev/null
+++ b/packages/core/src/telemetry/index.ts
@@ -0,0 +1,31 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+export {
+ initializeTelemetry,
+ shutdownTelemetry,
+ isTelemetrySdkInitialized,
+} from './sdk.js';
+export {
+ logCliConfiguration,
+ logUserPrompt,
+ logToolCall,
+ logApiRequest,
+ logApiError,
+ logApiResponse,
+} from './loggers.js';
+export {
+ UserPromptEvent,
+ ToolCallEvent,
+ ApiRequestEvent,
+ ApiErrorEvent,
+ ApiResponseEvent,
+ CliConfigEvent,
+ TelemetryEvent,
+} from './types.js';
+export { SpanStatusCode, ValueType } from '@opentelemetry/api';
+export { SemanticAttributes } from '@opentelemetry/semantic-conventions';
+export { sessionId } from './constants.js';
diff --git a/packages/core/src/telemetry/loggers.ts b/packages/core/src/telemetry/loggers.ts
new file mode 100644
index 00000000..ccab12ff
--- /dev/null
+++ b/packages/core/src/telemetry/loggers.ts
@@ -0,0 +1,191 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { logs, LogRecord, LogAttributes } from '@opentelemetry/api-logs';
+import { SemanticAttributes } from '@opentelemetry/semantic-conventions';
+import { Config } from '../config/config.js';
+import {
+ EVENT_API_ERROR,
+ EVENT_API_REQUEST,
+ EVENT_API_RESPONSE,
+ EVENT_CLI_CONFIG,
+ EVENT_TOOL_CALL,
+ EVENT_USER_PROMPT,
+ SERVICE_NAME,
+} from './constants.js';
+import {
+ ApiErrorEvent,
+ ApiRequestEvent,
+ ApiResponseEvent,
+ ToolCallEvent,
+ UserPromptEvent,
+} from './types.js';
+import {
+ recordApiErrorMetrics,
+ recordApiRequestMetrics,
+ recordApiResponseMetrics,
+ recordToolCallMetrics,
+} from './metrics.js';
+import { isTelemetrySdkInitialized } from './sdk.js';
+
+const shouldLogUserPrompts = (config: Config): boolean =>
+ config.getTelemetryLogUserPromptsEnabled() ?? false;
+
+export function logCliConfiguration(config: Config): void {
+ if (!isTelemetrySdkInitialized()) return;
+
+ const attributes: LogAttributes = {
+ 'event.name': EVENT_CLI_CONFIG,
+ 'event.timestamp': new Date().toISOString(),
+ model: config.getModel(),
+ sandbox_enabled:
+ typeof config.getSandbox() === 'string' ? true : config.getSandbox(),
+ core_tools_enabled: (config.getCoreTools() ?? []).join(','),
+ approval_mode: config.getApprovalMode(),
+ vertex_ai_enabled: config.getVertexAI() ?? false,
+ log_user_prompts_enabled: config.getTelemetryLogUserPromptsEnabled(),
+ file_filtering_respect_git_ignore:
+ config.getFileFilteringRespectGitIgnore(),
+ file_filtering_allow_build_artifacts:
+ config.getFileFilteringAllowBuildArtifacts(),
+ };
+ const logger = logs.getLogger(SERVICE_NAME);
+ const logRecord: LogRecord = {
+ body: 'CLI configuration loaded.',
+ attributes,
+ };
+ logger.emit(logRecord);
+}
+
+export function logUserPrompt(
+ config: Config,
+ event: Omit<UserPromptEvent, 'event.name' | 'event.timestamp' | 'prompt'> & {
+ prompt: string;
+ },
+): void {
+ if (!isTelemetrySdkInitialized()) return;
+ const { prompt, ...restOfEventArgs } = event;
+ const attributes: LogAttributes = {
+ ...restOfEventArgs,
+ 'event.name': EVENT_USER_PROMPT,
+ 'event.timestamp': new Date().toISOString(),
+ };
+ if (shouldLogUserPrompts(config)) {
+ attributes.prompt = prompt;
+ }
+ const logger = logs.getLogger(SERVICE_NAME);
+ const logRecord: LogRecord = {
+ body: `User prompt. Length: ${event.prompt_char_count}`,
+ attributes,
+ };
+ logger.emit(logRecord);
+}
+
+export function logToolCall(
+ event: Omit<ToolCallEvent, 'event.name' | 'event.timestamp'>,
+): void {
+ if (!isTelemetrySdkInitialized()) return;
+ const attributes: LogAttributes = {
+ ...event,
+ 'event.name': EVENT_TOOL_CALL,
+ 'event.timestamp': new Date().toISOString(),
+ function_args: JSON.stringify(event.function_args),
+ };
+ if (event.error) {
+ attributes['error.message'] = event.error;
+ if (event.error_type) {
+ attributes['error.type'] = event.error_type;
+ }
+ }
+ const logger = logs.getLogger(SERVICE_NAME);
+ const logRecord: LogRecord = {
+ body: `Tool call: ${event.function_name}. Success: ${event.success}. Duration: ${event.duration_ms}ms.`,
+ attributes,
+ };
+ logger.emit(logRecord);
+ recordToolCallMetrics(event.function_name, event.duration_ms, event.success);
+}
+
+export function logApiRequest(
+ event: Omit<ApiRequestEvent, 'event.name' | 'event.timestamp'>,
+): void {
+ if (!isTelemetrySdkInitialized()) return;
+ const attributes: LogAttributes = {
+ ...event,
+ 'event.name': EVENT_API_REQUEST,
+ 'event.timestamp': new Date().toISOString(),
+ };
+ const logger = logs.getLogger(SERVICE_NAME);
+ const logRecord: LogRecord = {
+ body: `API request to ${event.model}. Tokens: ${event.prompt_token_count}.`,
+ attributes,
+ };
+ logger.emit(logRecord);
+ recordApiRequestMetrics(event.model, event.prompt_token_count);
+}
+
+export function logApiError(
+ event: Omit<ApiErrorEvent, 'event.name' | 'event.timestamp'>,
+): void {
+ if (!isTelemetrySdkInitialized()) return;
+ const attributes: LogAttributes = {
+ ...event,
+ 'event.name': EVENT_API_ERROR,
+ 'event.timestamp': new Date().toISOString(),
+ ['error.message']: event.error,
+ };
+
+ if (event.error_type) {
+ attributes['error.type'] = event.error_type;
+ }
+ if (typeof event.status_code === 'number') {
+ attributes[SemanticAttributes.HTTP_STATUS_CODE] = event.status_code;
+ }
+
+ const logger = logs.getLogger(SERVICE_NAME);
+ const logRecord: LogRecord = {
+ body: `API error for ${event.model}. Error: ${event.error}. Duration: ${event.duration_ms}ms.`,
+ attributes,
+ };
+ logger.emit(logRecord);
+ recordApiErrorMetrics(
+ event.model,
+ event.duration_ms,
+ event.status_code,
+ event.error_type,
+ );
+}
+
+export function logApiResponse(
+ event: Omit<ApiResponseEvent, 'event.name' | 'event.timestamp'>,
+): void {
+ if (!isTelemetrySdkInitialized()) return;
+ const attributes: LogAttributes = {
+ ...event,
+ 'event.name': EVENT_API_RESPONSE,
+ 'event.timestamp': new Date().toISOString(),
+ };
+ if (event.error) {
+ attributes['error.message'] = event.error;
+ } else if (event.status_code) {
+ if (typeof event.status_code === 'number') {
+ attributes[SemanticAttributes.HTTP_STATUS_CODE] = event.status_code;
+ }
+ }
+
+ const logger = logs.getLogger(SERVICE_NAME);
+ const logRecord: LogRecord = {
+ body: `API response from ${event.model}. Status: ${event.status_code || 'N/A'}. Duration: ${event.duration_ms}ms.`,
+ attributes,
+ };
+ logger.emit(logRecord);
+ recordApiResponseMetrics(
+ event.model,
+ event.duration_ms,
+ event.status_code,
+ event.error,
+ );
+}
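
Editor's note: a usage sketch (not part of the commit) for the tool-call logger above, assuming initializeTelemetry has already run. logToolCall fills in the 'event.name' and 'event.timestamp' attributes itself and forwards the duration and success flag to recordToolCallMetrics.

import { logToolCall } from './loggers.js';

logToolCall({
  function_name: 'read_file',
  function_args: { path: '/tmp/example.txt' }, // JSON-stringified by the logger
  duration_ms: 42,
  success: true,
});
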
diff --git a/packages/core/src/telemetry/metrics.ts b/packages/core/src/telemetry/metrics.ts
new file mode 100644
index 00000000..2e6bd909
--- /dev/null
+++ b/packages/core/src/telemetry/metrics.ts
@@ -0,0 +1,145 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import {
+ metrics,
+ Attributes,
+ ValueType,
+ Meter,
+ Counter,
+ Histogram,
+} from '@opentelemetry/api';
+import {
+ SERVICE_NAME,
+ METRIC_TOOL_CALL_COUNT,
+ METRIC_TOOL_CALL_LATENCY,
+ METRIC_API_REQUEST_COUNT,
+ METRIC_API_REQUEST_LATENCY,
+ METRIC_TOKEN_INPUT_COUNT,
+ METRIC_SESSION_COUNT,
+} from './constants.js';
+
+let cliMeter: Meter | undefined;
+let toolCallCounter: Counter | undefined;
+let toolCallLatencyHistogram: Histogram | undefined;
+let apiRequestCounter: Counter | undefined;
+let apiRequestLatencyHistogram: Histogram | undefined;
+let tokenInputCounter: Counter | undefined;
+let isMetricsInitialized = false;
+
+export function getMeter(): Meter | undefined {
+ if (!cliMeter) {
+ cliMeter = metrics.getMeter(SERVICE_NAME);
+ }
+ return cliMeter;
+}
+
+export function initializeMetrics(): void {
+ if (isMetricsInitialized) return;
+
+ const meter = getMeter();
+ if (!meter) return;
+
+ toolCallCounter = meter.createCounter(METRIC_TOOL_CALL_COUNT, {
+ description: 'Counts tool calls, tagged by function name and success.',
+ valueType: ValueType.INT,
+ });
+ toolCallLatencyHistogram = meter.createHistogram(METRIC_TOOL_CALL_LATENCY, {
+ description: 'Latency of tool calls in milliseconds.',
+ unit: 'ms',
+ valueType: ValueType.INT,
+ });
+ apiRequestCounter = meter.createCounter(METRIC_API_REQUEST_COUNT, {
+ description: 'Counts API requests, tagged by model and status.',
+ valueType: ValueType.INT,
+ });
+ apiRequestLatencyHistogram = meter.createHistogram(
+ METRIC_API_REQUEST_LATENCY,
+ {
+ description: 'Latency of API requests in milliseconds.',
+ unit: 'ms',
+ valueType: ValueType.INT,
+ },
+ );
+ tokenInputCounter = meter.createCounter(METRIC_TOKEN_INPUT_COUNT, {
+ description: 'Counts the total number of input tokens sent to the API.',
+ valueType: ValueType.INT,
+ });
+
+ const sessionCounter = meter.createCounter(METRIC_SESSION_COUNT, {
+ description: 'Count of CLI sessions started.',
+ valueType: ValueType.INT,
+ });
+ sessionCounter.add(1);
+ isMetricsInitialized = true;
+}
+
+export function recordToolCallMetrics(
+ functionName: string,
+ durationMs: number,
+ success: boolean,
+): void {
+ if (!toolCallCounter || !toolCallLatencyHistogram || !isMetricsInitialized)
+ return;
+
+ const metricAttributes: Attributes = {
+ function_name: functionName,
+ success,
+ };
+ toolCallCounter.add(1, metricAttributes);
+ toolCallLatencyHistogram.record(durationMs, {
+ function_name: functionName,
+ });
+}
+
+export function recordApiRequestMetrics(
+ model: string,
+ inputTokenCount: number,
+): void {
+ if (!tokenInputCounter || !isMetricsInitialized) return;
+ tokenInputCounter.add(inputTokenCount, { model });
+}
+
+export function recordApiResponseMetrics(
+ model: string,
+ durationMs: number,
+ statusCode?: number | string,
+ error?: string,
+): void {
+ if (
+ !apiRequestCounter ||
+ !apiRequestLatencyHistogram ||
+ !isMetricsInitialized
+ )
+ return;
+ const metricAttributes: Attributes = {
+ model,
+ status_code: statusCode ?? (error ? 'error' : 'ok'),
+ };
+ apiRequestCounter.add(1, metricAttributes);
+ apiRequestLatencyHistogram.record(durationMs, { model });
+}
+
+export function recordApiErrorMetrics(
+ model: string,
+ durationMs: number,
+ statusCode?: number | string,
+ errorType?: string,
+): void {
+ if (
+ !apiRequestCounter ||
+ !apiRequestLatencyHistogram ||
+ !isMetricsInitialized
+ )
+ return;
+ const metricAttributes: Attributes = {
+ model,
+ status_code: statusCode ?? 'error',
+ error_type: errorType ?? 'unknown',
+ };
+ apiRequestCounter.add(1, metricAttributes);
+ apiRequestLatencyHistogram.record(durationMs, { model });
+}
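
Editor's note: a usage sketch (not part of the commit). The recorders above are silent no-ops until initializeMetrics() has run, which the SDK setup in sdk.ts below does immediately after sdk.start().

import { initializeMetrics, recordToolCallMetrics } from './metrics.js';

initializeMetrics();                          // also bumps gemini_code.session.count once
recordToolCallMetrics('read_file', 42, true); // increments tool call count and records latency
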
diff --git a/packages/core/src/telemetry/sdk.ts b/packages/core/src/telemetry/sdk.ts
new file mode 100644
index 00000000..b55cb149
--- /dev/null
+++ b/packages/core/src/telemetry/sdk.ts
@@ -0,0 +1,128 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+import { DiagConsoleLogger, DiagLogLevel, diag } from '@opentelemetry/api';
+import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-grpc';
+import { OTLPLogExporter } from '@opentelemetry/exporter-logs-otlp-grpc';
+import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-grpc';
+import { NodeSDK } from '@opentelemetry/sdk-node';
+import { SemanticResourceAttributes } from '@opentelemetry/semantic-conventions';
+import { Resource } from '@opentelemetry/resources';
+import {
+ BatchSpanProcessor,
+ ConsoleSpanExporter,
+} from '@opentelemetry/sdk-trace-node';
+import {
+ BatchLogRecordProcessor,
+ ConsoleLogRecordExporter,
+} from '@opentelemetry/sdk-logs';
+import {
+ ConsoleMetricExporter,
+ PeriodicExportingMetricReader,
+} from '@opentelemetry/sdk-metrics';
+import { HttpInstrumentation } from '@opentelemetry/instrumentation-http';
+import { Config } from '../config/config.js';
+import { SERVICE_NAME, sessionId } from './constants.js';
+import { initializeMetrics } from './metrics.js';
+import { logCliConfiguration } from './loggers.js';
+
+// For troubleshooting, set the log level to DiagLogLevel.DEBUG
+diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.INFO);
+
+let sdk: NodeSDK | undefined;
+let telemetryInitialized = false;
+
+export function isTelemetrySdkInitialized(): boolean {
+ return telemetryInitialized;
+}
+
+function parseGrpcEndpoint(
+ otlpEndpointSetting: string | undefined,
+): string | undefined {
+ if (!otlpEndpointSetting) {
+ return undefined;
+ }
+ // Trim leading/trailing quotes that might come from env variables
+ const trimmedEndpoint = otlpEndpointSetting.replace(/^["']|["']$/g, '');
+
+ try {
+ const url = new URL(trimmedEndpoint);
+ // OTLP gRPC exporters expect an endpoint in the format scheme://host:port
+ // The `origin` property provides this, stripping any path, query, or hash.
+ return url.origin;
+ } catch (error) {
+ diag.error('Invalid OTLP endpoint URL provided:', trimmedEndpoint, error);
+ return undefined;
+ }
+}
+
+export function initializeTelemetry(config: Config): void {
+ if (telemetryInitialized || !config.getTelemetryEnabled()) {
+ return;
+ }
+
+ const geminiCliVersion = config.getUserAgent() || 'unknown';
+ const resource = new Resource({
+ [SemanticResourceAttributes.SERVICE_NAME]: SERVICE_NAME,
+ [SemanticResourceAttributes.SERVICE_VERSION]: geminiCliVersion,
+ 'session.id': sessionId,
+ });
+
+ const otlpEndpoint = config.getTelemetryOtlpEndpoint();
+ const grpcParsedEndpoint = parseGrpcEndpoint(otlpEndpoint);
+ const useOtlp = !!grpcParsedEndpoint;
+
+ const spanExporter = useOtlp
+ ? new OTLPTraceExporter({ url: grpcParsedEndpoint })
+ : new ConsoleSpanExporter();
+ const logExporter = useOtlp
+ ? new OTLPLogExporter({ url: grpcParsedEndpoint })
+ : new ConsoleLogRecordExporter();
+ const metricReader = useOtlp
+ ? new PeriodicExportingMetricReader({
+ exporter: new OTLPMetricExporter({ url: grpcParsedEndpoint }),
+ exportIntervalMillis: 10000,
+ })
+ : new PeriodicExportingMetricReader({
+ exporter: new ConsoleMetricExporter(),
+ exportIntervalMillis: 10000,
+ });
+
+ sdk = new NodeSDK({
+ resource,
+ spanProcessors: [new BatchSpanProcessor(spanExporter)],
+ logRecordProcessor: new BatchLogRecordProcessor(logExporter),
+ metricReader,
+ instrumentations: [new HttpInstrumentation()],
+ });
+
+ try {
+ sdk.start();
+ console.log('OpenTelemetry SDK started successfully.');
+ telemetryInitialized = true;
+ initializeMetrics();
+ logCliConfiguration(config);
+ } catch (error) {
+ console.error('Error starting OpenTelemetry SDK:', error);
+ }
+
+ process.on('SIGTERM', shutdownTelemetry);
+ process.on('SIGINT', shutdownTelemetry);
+}
+
+export async function shutdownTelemetry(): Promise<void> {
+ if (!telemetryInitialized || !sdk) {
+ return;
+ }
+ try {
+ await sdk.shutdown();
+ console.log('OpenTelemetry SDK shut down successfully.');
+ } catch (error) {
+ console.error('Error shutting down SDK:', error);
+ } finally {
+ telemetryInitialized = false;
+ }
+}
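
Editor's note: lifecycle sketch (not part of the commit). initializeTelemetry is invoked from the Config constructor when telemetry is enabled, and SIGINT/SIGTERM handlers are wired up above, so callers typically only need an explicit flush on clean programmatic exits.

import { isTelemetrySdkInitialized, shutdownTelemetry } from './sdk.js';

async function onProgramExit(): Promise<void> {
  if (isTelemetrySdkInitialized()) {
    // Flushes any buffered spans, log records, and metrics before the process ends.
    await shutdownTelemetry();
  }
}
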
diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts
new file mode 100644
index 00000000..ea65d6de
--- /dev/null
+++ b/packages/core/src/telemetry/types.ts
@@ -0,0 +1,73 @@
+/**
+ * @license
+ * Copyright 2025 Google LLC
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+export interface UserPromptEvent {
+ 'event.name': 'user_prompt';
+ 'event.timestamp': string; // ISO 8601
+ prompt_char_count: number;
+ prompt?: string;
+}
+
+export interface ToolCallEvent {
+ 'event.name': 'tool_call';
+ 'event.timestamp': string; // ISO 8601
+ function_name: string;
+ function_args: Record<string, unknown>;
+ duration_ms: number;
+ success: boolean;
+ error?: string;
+ error_type?: string;
+}
+
+export interface ApiRequestEvent {
+ 'event.name': 'api_request';
+ 'event.timestamp': string; // ISO 8601
+ model: string;
+ duration_ms: number;
+ prompt_token_count: number;
+}
+
+export interface ApiErrorEvent {
+ 'event.name': 'api_error';
+ 'event.timestamp': string; // ISO 8601
+ model: string;
+ error: string;
+ error_type?: string;
+ status_code?: number | string;
+ duration_ms: number;
+ attempt: number;
+}
+
+export interface ApiResponseEvent {
+ 'event.name': 'api_response';
+ 'event.timestamp': string; // ISO 8601
+ model: string;
+ status_code?: number | string;
+ duration_ms: number;
+ error?: string;
+ attempt: number;
+}
+
+export interface CliConfigEvent {
+ 'event.name': 'cli_config';
+ 'event.timestamp': string; // ISO 8601
+ model: string;
+ sandbox_enabled: boolean;
+ core_tools_enabled: string;
+ approval_mode: string;
+ vertex_ai_enabled: boolean;
+ log_user_prompts_enabled: boolean;
+ file_filtering_respect_git_ignore: boolean;
+ file_filtering_allow_build_artifacts: boolean;
+}
+
+export type TelemetryEvent =
+ | UserPromptEvent
+ | ToolCallEvent
+ | ApiRequestEvent
+ | ApiErrorEvent
+ | ApiResponseEvent
+ | CliConfigEvent;
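
Editor's note: for reference, a fully populated ApiResponseEvent as it would look once the logger has added the event name and timestamp (a sketch, not part of the commit).

import type { ApiResponseEvent } from './types.js';

const example: ApiResponseEvent = {
  'event.name': 'api_response',
  'event.timestamp': new Date().toISOString(),
  model: 'gemini-2.0-flash',
  status_code: 200,
  duration_ms: 1234,
  attempt: 1,
};
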