Diffstat (limited to 'packages/cli/src/ui/contexts')
-rw-r--r--   packages/cli/src/ui/contexts/SessionContext.test.tsx   223
-rw-r--r--   packages/cli/src/ui/contexts/SessionContext.tsx        183
2 files changed, 107 insertions, 299 deletions
diff --git a/packages/cli/src/ui/contexts/SessionContext.test.tsx b/packages/cli/src/ui/contexts/SessionContext.test.tsx
index fedb5341..5b05c284 100644
--- a/packages/cli/src/ui/contexts/SessionContext.test.tsx
+++ b/packages/cli/src/ui/contexts/SessionContext.test.tsx
@@ -8,28 +8,13 @@ import { type MutableRefObject } from 'react';
 import { render } from 'ink-testing-library';
 import { renderHook } from '@testing-library/react';
 import { act } from 'react-dom/test-utils';
-import { SessionStatsProvider, useSessionStats } from './SessionContext.js';
+import {
+  SessionStatsProvider,
+  useSessionStats,
+  SessionMetrics,
+} from './SessionContext.js';
 import { describe, it, expect, vi } from 'vitest';
-import { GenerateContentResponseUsageMetadata } from '@google/genai';
-
-// Mock data that simulates what the Gemini API would return.
-const mockMetadata1: GenerateContentResponseUsageMetadata = {
-  promptTokenCount: 100,
-  candidatesTokenCount: 200,
-  totalTokenCount: 300,
-  cachedContentTokenCount: 50,
-  toolUsePromptTokenCount: 10,
-  thoughtsTokenCount: 20,
-};
-
-const mockMetadata2: GenerateContentResponseUsageMetadata = {
-  promptTokenCount: 10,
-  candidatesTokenCount: 20,
-  totalTokenCount: 30,
-  cachedContentTokenCount: 5,
-  toolUsePromptTokenCount: 1,
-  thoughtsTokenCount: 2,
-};
+import { uiTelemetryService } from '@google/gemini-cli-core';
 
 /**
  * A test harness component that uses the hook and exposes the context value
@@ -60,13 +45,11 @@ describe('SessionStatsContext', () => {
     const stats = contextRef.current?.stats;
 
     expect(stats?.sessionStartTime).toBeInstanceOf(Date);
-    expect(stats?.currentTurn).toBeDefined();
-    expect(stats?.cumulative.turnCount).toBe(0);
-    expect(stats?.cumulative.totalTokenCount).toBe(0);
-    expect(stats?.cumulative.promptTokenCount).toBe(0);
+    expect(stats?.metrics).toBeDefined();
+    expect(stats?.metrics.models).toEqual({});
   });
 
-  it('should increment turnCount when startNewTurn is called', () => {
+  it('should update metrics when the uiTelemetryService emits an update', () => {
     const contextRef: MutableRefObject<
       ReturnType<typeof useSessionStats> | undefined
     > = { current: undefined };
@@ -77,150 +60,60 @@
       </SessionStatsProvider>,
     );
 
-    act(() => {
-      contextRef.current?.startNewTurn();
-    });
-
-    const stats = contextRef.current?.stats;
-    expect(stats?.currentTurn.totalTokenCount).toBe(0);
-    expect(stats?.cumulative.turnCount).toBe(1);
-    // Ensure token counts are unaffected
-    expect(stats?.cumulative.totalTokenCount).toBe(0);
-  });
-
-  it('should aggregate token usage correctly when addUsage is called', () => {
-    const contextRef: MutableRefObject<
-      ReturnType<typeof useSessionStats> | undefined
-    > = { current: undefined };
-
-    render(
-      <SessionStatsProvider>
-        <TestHarness contextRef={contextRef} />
-      </SessionStatsProvider>,
-    );
+    const newMetrics: SessionMetrics = {
+      models: {
+        'gemini-pro': {
+          api: {
+            totalRequests: 1,
+            totalErrors: 0,
+            totalLatencyMs: 123,
+          },
+          tokens: {
+            prompt: 100,
+            candidates: 200,
+            total: 300,
+            cached: 50,
+            thoughts: 20,
+            tool: 10,
+          },
+        },
+      },
+      tools: {
+        totalCalls: 1,
+        totalSuccess: 1,
+        totalFail: 0,
+        totalDurationMs: 456,
+        totalDecisions: {
+          accept: 1,
+          reject: 0,
+          modify: 0,
+        },
+        byName: {
+          'test-tool': {
+            count: 1,
+            success: 1,
+            fail: 0,
+            durationMs: 456,
+            decisions: {
+              accept: 1,
+              reject: 0,
+              modify: 0,
+            },
+          },
+        },
+      },
+    };
 
     act(() => {
-      contextRef.current?.addUsage({ ...mockMetadata1, apiTimeMs: 123 });
+      uiTelemetryService.emit('update', {
+        metrics: newMetrics,
+        lastPromptTokenCount: 100,
+      });
     });
 
     const stats = contextRef.current?.stats;
-
-    // Check that token counts are updated
-    expect(stats?.cumulative.totalTokenCount).toBe(
-      mockMetadata1.totalTokenCount ?? 0,
-    );
-    expect(stats?.cumulative.promptTokenCount).toBe(
-      mockMetadata1.promptTokenCount ?? 0,
-    );
-    expect(stats?.cumulative.apiTimeMs).toBe(123);
-
-    // Check that turn count is NOT incremented
-    expect(stats?.cumulative.turnCount).toBe(0);
-
-    // Check that currentTurn is updated
-    expect(stats?.currentTurn?.totalTokenCount).toEqual(
-      mockMetadata1.totalTokenCount,
-    );
-    expect(stats?.currentTurn?.apiTimeMs).toBe(123);
-  });
-
-  it('should correctly track a full logical turn with multiple API calls', () => {
-    const contextRef: MutableRefObject<
-      ReturnType<typeof useSessionStats> | undefined
-    > = { current: undefined };
-
-    render(
-      <SessionStatsProvider>
-        <TestHarness contextRef={contextRef} />
-      </SessionStatsProvider>,
-    );
-
-    // 1. User starts a new turn
-    act(() => {
-      contextRef.current?.startNewTurn();
-    });
-
-    // 2. First API call (e.g., prompt with a tool request)
-    act(() => {
-      contextRef.current?.addUsage({ ...mockMetadata1, apiTimeMs: 100 });
-    });
-
-    // 3. Second API call (e.g., sending tool response back)
-    act(() => {
-      contextRef.current?.addUsage({ ...mockMetadata2, apiTimeMs: 50 });
-    });
-
-    const stats = contextRef.current?.stats;
-
-    // Turn count should only be 1
-    expect(stats?.cumulative.turnCount).toBe(1);
-
-    // --- Check Cumulative Stats ---
-    // These fields should be the SUM of both calls
-    expect(stats?.cumulative.totalTokenCount).toBe(300 + 30);
-    expect(stats?.cumulative.candidatesTokenCount).toBe(200 + 20);
-    expect(stats?.cumulative.thoughtsTokenCount).toBe(20 + 2);
-    expect(stats?.cumulative.apiTimeMs).toBe(100 + 50);
-
-    // These fields should be the SUM of both calls
-    expect(stats?.cumulative.promptTokenCount).toBe(100 + 10);
-    expect(stats?.cumulative.cachedContentTokenCount).toBe(50 + 5);
-    expect(stats?.cumulative.toolUsePromptTokenCount).toBe(10 + 1);
-
-    // --- Check Current Turn Stats ---
-    // All fields should be the SUM of both calls for the turn
-    expect(stats?.currentTurn.totalTokenCount).toBe(300 + 30);
-    expect(stats?.currentTurn.candidatesTokenCount).toBe(200 + 20);
-    expect(stats?.currentTurn.thoughtsTokenCount).toBe(20 + 2);
-    expect(stats?.currentTurn.promptTokenCount).toBe(100 + 10);
-    expect(stats?.currentTurn.cachedContentTokenCount).toBe(50 + 5);
-    expect(stats?.currentTurn.toolUsePromptTokenCount).toBe(10 + 1);
-    expect(stats?.currentTurn.apiTimeMs).toBe(100 + 50);
-  });
-
-  it('should overwrite currentResponse with each API call', () => {
-    const contextRef: MutableRefObject<
-      ReturnType<typeof useSessionStats> | undefined
-    > = { current: undefined };
-
-    render(
-      <SessionStatsProvider>
-        <TestHarness contextRef={contextRef} />
-      </SessionStatsProvider>,
-    );
-
-    // 1. First API call
-    act(() => {
-      contextRef.current?.addUsage({ ...mockMetadata1, apiTimeMs: 100 });
-    });
-
-    let stats = contextRef.current?.stats;
-
-    // currentResponse should match the first call
-    expect(stats?.currentResponse.totalTokenCount).toBe(300);
-    expect(stats?.currentResponse.apiTimeMs).toBe(100);
-
-    // 2. Second API call
-    act(() => {
-      contextRef.current?.addUsage({ ...mockMetadata2, apiTimeMs: 50 });
-    });
-
-    stats = contextRef.current?.stats;
-
-    // currentResponse should now match the second call
-    expect(stats?.currentResponse.totalTokenCount).toBe(30);
-    expect(stats?.currentResponse.apiTimeMs).toBe(50);
-
-    // 3. Start a new turn
-    act(() => {
-      contextRef.current?.startNewTurn();
-    });
-
-    stats = contextRef.current?.stats;
-
-    // currentResponse should be reset
-    expect(stats?.currentResponse.totalTokenCount).toBe(0);
-    expect(stats?.currentResponse.apiTimeMs).toBe(0);
+    expect(stats?.metrics).toEqual(newMetrics);
+    expect(stats?.lastPromptTokenCount).toBe(100);
   });
 
   it('should throw an error when useSessionStats is used outside of a provider', () => {
diff --git a/packages/cli/src/ui/contexts/SessionContext.tsx b/packages/cli/src/ui/contexts/SessionContext.tsx
index f59e17e1..b89d19e7 100644
--- a/packages/cli/src/ui/contexts/SessionContext.tsx
+++ b/packages/cli/src/ui/contexts/SessionContext.tsx
@@ -9,39 +9,43 @@ import React, {
   useContext,
   useState,
   useMemo,
-  useCallback,
+  useEffect,
 } from 'react';
 
-import { type GenerateContentResponseUsageMetadata } from '@google/genai';
+import {
+  uiTelemetryService,
+  SessionMetrics,
+  ModelMetrics,
+} from '@google/gemini-cli-core';
 
 // --- Interface Definitions ---
 
-export interface CumulativeStats {
-  turnCount: number;
-  promptTokenCount: number;
-  candidatesTokenCount: number;
-  totalTokenCount: number;
-  cachedContentTokenCount: number;
-  toolUsePromptTokenCount: number;
-  thoughtsTokenCount: number;
-  apiTimeMs: number;
-}
+export type { SessionMetrics, ModelMetrics };
 
 interface SessionStatsState {
   sessionStartTime: Date;
-  cumulative: CumulativeStats;
-  currentTurn: CumulativeStats;
-  currentResponse: CumulativeStats;
+  metrics: SessionMetrics;
+  lastPromptTokenCount: number;
+}
+
+export interface ComputedSessionStats {
+  totalApiTime: number;
+  totalToolTime: number;
+  agentActiveTime: number;
+  apiTimePercent: number;
+  toolTimePercent: number;
+  cacheEfficiency: number;
+  totalDecisions: number;
+  successRate: number;
+  agreementRate: number;
+  totalCachedTokens: number;
+  totalPromptTokens: number;
 }
 
 // Defines the final "value" of our context, including the state
 // and the functions to update it.
 interface SessionStatsContextValue {
   stats: SessionStatsState;
-  startNewTurn: () => void;
-  addUsage: (
-    metadata: GenerateContentResponseUsageMetadata & { apiTimeMs?: number },
-  ) => void;
 }
 
 // --- Context Definition ---
@@ -50,27 +54,6 @@ const SessionStatsContext = createContext<SessionStatsContextValue | undefined>(
   undefined,
 );
 
-// --- Helper Functions ---
-
-/**
- * A small, reusable helper function to sum token counts.
- * It unconditionally adds all token values from the source to the target.
- * @param target The object to add the tokens to (e.g., cumulative, currentTurn).
- * @param source The metadata object from the API response.
- */
-const addTokens = (
-  target: CumulativeStats,
-  source: GenerateContentResponseUsageMetadata & { apiTimeMs?: number },
-) => {
-  target.candidatesTokenCount += source.candidatesTokenCount ?? 0;
-  target.thoughtsTokenCount += source.thoughtsTokenCount ?? 0;
-  target.totalTokenCount += source.totalTokenCount ?? 0;
-  target.apiTimeMs += source.apiTimeMs ?? 0;
-  target.promptTokenCount += source.promptTokenCount ?? 0;
-  target.cachedContentTokenCount += source.cachedContentTokenCount ?? 0;
-  target.toolUsePromptTokenCount += source.toolUsePromptTokenCount ?? 0;
-};
-
 // --- Provider Component ---
 
 export const SessionStatsProvider: React.FC<{ children: React.ReactNode }> = ({
@@ -78,110 +61,42 @@
 }) => {
   const [stats, setStats] = useState<SessionStatsState>({
     sessionStartTime: new Date(),
-    cumulative: {
-      turnCount: 0,
-      promptTokenCount: 0,
-      candidatesTokenCount: 0,
-      totalTokenCount: 0,
-      cachedContentTokenCount: 0,
-      toolUsePromptTokenCount: 0,
-      thoughtsTokenCount: 0,
-      apiTimeMs: 0,
-    },
-    currentTurn: {
-      turnCount: 0,
-      promptTokenCount: 0,
-      candidatesTokenCount: 0,
-      totalTokenCount: 0,
-      cachedContentTokenCount: 0,
-      toolUsePromptTokenCount: 0,
-      thoughtsTokenCount: 0,
-      apiTimeMs: 0,
-    },
-    currentResponse: {
-      turnCount: 0,
-      promptTokenCount: 0,
-      candidatesTokenCount: 0,
-      totalTokenCount: 0,
-      cachedContentTokenCount: 0,
-      toolUsePromptTokenCount: 0,
-      thoughtsTokenCount: 0,
-      apiTimeMs: 0,
-    },
+    metrics: uiTelemetryService.getMetrics(),
+    lastPromptTokenCount: 0,
   });
 
-  // A single, internal worker function to handle all metadata aggregation.
-  const aggregateTokens = useCallback(
-    (
-      metadata: GenerateContentResponseUsageMetadata & { apiTimeMs?: number },
-    ) => {
-      setStats((prevState) => {
-        const newCumulative = { ...prevState.cumulative };
-        const newCurrentTurn = { ...prevState.currentTurn };
-        const newCurrentResponse = {
-          turnCount: 0,
-          promptTokenCount: 0,
-          candidatesTokenCount: 0,
-          totalTokenCount: 0,
-          cachedContentTokenCount: 0,
-          toolUsePromptTokenCount: 0,
-          thoughtsTokenCount: 0,
-          apiTimeMs: 0,
-        };
+  useEffect(() => {
+    const handleUpdate = ({
+      metrics,
+      lastPromptTokenCount,
+    }: {
+      metrics: SessionMetrics;
+      lastPromptTokenCount: number;
+    }) => {
+      setStats((prevState) => ({
+        ...prevState,
+        metrics,
+        lastPromptTokenCount,
+      }));
+    };
 
-        // Add all tokens to the current turn's stats as well as cumulative stats.
-        addTokens(newCurrentTurn, metadata);
-        addTokens(newCumulative, metadata);
-        addTokens(newCurrentResponse, metadata);
-
-        return {
-          ...prevState,
-          cumulative: newCumulative,
-          currentTurn: newCurrentTurn,
-          currentResponse: newCurrentResponse,
-        };
-      });
-    },
-    [],
-  );
+    uiTelemetryService.on('update', handleUpdate);
+    // Set initial state
+    handleUpdate({
+      metrics: uiTelemetryService.getMetrics(),
+      lastPromptTokenCount: uiTelemetryService.getLastPromptTokenCount(),
+    });
 
-  const startNewTurn = useCallback(() => {
-    setStats((prevState) => ({
-      ...prevState,
-      cumulative: {
-        ...prevState.cumulative,
-        turnCount: prevState.cumulative.turnCount + 1,
-      },
-      currentTurn: {
-        turnCount: 0, // Reset for the new turn's accumulation.
-        promptTokenCount: 0,
-        candidatesTokenCount: 0,
-        totalTokenCount: 0,
-        cachedContentTokenCount: 0,
-        toolUsePromptTokenCount: 0,
-        thoughtsTokenCount: 0,
-        apiTimeMs: 0,
-      },
-      currentResponse: {
-        turnCount: 0,
-        promptTokenCount: 0,
-        candidatesTokenCount: 0,
-        totalTokenCount: 0,
-        cachedContentTokenCount: 0,
-        toolUsePromptTokenCount: 0,
-        thoughtsTokenCount: 0,
-        apiTimeMs: 0,
-      },
-    }));
+    return () => {
+      uiTelemetryService.off('update', handleUpdate);
+    };
   }, []);
 
   const value = useMemo(
     () => ({
       stats,
-      startNewTurn,
-      addUsage: aggregateTokens,
     }),
-    [stats, startNewTurn, aggregateTokens],
+    [stats],
   );
 
   return (
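For orientation, a minimal sketch of how a consumer might read the reorganized context after this change. The `StatsFooter` and `App` names are hypothetical and exist only for illustration; the pieces taken from the diff above are `SessionStatsProvider`, `useSessionStats`, the `metrics.models[*].tokens.total` shape, and `lastPromptTokenCount`.

```tsx
import React from 'react';
import { Text } from 'ink';
import { SessionStatsProvider, useSessionStats } from './SessionContext.js';

// Hypothetical footer: sums the per-model token totals exposed via the context.
const StatsFooter = () => {
  const { stats } = useSessionStats();
  const totalTokens = Object.values(stats.metrics.models).reduce(
    (sum, model) => sum + model.tokens.total,
    0,
  );
  return (
    <Text>
      tokens: {totalTokens} | last prompt: {stats.lastPromptTokenCount}
    </Text>
  );
};

// The consumer must sit inside the provider, which subscribes to
// uiTelemetryService 'update' events and re-renders on each emission.
export const App = () => (
  <SessionStatsProvider>
    <StatsFooter />
  </SessionStatsProvider>
);
```

Under this model the UI no longer calls `addUsage` or `startNewTurn`; aggregation happens in `uiTelemetryService`, and components only read the published snapshot.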
