Diffstat (limited to 'packages/core/src/telemetry/uiTelemetry.test.ts')
-rw-r--r--  packages/core/src/telemetry/uiTelemetry.test.ts  112
1 file changed, 112 insertions(+), 0 deletions(-)
diff --git a/packages/core/src/telemetry/uiTelemetry.test.ts b/packages/core/src/telemetry/uiTelemetry.test.ts
index 34a2fe22..38ba7a91 100644
--- a/packages/core/src/telemetry/uiTelemetry.test.ts
+++ b/packages/core/src/telemetry/uiTelemetry.test.ts
@@ -508,4 +508,116 @@ describe('UiTelemetryService', () => {
expect(tools.byName['tool_B'].count).toBe(1);
});
});
+
+ describe('resetLastPromptTokenCount', () => {
+ it('should reset the last prompt token count to 0', () => {
+ // First, set up some initial token count
+ const event = {
+ 'event.name': EVENT_API_RESPONSE,
+ model: 'gemini-2.5-pro',
+ duration_ms: 500,
+ input_token_count: 100,
+ output_token_count: 200,
+ total_token_count: 300,
+ cached_content_token_count: 50,
+ thoughts_token_count: 20,
+ tool_token_count: 30,
+ } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };
+
+ service.addEvent(event);
+ expect(service.getLastPromptTokenCount()).toBe(100);
+
+ // Now reset the token count
+ service.resetLastPromptTokenCount();
+ expect(service.getLastPromptTokenCount()).toBe(0);
+ });
+
+ it('should emit an update event when resetLastPromptTokenCount is called', () => {
+ const spy = vi.fn();
+ service.on('update', spy);
+
+ // Set up initial token count
+ const event = {
+ 'event.name': EVENT_API_RESPONSE,
+ model: 'gemini-2.5-pro',
+ duration_ms: 500,
+ input_token_count: 100,
+ output_token_count: 200,
+ total_token_count: 300,
+ cached_content_token_count: 50,
+ thoughts_token_count: 20,
+ tool_token_count: 30,
+ } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };
+
+ service.addEvent(event);
+ spy.mockClear(); // Clear the spy to focus on the reset call
+
+ service.resetLastPromptTokenCount();
+
+ expect(spy).toHaveBeenCalledOnce();
+ const { metrics, lastPromptTokenCount } = spy.mock.calls[0][0];
+ expect(metrics).toBeDefined();
+ expect(lastPromptTokenCount).toBe(0);
+ });
+
+ it('should not affect other metrics when resetLastPromptTokenCount is called', () => {
+ // Set up initial state with some metrics
+ const event = {
+ 'event.name': EVENT_API_RESPONSE,
+ model: 'gemini-2.5-pro',
+ duration_ms: 500,
+ input_token_count: 100,
+ output_token_count: 200,
+ total_token_count: 300,
+ cached_content_token_count: 50,
+ thoughts_token_count: 20,
+ tool_token_count: 30,
+ } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };
+
+ service.addEvent(event);
+
+ const metricsBefore = service.getMetrics();
+
+ service.resetLastPromptTokenCount();
+
+ const metricsAfter = service.getMetrics();
+
+ // Metrics should be unchanged
+ expect(metricsAfter).toEqual(metricsBefore);
+
+ // Only the last prompt token count should be reset
+ expect(service.getLastPromptTokenCount()).toBe(0);
+ });
+
+ it('should work correctly when called multiple times', () => {
+ const spy = vi.fn();
+ service.on('update', spy);
+
+ // Set up initial token count
+ const event = {
+ 'event.name': EVENT_API_RESPONSE,
+ model: 'gemini-2.5-pro',
+ duration_ms: 500,
+ input_token_count: 100,
+ output_token_count: 200,
+ total_token_count: 300,
+ cached_content_token_count: 50,
+ thoughts_token_count: 20,
+ tool_token_count: 30,
+ } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE };
+
+ service.addEvent(event);
+ expect(service.getLastPromptTokenCount()).toBe(100);
+
+ // Reset once
+ service.resetLastPromptTokenCount();
+ expect(service.getLastPromptTokenCount()).toBe(0);
+
+ // Reset again - should still be 0 and still emit event
+ spy.mockClear();
+ service.resetLastPromptTokenCount();
+ expect(service.getLastPromptTokenCount()).toBe(0);
+ expect(spy).toHaveBeenCalledOnce();
+ });
+ });
});
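
For reference, the behaviour these tests pin down suggests an implementation along the following lines. This is only a minimal sketch inferred from the assertions above (clear the prompt token counter, leave accumulated metrics untouched, emit a single 'update' event carrying both); the real UiTelemetryService in uiTelemetry.ts may be structured differently, and any names below beyond resetLastPromptTokenCount, getLastPromptTokenCount, getMetrics, and the 'update' event are assumptions.

// Hypothetical sketch only; not the actual uiTelemetry.ts implementation.
import { EventEmitter } from 'node:events';

class UiTelemetryServiceSketch extends EventEmitter {
  // Accumulated API/tool metrics; structure elided for the sketch.
  private metrics: Record<string, unknown> = {};
  private lastPromptTokenCount = 0;

  getMetrics(): Record<string, unknown> {
    return this.metrics;
  }

  getLastPromptTokenCount(): number {
    return this.lastPromptTokenCount;
  }

  resetLastPromptTokenCount(): void {
    // Clear only the prompt token counter; accumulated metrics are untouched.
    this.lastPromptTokenCount = 0;
    // Emit exactly one 'update' so listeners receive the unchanged metrics
    // together with the reset counter, matching the spy assertions above.
    this.emit('update', {
      metrics: this.metrics,
      lastPromptTokenCount: this.lastPromptTokenCount,
    });
  }
}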