From 81ba61df7f967f141cd8abc57d58a3612dfd4c2b Mon Sep 17 00:00:00 2001
From: Jaana Dogan
Date: Thu, 17 Apr 2025 12:03:02 -0700
Subject: Fix readability issues

This is the first of many changes.

* Remove redundant autogenerated comments
* Use the recommended kebab-case file name style
* Use camelCase for variable names
* Don't introduce separate modules for closely related types
* Don't introduce modules like constants.ts; these values are implementation details
* Remove empty files
---
 packages/cli/src/core/GeminiClient.ts          | 383 -------------------------
 packages/cli/src/core/GeminiStream.ts          |  22 --
 packages/cli/src/core/StreamingState.ts        |   4 -
 packages/cli/src/core/agent.ts                 |   0
 packages/cli/src/core/constants.ts             |   1 -
 packages/cli/src/core/gemini-client.ts         | 383 +++++++++++++++++++++++++
 packages/cli/src/core/gemini-stream.ts         | 167 +++++++++++
 packages/cli/src/core/geminiStreamProcessor.ts | 142 ---------
 packages/cli/src/core/history-updater.ts       | 168 +++++++++++
 packages/cli/src/core/historyUpdater.ts        | 173 -----------
 packages/cli/src/core/prompts.ts               |   3 +-
 11 files changed, 720 insertions(+), 726 deletions(-)
 delete mode 100644 packages/cli/src/core/GeminiClient.ts
 delete mode 100644 packages/cli/src/core/GeminiStream.ts
 delete mode 100644 packages/cli/src/core/StreamingState.ts
 delete mode 100644 packages/cli/src/core/agent.ts
 delete mode 100644 packages/cli/src/core/constants.ts
 create mode 100644 packages/cli/src/core/gemini-client.ts
 create mode 100644 packages/cli/src/core/gemini-stream.ts
 delete mode 100644 packages/cli/src/core/geminiStreamProcessor.ts
 create mode 100644 packages/cli/src/core/history-updater.ts
 delete mode 100644 packages/cli/src/core/historyUpdater.ts

diff --git a/packages/cli/src/core/GeminiClient.ts b/packages/cli/src/core/GeminiClient.ts
deleted file mode 100644
index 0cdeed86..00000000
--- a/packages/cli/src/core/GeminiClient.ts
+++ /dev/null
@@ -1,383 +0,0 @@
-import {
-  GenerateContentConfig, GoogleGenAI, Part, Chat,
-  Type,
-  SchemaUnion,
-  PartListUnion,
-  Content
-} from '@google/genai';
-import { getApiKey } from '../config/env.js';
-import { CoreSystemPrompt } from './prompts.js';
-import { type ToolCallEvent, type ToolCallConfirmationDetails, ToolCallStatus } from '../ui/types.js';
-import process from 'node:process';
-import { toolRegistry } from '../tools/tool-registry.js';
-import { ToolResult } from '../tools/ToolResult.js';
-import { getFolderStructure } from '../utils/getFolderStructure.js';
-import { GeminiEventType, GeminiStream } from './GeminiStream.js';
-
-type ToolExecutionOutcome = {
-  callId: string;
-  name: string;
-  args: Record<string, any>;
-  result?: ToolResult;
-  error?: any;
-  confirmationDetails?: ToolCallConfirmationDetails;
-};
-
-export class GeminiClient {
-  private ai: GoogleGenAI;
-  private defaultHyperParameters: GenerateContentConfig = {
-    temperature: 0,
-    topP: 1,
-  };
-  private readonly MAX_TURNS = 100;
-
-  constructor() {
-    const apiKey = getApiKey();
-    this.ai = new GoogleGenAI({ apiKey });
-  }
-
-  public async startChat(): Promise<Chat> {
-    const tools = toolRegistry.getToolSchemas();
-
-    // --- Get environmental information ---
-    const cwd = process.cwd();
-    const today = new Date().toLocaleDateString(undefined, { // Use locale-aware date formatting
-      weekday: 'long', year: 'numeric', month: 'long', day: 'numeric'
-    });
-    const platform = process.platform;
-
-    // --- Format information into a conversational multi-line string ---
-    const folderStructure = await getFolderStructure(cwd);
-    // --- End folder structure formatting ---)
-    const initialContextText = `
-Okay, just setting up the context for our chat.
-Today is ${today}.
-My operating system is: ${platform}
-I'm currently working in the directory: ${cwd}
-${folderStructure}
-        `.trim();
-
-    const initialContextPart: Part = { text: initialContextText };
-    // --- End environmental information formatting ---
-
-    try {
-      const chat = this.ai.chats.create({
-        model: 'gemini-2.5-pro-preview-03-25',//'gemini-2.0-flash',
-        config: {
-          systemInstruction: CoreSystemPrompt,
-          ...this.defaultHyperParameters,
-          tools,
-        },
-        history: [
-          // --- Add the context as a single part in the initial user message ---
-          {
-            role: "user",
-            parts: [initialContextPart] // Pass the single Part object in an array
-          },
-          // --- Add an empty model response to balance the history ---
-          {
-            role: "model",
-            parts: [{ text: "Got it. Thanks for the context!" }] // A slightly more conversational model response
-          }
-          // --- End history modification ---
-        ],
-      });
-      return chat;
-    } catch (error) {
-      console.error("Error initializing Gemini chat session:", error);
-      const message = error instanceof Error ? error.message : "Unknown error.";
-      throw new Error(`Failed to initialize chat: ${message}`);
-    }
-  }
-
-  public addMessageToHistory(chat: Chat, message: Content): void {
-    const history = chat.getHistory();
-    history.push(message);
-    this.ai.chats
-    chat
-  }
-
-  public async* sendMessageStream(
-    chat: Chat,
-    request: PartListUnion,
-    signal?: AbortSignal
-  ): GeminiStream {
-    let currentMessageToSend: PartListUnion = request;
-    let turns = 0;
-
-    try {
-      while (turns < this.MAX_TURNS) {
-        turns++;
-        const resultStream = await chat.sendMessageStream({ message: currentMessageToSend });
-        let functionResponseParts: Part[] = [];
-        let pendingToolCalls: Array<{ callId: string; name: string; args: Record<string, any> }> = [];
-        let yieldedTextInTurn = false;
-        const chunksForDebug = [];
-
-        for await (const chunk of resultStream) {
-          chunksForDebug.push(chunk);
-          if (signal?.aborted) {
-            const abortError = new Error("Request cancelled by user during stream.");
-            abortError.name = 'AbortError';
-            throw abortError;
-          }
-
-          const functionCalls = chunk.functionCalls;
-          if (functionCalls && functionCalls.length > 0) {
-            for (const call of functionCalls) {
-              const callId = call.id ?? `${call.name}-${Date.now()}-${Math.random().toString(16).slice(2)}`;
-              const name = call.name || 'undefined_tool_name';
-              const args = (call.args || {}) as Record<string, any>;
-
-              pendingToolCalls.push({ callId, name, args });
-              const evtValue: ToolCallEvent = {
-                type: 'tool_call',
-                status: ToolCallStatus.Pending,
-                callId,
-                name,
-                args,
-                resultDisplay: undefined,
-                confirmationDetails: undefined,
-              }
-              yield {
-                type: GeminiEventType.ToolCallInfo,
-                value: evtValue,
-              };
-            }
-          } else {
-            const text = chunk.text;
-            if (text) {
-              yieldedTextInTurn = true;
-              yield {
-                type: GeminiEventType.Content,
-                value: text,
-              };
-            }
-          }
-        }
-
-        if (pendingToolCalls.length > 0) {
-          const toolPromises: Promise<ToolExecutionOutcome>[] = pendingToolCalls.map(async pendingToolCall => {
-            const tool = toolRegistry.getTool(pendingToolCall.name);
-
-            if (!tool) {
-              // Directly return error outcome if tool not found
-              return { ...pendingToolCall, error: new Error(`Tool "${pendingToolCall.name}" not found or is not registered.`) };
-            }
-
-            try {
-              const confirmation = await tool.shouldConfirmExecute(pendingToolCall.args);
-              if (confirmation) {
-                return { ...pendingToolCall, confirmationDetails: confirmation };
-              }
-            } catch (error) {
-              return { ...pendingToolCall, error: new Error(`Tool failed to check tool confirmation: ${error}`) };
-            }
-
-            try {
-              const result = await tool.execute(pendingToolCall.args);
-              return { ...pendingToolCall, result };
-            } catch (error) {
-              return { ...pendingToolCall, error: new Error(`Tool failed to execute: ${error}`) };
-            }
-          });
-          const toolExecutionOutcomes: ToolExecutionOutcome[] = await Promise.all(toolPromises);
-
-          for (const executedTool of toolExecutionOutcomes) {
-            const { callId, name, args, result, error, confirmationDetails } = executedTool;
-
-            if (error) {
-              const errorMessage = error?.message || String(error);
-              yield {
-                type: GeminiEventType.Content,
-                value: `[Error invoking tool ${name}: ${errorMessage}]`,
-              };
-            } else if (result && typeof result === 'object' && result !== null && 'error' in result) {
-              const errorMessage = String(result.error);
-              yield {
-                type: GeminiEventType.Content,
-                value: `[Error executing tool ${name}: ${errorMessage}]`,
-              };
-            } else {
-              const status = confirmationDetails ? ToolCallStatus.Confirming : ToolCallStatus.Invoked;
-              const evtValue: ToolCallEvent = { type: 'tool_call', status, callId, name, args, resultDisplay: result?.returnDisplay, confirmationDetails }
-              yield {
-                type: GeminiEventType.ToolCallInfo,
-                value: evtValue,
-              };
-            }
-          }
-
-          pendingToolCalls = [];
-
-          const waitingOnConfirmations = toolExecutionOutcomes.filter(outcome => outcome.confirmationDetails).length > 0;
-          if (waitingOnConfirmations) {
-            // Stop processing content, wait for user.
-            // TODO: Kill token processing once API supports signals.
-            break;
-          }
-
-          functionResponseParts = toolExecutionOutcomes.map((executedTool: ToolExecutionOutcome): Part => {
-            const { name, result, error } = executedTool;
-            const output = { "output": result?.llmContent };
-            let toolOutcomePayload: any;
-
-            if (error) {
-              const errorMessage = error?.message || String(error);
-              toolOutcomePayload = { error: `Invocation failed: ${errorMessage}` };
-              console.error(`[Turn ${turns}] Critical error invoking tool ${name}:`, error);
-            } else if (result && typeof result === 'object' && result !== null && 'error' in result) {
-              toolOutcomePayload = output;
-              console.warn(`[Turn ${turns}] Tool ${name} returned an error structure:`, result.error);
-            } else {
-              toolOutcomePayload = output;
-            }
-
-            return {
-              functionResponse: {
-                name: name,
-                id: executedTool.callId,
-                response: toolOutcomePayload,
-              },
-            };
-          });
-          currentMessageToSend = functionResponseParts;
-        } else if (yieldedTextInTurn) {
-          const history = chat.getHistory();
-          const checkPrompt = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
-
-**Decision Rules (apply in order):**
-
-1. **Model Continues:** If your last response explicitly states an immediate next action *you* intend to take (e.g., "Next, I will...", "Now I'll process...", "Moving on to analyze...", indicates an intended tool call that didn't execute), OR if the response seems clearly incomplete (cut off mid-thought without a natural conclusion), then the **'model'** should speak next.
-2. **Question to User:** If your last response ends with a direct question specifically addressed *to the user*, then the **'user'** should speak next.
-3. **Waiting for User:** If your last response completed a thought, statement, or task *and* does not meet the criteria for Rule 1 (Model Continues) or Rule 2 (Question to User), it implies a pause expecting user input or reaction. In this case, the **'user'** should speak next.
-
-**Output Format:**
-
-Respond *only* in JSON format according to the following schema. Do not include any text outside the JSON structure.
-
-\`\`\`json
-{
-  "type": "object",
-  "properties": {
-    "reasoning": {
-      "type": "string",
-      "description": "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn."
-    },
-    "next_speaker": {
-      "type": "string",
-      "enum": ["user", "model"],
-      "description": "Who should speak next based *only* on the preceding turn and the decision rules."
-    }
-  },
-  "required": ["next_speaker", "reasoning"]
-\`\`\`
-}`;
-
-          // Schema Idea
-          const responseSchema: SchemaUnion = {
-            type: Type.OBJECT,
-            properties: {
-              reasoning: {
-                type: Type.STRING,
-                description: "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn."
-              },
-              next_speaker: {
-                type: Type.STRING,
-                enum: ['user', 'model'], // Enforce the choices
-                description: "Who should speak next based *only* on the preceding turn and the decision rules",
-              },
-            },
-            required: ['reasoning', 'next_speaker']
-          };
-
-          try {
-            // Use the new generateJson method, passing the history and the check prompt
-            const parsedResponse = await this.generateJson([...history, { role: "user", parts: [{ text: checkPrompt }] }], responseSchema);
-
-            // Safely extract the next speaker value
-            const nextSpeaker: string | undefined = typeof parsedResponse?.next_speaker === 'string' ? parsedResponse.next_speaker : undefined;
-
-            if (nextSpeaker === 'model') {
-              currentMessageToSend = { text: 'alright' }; // Or potentially a more meaningful continuation prompt
-            } else {
-              // 'user' should speak next, or value is missing/invalid. End the turn.
-              break;
-            }
-
-          } catch (error) {
-            console.error(`[Turn ${turns}] Failed to get or parse next speaker check:`, error);
-            // If the check fails, assume user should speak next to avoid infinite loops
-            break;
-          }
-        } else {
-          console.warn(`[Turn ${turns}] No text or function calls received from Gemini. Ending interaction.`);
-          break;
-        }
-
-      }
-
-      if (turns >= this.MAX_TURNS) {
-        console.warn("sendMessageStream: Reached maximum tool call turns limit.");
-        yield {
-          type: GeminiEventType.Content,
-          value: "\n\n[System Notice: Maximum interaction turns reached. The conversation may be incomplete.]",
-        };
-      }
-
-    } catch (error: unknown) {
-      if (error instanceof Error && error.name === 'AbortError') {
-        console.log("Gemini stream request aborted by user.");
-        throw error;
-      } else {
-        console.error(`Error during Gemini stream or tool interaction:`, error);
-        const message = error instanceof Error ? error.message : String(error);
-        yield {
-          type: GeminiEventType.Content,
-          value: `\n\n[Error: An unexpected error occurred during the chat: ${message}]`,
-        };
-        throw error;
-      }
-    }
-  }
-
-  /**
-   * Generates structured JSON content based on conversational history and a schema.
-   * @param contents The conversational history (Content array) to provide context.
-   * @param schema The SchemaUnion defining the desired JSON structure.
-   * @returns A promise that resolves to the parsed JSON object matching the schema.
-   * @throws Throws an error if the API call fails or the response is not valid JSON.
-   */
-  public async generateJson(contents: Content[], schema: SchemaUnion): Promise<any> {
-    try {
-      const result = await this.ai.models.generateContent({
-        model: 'gemini-2.0-flash', // Using flash for potentially faster structured output
-        config: {
-          ...this.defaultHyperParameters,
-          systemInstruction: CoreSystemPrompt,
-          responseSchema: schema,
-          responseMimeType: 'application/json',
-        },
-        contents: contents, // Pass the full Content array
-      });
-
-      const responseText = result.text;
-      if (!responseText) {
-        throw new Error("API returned an empty response.");
-      }
-
-      try {
-        const parsedJson = JSON.parse(responseText);
-        // TODO: Add schema validation if needed
-        return parsedJson;
-      } catch (parseError) {
-        console.error("Failed to parse JSON response:", responseText);
-        throw new Error(`Failed to parse API response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`);
-      }
-    } catch (error) {
-      console.error("Error generating JSON content:", error);
-      const message = error instanceof Error ? 
error.message : "Unknown API error.";
-      throw new Error(`Failed to generate JSON content: ${message}`);
-    }
-  }
-}
diff --git a/packages/cli/src/core/GeminiStream.ts b/packages/cli/src/core/GeminiStream.ts
deleted file mode 100644
index 28568306..00000000
--- a/packages/cli/src/core/GeminiStream.ts
+++ /dev/null
@@ -1,22 +0,0 @@
-import { ToolCallEvent } from "../ui/types.js";
-
-export enum GeminiEventType {
-  Content,
-  ToolCallInfo,
-}
-
-export interface GeminiContentEvent {
-  type: GeminiEventType.Content;
-  value: string;
-}
-
-export interface GeminiToolCallInfoEvent {
-  type: GeminiEventType.ToolCallInfo;
-  value: ToolCallEvent;
-}
-
-export type GeminiEvent =
-  | GeminiContentEvent
-  | GeminiToolCallInfoEvent;
-
-export type GeminiStream = AsyncIterable<GeminiEvent>;
diff --git a/packages/cli/src/core/StreamingState.ts b/packages/cli/src/core/StreamingState.ts
deleted file mode 100644
index 5aed1ff0..00000000
--- a/packages/cli/src/core/StreamingState.ts
+++ /dev/null
@@ -1,4 +0,0 @@
-export enum StreamingState {
-  Idle,
-  Responding,
-}
\ No newline at end of file
diff --git a/packages/cli/src/core/agent.ts b/packages/cli/src/core/agent.ts
deleted file mode 100644
index e69de29b..00000000
diff --git a/packages/cli/src/core/constants.ts b/packages/cli/src/core/constants.ts
deleted file mode 100644
index 16ac74d1..00000000
--- a/packages/cli/src/core/constants.ts
+++ /dev/null
@@ -1 +0,0 @@
-export const MEMORY_FILE_NAME = 'GEMINI.md';
\ No newline at end of file
diff --git a/packages/cli/src/core/gemini-client.ts b/packages/cli/src/core/gemini-client.ts
new file mode 100644
index 00000000..e41fda6f
--- /dev/null
+++ b/packages/cli/src/core/gemini-client.ts
@@ -0,0 +1,383 @@
+import {
+  GenerateContentConfig, GoogleGenAI, Part, Chat,
+  Type,
+  SchemaUnion,
+  PartListUnion,
+  Content
+} from '@google/genai';
+import { getApiKey } from '../config/env.js';
+import { CoreSystemPrompt } from './prompts.js';
+import { type ToolCallEvent, type ToolCallConfirmationDetails, ToolCallStatus } from '../ui/types.js';
+import process from 'node:process';
+import { toolRegistry } from '../tools/tool-registry.js';
+import { ToolResult } from '../tools/tool.js';
+import { getFolderStructure } from '../utils/getFolderStructure.js';
+import { GeminiEventType, GeminiStream } from './gemini-stream.js';
+
+type ToolExecutionOutcome = {
+  callId: string;
+  name: string;
+  args: Record<string, any>;
+  result?: ToolResult;
+  error?: any;
+  confirmationDetails?: ToolCallConfirmationDetails;
+};
+
+export class GeminiClient {
+  private ai: GoogleGenAI;
+  private defaultHyperParameters: GenerateContentConfig = {
+    temperature: 0,
+    topP: 1,
+  };
+  private readonly MAX_TURNS = 100;
+
+  constructor() {
+    const apiKey = getApiKey();
+    this.ai = new GoogleGenAI({ apiKey });
+  }
+
+  public async startChat(): Promise<Chat> {
+    const tools = toolRegistry.getToolSchemas();
+
+    // --- Get environmental information ---
+    const cwd = process.cwd();
+    const today = new Date().toLocaleDateString(undefined, { // Use locale-aware date formatting
+      weekday: 'long', year: 'numeric', month: 'long', day: 'numeric'
+    });
+    const platform = process.platform;
+
+    // --- Format information into a conversational multi-line string ---
+    const folderStructure = await getFolderStructure(cwd);
+    // --- End folder structure formatting ---)
+    const initialContextText = `
+Okay, just setting up the context for our chat.
+Today is ${today}.
+My operating system is: ${platform}
+I'm currently working in the directory: ${cwd}
+${folderStructure}
+        `.trim();
+
+    const initialContextPart: Part = { text: initialContextText };
+    // --- End environmental information formatting ---
+
+    try {
+      const chat = this.ai.chats.create({
+        model: 'gemini-2.0-flash',//'gemini-2.0-flash',
+        config: {
+          systemInstruction: CoreSystemPrompt,
+          ...this.defaultHyperParameters,
+          tools,
+        },
+        history: [
+          // --- Add the context as a single part in the initial user message ---
+          {
+            role: "user",
+            parts: [initialContextPart] // Pass the single Part object in an array
+          },
+          // --- Add an empty model response to balance the history ---
+          {
+            role: "model",
+            parts: [{ text: "Got it. Thanks for the context!" }] // A slightly more conversational model response
+          }
+          // --- End history modification ---
+        ],
+      });
+      return chat;
+    } catch (error) {
+      console.error("Error initializing Gemini chat session:", error);
+      const message = error instanceof Error ? error.message : "Unknown error.";
+      throw new Error(`Failed to initialize chat: ${message}`);
+    }
+  }
+
+  public addMessageToHistory(chat: Chat, message: Content): void {
+    const history = chat.getHistory();
+    history.push(message);
+    this.ai.chats
+    chat
+  }
+
+  public async* sendMessageStream(
+    chat: Chat,
+    request: PartListUnion,
+    signal?: AbortSignal
+  ): GeminiStream {
+    let currentMessageToSend: PartListUnion = request;
+    let turns = 0;
+
+    try {
+      while (turns < this.MAX_TURNS) {
+        turns++;
+        const resultStream = await chat.sendMessageStream({ message: currentMessageToSend });
+        let functionResponseParts: Part[] = [];
+        let pendingToolCalls: Array<{ callId: string; name: string; args: Record<string, any> }> = [];
+        let yieldedTextInTurn = false;
+        const chunksForDebug = [];
+
+        for await (const chunk of resultStream) {
+          chunksForDebug.push(chunk);
+          if (signal?.aborted) {
+            const abortError = new Error("Request cancelled by user during stream.");
+            abortError.name = 'AbortError';
+            throw abortError;
+          }
+
+          const functionCalls = chunk.functionCalls;
+          if (functionCalls && functionCalls.length > 0) {
+            for (const call of functionCalls) {
+              const callId = call.id ?? `${call.name}-${Date.now()}-${Math.random().toString(16).slice(2)}`;
+              const name = call.name || 'undefined_tool_name';
+              const args = (call.args || {}) as Record<string, any>;
+
+              pendingToolCalls.push({ callId, name, args });
+              const evtValue: ToolCallEvent = {
+                type: 'tool_call',
+                status: ToolCallStatus.Pending,
+                callId,
+                name,
+                args,
+                resultDisplay: undefined,
+                confirmationDetails: undefined,
+              }
+              yield {
+                type: GeminiEventType.ToolCallInfo,
+                value: evtValue,
+              };
+            }
+          } else {
+            const text = chunk.text;
+            if (text) {
+              yieldedTextInTurn = true;
+              yield {
+                type: GeminiEventType.Content,
+                value: text,
+              };
+            }
+          }
+        }
+
+        if (pendingToolCalls.length > 0) {
+          const toolPromises: Promise<ToolExecutionOutcome>[] = pendingToolCalls.map(async pendingToolCall => {
+            const tool = toolRegistry.getTool(pendingToolCall.name);
+
+            if (!tool) {
+              // Directly return error outcome if tool not found
+              return { ...pendingToolCall, error: new Error(`Tool "${pendingToolCall.name}" not found or is not registered.`) };
+            }
+
+            try {
+              const confirmation = await tool.shouldConfirmExecute(pendingToolCall.args);
+              if (confirmation) {
+                return { ...pendingToolCall, confirmationDetails: confirmation };
+              }
+            } catch (error) {
+              return { ...pendingToolCall, error: new Error(`Tool failed to check tool confirmation: ${error}`) };
+            }
+
+            try {
+              const result = await tool.execute(pendingToolCall.args);
+              return { ...pendingToolCall, result };
+            } catch (error) {
+              return { ...pendingToolCall, error: new Error(`Tool failed to execute: ${error}`) };
+            }
+          });
+          const toolExecutionOutcomes: ToolExecutionOutcome[] = await Promise.all(toolPromises);
+
+          for (const executedTool of toolExecutionOutcomes) {
+            const { callId, name, args, result, error, confirmationDetails } = executedTool;
+
+            if (error) {
+              const errorMessage = error?.message || String(error);
+              yield {
+                type: GeminiEventType.Content,
+                value: `[Error invoking tool ${name}: ${errorMessage}]`,
+              };
+            } else if (result && typeof result === 'object' && result !== null && 'error' in result) {
+              const errorMessage = String(result.error);
+              yield {
+                type: GeminiEventType.Content,
+                value: `[Error executing tool ${name}: ${errorMessage}]`,
+              };
+            } else {
+              const status = confirmationDetails ? ToolCallStatus.Confirming : ToolCallStatus.Invoked;
+              const evtValue: ToolCallEvent = { type: 'tool_call', status, callId, name, args, resultDisplay: result?.returnDisplay, confirmationDetails }
+              yield {
+                type: GeminiEventType.ToolCallInfo,
+                value: evtValue,
+              };
+            }
+          }
+
+          pendingToolCalls = [];
+
+          const waitingOnConfirmations = toolExecutionOutcomes.filter(outcome => outcome.confirmationDetails).length > 0;
+          if (waitingOnConfirmations) {
+            // Stop processing content, wait for user.
+            // TODO: Kill token processing once API supports signals.
+            break;
+          }
+
+          functionResponseParts = toolExecutionOutcomes.map((executedTool: ToolExecutionOutcome): Part => {
+            const { name, result, error } = executedTool;
+            const output = { "output": result?.llmContent };
+            let toolOutcomePayload: any;
+
+            if (error) {
+              const errorMessage = error?.message || String(error);
+              toolOutcomePayload = { error: `Invocation failed: ${errorMessage}` };
+              console.error(`[Turn ${turns}] Critical error invoking tool ${name}:`, error);
+            } else if (result && typeof result === 'object' && result !== null && 'error' in result) {
+              toolOutcomePayload = output;
+              console.warn(`[Turn ${turns}] Tool ${name} returned an error structure:`, result.error);
+            } else {
+              toolOutcomePayload = output;
+            }
+
+            return {
+              functionResponse: {
+                name: name,
+                id: executedTool.callId,
+                response: toolOutcomePayload,
+              },
+            };
+          });
+          currentMessageToSend = functionResponseParts;
+        } else if (yieldedTextInTurn) {
+          const history = chat.getHistory();
+          const checkPrompt = `Analyze *only* the content and structure of your immediately preceding response (your last turn in the conversation history). Based *strictly* on that response, determine who should logically speak next: the 'user' or the 'model' (you).
+
+**Decision Rules (apply in order):**
+
+1. **Model Continues:** If your last response explicitly states an immediate next action *you* intend to take (e.g., "Next, I will...", "Now I'll process...", "Moving on to analyze...", indicates an intended tool call that didn't execute), OR if the response seems clearly incomplete (cut off mid-thought without a natural conclusion), then the **'model'** should speak next.
+2. **Question to User:** If your last response ends with a direct question specifically addressed *to the user*, then the **'user'** should speak next.
+3. **Waiting for User:** If your last response completed a thought, statement, or task *and* does not meet the criteria for Rule 1 (Model Continues) or Rule 2 (Question to User), it implies a pause expecting user input or reaction. In this case, the **'user'** should speak next.
+
+**Output Format:**
+
+Respond *only* in JSON format according to the following schema. Do not include any text outside the JSON structure.
+
+\`\`\`json
+{
+  "type": "object",
+  "properties": {
+    "reasoning": {
+      "type": "string",
+      "description": "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn."
+    },
+    "next_speaker": {
+      "type": "string",
+      "enum": ["user", "model"],
+      "description": "Who should speak next based *only* on the preceding turn and the decision rules."
+    }
+  },
+  "required": ["next_speaker", "reasoning"]
+\`\`\`
+}`;
+
+          // Schema Idea
+          const responseSchema: SchemaUnion = {
+            type: Type.OBJECT,
+            properties: {
+              reasoning: {
+                type: Type.STRING,
+                description: "Brief explanation justifying the 'next_speaker' choice based *strictly* on the applicable rule and the content/structure of the preceding turn."
+              },
+              next_speaker: {
+                type: Type.STRING,
+                enum: ['user', 'model'], // Enforce the choices
+                description: "Who should speak next based *only* on the preceding turn and the decision rules",
+              },
+            },
+            required: ['reasoning', 'next_speaker']
+          };
+
+          try {
+            // Use the new generateJson method, passing the history and the check prompt
+            const parsedResponse = await this.generateJson([...history, { role: "user", parts: [{ text: checkPrompt }] }], responseSchema);
+
+            // Safely extract the next speaker value
+            const nextSpeaker: string | undefined = typeof parsedResponse?.next_speaker === 'string' ? parsedResponse.next_speaker : undefined;
+
+            if (nextSpeaker === 'model') {
+              currentMessageToSend = { text: 'alright' }; // Or potentially a more meaningful continuation prompt
+            } else {
+              // 'user' should speak next, or value is missing/invalid. End the turn.
+              break;
+            }
+
+          } catch (error) {
+            console.error(`[Turn ${turns}] Failed to get or parse next speaker check:`, error);
+            // If the check fails, assume user should speak next to avoid infinite loops
+            break;
+          }
+        } else {
+          console.warn(`[Turn ${turns}] No text or function calls received from Gemini. Ending interaction.`);
+          break;
+        }
+
+      }
+
+      if (turns >= this.MAX_TURNS) {
+        console.warn("sendMessageStream: Reached maximum tool call turns limit.");
+        yield {
+          type: GeminiEventType.Content,
+          value: "\n\n[System Notice: Maximum interaction turns reached. The conversation may be incomplete.]",
+        };
+      }
+
+    } catch (error: unknown) {
+      if (error instanceof Error && error.name === 'AbortError') {
+        console.log("Gemini stream request aborted by user.");
+        throw error;
+      } else {
+        console.error(`Error during Gemini stream or tool interaction:`, error);
+        const message = error instanceof Error ? error.message : String(error);
+        yield {
+          type: GeminiEventType.Content,
+          value: `\n\n[Error: An unexpected error occurred during the chat: ${message}]`,
+        };
+        throw error;
+      }
+    }
+  }
+
+  /**
+   * Generates structured JSON content based on conversational history and a schema.
+   * @param contents The conversational history (Content array) to provide context.
+   * @param schema The SchemaUnion defining the desired JSON structure.
+   * @returns A promise that resolves to the parsed JSON object matching the schema.
+   * @throws Throws an error if the API call fails or the response is not valid JSON.
+   */
+  public async generateJson(contents: Content[], schema: SchemaUnion): Promise<any> {
+    try {
+      const result = await this.ai.models.generateContent({
+        model: 'gemini-2.0-flash', // Using flash for potentially faster structured output
+        config: {
+          ...this.defaultHyperParameters,
+          systemInstruction: CoreSystemPrompt,
+          responseSchema: schema,
+          responseMimeType: 'application/json',
+        },
+        contents: contents, // Pass the full Content array
+      });
+
+      const responseText = result.text;
+      if (!responseText) {
+        throw new Error("API returned an empty response.");
+      }
+
+      try {
+        const parsedJson = JSON.parse(responseText);
+        // TODO: Add schema validation if needed
+        return parsedJson;
+      } catch (parseError) {
+        console.error("Failed to parse JSON response:", responseText);
+        throw new Error(`Failed to parse API response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`);
+      }
+    } catch (error) {
+      console.error("Error generating JSON content:", error);
+      const message = error instanceof Error ? 
error.message : "Unknown API error.";
+      throw new Error(`Failed to generate JSON content: ${message}`);
+    }
+  }
+}
diff --git a/packages/cli/src/core/gemini-stream.ts b/packages/cli/src/core/gemini-stream.ts
new file mode 100644
index 00000000..b47eb1c6
--- /dev/null
+++ b/packages/cli/src/core/gemini-stream.ts
@@ -0,0 +1,167 @@
+import { ToolCallEvent } from "../ui/types.js";
+import { Part } from '@google/genai';
+import { HistoryItem } from '../ui/types.js';
+import { handleToolCallChunk, addErrorMessageToHistory } from './history-updater.js';
+
+export enum GeminiEventType {
+  Content,
+  ToolCallInfo,
+}
+
+export interface GeminiContentEvent {
+  type: GeminiEventType.Content;
+  value: string;
+}
+
+export interface GeminiToolCallInfoEvent {
+  type: GeminiEventType.ToolCallInfo;
+  value: ToolCallEvent;
+}
+
+export type GeminiEvent =
+  | GeminiContentEvent
+  | GeminiToolCallInfoEvent;
+
+export type GeminiStream = AsyncIterable<GeminiEvent>;
+
+export enum StreamingState {
+  Idle,
+  Responding,
+}
+
+interface StreamProcessorParams {
+  stream: GeminiStream;
+  signal: AbortSignal;
+  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>;
+  submitQuery: (query: Part) => Promise<void>,
+  getNextMessageId: () => number;
+  addHistoryItem: (itemData: Omit<HistoryItem, 'id'>, id: number) => void;
+  currentToolGroupIdRef: React.MutableRefObject<number | null>;
+}
+
+/**
+ * Processes the Gemini stream, managing text buffering, adaptive rendering,
+ * and delegating history updates for tool calls and errors.
+ */
+export const processGeminiStream = async ({ // Renamed function for clarity
+  stream,
+  signal,
+  setHistory,
+  submitQuery,
+  getNextMessageId,
+  addHistoryItem,
+  currentToolGroupIdRef,
+}: StreamProcessorParams): Promise<void> => {
+  // --- State specific to this stream processing invocation ---
+  let textBuffer = '';
+  let renderTimeoutId: NodeJS.Timeout | null = null;
+  let isStreamComplete = false;
+  let currentGeminiMessageId: number | null = null;
+
+  const render = (content: string) => {
+    if (currentGeminiMessageId === null) {
+      return;
+    }
+    setHistory(prev => prev.map(item =>
+      item.id === currentGeminiMessageId && item.type === 'gemini'
+        ? { ...item, text: (item.text ?? 
'') + content } + : item + )); + } + // --- Adaptive Rendering Logic (nested) --- + const renderBufferedText = () => { + if (signal.aborted) { + if (renderTimeoutId) clearTimeout(renderTimeoutId); + renderTimeoutId = null; + return; + } + + const bufferLength = textBuffer.length; + let chunkSize = 0; + let delay = 50; + + if (bufferLength > 150) { + chunkSize = Math.min(bufferLength, 30); delay = 5; + } else if (bufferLength > 30) { + chunkSize = Math.min(bufferLength, 10); delay = 10; + } else if (bufferLength > 0) { + chunkSize = 2; delay = 20; + } + + if (chunkSize > 0) { + const chunkToRender = textBuffer.substring(0, chunkSize); + textBuffer = textBuffer.substring(chunkSize); + render(chunkToRender); + + renderTimeoutId = setTimeout(renderBufferedText, delay); + } else { + renderTimeoutId = null; // Clear timeout ID if nothing to render + if (!isStreamComplete) { + // Buffer empty, but stream might still send data, check again later + renderTimeoutId = setTimeout(renderBufferedText, 50); + } + } + }; + + const scheduleRender = () => { + if (renderTimeoutId === null) { + renderTimeoutId = setTimeout(renderBufferedText, 0); + } + }; + + // --- Stream Processing Loop --- + try { + for await (const chunk of stream) { + if (signal.aborted) break; + + if (chunk.type === GeminiEventType.Content) { + currentToolGroupIdRef.current = null; // Reset tool group on text + + if (currentGeminiMessageId === null) { + currentGeminiMessageId = getNextMessageId(); + addHistoryItem({ type: 'gemini', text: '' }, currentGeminiMessageId); + textBuffer = ''; + } + textBuffer += chunk.value; + scheduleRender(); + + } else if (chunk.type === GeminiEventType.ToolCallInfo) { + if (renderTimeoutId) { // Stop rendering loop + clearTimeout(renderTimeoutId); + renderTimeoutId = null; + } + // Flush any text buffer content. 
+        render(textBuffer);
+        currentGeminiMessageId = null; // End text message context
+        textBuffer = ''; // Clear buffer
+
+        // Delegate history update for tool call
+        handleToolCallChunk(
+          chunk.value,
+          setHistory,
+          submitQuery,
+          getNextMessageId,
+          currentToolGroupIdRef
+        );
+      }
+    }
+    if (signal.aborted) {
+      throw new Error("Request cancelled by user");
+    }
+  } catch (error: any) {
+    if (renderTimeoutId) { // Ensure render loop stops on error
+      clearTimeout(renderTimeoutId);
+      renderTimeoutId = null;
+    }
+    // Delegate history update for error message
+    addErrorMessageToHistory(error, setHistory, getNextMessageId);
+  } finally {
+    isStreamComplete = true; // Signal stream end for render loop completion
+    if (renderTimeoutId) {
+      clearTimeout(renderTimeoutId);
+      renderTimeoutId = null;
+    }
+
+    renderBufferedText(); // Force final render
+  }
+};
\ No newline at end of file
diff --git a/packages/cli/src/core/geminiStreamProcessor.ts b/packages/cli/src/core/geminiStreamProcessor.ts
deleted file mode 100644
index 12de49cb..00000000
--- a/packages/cli/src/core/geminiStreamProcessor.ts
+++ /dev/null
@@ -1,142 +0,0 @@
-import { Part } from '@google/genai';
-import { HistoryItem } from '../ui/types.js';
-import { GeminiEventType, GeminiStream } from './GeminiStream.js';
-import { handleToolCallChunk, addErrorMessageToHistory } from './historyUpdater.js';
-
-interface StreamProcessorParams {
-  stream: GeminiStream;
-  signal: AbortSignal;
-  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>;
-  submitQuery: (query: Part) => Promise<void>,
-  getNextMessageId: () => number;
-  addHistoryItem: (itemData: Omit<HistoryItem, 'id'>, id: number) => void;
-  currentToolGroupIdRef: React.MutableRefObject<number | null>;
-}
-
-/**
- * Processes the Gemini stream, managing text buffering, adaptive rendering,
- * and delegating history updates for tool calls and errors.
- */
-export const processGeminiStream = async ({ // Renamed function for clarity
-  stream,
-  signal,
-  setHistory,
-  submitQuery,
-  getNextMessageId,
-  addHistoryItem,
-  currentToolGroupIdRef,
-}: StreamProcessorParams): Promise<void> => {
-  // --- State specific to this stream processing invocation ---
-  let textBuffer = '';
-  let renderTimeoutId: NodeJS.Timeout | null = null;
-  let isStreamComplete = false;
-  let currentGeminiMessageId: number | null = null;
-
-  const render = (content: string) => {
-    if (currentGeminiMessageId === null) {
-      return;
-    }
-    setHistory(prev => prev.map(item =>
-      item.id === currentGeminiMessageId && item.type === 'gemini'
-        ? { ...item, text: (item.text ?? 
'') + content } - : item - )); - } - // --- Adaptive Rendering Logic (nested) --- - const renderBufferedText = () => { - if (signal.aborted) { - if (renderTimeoutId) clearTimeout(renderTimeoutId); - renderTimeoutId = null; - return; - } - - const bufferLength = textBuffer.length; - let chunkSize = 0; - let delay = 50; - - if (bufferLength > 150) { - chunkSize = Math.min(bufferLength, 30); delay = 5; - } else if (bufferLength > 30) { - chunkSize = Math.min(bufferLength, 10); delay = 10; - } else if (bufferLength > 0) { - chunkSize = 2; delay = 20; - } - - if (chunkSize > 0) { - const chunkToRender = textBuffer.substring(0, chunkSize); - textBuffer = textBuffer.substring(chunkSize); - render(chunkToRender); - - renderTimeoutId = setTimeout(renderBufferedText, delay); - } else { - renderTimeoutId = null; // Clear timeout ID if nothing to render - if (!isStreamComplete) { - // Buffer empty, but stream might still send data, check again later - renderTimeoutId = setTimeout(renderBufferedText, 50); - } - } - }; - - const scheduleRender = () => { - if (renderTimeoutId === null) { - renderTimeoutId = setTimeout(renderBufferedText, 0); - } - }; - - // --- Stream Processing Loop --- - try { - for await (const chunk of stream) { - if (signal.aborted) break; - - if (chunk.type === GeminiEventType.Content) { - currentToolGroupIdRef.current = null; // Reset tool group on text - - if (currentGeminiMessageId === null) { - currentGeminiMessageId = getNextMessageId(); - addHistoryItem({ type: 'gemini', text: '' }, currentGeminiMessageId); - textBuffer = ''; - } - textBuffer += chunk.value; - scheduleRender(); - - } else if (chunk.type === GeminiEventType.ToolCallInfo) { - if (renderTimeoutId) { // Stop rendering loop - clearTimeout(renderTimeoutId); - renderTimeoutId = null; - } - - // Flush any text buffer content. - render(textBuffer); - currentGeminiMessageId = null; // End text message context - textBuffer = ''; // Clear buffer - - // Delegate history update for tool call - handleToolCallChunk( - chunk.value, - setHistory, - submitQuery, - getNextMessageId, - currentToolGroupIdRef - ); - } - } - if (signal.aborted) { - throw new Error("Request cancelled by user"); - } - } catch (error: any) { - if (renderTimeoutId) { // Ensure render loop stops on error - clearTimeout(renderTimeoutId); - renderTimeoutId = null; - } - // Delegate history update for error message - addErrorMessageToHistory(error, setHistory, getNextMessageId); - } finally { - isStreamComplete = true; // Signal stream end for render loop completion - if (renderTimeoutId) { - clearTimeout(renderTimeoutId); - renderTimeoutId = null; - } - - renderBufferedText(); // Force final render - } -}; \ No newline at end of file diff --git a/packages/cli/src/core/history-updater.ts b/packages/cli/src/core/history-updater.ts new file mode 100644 index 00000000..4013728f --- /dev/null +++ b/packages/cli/src/core/history-updater.ts @@ -0,0 +1,168 @@ +import { Part } from "@google/genai"; +import { toolRegistry } from "../tools/tool-registry.js"; +import { HistoryItem, IndividualToolCallDisplay, ToolCallEvent, ToolCallStatus, ToolConfirmationOutcome, ToolEditConfirmationDetails, ToolExecuteConfirmationDetails } from "../ui/types.js"; +import { ToolResultDisplay } from "../tools/tool.js"; + +/** + * Processes a tool call chunk and updates the history state accordingly. + * Manages adding new tool groups or updating existing ones. + * Resides here as its primary effect is updating history based on tool events. 
+ */
+export const handleToolCallChunk = (
+  chunk: ToolCallEvent,
+  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
+  submitQuery: (query: Part) => Promise<void>,
+  getNextMessageId: () => number,
+  currentToolGroupIdRef: React.MutableRefObject<number | null>
+): void => {
+  const toolDefinition = toolRegistry.getTool(chunk.name);
+  const description = toolDefinition?.getDescription
+    ? toolDefinition.getDescription(chunk.args)
+    : '';
+  const toolDisplayName = toolDefinition?.displayName ?? chunk.name;
+  let confirmationDetails = chunk.confirmationDetails;
+  if (confirmationDetails) {
+    const originalConfirmationDetails = confirmationDetails;
+    const historyUpdatingConfirm = async (outcome: ToolConfirmationOutcome) => {
+      originalConfirmationDetails.onConfirm(outcome);
+
+      if (outcome === ToolConfirmationOutcome.Cancel) {
+        let resultDisplay: ToolResultDisplay | undefined;
+        if ('fileDiff' in originalConfirmationDetails) {
+          resultDisplay = { fileDiff: (originalConfirmationDetails as ToolEditConfirmationDetails).fileDiff };
+        } else {
+          resultDisplay = `~~${(originalConfirmationDetails as ToolExecuteConfirmationDetails).command}~~`;
+        }
+        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Canceled, confirmationDetails: undefined, resultDisplay, }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
+        const functionResponse: Part = {
+          functionResponse: {
+            name: chunk.name,
+            response: { "error": "User rejected function call." },
+          },
+        }
+        await submitQuery(functionResponse);
+      } else {
+        const tool = toolRegistry.getTool(chunk.name)
+        if (!tool) {
+          throw new Error(`Tool "${chunk.name}" not found or is not registered.`);
+        }
+        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Invoked, resultDisplay: "Executing...", confirmationDetails: undefined }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
+        const result = await tool.execute(chunk.args);
+        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Invoked, resultDisplay: result.returnDisplay, confirmationDetails: undefined }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
+        const functionResponse: Part = {
+          functionResponse: {
+            name: chunk.name,
+            id: chunk.callId,
+            response: { "output": result.llmContent },
+          },
+        }
+        await submitQuery(functionResponse);
+      }
+    }
+
+    confirmationDetails = {
+      ...originalConfirmationDetails,
+      onConfirm: historyUpdatingConfirm,
+    };
+  }
+  const toolDetail: IndividualToolCallDisplay = {
+    callId: chunk.callId,
+    name: toolDisplayName,
+    description,
+    resultDisplay: chunk.resultDisplay,
+    status: chunk.status,
+    confirmationDetails: confirmationDetails,
+  };
+
+  const activeGroupId = currentToolGroupIdRef.current;
+  setHistory(prev => {
+    if (chunk.status === ToolCallStatus.Pending) {
+      if (activeGroupId === null) {
+        // Start a new tool group
+        const newGroupId = getNextMessageId();
+        currentToolGroupIdRef.current = newGroupId;
+        return [
+          ...prev,
+          { id: newGroupId, type: 'tool_group', tools: [toolDetail] } as HistoryItem
+        ];
+      }
+
+      // Add to existing tool group
+      return prev.map(item =>
+        item.id === activeGroupId && item.type === 'tool_group'
+          ? item.tools.some(t => t.callId === toolDetail.callId)
+            ? item // Tool already listed as pending
+            : { ...item, tools: [...item.tools, toolDetail] }
+          : item
+      );
+    }
+
+    // Update the status of a pending tool within the active group
+    if (activeGroupId === null) {
+      // Log if an invoked tool arrives without an active group context
+      console.warn("Received invoked tool status without an active tool group ID:", chunk);
+      return prev;
+    }
+
+    return prev.map(item =>
+      item.id === activeGroupId && item.type === 'tool_group'
+        ? {
+          ...item,
+          tools: item.tools.map(t =>
+            t.callId === toolDetail.callId
+              ? { ...t, ...toolDetail, status: chunk.status } // Update details & status
+              : t
+          )
+        }
+        : item
+    );
+  });
+};
+
+/**
+ * Appends an error or informational message to the history, attempting to attach
+ * it to the last non-user message or creating a new entry.
+ */
+export const addErrorMessageToHistory = (
+  error: any,
+  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
+  getNextMessageId: () => number
+): void => {
+  const isAbort = error.name === 'AbortError';
+  const errorType = isAbort ? 'info' : 'error';
+  const errorText = isAbort
+    ? '[Request cancelled by user]'
+    : `[Error: ${error.message || 'Unknown error'}]`;
+
+  setHistory(prev => {
+    const reversedHistory = [...prev].reverse();
+    // Find the last message that isn't from the user to append the error/info to
+    const lastBotMessageIndex = reversedHistory.findIndex(item => item.type !== 'user');
+    const originalIndex = lastBotMessageIndex !== -1 ? prev.length - 1 - lastBotMessageIndex : -1;
+
+    if (originalIndex !== -1) {
+      // Append error to the last relevant message
+      return prev.map((item, index) => {
+        if (index === originalIndex) {
+          let baseText = '';
+          // Determine base text based on item type
+          if (item.type === 'gemini') baseText = item.text ?? '';
+          else if (item.type === 'tool_group') baseText = `Tool execution (${item.tools.length} calls)`;
+          else if (item.type === 'error' || item.type === 'info') baseText = item.text ?? '';
+          // Safely handle potential undefined text
+
+          const updatedText = (baseText + (baseText && !baseText.endsWith('\n') ? '\n' : '') + errorText).trim();
+          // Reuse existing ID, update type and text
+          return { ...item, type: errorType, text: updatedText };
+        }
+        return item;
+      });
+    } else {
+      // No previous message to append to, add a new error item
+      return [
+        ...prev,
+        { id: getNextMessageId(), type: errorType, text: errorText } as HistoryItem
+      ];
+    }
+  });
+};
\ No newline at end of file
diff --git a/packages/cli/src/core/historyUpdater.ts b/packages/cli/src/core/historyUpdater.ts
deleted file mode 100644
index 39eaca6a..00000000
--- a/packages/cli/src/core/historyUpdater.ts
+++ /dev/null
@@ -1,173 +0,0 @@
-import { Part } from "@google/genai";
-import { toolRegistry } from "../tools/tool-registry.js";
-import { HistoryItem, IndividualToolCallDisplay, ToolCallEvent, ToolCallStatus, ToolConfirmationOutcome, ToolEditConfirmationDetails, ToolExecuteConfirmationDetails } from "../ui/types.js";
-import { ToolResultDisplay } from "../tools/ToolResult.js";
-
-/**
- * Processes a tool call chunk and updates the history state accordingly.
- * Manages adding new tool groups or updating existing ones.
- * Resides here as its primary effect is updating history based on tool events.
- */
-export const handleToolCallChunk = (
-  chunk: ToolCallEvent,
-  setHistory: React.Dispatch<React.SetStateAction<HistoryItem[]>>,
-  submitQuery: (query: Part) => Promise<void>,
-  getNextMessageId: () => number,
-  currentToolGroupIdRef: React.MutableRefObject<number | null>
-): void => {
-  const toolDefinition = toolRegistry.getTool(chunk.name);
-  const description = toolDefinition?.getDescription
-    ? toolDefinition.getDescription(chunk.args)
-    : '';
-  const toolDisplayName = toolDefinition?.displayName ?? chunk.name;
-  let confirmationDetails = chunk.confirmationDetails;
-  if (confirmationDetails) {
-    const originalConfirmationDetails = confirmationDetails;
-    const historyUpdatingConfirm = async (outcome: ToolConfirmationOutcome) => {
-      originalConfirmationDetails.onConfirm(outcome);
-
-      if (outcome === ToolConfirmationOutcome.Cancel) {
-        let resultDisplay: ToolResultDisplay | undefined;
-        if ('fileDiff' in originalConfirmationDetails) {
-          resultDisplay = { fileDiff: (originalConfirmationDetails as ToolEditConfirmationDetails).fileDiff };
-        } else {
-          resultDisplay = `~~${(originalConfirmationDetails as ToolExecuteConfirmationDetails).command}~~`;
-        }
-        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Canceled, confirmationDetails: undefined, resultDisplay, }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
-        const functionResponse: Part = {
-          functionResponse: {
-            name: chunk.name,
-            response: { "error": "User rejected function call." },
-          },
-        }
-        await submitQuery(functionResponse);
-      } else {
-        const tool = toolRegistry.getTool(chunk.name)
-        if (!tool) {
-          throw new Error(`Tool "${chunk.name}" not found or is not registered.`);
-        }
-
-        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Invoked, resultDisplay: "Executing...", confirmationDetails: undefined }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
-
-        const result = await tool.execute(chunk.args);
-
-        handleToolCallChunk({ ...chunk, status: ToolCallStatus.Invoked, resultDisplay: result.returnDisplay, confirmationDetails: undefined }, setHistory, submitQuery, getNextMessageId, currentToolGroupIdRef);
-
-        const functionResponse: Part = {
-          functionResponse: {
-            name: chunk.name,
-            id: chunk.callId,
-            response: { "output": result.llmContent },
-          },
-        }
-
-        await submitQuery(functionResponse);
-      }
-    }
-
-    confirmationDetails = {
-      ...originalConfirmationDetails,
-      onConfirm: historyUpdatingConfirm,
-    };
-  }
-  const toolDetail: IndividualToolCallDisplay = {
-    callId: chunk.callId,
-    name: toolDisplayName,
-    description,
-    resultDisplay: chunk.resultDisplay,
-    status: chunk.status,
-    confirmationDetails: confirmationDetails,
-  };
-
-  const activeGroupId = currentToolGroupIdRef.current;
-  setHistory(prev => {
-    if (chunk.status === ToolCallStatus.Pending) {
-      if (activeGroupId === null) {
-        // Start a new tool group
-        const newGroupId = getNextMessageId();
-        currentToolGroupIdRef.current = newGroupId;
-        return [
-          ...prev,
-          { id: newGroupId, type: 'tool_group', tools: [toolDetail] } as HistoryItem
-        ];
-      }
-
-      // Add to existing tool group
-      return prev.map(item =>
-        item.id === activeGroupId && item.type === 'tool_group'
-          ? item.tools.some(t => t.callId === toolDetail.callId)
-            ? item // Tool already listed as pending
-            : { ...item, tools: [...item.tools, toolDetail] }
-          : item
-      );
-    }
-
-    // Update the status of a pending tool within the active group
-    if (activeGroupId === null) {
-      // Log if an invoked tool arrives without an active group context
-      console.warn("Received invoked tool status without an active tool group ID:", chunk);
-      return prev;
-    }
-
-    return prev.map(item =>
-      item.id === activeGroupId && item.type === 'tool_group'
-        ? {
-          ...item,
-          tools: item.tools.map(t =>
-            t.callId === toolDetail.callId
-              ? { ...t, ...toolDetail, status: chunk.status } // Update details & status
-              : t
-          )
-        }
-        : item
-    );
-  });
-};
\ No newline at end of file
diff --git a/packages/cli/src/core/prompts.ts b/packages/cli/src/core/prompts.ts
index 9e1f994f..92a6708d 100644
--- a/packages/cli/src/core/prompts.ts
+++ b/packages/cli/src/core/prompts.ts
@@ -1,6 +1,7 @@
 import { ReadFileTool } from "../tools/read-file.tool.js";
 import { TerminalTool } from "../tools/terminal.tool.js";
-import { MEMORY_FILE_NAME } from "./constants.js";
+
+const MEMORY_FILE_NAME = 'GEMINI.md';
 const contactEmail = 'ntaylormullen@google.com';
 export const CoreSystemPrompt = `
-- 