diff --git a/src-tauri/src/bin/codex_monitor_daemon.rs b/src-tauri/src/bin/codex_monitor_daemon.rs index 3a2cd0ce3..a37376fe5 100644 --- a/src-tauri/src/bin/codex_monitor_daemon.rs +++ b/src-tauri/src/bin/codex_monitor_daemon.rs @@ -1317,6 +1317,25 @@ impl DaemonState { .await } + async fn predict_response( + &self, + workspace_id: String, + context: String, + model: Option<String>, + ) -> Result<String, String> { + codex_aux_core::predict_response_core( + &self.sessions, + &self.workspaces, + workspace_id, + &context, + model, + |workspace_id, thread_id| { + emit_background_thread_hide(&self.event_sink, workspace_id, thread_id); + }, + ) + .await + } + async fn local_usage_snapshot( + &self, + days: Option, diff --git a/src-tauri/src/bin/codex_monitor_daemon/rpc/codex.rs b/src-tauri/src/bin/codex_monitor_daemon/rpc/codex.rs index e55774371..de404488a 100644 --- a/src-tauri/src/bin/codex_monitor_daemon/rpc/codex.rs +++ b/src-tauri/src/bin/codex_monitor_daemon/rpc/codex.rs @@ -492,6 +492,23 @@ pub(super) async fn try_handle( .and_then(|value| serde_json::to_value(value).map_err(|err| err.to_string())), ) } + "predict_response" => { + let workspace_id = match parse_string(params, "workspaceId") { + Ok(value) => value, + Err(err) => return Some(Err(err)), + }; + let context = match parse_string(params, "context") { + Ok(value) => value, + Err(err) => return Some(Err(err)), + }; + let model = parse_optional_string(params, "model"); + Some( + state + .predict_response(workspace_id, context, model) + .await + .map(Value::String), + ) + } _ => None, } } diff --git a/src-tauri/src/codex/mod.rs b/src-tauri/src/codex/mod.rs index 8bbb7757c..2919d607a 100644 --- a/src-tauri/src/codex/mod.rs +++ b/src-tauri/src/codex/mod.rs @@ -992,3 +992,51 @@ pub(crate) async fn generate_agent_description( ) .await } + +#[tauri::command] +pub(crate) async fn predict_response( + workspace_id: String, + context: String, + model: Option<String>, + state: State<'_, AppState>, + app: AppHandle, +) -> Result<String, String> { + if
remote_backend::is_remote_mode(&*state).await { + let mut payload = json!({ "workspaceId": workspace_id, "context": context }); + if let Some(ref model_id) = model { + payload.as_object_mut().unwrap().insert("model".to_string(), json!(model_id)); + } + let value = remote_backend::call_remote( + &*state, + app, + "predict_response", + payload, + ) + .await?; + return serde_json::from_value(value).map_err(|err| err.to_string()); + } + + crate::shared::codex_aux_core::predict_response_core( + &state.sessions, + &state.workspaces, + workspace_id, + &context, + model, + |workspace_id, thread_id| { + let _ = app.emit( + "app-server-event", + AppServerEvent { + workspace_id: workspace_id.to_string(), + message: json!({ + "method": "codex/backgroundThread", + "params": { + "threadId": thread_id, + "action": "hide" + } + }), + }, + ); + }, + ) + .await +} diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index c57bcd9a6..4e4f2e118 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -199,6 +199,7 @@ pub fn run() { codex::generate_commit_message, codex::generate_run_metadata, codex::generate_agent_description, + codex::predict_response, codex::resume_thread, codex::thread_live_subscribe, codex::thread_live_unsubscribe, diff --git a/src-tauri/src/shared/codex_aux_core.rs b/src-tauri/src/shared/codex_aux_core.rs index 12f985ade..539cd9e1c 100644 --- a/src-tauri/src/shared/codex_aux_core.rs +++ b/src-tauri/src/shared/codex_aux_core.rs @@ -398,6 +398,7 @@ pub(crate) async fn run_background_prompt_core<F>( workspace_id: String, prompt: String, model: Option<&str>, + max_tokens: Option<u32>, on_hide_thread: F, timeout_error: &str, turn_error_fallback: &str, @@ -472,6 +473,9 @@ where if let Some(model_id) = model { turn_params["model"] = json!(model_id); } + if let Some(max) = max_tokens { + turn_params["maxTokens"] = json!(max); + } let turn_result = session .send_request_for_workspace(&workspace_id, "turn/start", turn_params) .await; @@ -580,6 +584,7 @@ where
workspace_id, prompt, model, + None, on_hide_thread, "Timeout waiting for commit message generation", "Unknown error during commit message generation", @@ -609,6 +614,7 @@ where workspace_id, metadata_prompt, None, + None, on_hide_thread, "Timeout waiting for metadata generation", "Unknown error during metadata generation", @@ -618,6 +624,51 @@ where parse_run_metadata_value(&response) } +pub(crate) fn build_predict_response_prompt(context: &str) -> String { + format!( + "Predict the user's next message in a coding conversation. \ +Only suggest something when the assistant is clearly asking a question, \ +offering options, requesting confirmation, or there is an obvious natural follow-up. \ +If the assistant just delivered a result (code, explanation, completed task) with no \ +question or choice, output NONE.\n\ +When you do predict, be extremely concise — a few words or a short phrase. \ +Examples: \"yes\", \"do it\", \"use async instead\", \"add tests for that\". \ +Output ONLY the predicted text or NONE, nothing else.\n\n\ +Context:\n{context}" + ) +} + +pub(crate) async fn predict_response_core<F>( + sessions: &Mutex>>, + workspaces: &Mutex>, + workspace_id: String, + context: &str, + model: Option<String>, + on_hide_thread: F, +) -> Result<String, String> +where + F: Fn(&str, &str), +{ + let cleaned_context = context.trim(); + if cleaned_context.is_empty() { + return Err("Context is required.".to_string()); + } + + let prompt = build_predict_response_prompt(cleaned_context); + run_background_prompt_core( + sessions, + workspaces, + workspace_id, + prompt, + model.as_deref(), + Some(30), + on_hide_thread, + "Timeout waiting for response prediction", + "Unknown error during response prediction", + ) + .await +} + pub(crate) async fn generate_agent_description_core<F>( sessions: &Mutex>>, workspaces: &Mutex>, @@ -640,6 +691,7 @@ where workspace_id, prompt, None, + None, on_hide_thread, "Timeout waiting for agent configuration generation", "Unknown error during agent configuration
generation", @@ -652,8 +704,8 @@ where #[cfg(test)] mod tests { use super::{ - build_commit_message_prompt_for_diff, parse_agent_description_value, - parse_run_metadata_value, + build_commit_message_prompt_for_diff, build_predict_response_prompt, + parse_agent_description_value, parse_run_metadata_value, }; #[test] @@ -723,4 +775,17 @@ mod tests { ); assert_eq!(parsed.developer_instructions, ""); } + + #[test] + fn build_predict_response_prompt_includes_context() { + let prompt = build_predict_response_prompt("User: hello\n\nAssistant: hi there"); + assert!(prompt.contains("User: hello")); + assert!(prompt.contains("Assistant: hi there")); + } + + #[test] + fn build_predict_response_prompt_instructs_none_for_no_followup() { + let prompt = build_predict_response_prompt("some context"); + assert!(prompt.contains("NONE")); + } } diff --git a/src-tauri/src/types.rs b/src-tauri/src/types.rs index cbb8afbaa..f65abfcfb 100644 --- a/src-tauri/src/types.rs +++ b/src-tauri/src/types.rs @@ -578,6 +578,11 @@ pub(crate) struct AppSettings { rename = "experimentalAppsEnabled" )] pub(crate) experimental_apps_enabled: bool, + #[serde( + default = "default_prompt_suggestions_enabled", + rename = "promptSuggestionsEnabled" + )] + pub(crate) prompt_suggestions_enabled: bool, #[serde(default = "default_personality", rename = "personality")] pub(crate) personality: String, #[serde(default = "default_dictation_enabled", rename = "dictationEnabled")] @@ -931,6 +936,10 @@ fn default_experimental_apps_enabled() -> bool { false } +fn default_prompt_suggestions_enabled() -> bool { + false +} + fn default_personality() -> String { "friendly".to_string() } @@ -1165,6 +1174,7 @@ impl Default for AppSettings { default_pause_queued_messages_when_response_required(), unified_exec_enabled: true, experimental_apps_enabled: false, + prompt_suggestions_enabled: false, personality: default_personality(), dictation_enabled: false, dictation_model_id: default_dictation_model_id(), @@ -1327,6 +1337,7 @@ 
mod tests { assert!(settings.pause_queued_messages_when_response_required); assert!(settings.unified_exec_enabled); assert!(!settings.experimental_apps_enabled); + assert!(!settings.prompt_suggestions_enabled); assert_eq!(settings.personality, "friendly"); assert!(!settings.dictation_enabled); assert_eq!(settings.dictation_model_id, "base"); diff --git a/src/App.tsx b/src/App.tsx index 750410cd8..bdf0893dd 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -88,6 +88,7 @@ import { useComposerMenuActions } from "@/features/composer/hooks/useComposerMen import { useComposerEditorState } from "@/features/composer/hooks/useComposerEditorState"; import { useComposerController } from "@app/hooks/useComposerController"; import { useComposerInsert } from "@app/hooks/useComposerInsert"; +import { usePredictedResponse } from "@/features/composer/hooks/usePredictedResponse"; import { useRenameThreadPrompt } from "@threads/hooks/useRenameThreadPrompt"; import { useWorktreePrompt } from "@/features/workspaces/hooks/useWorktreePrompt"; import { useClonePrompt } from "@/features/workspaces/hooks/useClonePrompt"; @@ -1514,6 +1515,16 @@ function MainApp() { onDraftChange: showWorkspaceHome ? setWorkspacePrompt : handleDraftChange, textareaRef: showWorkspaceHome ? 
workspaceHomeTextareaRef : composerInputRef, }); + const { ghostText, acceptPrediction } = usePredictedResponse({ + workspaceId: activeWorkspaceId, + threadId: activeThreadId, + composerText: activeDraft, + disabled: isReviewing || !appSettings.promptSuggestionsEnabled, + isProcessing, + items: activeItems, + models, + }); + const RECENT_THREAD_LIMIT = 8; const { recentThreadInstances, recentThreadsUpdatedAt } = useMemo(() => { if (!activeWorkspaceId) { @@ -2444,6 +2455,8 @@ function MainApp() { dictationHint, onDismissDictationHint: clearDictationHint, composerContextActions, + ghostText, + onAcceptGhostText: acceptPrediction, composerSendLabel, showComposer, plan: activePlan, diff --git a/src/features/composer/components/Composer.tsx b/src/features/composer/components/Composer.tsx index 85c483868..400ed356c 100644 --- a/src/features/composer/components/Composer.tsx +++ b/src/features/composer/components/Composer.tsx @@ -139,6 +139,8 @@ type ComposerProps = { onReviewPromptUpdateCustomInstructions?: (value: string) => void; onReviewPromptConfirmCustom?: () => Promise; onFileAutocompleteActiveChange?: (active: boolean) => void; + ghostText?: string | null; + onAcceptGhostText?: () => string | null; contextActions?: { id: string; label: string; @@ -241,6 +243,8 @@ export const Composer = memo(function Composer({ onReviewPromptUpdateCustomInstructions, onReviewPromptConfirmCustom, onFileAutocompleteActiveChange, + ghostText = null, + onAcceptGhostText, contextActions = [], }: ComposerProps) { const [text, setText] = useState(draftText); @@ -684,6 +688,7 @@ export const Composer = memo(function Composer({ ) : null} { + const ta = textareaRef.current; + if (ta) { + ta.selectionStart = accepted.length; + ta.selectionEnd = accepted.length; + ta.focus(); + } + }); + return; + } + } if (reviewPromptOpen && onReviewPromptKeyDown) { const handled = onReviewPromptKeyDown(event); if (handled) { diff --git a/src/features/composer/components/ComposerInput.tsx 
b/src/features/composer/components/ComposerInput.tsx index 6b9f43212..944c2fe40 100644 --- a/src/features/composer/components/ComposerInput.tsx +++ b/src/features/composer/components/ComposerInput.tsx @@ -35,6 +35,7 @@ import { getFileTypeIconUrl } from "../../../utils/fileTypeIcons"; type ComposerInputProps = { text: string; + ghostText?: string | null; disabled: boolean; sendLabel: string; canStop: boolean; @@ -137,6 +138,7 @@ const fileTitle = (path: string) => { export function ComposerInput({ text, + ghostText = null, disabled, sendLabel, canStop, @@ -494,24 +496,41 @@ export function ComposerInput({ )} -