diff --git a/src-tauri/src/commands/codex_cli.rs b/src-tauri/src/commands/codex_cli.rs
new file mode 100644
index 00000000..d01dc48b
--- /dev/null
+++ b/src-tauri/src/commands/codex_cli.rs
@@ -0,0 +1,234 @@
+//! Codex CLI subprocess transport.
+//!
+//! This mirrors the Claude Code CLI transport, but treats `codex` as a
+//! local completion engine via `codex exec --json`. The webview can only
+//! spawn this fixed command; it cannot execute arbitrary shell commands.
+
+use std::collections::HashMap;
+use std::path::PathBuf;
+use std::process::Stdio;
+use std::sync::Arc;
+use std::time::Duration;
+
+use serde::Serialize;
+use tauri::{AppHandle, Emitter, State};
+use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
+use tokio::process::{Child, Command};
+use tokio::sync::Mutex;
+
+#[derive(Default)]
+pub struct CodexCliState {
+    children: Arc<Mutex<HashMap<String, Child>>>,
+}
+
+#[derive(Serialize)]
+pub struct DetectResult {
+    installed: bool,
+    version: Option<String>,
+    path: Option<String>,
+    error: Option<String>,
+}
+
+fn find_codex_command() -> Result<PathBuf, String> {
+    #[cfg(windows)]
+    {
+        if let Ok(path) = which::which("codex.cmd") {
+            return Ok(path);
+        }
+        if let Ok(path) = which::which("codex.exe") {
+            return Ok(path);
+        }
+    }
+
+    which::which("codex").map_err(|_| "`codex` not found on PATH".to_string())
+}
+
+#[tauri::command]
+pub async fn codex_cli_detect() -> Result<DetectResult, String> {
+    let path = match find_codex_command() {
+        Ok(p) => p,
+        Err(error) => {
+            return Ok(DetectResult {
+                installed: false,
+                version: None,
+                path: None,
+                error: Some(error),
+            });
+        }
+    };
+
+    let path_str = path.to_string_lossy().to_string();
+    let output = tokio::time::timeout(
+        Duration::from_secs(3),
+        Command::new(&path).arg("--version").output(),
+    )
+    .await;
+
+    match output {
+        Ok(Ok(out)) if out.status.success() => {
+            let stdout = String::from_utf8_lossy(&out.stdout).trim().to_string();
+            Ok(DetectResult {
+                installed: true,
+                version: Some(stdout),
+                path: Some(path_str),
+                error: None,
+            })
+        }
+        Ok(Ok(out)) => {
+            let stderr = String::from_utf8_lossy(&out.stderr).trim().to_string();
+            Ok(DetectResult {
+                installed: false,
+                version: None,
+                path: Some(path_str),
+                error: Some(if stderr.is_empty() {
+                    format!("`codex --version` exited with {}", out.status)
+                } else {
+                    stderr
+                }),
+            })
+        }
+        Ok(Err(e)) => Ok(DetectResult {
+            installed: false,
+            version: None,
+            path: Some(path_str),
+            error: Some(format!("Failed to spawn `codex`: {e}")),
+        }),
+        Err(_) => Ok(DetectResult {
+            installed: false,
+            version: None,
+            path: Some(path_str),
+            error: Some("`codex --version` timed out after 3s".to_string()),
+        }),
+    }
+}
+
+#[tauri::command]
+pub async fn codex_cli_spawn(
+    app: AppHandle,
+    state: State<'_, CodexCliState>,
+    stream_id: String,
+    model: String,
+    prompt: String,
+) -> Result<(), String> {
+    if prompt.trim().is_empty() {
+        return Err("No prompt to send to codex CLI".to_string());
+    }
+
+    let codex = find_codex_command()?;
+    let mut cmd = Command::new(&codex);
+    cmd.arg("-a")
+        .arg("never")
+        .arg("exec")
+        .arg("--json")
+        .arg("--skip-git-repo-check")
+        .arg("--sandbox")
+        .arg("read-only")
+        .arg("--ephemeral")
+        .arg("--model")
+        .arg(&model)
+        .arg("-");
+
+    cmd.stdin(Stdio::piped())
+        .stdout(Stdio::piped())
+        .stderr(Stdio::piped())
+        .kill_on_drop(true);
+
+    let mut child = cmd
+        .spawn()
+        .map_err(|e| format!("Failed to spawn codex: {e}"))?;
+
+    let mut stdin = child
+        .stdin
+        .take()
+        .ok_or_else(|| "Missing stdin handle".to_string())?;
+    let stdout = child
+        .stdout
+        .take()
+        .ok_or_else(|| "Missing stdout handle".to_string())?;
+    let stderr = child
+        .stderr
+        .take()
+        .ok_or_else(|| "Missing stderr handle".to_string())?;
+
+    stdin
+        .write_all(prompt.as_bytes())
+        .await
+        .map_err(|e| format!("Failed to write to codex stdin: {e}"))?;
+    stdin
+        .flush()
+        .await
+        .map_err(|e| format!("Failed to flush codex stdin: {e}"))?;
+    drop(stdin);
+
+    state.children.lock().await.insert(stream_id.clone(), child);
+
+    let children = Arc::clone(&state.children);
+    let app_for_task = app.clone();
+    let stream_id_task = stream_id.clone();
+    let topic = format!("codex-cli:{stream_id}");
+    let done_topic = format!("codex-cli:{stream_id}:done");
+
+    tokio::spawn(async move {
+        let mut reader = BufReader::new(stdout).lines();
+        let mut stderr_reader = BufReader::new(stderr).lines();
+        let app = app_for_task;
+
+        let stderr_task = tokio::spawn(async move {
+            let mut collected = String::new();
+            while let Ok(Some(line)) = stderr_reader.next_line().await {
+                eprintln!("[codex-cli stderr] {line}");
+                collected.push_str(&line);
+                collected.push('\n');
+            }
+            collected
+        });
+
+        loop {
+            match reader.next_line().await {
+                Ok(Some(line)) => {
+                    if app.emit(&topic, line).is_err() {
+                        break;
+                    }
+                }
+                Ok(None) => break,
+                Err(e) => {
+                    eprintln!("[codex-cli stdout] read error: {e}");
+                    break;
+                }
+            }
+        }
+
+        let child_opt = children.lock().await.remove(&stream_id_task);
+        let exit_code = if let Some(mut child) = child_opt {
+            match child.wait().await {
+                Ok(status) => status.code(),
+                Err(_) => None,
+            }
+        } else {
+            None
+        };
+
+        let stderr_text = stderr_task.await.unwrap_or_default();
+
+        let _ = app.emit(
+            &done_topic,
+            serde_json::json!({
+                "code": exit_code,
+                "stderr": stderr_text,
+            }),
+        );
+    });
+
+    Ok(())
+}
+
+#[tauri::command]
+pub async fn codex_cli_kill(
+    state: State<'_, CodexCliState>,
+    stream_id: String,
+) -> Result<(), String> {
+    if let Some(mut child) = state.children.lock().await.remove(&stream_id) {
+        let _ = child.start_kill();
+    }
+    Ok(())
+}
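For orientation, the event contract this file establishes can be exercised from the webview as below. This is a minimal sketch only: `runCodexOnce` is a hypothetical helper, and the real consumer is `src/lib/codex-cli-transport.ts` later in this diff.

```ts
import { invoke } from "@tauri-apps/api/core"
import { listen } from "@tauri-apps/api/event"

// Hypothetical helper: run one prompt and log the raw JSONL stream.
async function runCodexOnce(model: string, prompt: string): Promise<void> {
  const streamId = crypto.randomUUID()

  // One event per stdout line of `codex exec --json`.
  const unlistenData = await listen<string>(`codex-cli:${streamId}`, (e) => {
    console.log("[jsonl]", e.payload)
  })

  // Fired once, after the child exits, with the exit code and collected stderr.
  const unlistenDone = await listen<{ code: number | null; stderr: string }>(
    `codex-cli:${streamId}:done`,
    (e) => {
      console.log("[exit]", e.payload.code, e.payload.stderr)
      unlistenData()
      unlistenDone()
    },
  )

  await invoke("codex_cli_spawn", { streamId, model, prompt })
}
```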
diff --git a/src-tauri/src/commands/mod.rs b/src-tauri/src/commands/mod.rs
index 4b1004ed..d0a5362b 100644
--- a/src-tauri/src/commands/mod.rs
+++ b/src-tauri/src/commands/mod.rs
@@ -1,4 +1,5 @@
 pub mod claude_cli;
+pub mod codex_cli;
 pub mod extract_images;
 pub mod file_sync;
 pub mod fs;
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 7d849c8b..cbfc3f7f 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -73,6 +73,7 @@ pub fn run() {
             // frontend-generated stream id. Populated by claude_cli_spawn,
             // drained on process exit or by claude_cli_kill.
             app.manage(commands::claude_cli::ClaudeCliState::default());
+            app.manage(commands::codex_cli::CodexCliState::default());
             app.manage(commands::file_sync::FileSyncState::default());
             Ok(())
         })
@@ -105,6 +106,9 @@ pub fn run() {
             commands::claude_cli::claude_cli_detect,
             commands::claude_cli::claude_cli_spawn,
             commands::claude_cli::claude_cli_kill,
+            commands::codex_cli::codex_cli_detect,
+            commands::codex_cli::codex_cli_spawn,
+            commands::codex_cli::codex_cli_kill,
             commands::extract_images::extract_pdf_images_cmd,
             commands::extract_images::extract_office_images_cmd,
             commands::extract_images::extract_and_save_pdf_images_cmd,
diff --git a/src/components/settings/llm-presets.ts b/src/components/settings/llm-presets.ts
index 6a441317..7e363684 100644
--- a/src/components/settings/llm-presets.ts
+++ b/src/components/settings/llm-presets.ts
@@ -16,6 +17,7 @@ export type Provider =
   | "custom"
   | "minimax"
   | "claude-code"
+  | "codex-cli"
 
 export interface LlmPreset {
   /** Stable id used as the dropdown value. */
@@ -91,6 +92,21 @@ export const LLM_PRESETS: LlmPreset[] = [
     ],
     suggestedContextSize: 200000,
   },
+  {
+    id: "codex-cli",
+    label: "Codex CLI (local)",
+    hint: "Uses the local `codex` binary — no API key needed",
+    provider: "codex-cli",
+    defaultModel: "gpt-5.4-mini",
+    suggestedModels: [
+      "gpt-5.4-mini",
+      "gpt-5.4",
+      "gpt-5.3-codex",
+      "gpt-5.3-codex-spark",
+      "gpt-5.2",
+    ],
+    suggestedContextSize: 200000,
+  },
   {
     id: "openai",
     label: "OpenAI (GPT)",
diff --git a/src/components/settings/preset-resolver.ts b/src/components/settings/preset-resolver.ts
index 77188535..ae6b81f2 100644
--- a/src/components/settings/preset-resolver.ts
+++ b/src/components/settings/preset-resolver.ts
@@ -44,11 +44,11 @@ export function resolveConfig(
     }
   }
 
-  if (preset.provider === "claude-code") {
+  if (preset.provider === "claude-code" || preset.provider === "codex-cli") {
     // Subprocess transport — no apiKey, no endpoint URL. Model id is
-    // passed straight to `claude --model`.
+    // passed straight to the local CLI's model flag.
     return {
-      provider: "claude-code",
+      provider: preset.provider,
       apiKey: "",
       model,
       ollamaUrl: fallback.ollamaUrl,
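For the codex-cli preset, the branch above resolves to a config with the subprocess provider and an empty key. Schematically (only the fields touched by this hunk are shown; `resolveConfig`'s full parameter list is elided from the diff):

```ts
// Schematic result for the codex-cli preset with no user overrides.
const resolved = {
  provider: "codex-cli" as const,
  apiKey: "",            // subprocess transport: never a key at this layer
  model: "gpt-5.4-mini", // preset default unless the user overrode it
  // ollamaUrl (and the remaining fields) fall through from `fallback`
}
```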
diff --git a/src/components/settings/sections/llm-provider-section.tsx b/src/components/settings/sections/llm-provider-section.tsx
index d3903870..005a163d 100644
--- a/src/components/settings/sections/llm-provider-section.tsx
+++ b/src/components/settings/sections/llm-provider-section.tsx
@@ -120,10 +120,13 @@ function PresetRow({
   const context = ov.maxContextSize ?? preset.suggestedContextSize ?? 131072
   const reasoning = ov.reasoning ?? { mode: "auto" as const }
   const hasConfig = !!apiKey || !!ov.baseUrl || !!ov.model
-  // Claude Code CLI authenticates via the user's existing ~/.claude OAuth
-  // (inherited from the spawned subprocess), so no API key field is
-  // shown. Ollama ditto for its local-only model.
-  const needsApiKey = preset.provider !== "ollama" && preset.provider !== "claude-code"
+  // Local CLI providers authenticate via their own existing login state
+  // (inherited by the spawned subprocess), so no API key field is shown.
+  // Ollama ditto for its local-only model.
+  const needsApiKey =
+    preset.provider !== "ollama" &&
+    preset.provider !== "claude-code" &&
+    preset.provider !== "codex-cli"
 
   return (
     <div>
       {preset.provider === "claude-code" && <ClaudeCliStatusPill />}
+      {preset.provider === "codex-cli" && <CodexCliStatusPill />}
       {needsApiKey && (
@@ -599,3 +603,93 @@ function ClaudeCliStatusPill() {
     </div>
   )
 }
+
+function CodexCliStatusPill() {
+  const [state, setState] = useState<"loading" | "ok" | "err">("loading")
+  const [result, setResult] = useState<DetectResult | null>(null)
+
+  async function detect() {
+    setState("loading")
+    try {
+      const r = await invoke<DetectResult>("codex_cli_detect")
+      setResult(r)
+      setState(r.installed ? "ok" : "err")
+    } catch (e) {
+      setResult({
+        installed: false,
+        version: null,
+        path: null,
+        error: e instanceof Error ? e.message : String(e),
+      })
+      setState("err")
+    }
+  }
+
+  useEffect(() => {
+    void detect()
+  }, [])
+
+  return (
+    <div>
+      <div>
+        <button type="button" onClick={() => void detect()}>
+          Re-check
+        </button>
+        {state === "loading" && <Loader2 className="animate-spin" />}
+        {state === "ok" && <CheckCircle2 />}
+        {state === "err" && <AlertCircle />}
+      </div>
+      {state === "loading" && <div>Detecting local codex binary…</div>}
+      {state === "ok" && (
+        <>
+          <div>
+            Detected{result?.version ? ` ${result.version}` : ""}. Ready to use your local
+            Codex login — no API key needed.
+          </div>
+          {result?.path && (
+            <div>
+              <code>{result.path}</code>
+            </div>
+          )}
+          <div>
+            If chat fails with an authentication error, run{" "}
+            <code>codex</code>{" "}
+            in a terminal to refresh the login.
+          </div>
+        </>
+      )}
+      {state === "err" && (
+        <>
+          <div>{result?.error ?? "codex CLI not available."}</div>
+          <div>
+            Install with{" "}
+            <code>npm install -g @openai/codex</code>{" "}
+            then re-check.
+          </div>
+        </>
+      )}
+    </div>
+  )
+}
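For reference, the pill's two terminal states correspond to `DetectResult` payloads shaped like the ones below. The field set mirrors the Rust struct; the concrete version and path values are illustrative.

```ts
// Illustrative payloads from codex_cli_detect (values made up):
const detectedOk = {
  installed: true,
  version: "codex-cli 0.45.0", // trimmed stdout of `codex --version`
  path: "/usr/local/bin/codex",
  error: null,
}

const detectedMissing = {
  installed: false,
  version: null,
  path: null,
  error: "`codex` not found on PATH",
}
```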
+ ) +} diff --git a/src/components/settings/settings-types.ts b/src/components/settings/settings-types.ts index 30030386..1f2fb4cd 100644 --- a/src/components/settings/settings-types.ts +++ b/src/components/settings/settings-types.ts @@ -9,7 +9,7 @@ import type { ReasoningConfig } from "@/stores/wiki-store" */ export interface SettingsDraft { // LLM provider - provider: "openai" | "anthropic" | "google" | "ollama" | "custom" | "minimax" | "claude-code" + provider: "openai" | "anthropic" | "google" | "ollama" | "custom" | "minimax" | "claude-code" | "codex-cli" apiKey: string model: string ollamaUrl: string @@ -31,7 +31,7 @@ export interface SettingsDraft { // Multimodal (image captioning at ingest time) multimodalEnabled: boolean multimodalUseMainLlm: boolean - multimodalProvider: "openai" | "anthropic" | "google" | "ollama" | "custom" | "minimax" | "claude-code" + multimodalProvider: "openai" | "anthropic" | "google" | "ollama" | "custom" | "minimax" | "claude-code" | "codex-cli" multimodalApiKey: string multimodalModel: string multimodalOllamaUrl: string diff --git a/src/lib/__tests__/llm-providers.test.ts b/src/lib/__tests__/llm-providers.test.ts index f8c28bc1..76c13161 100644 --- a/src/lib/__tests__/llm-providers.test.ts +++ b/src/lib/__tests__/llm-providers.test.ts @@ -212,6 +212,21 @@ describe("Claude Code CLI provider — not reachable via getProviderConfig", () }) }) +describe("Codex CLI provider — not reachable via getProviderConfig", () => { + it("throws, because the subprocess transport dispatches one layer up in streamChat", () => { + expect(() => + getProviderConfig({ + provider: "codex-cli", + apiKey: "", + model: "gpt-5.4-mini", + ollamaUrl: "", + customEndpoint: "", + maxContextSize: 200000, + } as RealLlmConfig), + ).toThrow(/subprocess transport/) + }) +}) + describe("Google provider URL — model path encoding", () => { const makeGoogleConfig = (model: string): RealLlmConfig => ({ provider: "google", diff --git a/src/lib/codex-cli-transport.test.ts b/src/lib/codex-cli-transport.test.ts new file mode 100644 index 00000000..902de68e --- /dev/null +++ b/src/lib/codex-cli-transport.test.ts @@ -0,0 +1,20 @@ +import { describe, expect, it } from "vitest" +import { parseCodexCliLine } from "./codex-cli-transport" + +describe("parseCodexCliLine", () => { + it("extracts completed agent messages from Codex JSONL", () => { + expect( + parseCodexCliLine( + JSON.stringify({ + type: "item.completed", + item: { type: "agent_message", text: "pong" }, + }), + ), + ).toBe("pong") + }) + + it("ignores lifecycle events and malformed lines", () => { + expect(parseCodexCliLine('{"type":"turn.started"}')).toBeNull() + expect(parseCodexCliLine("not json")).toBeNull() + }) +}) diff --git a/src/lib/codex-cli-transport.ts b/src/lib/codex-cli-transport.ts new file mode 100644 index 00000000..e9add47e --- /dev/null +++ b/src/lib/codex-cli-transport.ts @@ -0,0 +1,161 @@ +/** + * Codex CLI subprocess transport. + * + * Rust-side counterpart: src-tauri/src/commands/codex_cli.rs. The Rust + * command spawns `codex exec --json`, sends a single reconstructed prompt + * over stdin, and emits each JSONL stdout line back as `codex-cli:{streamId}`. 
+ */ + +import { invoke } from "@tauri-apps/api/core" +import { listen, type UnlistenFn } from "@tauri-apps/api/event" +import type { LlmConfig } from "@/stores/wiki-store" +import type { ChatMessage, ContentBlock, RequestOverrides } from "./llm-providers" +import type { StreamCallbacks } from "./llm-client" + +export function parseCodexCliLine(rawLine: string): string | null { + const line = rawLine.trim() + if (!line) return null + + let evt: unknown + try { + evt = JSON.parse(line) + } catch { + return null + } + + if (!evt || typeof evt !== "object") return null + const obj = evt as Record + if (obj.type !== "item.completed") return null + + const item = obj.item as Record | undefined + if (item?.type !== "agent_message") return null + return typeof item.text === "string" && item.text.length > 0 ? item.text : null +} + +function contentToText(content: string | ContentBlock[]): string { + if (typeof content === "string") return content + return content + .map((block) => { + if (block.type === "text") return block.text + return `[Image omitted: ${block.mediaType}]` + }) + .join("\n") +} + +function buildPrompt(messages: ChatMessage[]): string { + return messages + .map((message) => { + const role = message.role.toUpperCase() + return `<${role}>\n${contentToText(message.content)}\n` + }) + .join("\n\n") +} + +type SpawnPayload = Record & { + streamId: string + model: string + prompt: string +} + +export async function streamCodexCli( + config: LlmConfig, + messages: ChatMessage[], + callbacks: StreamCallbacks, + signal?: AbortSignal, + overrides?: RequestOverrides, +): Promise { + const { onToken, onDone, onError } = callbacks + + if (import.meta.env?.DEV && overrides) { + for (const key of ["temperature", "top_p", "top_k", "max_tokens", "stop"] as const) { + if (overrides[key] !== undefined) { + // eslint-disable-next-line no-console + console.warn(`[codex-cli] ignoring unsupported override "${key}": CLI has no equivalent flag`) + } + } + } + + const streamId = crypto.randomUUID() + let unlistenData: UnlistenFn | undefined + let unlistenDone: UnlistenFn | undefined + let finished = false + + const unparsedLines: string[] = [] + let unparsedSize = 0 + function captureUnparsed(line: string) { + if (unparsedSize >= 4096) return + const trimmed = line.trim() + if (!trimmed) return + unparsedLines.push(line) + unparsedSize += line.length + 1 + } + + const cleanup = () => { + unlistenData?.() + unlistenDone?.() + } + + const finishWith = (cb: () => void) => { + if (finished) return + finished = true + cleanup() + cb() + } + + const abortListener = () => { + void invoke("codex_cli_kill", { streamId }).catch(() => {}) + finishWith(onDone) + } + signal?.addEventListener("abort", abortListener) + + try { + unlistenData = await listen(`codex-cli:${streamId}`, (event) => { + const token = parseCodexCliLine(event.payload) + if (token !== null) { + onToken(token) + } else { + captureUnparsed(event.payload) + } + }) + + unlistenDone = await listen<{ code: number | null; stderr: string }>( + `codex-cli:${streamId}:done`, + (event) => { + const code = event.payload?.code + const stderr = event.payload?.stderr?.trim() ?? "" + if (code !== null && code !== undefined && code !== 0) { + const details = stderr || unparsedLines.join("\n") + finishWith(() => + onError(new Error( + details + ? `Codex CLI exited with code ${code}:\n${details}` + : `Codex CLI exited with code ${code}. 
diff --git a/src/lib/has-usable-llm.test.ts b/src/lib/has-usable-llm.test.ts
index 35ce7534..f7499e49 100644
--- a/src/lib/has-usable-llm.test.ts
+++ b/src/lib/has-usable-llm.test.ts
@@ -37,6 +37,12 @@ describe("hasUsableLlm", () => {
     ).toBe(true)
   })
 
+  it("returns true for codex-cli with no API key", () => {
+    expect(
+      hasUsableLlm({ provider: "codex-cli", apiKey: "" }),
+    ).toBe(true)
+  })
+
   it("returns false for openai with no API key", () => {
     expect(
       hasUsableLlm({ provider: "openai", apiKey: "" }),
@@ -74,6 +80,7 @@ describe("hasUsableLlm", () => {
     expect(PROVIDERS_WITHOUT_KEY.has("ollama")).toBe(true)
     expect(PROVIDERS_WITHOUT_KEY.has("custom")).toBe(true)
     expect(PROVIDERS_WITHOUT_KEY.has("claude-code")).toBe(true)
+    expect(PROVIDERS_WITHOUT_KEY.has("codex-cli")).toBe(true)
   })
 
   it("PROVIDERS_WITHOUT_KEY does not include hosted-API providers", () => {
@@ -96,6 +103,7 @@ describe("hasUsableLlm", () => {
       "custom",
       "minimax",
       "claude-code",
+      "codex-cli",
     ]
     for (const p of allProviders) {
       const inNoKey = PROVIDERS_WITHOUT_KEY.has(p)
diff --git a/src/lib/has-usable-llm.ts b/src/lib/has-usable-llm.ts
index 36f69be7..b631b635 100644
--- a/src/lib/has-usable-llm.ts
+++ b/src/lib/has-usable-llm.ts
@@ -12,6 +12,8 @@ export type LlmProvider = LlmConfig["provider"]
  * - `claude-code` spawns the Claude Code CLI subprocess, which
  *   authenticates via the user's existing ~/.claude OAuth — no
  *   API key is needed (or accepted) at this layer.
+ * - `codex-cli` spawns the Codex CLI subprocess, which authenticates
+ *   via the user's existing Codex/ChatGPT login.
  *
  * Hosted providers (openai, anthropic, google, minimax) require a
  * key from the user.
@@ -20,6 +22,7 @@ export const PROVIDERS_WITHOUT_KEY: ReadonlySet<LlmProvider> = new Set