// types.ts
1 /** 2 * Types for Codex CLI notify hook integration. 3 * 4 * Codex CLI fires a `notify` hook after each agent turn, passing a JSON 5 * argument with the turn data. This is configured in ~/.codex/config.toml: 6 * notify = ["node", "/path/to/stop.js"] 7 * 8 * The JSON is passed as the first command-line argument (argv[2]). 9 * 10 * Reference: https://developers.openai.com/codex/hooks 11 */ 12 13 /** 14 * Notify hook payload — passed as a CLI argument JSON string. 15 * Fired after each agent turn completes. 16 */ 17 export interface NotifyPayload { 18 type: 'agent-turn-complete'; 19 'thread-id': string; 20 'turn-id': string; 21 cwd: string; 22 client: string; 23 'input-messages': string[]; 24 'last-assistant-message': string; 25 } 26 27 /** 28 * Types below are for transcript parsing (rollout JSONL files). 29 * Defined in codex-rs/protocol/src/protocol.rs (tagged enum `RolloutItem`). 30 * Stored at ~/.codex/sessions/YYYY/MM/DD/rollout-<timestamp>-<session_id>.jsonl. 31 */ 32 33 /** 34 * A single line in the Codex rollout JSONL transcript. 
35 */ 36 export interface RolloutLine { 37 timestamp: string; 38 type: 'session_meta' | 'response_item' | 'event_msg' | 'turn_context' | 'compacted'; 39 payload: SessionMetaPayload | ResponseItemPayload | EventMsgPayload | Record<string, unknown>; 40 } 41 42 export interface SessionMetaPayload { 43 id: string; 44 timestamp: string; 45 cwd: string; 46 originator: string; 47 cli_version: string; 48 source: string; 49 model_provider?: string; 50 } 51 52 export interface ResponseItemPayload { 53 type: 'message' | 'function_call' | 'function_call_output' | 'reasoning'; 54 role?: 'user' | 'assistant' | 'developer'; 55 content?: ContentBlock[]; 56 name?: string; 57 call_id?: string; 58 arguments?: string; 59 output?: string; 60 } 61 62 export interface ContentBlock { 63 type: 'input_text' | 'output_text'; 64 text: string; 65 } 66 67 export interface EventMsgPayload { 68 type: string; 69 info?: TokenCountInfo; 70 } 71 72 export interface TokenCountInfo { 73 last_token_usage?: TokenUsage; 74 total_token_usage?: TokenUsage; 75 } 76 77 export interface TokenUsage { 78 input_tokens: number; 79 output_tokens: number; 80 total_tokens: number; 81 cached_input_tokens?: number; 82 reasoning_output_tokens?: number; 83 } 84 85 /** 86 * OpenAI chat-format tool call, used on assistant messages. 87 */ 88 export interface ToolCall { 89 id: string; 90 type: 'function'; 91 function: { 92 name: string; 93 arguments: string; 94 }; 95 } 96 97 /** 98 * OpenAI chat-format message used in LLM span inputs. Matches the message 99 * structure the MLflow UI Chat view renders. 100 */ 101 export interface ChatMessage { 102 role: 'user' | 'assistant' | 'system' | 'tool'; 103 content: string | null; 104 tool_calls?: ToolCall[]; 105 tool_call_id?: string; 106 }