// extensions/btw.ts
  1  /**
  2   * /btw command — run a subagent in the background while continuing to work.
  3   *
  4   * Usage:
  5   *   /btw check if there are any TODO comments in src/
  6   *   /btw --mode rush summarize the README
  7   *   /btw --model anthropic/claude-haiku-4-5 count lines of code
  8   *
  9   * Fires off an in-process subagent (same infra as the subagent tool) and
 10   * shows live progress in a widget above the editor. When finished, the
 11   * widget is replaced by a fully rendered custom message in the chat
 12   * (identical to the subagent tool's result rendering).
 13   */
 14  
 15  import * as fs from "node:fs";
 16  import * as os from "node:os";
 17  import * as path from "node:path";
 18  
 19  import type { AgentTool } from "@mariozechner/pi-agent-core";
 20  
 21  import type { ExtensionAPI, SessionEntry } from "@mariozechner/pi-coding-agent";
 22  import {
 23  	convertToLlm,
 24  	createBashTool,
 25  	createEditTool,
 26  	createReadTool,
 27  	createWriteTool,
 28  	getMarkdownTheme,
 29  	serializeConversation,
 30  } from "@mariozechner/pi-coding-agent";
 31  import { Box, Markdown, Spacer, Text } from "@mariozechner/pi-tui";
 32  
 33  import { resolveModelAndThinking } from "./lib/mode-utils.js";
 34  import {
 35  	type SingleResult,
 36  	formatToolCall,
 37  	formatUsage,
 38  	btwTaskPreview,
 39  	renderProgressPlainLines,
 40  	runSubagent,
 41  } from "./lib/subagent-core.js";
 42  
 43  // ---------------------------------------------------------------------------
 44  // Custom message type
 45  // ---------------------------------------------------------------------------
 46  
/** customType tag for the chat message that carries a finished /btw result. */
const BTW_MESSAGE_TYPE = "btw-result";

/** Payload attached to a btw custom message: the user's task plus the subagent result. */
interface BtwMessageDetails {
	task: string;
	result: SingleResult;
}

/** A model considered during --model resolution, optionally pinned to a thinking level. */
type ScopedModelCandidate = {
	// NOTE(review): `any` — presumably the registry's model record with
	// `provider`/`id`/`name` fields (that's all this file reads); confirm type.
	model: any;
	thinkingLevel?: string;
};

/** Valid `:level` suffixes recognized on enabledModels patterns in settings. */
const THINKING_LEVELS = new Set(["off", "minimal", "low", "medium", "high", "xhigh"]);
 60  
 61  function parseBtwArgs(rawArgs: string): { modeOpt?: string; modelOpt?: string; task: string } {
 62  	let remaining = rawArgs.trim();
 63  	let modeOpt: string | undefined;
 64  	let modelOpt: string | undefined;
 65  
 66  	while (remaining.startsWith("--")) {
 67  		const modeMatch = remaining.match(/^--mode\s+(\S+)(?:\s+|$)/);
 68  		if (modeMatch) {
 69  			modeOpt = modeMatch[1];
 70  			remaining = remaining.slice(modeMatch[0].length).trimStart();
 71  			continue;
 72  		}
 73  
 74  		const modelMatch = remaining.match(/^--model\s+(\S+)(?:\s+|$)/);
 75  		if (modelMatch) {
 76  			modelOpt = modelMatch[1];
 77  			remaining = remaining.slice(modelMatch[0].length).trimStart();
 78  			continue;
 79  		}
 80  
 81  		break;
 82  	}
 83  
 84  	return { modeOpt, modelOpt, task: remaining.trim() };
 85  }
 86  
 87  function normalizeModelText(value: string): string {
 88  	return value.trim().toLowerCase();
 89  }
 90  
 91  function collapseModelText(value: string): string {
 92  	return normalizeModelText(value).replace(/[^a-z0-9]+/g, "");
 93  }
 94  
 95  function expandUserPath(p: string): string {
 96  	if (p === "~") return os.homedir();
 97  	if (p.startsWith("~/")) return path.join(os.homedir(), p.slice(2));
 98  	return p;
 99  }
100  
101  function loadEnabledModelPatterns(cwd: string): string[] | undefined {
102  	const agentDir = process.env.PI_CODING_AGENT_DIR
103  		? expandUserPath(process.env.PI_CODING_AGENT_DIR)
104  		: path.join(os.homedir(), ".pi", "agent");
105  	const globalSettingsPath = path.join(agentDir, "settings.json");
106  	const projectSettingsPath = path.join(cwd, ".pi", "settings.json");
107  
108  	const readEnabledModels = (settingsPath: string): string[] | undefined => {
109  		try {
110  			const raw = fs.readFileSync(settingsPath, "utf8");
111  			const parsed = JSON.parse(raw);
112  			return Array.isArray(parsed?.enabledModels)
113  				? parsed.enabledModels.filter((value: unknown): value is string => typeof value === "string")
114  				: undefined;
115  		} catch {
116  			return undefined;
117  		}
118  	};
119  
120  	const globalModels = readEnabledModels(globalSettingsPath);
121  	const projectModels = readEnabledModels(projectSettingsPath);
122  	return projectModels ?? globalModels;
123  }
124  
125  function splitPatternThinkingLevel(pattern: string): { pattern: string; thinkingLevel?: string } {
126  	const lastColonIndex = pattern.lastIndexOf(":");
127  	if (lastColonIndex === -1) {
128  		return { pattern };
129  	}
130  
131  	const suffix = pattern.slice(lastColonIndex + 1).trim().toLowerCase();
132  	if (!THINKING_LEVELS.has(suffix)) {
133  		return { pattern };
134  	}
135  
136  	return {
137  		pattern: pattern.slice(0, lastColonIndex),
138  		thinkingLevel: suffix,
139  	};
140  }
141  
142  function matchesGlob(pattern: string, value: string): boolean {
143  	const escaped = pattern
144  		.replace(/[.+^${}()|[\]\\]/g, "\\$&")
145  		.replace(/\*/g, ".*")
146  		.replace(/\?/g, ".");
147  	return new RegExp(`^${escaped}$`, "i").test(value);
148  }
149  
150  function scoreModelCandidate(
151  	query: string,
152  	candidate: ScopedModelCandidate,
153  	options?: { preferredProvider?: string },
154  ): number {
155  	const normalizedQuery = normalizeModelText(query);
156  	if (!normalizedQuery) return 0;
157  
158  	const fullId = normalizeModelText(`${candidate.model.provider}/${candidate.model.id}`);
159  	const id = normalizeModelText(candidate.model.id);
160  	const name = normalizeModelText(candidate.model.name ?? "");
161  	const providerBonus = options?.preferredProvider
162  		&& normalizeModelText(candidate.model.provider) === normalizeModelText(options.preferredProvider)
163  		? 750
164  		: 0;
165  
166  	if (fullId === normalizedQuery) return 12_000 + providerBonus;
167  	if (id === normalizedQuery) return 11_500 + providerBonus;
168  	if (fullId.endsWith(`/${normalizedQuery}`)) return 11_000 + providerBonus;
169  	if (id.startsWith(normalizedQuery)) return 10_000 - (id.length - normalizedQuery.length) + providerBonus;
170  	if (fullId.startsWith(normalizedQuery)) return 9_500 - (fullId.length - normalizedQuery.length) + providerBonus;
171  
172  	const idIndex = id.indexOf(normalizedQuery);
173  	if (idIndex !== -1) return 9_000 - idIndex * 10 - (id.length - normalizedQuery.length) + providerBonus;
174  
175  	const fullIdIndex = fullId.indexOf(normalizedQuery);
176  	if (fullIdIndex !== -1) {
177  		return 8_000 - fullIdIndex * 10 - (fullId.length - normalizedQuery.length) + providerBonus;
178  	}
179  
180  	const nameIndex = name.indexOf(normalizedQuery);
181  	if (nameIndex !== -1) return 7_000 - nameIndex * 10 - (name.length - normalizedQuery.length) + providerBonus;
182  
183  	const collapsedQuery = collapseModelText(query);
184  	if (!collapsedQuery) return 0;
185  
186  	const collapsedId = collapseModelText(candidate.model.id);
187  	const collapsedFullId = collapseModelText(`${candidate.model.provider}/${candidate.model.id}`);
188  	const collapsedName = collapseModelText(candidate.model.name ?? "");
189  
190  	if (collapsedId === collapsedQuery) return 6_500 + providerBonus;
191  	if (collapsedFullId === collapsedQuery) return 6_250 + providerBonus;
192  	if (collapsedId.startsWith(collapsedQuery)) return 6_000 - (collapsedId.length - collapsedQuery.length) + providerBonus;
193  	if (collapsedFullId.startsWith(collapsedQuery)) {
194  		return 5_500 - (collapsedFullId.length - collapsedQuery.length) + providerBonus;
195  	}
196  
197  	const collapsedIdIndex = collapsedId.indexOf(collapsedQuery);
198  	if (collapsedIdIndex !== -1) {
199  		return 5_000 - collapsedIdIndex * 10 - (collapsedId.length - collapsedQuery.length) + providerBonus;
200  	}
201  
202  	const collapsedFullIdIndex = collapsedFullId.indexOf(collapsedQuery);
203  	if (collapsedFullIdIndex !== -1) {
204  		return 4_500 - collapsedFullIdIndex * 10 - (collapsedFullId.length - collapsedQuery.length) + providerBonus;
205  	}
206  
207  	const collapsedNameIndex = collapsedName.indexOf(collapsedQuery);
208  	if (collapsedNameIndex !== -1) {
209  		return 4_000 - collapsedNameIndex * 10 - (collapsedName.length - collapsedQuery.length) + providerBonus;
210  	}
211  
212  	return 0;
213  }
214  
215  function resolveScopedCandidatesFromSettings(ctx: any): ScopedModelCandidate[] {
216  	const patterns = loadEnabledModelPatterns(ctx.cwd);
217  	if (!patterns || patterns.length === 0) {
218  		return [];
219  	}
220  
221  	const availableModels = ctx.modelRegistry.getAvailable();
222  	const preferredProvider = ctx.model?.provider;
223  	const resolved: ScopedModelCandidate[] = [];
224  	const seen = new Set<string>();
225  	const addCandidate = (candidate: ScopedModelCandidate) => {
226  		const key = `${candidate.model.provider}/${candidate.model.id}`;
227  		if (seen.has(key)) return;
228  		seen.add(key);
229  		resolved.push(candidate);
230  	};
231  
232  	for (const rawPattern of patterns) {
233  		const { pattern, thinkingLevel } = splitPatternThinkingLevel(rawPattern.trim());
234  		if (!pattern) continue;
235  
236  		if (pattern.includes("*") || pattern.includes("?")) {
237  			for (const model of availableModels) {
238  				const fullId = `${model.provider}/${model.id}`;
239  				if (matchesGlob(pattern, fullId) || matchesGlob(pattern, model.id)) {
240  					addCandidate({ model, thinkingLevel });
241  				}
242  			}
243  			continue;
244  		}
245  
246  		// Exact match: add every model whose id or provider/id matches the pattern.
247  		// Multiple providers may share the same model id; keep all of them in scope
248  		// so the --model resolver can pick the right one with provider bias.
249  		const normalizedPattern = normalizeModelText(pattern);
250  		let anyMatch = false;
251  		for (const model of availableModels) {
252  			const fullId = normalizeModelText(`${model.provider}/${model.id}`);
253  			const id = normalizeModelText(model.id);
254  			if (fullId === normalizedPattern || id === normalizedPattern) {
255  				addCandidate({ model, thinkingLevel });
256  				anyMatch = true;
257  			}
258  		}
259  		if (!anyMatch) {
260  			// Fall back to closest-match if there is no exact hit (handles minor
261  			// typos or id aliases in settings).
262  			const match = resolveClosestModelCandidate(
263  				pattern,
264  				availableModels.map((model: any) => ({ model })),
265  				{ preferredProvider },
266  			);
267  			if (match) addCandidate({ model: match.model, thinkingLevel });
268  		}
269  	}
270  
271  	return resolved;
272  }
273  
274  function getModelCandidates(ctx: any): ScopedModelCandidate[] {
275  	const scopedModels = ctx.scopedModels;
276  	if (Array.isArray(scopedModels) && scopedModels.length > 0) {
277  		return scopedModels as ScopedModelCandidate[];
278  	}
279  
280  	const settingsScopedModels = resolveScopedCandidatesFromSettings(ctx);
281  	if (settingsScopedModels.length > 0) {
282  		return settingsScopedModels;
283  	}
284  
285  	if (ctx.model) {
286  		return ctx.modelRegistry.getAvailable()
287  			.filter((model: any) => model.provider === ctx.model.provider)
288  			.map((model: any) => ({ model }));
289  	}
290  
291  	return [];
292  }
293  
294  function rankModelCandidates(
295  	modelQuery: string,
296  	candidates: ScopedModelCandidate[],
297  	options?: { preferredProvider?: string },
298  ): ScopedModelCandidate[] {
299  	const normalizedQuery = normalizeModelText(modelQuery);
300  	if (!normalizedQuery) return [];
301  
302  	const slashIndex = normalizedQuery.indexOf("/");
303  	const providerQuery = slashIndex > 0 ? normalizedQuery.slice(0, slashIndex) : undefined;
304  	const idQuery = slashIndex > 0 ? normalizedQuery.slice(slashIndex + 1) : normalizedQuery;
305  
306  	const searchSpace = providerQuery
307  		? candidates.filter((candidate) => normalizeModelText(candidate.model.provider) === providerQuery)
308  		: candidates;
309  	if (searchSpace.length === 0) return [];
310  
311  	return searchSpace
312  		.map((candidate) => ({
313  			candidate,
314  			score: Math.max(
315  				scoreModelCandidate(normalizedQuery, candidate, options),
316  				providerQuery ? scoreModelCandidate(idQuery, candidate, options) + 50 : 0,
317  			),
318  		}))
319  		.filter((entry) => entry.score > 0)
320  		.sort((a, b) => (
321  			b.score - a.score
322  			|| a.candidate.model.id.length - b.candidate.model.id.length
323  			|| `${a.candidate.model.provider}/${a.candidate.model.id}`.localeCompare(
324  				`${b.candidate.model.provider}/${b.candidate.model.id}`,
325  			)
326  		))
327  		.map((entry) => entry.candidate);
328  }
329  
330  function resolveClosestModelCandidate(
331  	modelQuery: string,
332  	candidates: ScopedModelCandidate[],
333  	options?: { preferredProvider?: string },
334  ): ScopedModelCandidate | undefined {
335  	return rankModelCandidates(modelQuery, candidates, options)[0];
336  }
337  
338  // ---------------------------------------------------------------------------
339  // Extension
340  // ---------------------------------------------------------------------------
341  
// Monotonic counter so concurrent /btw invocations get distinct widget keys.
let btwCounter = 0;

/**
 * Extension entry point. Registers:
 *  - a `turn_end` hook that removes finished-result widgets once the steered
 *    custom message has rendered,
 *  - a `context` filter that keeps btw result messages out of the LLM context,
 *  - a custom message renderer for finished btw results,
 *  - the `/btw` command itself.
 */
export default function (pi: ExtensionAPI) {
	// Track btw widgets waiting for turn_end to remove themselves.
	// Key: widget key; value: resolver for the promise awaited in the
	// completion handler below.
	const pendingWidgetRemovals = new Map<string, () => void>();

	pi.on("turn_end", () => {
		// Resolve all pending widget removal promises — the steered custom
		// messages render at turn boundary, so widgets can now be removed.
		for (const [, resolve] of pendingWidgetRemovals) resolve();
		pendingWidgetRemovals.clear();
	});

	// --- Filter btw messages out of LLM context (user-facing only) ---
	pi.on("context", (event) => {
		const filtered = event.messages.filter(
			(m: any) => !(m.role === "custom" && m.customType === BTW_MESSAGE_TYPE),
		);
		// Only return a replacement when something was actually removed;
		// returning undefined leaves the context untouched.
		if (filtered.length !== event.messages.length) {
			return { messages: filtered };
		}
	});

	// --- Shared rendering logic for btw results ---
	// Renders a finished result as a themed box: status icon + task header,
	// optional error line, the tool-call feed, the final markdown output,
	// and a usage summary. Used by both the message renderer and the
	// "agent still busy" interim widget.
	function renderBtwResult(r: SingleResult, theme: any): InstanceType<typeof Box> {
		const icon = r.exitCode === 0
			? theme.fg("success", "✓")
			: theme.fg("error", "✗");

		const box = new Box(1, 1, (t: string) => theme.bg("customMessageBg", t));

		// Single merged header: ✓ btw: <task>
		box.addChild(
			new Text(`${icon} ${theme.fg("toolTitle", theme.bold("btw: "))}${theme.fg("dim", r.task)}`, 0, 0),
		);

		if (r.exitCode > 0 && r.errorMessage) {
			box.addChild(new Text(theme.fg("error", `Error: ${r.errorMessage}`), 0, 0));
		}

		// Tool calls
		for (const item of r.displayItems) {
			if (item.type === "toolCall") {
				box.addChild(new Text(
					theme.fg("muted", "→ ") +
						formatToolCall(item.name, item.args, theme.fg.bind(theme)),
					0, 0,
				));
			}
		}

		// Markdown output
		if (r.finalOutput) {
			const mdTheme = getMarkdownTheme();
			box.addChild(new Spacer(1));
			box.addChild(new Markdown(r.finalOutput.trim(), 0, 0, mdTheme));
		}

		// Usage
		const usageStr = formatUsage(r.usage, r.model);
		if (usageStr) box.addChild(new Text(theme.fg("dim", usageStr), 0, 0));

		return box;
	}

	// --- Custom message renderer: always shows full markdown output ---
	pi.registerMessageRenderer<BtwMessageDetails>(BTW_MESSAGE_TYPE, (message, _opts, theme) => {
		const details = message.details;
		if (!details?.result) return undefined;
		return renderBtwResult(details.result, theme);
	});

	// --- /btw command ---
	pi.registerCommand("btw", {
		description: "Run a single-shot subagent in the background (--mode <name>, --model <provider/id|partial>)",
		handler: async (args, ctx) => {
			const { modeOpt, modelOpt, task } = parseBtwArgs(args);
			if (!task) {
				ctx.ui.notify("Usage: /btw [--mode <name>] [--model <provider/id|partial>] <prompt>", "error");
				return;
			}

			// Baseline model + thinking level: the session's current model,
			// possibly overridden by --mode.
			const resolved = await resolveModelAndThinking(
				ctx.cwd,
				ctx.modelRegistry,
				ctx.model,
				pi.getThinkingLevel(),
				{ mode: modeOpt },
			);

			let targetModel = resolved.model;
			let thinkingLevel = resolved.thinkingLevel;

			if (modelOpt) {
				// --model: fuzzy-resolve against the scoped candidate list,
				// biased toward the current session's provider.
				const rankedModels = rankModelCandidates(
					modelOpt,
					getModelCandidates(ctx),
					{ preferredProvider: ctx.model?.provider },
				);

				// Take the best-ranked candidate we can actually authenticate.
				let matchedModel: ScopedModelCandidate | undefined;
				for (const candidate of rankedModels) {
					const auth = await ctx.modelRegistry.getApiKeyAndHeaders(candidate.model);
					if (auth.ok) {
						matchedModel = candidate;
						break;
					}
				}

				if (!matchedModel) {
					ctx.ui.notify(`No matching model with request auth found for "${modelOpt}".`, "error");
					return;
				}

				targetModel = matchedModel.model as any;
				// A thinking level pinned on the settings pattern only applies
				// when --mode didn't already choose one.
				if (!modeOpt && matchedModel.thinkingLevel) {
					thinkingLevel = matchedModel.thinkingLevel;
				}
			}

			if (!targetModel) {
				ctx.ui.notify("No model available.", "error");
				return;
			}

			// Build tools — the subagent gets read/bash/edit/write rooted at
			// the current working directory.
			const tools: AgentTool<any>[] = [
				createReadTool(ctx.cwd),
				createBashTool(ctx.cwd),
				createEditTool(ctx.cwd),
				createWriteTool(ctx.cwd),
			];

			const systemPrompt = ctx.getSystemPrompt();
			// Auth resolver always targets the resolved model, regardless of
			// the provider string the subagent runtime passes in.
			const authResolver = async (_provider: string) => {
				const auth = await ctx.modelRegistry.getApiKeyAndHeaders(targetModel!);
				if (!auth.ok) {
					throw new Error(auth.error);
				}
				return { apiKey: auth.apiKey, headers: auth.headers };
			};

			// Serialize current conversation context for the subagent
			const branch = ctx.sessionManager.getBranch();
			const messages = branch
				.filter((entry): entry is SessionEntry & { type: "message" } => entry.type === "message")
				.map((entry) => entry.message);
			const conversationContext = messages.length > 0
				? serializeConversation(convertToLlm(messages))
				: "";

			// Build enriched task with conversation context
			const taskWithContext = conversationContext
				? `## Conversation Context\n\n${conversationContext}\n\n## Task or question (FOCUS SOLELY ON THIS)\n\n${task}`
				: task;

			// Unique widget key per invocation so multiple /btw's don't clobber each other
			const widgetKey = `btw-${++btwCounter}`;

			// Show initial status widget
			const taskPreview = btwTaskPreview(task);
			ctx.ui.setWidget(widgetKey, [`⏳ btw: ${taskPreview}`], { placement: "aboveEditor" });

			// Fire and forget — run in background, update widget on progress
			runSubagent(
				systemPrompt,
				taskWithContext,
				tools,
				targetModel,
				thinkingLevel,
				authResolver,
				undefined, // no abort signal — runs to completion
				(progressResult) => {
					// Update widget with live tool call feed
					ctx.ui.setWidget(widgetKey, renderProgressPlainLines(task, progressResult), { placement: "aboveEditor" });
				},
			).then(async (result) => {
				// Override result.task with the short user prompt (not the context-enriched one)
				result.task = task;

				// Send fully rendered result as a custom message in the chat.
				// Filtered out of LLM context by the context event handler above.
				// triggerTurn: false is critical — without it, sendMessage mid-stream
				// tries to start a new turn which corrupts conversation state.
				const icon = result.exitCode === 0 ? "✓" : "✗";
				pi.sendMessage({
					customType: BTW_MESSAGE_TYPE,
					content: [{ type: "text", text: `[btw ${icon}] ${task}` }],
					display: true,
					details: { task, result } satisfies BtwMessageDetails,
				}, { triggerTurn: false });

				// If the agent is busy (tool call running), the custom message won't
				// render in chat until the turn ends.  Show the full rendered result
				// as a component widget so it appears immediately; remove once idle.
				if (!ctx.isIdle()) {
					ctx.ui.setWidget(widgetKey, (_tui, theme) => renderBtwResult(result, theme), { placement: "aboveEditor" });
					// Wait for current turn to end — the steered custom message
					// renders at that point, so we can remove the widget.
					await new Promise<void>((resolve) => {
						pendingWidgetRemovals.set(widgetKey, resolve);
					});
				}
				ctx.ui.setWidget(widgetKey, undefined);
			}).catch((err) => {
				// On failure, drop the progress widget and surface the error.
				ctx.ui.setWidget(widgetKey, undefined);
				ctx.ui.notify(`btw failed: ${err instanceof Error ? err.message : String(err)}`, "error");
			});

			// Command returns immediately — subagent runs in background
		},
	});
}