When Subplanning Triggers
Complex tasks are routed through the subplanner instead of going directly to workers:

/**
 * Decide whether a task should be decomposed further or dispatched as-is.
 * Recursion is capped first: at or beyond maxDepth everything is atomic.
 * A task also stays atomic when it touches fewer files than scopeThreshold.
 */
export function shouldDecompose(
  task: Task,
  config: SubplannerConfig,
  currentDepth: number,
): boolean {
  return currentDepth < config.maxDepth && task.scope.length >= config.scopeThreshold;
}
/** Default subplanner tuning; see SubplannerConfig for field semantics. */
export const DEFAULT_SUBPLANNER_CONFIG: SubplannerConfig = {
  maxDepth: 3, // recursion levels before everything is treated as atomic
  scopeThreshold: 4, // tasks touching fewer files than this stay atomic
  maxSubtasks: 10, // per-batch cap on emitted subtasks
};
With the defaults, decomposition triggers only when both conditions hold:
- Task scope ≥ 4 files
- Current decomposition depth < 3
Implementation
Location: `packages/orchestrator/src/subplanner.ts`
Class Structure
// Owns decomposition of one complex task: runs an iterative planning loop,
// fans subtasks out to workers (or recursively to nested decomposition), and
// aggregates their handoffs into a single parent handoff.
export class Subplanner {
// Orchestrator-wide settings (LLM config, git branch prefix, maxWorkers).
private config: OrchestratorConfig;
// Subplanner-specific tuning: maxDepth, scopeThreshold, maxSubtasks.
private subplannerConfig: SubplannerConfig;
private taskQueue: TaskQueue;
private workerPool: WorkerPool;
private mergeQueue: MergeQueue;
private monitor: Monitor;
// System prompt text for the planner LLM session (prompts/subplanner.md).
private systemPrompt: string;
private targetRepoPath: string;
private tracer: Tracer | null = null;
// Bounds how many subtask dispatches may run concurrently.
private dispatchLimiter: ConcurrencyLimiter;
constructor(
config: OrchestratorConfig,
subplannerConfig: SubplannerConfig,
taskQueue: TaskQueue,
workerPool: WorkerPool,
mergeQueue: MergeQueue,
monitor: Monitor,
systemPrompt: string,
) { /* ... */ }
// Entry point: decompose parentTask (depth = current recursion level) and
// return an aggregated Handoff covering the whole delegated slice.
async decomposeAndExecute(
parentTask: Task,
depth: number = 0,
parentSpan?: Span,
): Promise<Handoff> { /* ... */ }
}
Decomposition Loop
The subplanner operates its own iterative planning loop (similar to root planner):

/**
 * Iteratively plan, dispatch, and collect subtasks for parentTask.
 * @param depth current recursion level (0 = called from the root planner)
 * @param parentSpan optional tracing span propagated to dispatched work
 * @returns aggregated Handoff covering every dispatched subtask
 */
async decomposeAndExecute(
  parentTask: Task,
  depth: number = 0,
  parentSpan?: Span,
): Promise<Handoff> {
  const pendingHandoffs: { subtask: Task; handoff: Handoff }[] = [];
  const allHandoffs: Handoff[] = [];
  let handoffsSinceLastPlan: Handoff[] = [];
  const activeTasks = new Set<string>();
  const dispatchedTaskIds = new Set<string>();
  const allSubtasks: Task[] = [];
  let piSession: PiSessionResult | null = null;
  let iteration = 0;
  let planningDone = false;
  // Cumulative token count at the last stats read, so each planning call
  // records only its delta. (Was read below but never declared or advanced.)
  let lastTotalTokens = 0;
  try {
    const initialRepoState = await readRepoState(this.targetRepoPath);
    piSession = await createPlannerPiSession({
      systemPrompt: this.systemPrompt,
      targetRepoPath: this.targetRepoPath,
      llmConfig: this.config.llm,
    });
    while (iteration < MAX_SUBPLANNER_ITERATIONS) {
      try {
        collectCompletedHandoffs(pendingHandoffs, allHandoffs, handoffsSinceLastPlan);
        // Replan only when there is worker capacity AND either this is the
        // first iteration, enough handoffs arrived, or all work drained.
        const hasCapacity = activeTasks.size < this.config.maxWorkers;
        const hasEnoughHandoffs = handoffsSinceLastPlan.length >= MIN_HANDOFFS_FOR_REPLAN;
        const noActiveWork = activeTasks.size === 0 && iteration > 0;
        const needsPlan =
          hasCapacity && (iteration === 0 || hasEnoughHandoffs || noActiveWork) && !planningDone;
        if (needsPlan && piSession) {
          const session = piSession.session;
          const repoState =
            iteration === 0 ? initialRepoState : await readRepoState(this.targetRepoPath);
          const message =
            iteration === 0
              ? this.buildInitialMessage(parentTask, repoState, depth)
              : this.buildFollowUpMessage(repoState, handoffsSinceLastPlan, activeTasks, dispatchedTaskIds);
          await session.prompt(message);
          const stats = session.getSessionStats();
          const tokenDelta = stats.tokens.total - lastTotalTokens;
          lastTotalTokens = stats.tokens.total; // advance baseline for the next delta
          this.monitor.recordTokenUsage(tokenDelta);
          const responseText = session.getLastAssistantText();
          // scratchpad carries the LLM's reasoning; only the tasks are consumed here.
          const { scratchpad, tasks: rawTasks } = parsePlannerResponse(responseText);
          const tasks = this.buildSubtasksFromRaw(rawTasks, parentTask, dispatchedTaskIds);
          handoffsSinceLastPlan = [];
          iteration++;
          if (tasks.length === 0 && activeTasks.size === 0) {
            if (iteration === 1) {
              // Atomic task — send directly to worker
              cleanupPiSession(session, piSession.tempDir);
              piSession = null; // prevent double-cleanup in the finally block
              // FIX: was `span`, which is not in scope here — use parentSpan.
              return await this.executeAsWorkerTask(parentTask, parentSpan);
            } else {
              planningDone = true;
            }
          } else if (tasks.length > 0) {
            allSubtasks.push(...tasks);
            this.dispatchSubtasksBatch(tasks, parentTask, depth, pendingHandoffs, activeTasks, dispatchedTaskIds, parentSpan);
          }
        }
        if (planningDone && activeTasks.size === 0) {
          break;
        }
        await sleep(LOOP_SLEEP_MS);
      } catch (error) {
        // Backoff retry logic
      }
    }
    // Wait for all subtasks to complete
    collectCompletedHandoffs(pendingHandoffs, allHandoffs, handoffsSinceLastPlan);
    while (activeTasks.size > 0) {
      collectCompletedHandoffs(pendingHandoffs, allHandoffs, handoffsSinceLastPlan);
      await sleep(LOOP_SLEEP_MS);
    }
    // Aggregate subtask handoffs into parent handoff
    return aggregateHandoffs(parentTask, allSubtasks, allHandoffs);
  } catch (error) {
    return createFailureHandoff(parentTask, error);
  } finally {
    if (piSession) {
      cleanupPiSession(piSession.session, piSession.tempDir);
    }
  }
}
Subtask Dispatch with Recursive Decomposition
/**
 * Enqueue a batch of subtasks and launch one async dispatch per subtask.
 * Each subtask either recurses into another decomposition level (when still
 * complex) or goes to a worker. Completion is observed by the caller via
 * activeTasks and pendingHandoffs, not by awaiting anything here.
 */
private dispatchSubtasksBatch(
  tasks: Task[],
  parentTask: Task,
  currentDepth: number,
  pendingHandoffs: { subtask: Task; handoff: Handoff }[],
  activeTasks: Set<string>,
  dispatchedTaskIds: Set<string>,
  parentSpan?: Span,
): void {
  for (const subtask of tasks) {
    this.taskQueue.enqueue(subtask);
    dispatchedTaskIds.add(subtask.id);
    activeTasks.add(subtask.id);
    // FIX: was `const promise = (async () => ...)()` with `promise` never
    // used or awaited. `void` marks the fire-and-forget as intentional; the
    // inner try/catch/finally guarantees the promise never rejects.
    void (async () => {
      await this.dispatchLimiter.acquire();
      try {
        let handoff: Handoff;
        // Check if subtask still needs decomposition (recursive case)
        if (shouldDecompose(subtask, this.subplannerConfig, currentDepth + 1)) {
          logger.info("Subtask still complex — recursing", {
            subtaskId: subtask.id,
            scopeSize: subtask.scope.length,
            nextDepth: currentDepth + 1,
          });
          this.taskQueue.assignTask(subtask.id, "subplanner");
          this.taskQueue.startTask(subtask.id);
          // Recursive decomposition
          handoff = await this.decomposeAndExecute(subtask, currentDepth + 1, parentSpan);
        } else {
          // Atomic subtask → send to worker
          this.taskQueue.assignTask(subtask.id, `ephemeral-${subtask.id}`);
          this.taskQueue.startTask(subtask.id);
          handoff = await this.workerPool.assignTask(subtask, parentSpan);
          if (handoff.filesChanged.length === 0) {
            this.monitor.recordEmptyDiff(subtask.assignedTo || "unknown", subtask.id);
          }
          this.monitor.recordTokenUsage(handoff.metrics.tokensUsed);
        }
        if (handoff.status === "complete") {
          this.taskQueue.completeTask(subtask.id);
        } else {
          this.taskQueue.failTask(subtask.id);
        }
        pendingHandoffs.push({ subtask, handoff });
      } catch (error) {
        this.taskQueue.failTask(subtask.id);
        pendingHandoffs.push({ subtask, handoff: createFailureHandoff(subtask, error) });
      } finally {
        // Release the concurrency slot and mark inactive even on failure so
        // the parent loop's activeTasks drain can terminate.
        this.dispatchLimiter.release();
        activeTasks.delete(subtask.id);
      }
    })();
  }
}
- Subtasks are checked against `shouldDecompose()` at depth `currentDepth + 1`
- If still complex: recurse (max depth: 3)
- If atomic: dispatch to worker
Handoff Aggregation
Subtask handoffs are aggregated into a single parent handoff:

/**
 * Roll a set of subtask handoffs up into one Handoff for the parent task:
 * derives an overall status, concatenates summaries/diffs, dedupes changed
 * files, tags concerns/suggestions with their task id, and sums metrics
 * (durationMs uses max, since subtasks ran in parallel).
 */
export function aggregateHandoffs(
  parentTask: Task,
  subtasks: Task[],
  handoffs: Handoff[],
): Handoff {
  const totalSubtasks = subtasks.length;
  let completedCount = 0;
  let failedCount = 0;
  for (const h of handoffs) {
    if (h.status === "complete") completedCount++;
    else if (h.status === "failed") failedCount++;
  }

  // All complete → complete; all failed → failed; any complete → partial;
  // otherwise blocked.
  let status: Handoff["status"] = "blocked";
  if (completedCount === totalSubtasks) status = "complete";
  else if (failedCount === totalSubtasks) status = "failed";
  else if (completedCount > 0) status = "partial";

  const summary = [
    `Decomposed "${parentTask.description}" into ${totalSubtasks} subtasks. ` +
      `${completedCount} complete, ${failedCount} failed, ` +
      `${totalSubtasks - completedCount - failedCount} other.`,
    "",
    ...handoffs.map((h) => `[${h.taskId}] (${h.status}): ${h.summary}`),
  ].join("\n");

  // Merge file changes from all subtasks (deduped, insertion order kept).
  const filesChanged = [...new Set(handoffs.flatMap((h) => h.filesChanged))];

  // Aggregate concerns and suggestions, prefixed with the originating task id.
  const allConcerns = handoffs.flatMap((h) => h.concerns.map((c) => `[${h.taskId}] ${c}`));
  const allSuggestions = handoffs.flatMap((h) => h.suggestions.map((s) => `[${h.taskId}] ${s}`));

  // Sum metrics; durationMs is the max because subtasks execute concurrently.
  const metrics = handoffs.reduce(
    (acc, h) => ({
      linesAdded: acc.linesAdded + h.metrics.linesAdded,
      linesRemoved: acc.linesRemoved + h.metrics.linesRemoved,
      filesCreated: acc.filesCreated + h.metrics.filesCreated,
      filesModified: acc.filesModified + h.metrics.filesModified,
      tokensUsed: acc.tokensUsed + h.metrics.tokensUsed,
      toolCallCount: acc.toolCallCount + h.metrics.toolCallCount,
      durationMs: Math.max(acc.durationMs, h.metrics.durationMs),
    }),
    {
      linesAdded: 0,
      linesRemoved: 0,
      filesCreated: 0,
      filesModified: 0,
      tokensUsed: 0,
      toolCallCount: 0,
      durationMs: 0,
    },
  );

  return {
    taskId: parentTask.id,
    status,
    summary,
    diff: handoffs.map((h) => h.diff).filter(Boolean).join("\n"),
    filesChanged,
    concerns: allConcerns,
    suggestions: allSuggestions,
    metrics,
  };
}
Prompt Engineering
Location: `prompts/subplanner.md`
1. Core Identity
You are a subplanner. You fully own a delegated slice of a larger project.
You receive a parent task, break it into independent subtasks, and emit them **iteratively**
— not all at once. You do no coding, but you can **explore the codebase** using read-only
tools before decomposing.
Your purpose: increase throughput by fanning out workers while maintaining full ownership
over your slice.
2. When NOT to Decompose
Return `{ "scratchpad": "Task is atomic — sending directly to worker.", "tasks": [] }` if:
- The parent task has 3 or fewer files in scope with a single clear objective
- All changes are in one file and can't be meaningfully parallelized
- Decomposition would produce subtasks with fewer than 20 lines of meaningful change each
When you return empty tasks on the first iteration, the parent task goes directly to a
worker as-is.
// Excerpt from decomposeAndExecute: the empty-first-batch ("atomic") path.
if (tasks.length === 0 && activeTasks.size === 0) {
if (iteration === 1) {
// First iteration returned no subtasks → atomic task
taskLogger.info("LLM returned no subtasks — task is atomic, dispatching to worker directly");
// Null out the session so the enclosing finally block does not clean it up twice.
cleanupPiSession(session, piSession.tempDir);
piSession = null;
// NOTE(review): `span` is not defined in this excerpt — presumably `parentSpan`; confirm.
const handoff = await this.executeAsWorkerTask(parentTask, span);
return handoff;
}
}
3. Hard Constraints (Scope Containment)
- Maximum 10 subtasks per batch.
- Subtask scopes MUST be subsets of parent scope -- scope expansion breaks the merge
pipeline and causes untraceable conflicts. This is the single most destructive failure
mode in the system.
- No overlapping scopes between subtasks.
- No sequential dependencies between subtasks at the same priority level.
- Every subtask must have `acceptance` criteria and `scope`.
- Every subtask description must be self-contained -- include parent context.
/**
 * Convert raw LLM-emitted task specs into validated, dispatchable Tasks.
 * Enforces scope containment (subtask scope ⊆ parent scope), skips subtasks
 * left with no valid files, and skips ids that were already dispatched.
 */
private buildSubtasksFromRaw(
  rawTasks: RawTaskInput[],
  parentTask: Task,
  dispatchedTaskIds: Set<string>,
): Task[] {
  const subtasks: Task[] = [];
  // Ordinal for generated ids. FIX: was referenced below but never declared.
  let subCounter = 0;
  for (const raw of rawTasks) {
    subCounter++;
    // Skip ids already emitted in an earlier batch so the same subtask is
    // never enqueued twice (this is what dispatchedTaskIds is passed in for).
    if (raw.id && dispatchedTaskIds.has(raw.id)) {
      logger.warn("Subtask id already dispatched — skipping duplicate", {
        parentTaskId: parentTask.id,
        subtaskId: raw.id,
      });
      continue;
    }
    let validScope = raw.scope || [];
    // Scope containment: files outside parent scope break the merge pipeline,
    // so strip them (with a warning) instead of dispatching them.
    const invalidFiles = validScope.filter((f) => !parentTask.scope.includes(f));
    if (invalidFiles.length > 0) {
      logger.warn("Subtask scope contains files outside parent scope — removing them", {
        parentTaskId: parentTask.id,
        subtaskId: raw.id,
        invalidFiles,
      });
      validScope = validScope.filter((f) => parentTask.scope.includes(f));
      if (validScope.length === 0) {
        logger.warn("Subtask has no valid scope files after filtering — skipping", {
          subtaskId: raw.id,
        });
        continue;
      }
    }
    // Stable id: prefer the LLM-provided id, else derive from the parent id
    // plus the batch ordinal. FIX: `id` was used in the branch template but
    // never defined.
    const id = raw.id || `${parentTask.id}-sub-${subCounter}`;
    subtasks.push({
      id,
      parentId: parentTask.id,
      description: raw.description,
      scope: validScope,
      acceptance: raw.acceptance || "",
      branch: raw.branch || `${this.config.git.branchPrefix}${id}-${slugifyForBranch(raw.description)}`,
      status: "pending",
      createdAt: Date.now(),
      priority: raw.priority || parentTask.priority,
    });
  }
  return subtasks;
}
4. Subtask Design Principles
**Scope containment** — Subtask scopes must be subsets of the parent scope. No files outside
the parent's scope, ever.
**No overlapping scopes** — Two subtasks must not touch the same file. This causes merge
conflicts and is the #1 system-breaking failure.
**Independence** — All subtasks at the same priority level must be fully parallel. No subtask
should need another's output to begin.
**Completeness** — The union of all subtask scopes should cover the parent scope. Don't leave
files unaddressed — that work gets dropped.
**Self-contained descriptions** — Workers know nothing about the parent task, the project
architecture, or your existence. Each description must include the "why," the relevant
patterns, and full context.
5. Iterative Decomposition Example
From the prompt:

{
"scratchpad": "First decomposition. Parent scope: 4 files.\n\nPARENT GOAL: Chunk generation + meshing for voxel engine.\n\nSCOPE COVERAGE:\n- [THIS BATCH] chunk.ts + constants.ts — foundational types, no dependencies\n- [THIS BATCH] noise.ts — terrain generation, depends only on constants\n- [DEFERRED] mesher.ts — needs to consume Chunk's actual data structure. I'll specify this after I see what chunk.ts worker produces.\n\nDEFERRAL REASON: The mesher must iterate over the chunk's 3D voxel data. I don't want to guess the storage format — I'll see the actual Chunk class from the handoff and write a precise mesher spec.",
"tasks": [
{
"id": "task-005-sub-1",
"description": "Define chunk data structures and constants for the voxel engine. Create the Chunk class in chunk.ts with a 3D array of block IDs, chunk coordinates, and a dirty flag. Define world constants in constants.ts: CHUNK_SIZE=16, WORLD_HEIGHT=256, and a block type enum (AIR=0, STONE=1, DIRT=2, GRASS=3)...",
"scope": ["src/world/chunk.ts", "src/world/constants.ts"],
"acceptance": "Chunk class is instantiable with (x, z) coordinates. get/setBlock(localX, localY, localZ) work correctly and throw RangeError for out-of-bounds...",
"branch": "worker/task-005-sub-1-chunk-data-structures-constants",
"priority": 1
}
]
}
- Only emits foundations first (chunk.ts, constants.ts, noise.ts)
- Defers mesher.ts because it depends on seeing the actual Chunk data structure
- Explicitly states why deferral is necessary
Message Building
Initial Message
/**
 * Compose the first prompt of a planning session: full parent-task metadata,
 * a repository snapshot, and the JSON response contract.
 */
private buildInitialMessage(parentTask: Task, repoState: RepoState, depth: number): string {
  const taskSection =
    `## Parent Task\n` +
    `- **ID**: ${parentTask.id}\n` +
    `- **Description**: ${parentTask.description}\n` +
    `- **Scope**: ${parentTask.scope.join(", ")}\n` +
    `- **Acceptance**: ${parentTask.acceptance}\n` +
    `- **Priority**: ${parentTask.priority}\n` +
    `- **Decomposition Depth**: ${depth}\n\n`;
  const repoSection =
    `## Repository File Tree\n${repoState.fileTree.join("\n")}\n\n` +
    `## Recent Commits\n${repoState.recentCommits.join("\n")}\n\n`;
  // FEATURES.json is included only when the repo snapshot captured one.
  const featuresSection = repoState.featuresJson
    ? `## FEATURES.json\n${repoState.featuresJson}\n\n`
    : "";
  const contract = `This is the initial planning call. Respond with a JSON object: { "scratchpad": "your analysis and plan", "tasks": [...] }. If the task is atomic (no decomposition needed), return an empty tasks array.\n`;
  return taskSection + repoSection + featuresSection + contract;
}
Follow-up Message
/**
 * Compose a follow-up planning prompt: refreshed repo state, handoffs that
 * arrived since the last plan, and the subtasks still in flight.
 */
private buildFollowUpMessage(
  repoState: RepoState,
  newHandoffs: Handoff[],
  activeTasks: Set<string>,
  dispatchedTaskIds: Set<string>,
): string {
  const parts: string[] = [
    `## Updated Repository State\n`,
    `File tree:\n${repoState.fileTree.join("\n")}\n\n`,
    `Recent commits:\n${repoState.recentCommits.join("\n")}\n\n`,
  ];
  if (newHandoffs.length > 0) {
    parts.push(`## New Subtask Handoffs (${newHandoffs.length} since last plan)\n`);
    for (const h of newHandoffs) {
      // Summaries and file lists are truncated to keep the prompt bounded.
      parts.push(`### Task ${h.taskId} — ${h.status}\n`);
      parts.push(`Summary: ${h.summary.slice(0, MAX_HANDOFF_SUMMARY_CHARS)}\n`);
      parts.push(`Files changed: ${h.filesChanged.slice(0, MAX_FILES_PER_HANDOFF).join(", ")}\n`);
      if (h.concerns.length > 0) parts.push(`Concerns: ${h.concerns.join("; ")}\n`);
      if (h.suggestions.length > 0) parts.push(`Suggestions: ${h.suggestions.join("; ")}\n`);
    }
  }
  if (activeTasks.size > 0) {
    parts.push(`## Currently Active Subtasks (${activeTasks.size})\n`);
    for (const id of activeTasks) {
      const t = this.taskQueue.getById(id);
      if (t) parts.push(`- ${id}: ${t.description.slice(0, 120)}\n`);
    }
  }
  parts.push(
    `Continue planning. Review new handoffs and current state. Rewrite your scratchpad and emit the next batch of subtasks as JSON. Return empty tasks array if all work is done.\n`,
  );
  return parts.join("");
}
Configuration
/**
 * Tunables controlling when and how deeply tasks are decomposed.
 * Consumed by shouldDecompose() and by batch construction.
 */
export interface SubplannerConfig {
maxDepth: number; // Maximum recursion depth
scopeThreshold: number; // Minimum files to trigger decomposition
maxSubtasks: number; // Maximum subtasks per batch
}
/** Default subplanner tuning; see SubplannerConfig for field semantics. */
export const DEFAULT_SUBPLANNER_CONFIG: SubplannerConfig = {
maxDepth: 3,
scopeThreshold: 4,
maxSubtasks: 10,
};
// Poll interval for the decomposition loop's wait states.
const LOOP_SLEEP_MS = 500;
const MIN_HANDOFFS_FOR_REPLAN = 1; // Subplanner replans after every handoff
// Hard cap on planning iterations per decomposeAndExecute() call.
const MAX_SUBPLANNER_ITERATIONS = 20;
// NOTE(review): not referenced in the visible code — presumably used by the
// elided backoff/retry logic in the loop's catch block; confirm.
const MAX_CONSECUTIVE_ERRORS = 5;
Anti-Patterns (from Prompt)
- Forced upfront decomposition — “Trying to emit subtasks for every file in scope on the first batch when some depend on foundations being built first.”
- Trivial splits — “Splitting a 1-file task into 3 subtasks wastes coordination overhead.”
- Context-less descriptions — “‘Implement the mesher’ means nothing to a worker who doesn’t know what project this is.”
- Incomplete coverage — “Files in parent scope with no subtask AND no explicit deferral reason = work silently dropped.”
- Ignoring handoff intelligence — “If a worker established a pattern you didn’t anticipate, subsequent subtasks must reference it.”
Best Practices
1. Defer Confidently
Explicitly state what you’re deferring and why:

{
"scratchpad": "[DEFERRED] mesher.ts — needs to see the Chunk class data structure from chunk.ts worker. Will specify in next batch after handoff."
}
2. Validate Scope Inheritance
Every subtask scope must be a strict subset of parent scope:

const invalidFiles = subtask.scope.filter((f) => !parentTask.scope.includes(f));
if (invalidFiles.length > 0) {
throw new Error(`Subtask ${subtask.id} includes files outside parent scope: ${invalidFiles}`);
}
3. Prevent Scope Overlap
Track which files are assigned to subtasks:

const assignedFiles = new Set<string>();
for (const subtask of subtasks) {
for (const file of subtask.scope) {
if (assignedFiles.has(file)) {
throw new Error(`File ${file} assigned to multiple subtasks (overlap violation)`);
}
assignedFiles.add(file);
}
}
4. Include Parent Context in Descriptions
Workers see only the subtask description, not the parent:

{
"description": "Implement greedy meshing for the voxel engine. Create a Mesher class in mesher.ts that takes a Chunk instance (which stores voxel data as a flat Uint8Array with index math: x + z * 16 + y * 16 * 16) and produces vertex/index arrays for WebGL rendering. This is the rendering pipeline's geometry generation step."
}
Next Steps
- Worker Agent — See how subtasks are implemented in sandboxes
- Root Planner Agent — Understand parent planning behavior
- Task Decomposition — Learn the decomposition strategy