/**
 * Transforms an incoming request body into the shape the ChatGPT/Codex
 * backend expects: normalizes the model name, forces stateless streaming,
 * injects Codex instructions, filters/augments the `input` array, and
 * resolves reasoning / text-verbosity / include settings.
 *
 * Mutates `body` in place and returns the same reference.
 *
 * @param body - Request body to transform (mutated in place).
 * @param codexInstructions - Base Codex instructions to inject.
 * @param userConfig - Per-user global/model configuration overrides.
 * @param codexMode - When true, strips host system prompts and adds the
 *   Codex bridge message; otherwise adds the tool-remap message.
 * @param fastSession - Enables fast-session input trimming and tuning.
 * @param fastSessionStrategy - Fast-session strategy. NOTE(review): unused
 *   in this function body — confirm whether it should be forwarded to
 *   `trimInputForFastSession` or removed upstream.
 * @param fastSessionMaxInputItems - Max input items kept when trimming.
 * @returns The transformed request body (same object as `body`).
 */
export async function transformRequestBody(
  body: RequestBody,
  codexInstructions: string,
  userConfig: UserConfig = { global: {}, models: {} },
  codexMode = true,
  fastSession = false,
  fastSessionStrategy: FastSessionStrategy = "hybrid",
  fastSessionMaxInputItems = 30,
): Promise<RequestBody> {
  const originalModel = body.model;
  const normalizedModel = normalizeModel(body.model);
  const modelConfig = getModelConfig(originalModel || normalizedModel, userConfig);

  // Set normalized model
  body.model = normalizedModel;

  // Codex required fields
  body.store = false; // Stateless (required by ChatGPT backend)
  body.stream = true; // Always stream (SSE)

  // Inject Codex instructions.
  // NOTE(review): `shouldApplyFastSessionTuning` and `isTrivialTurn` are not
  // declared in this function — presumably module-level state computed per
  // request; verify they are request-scoped (they look like they should be
  // derived from `body` / `fastSession` rather than shared module state).
  body.instructions = shouldApplyFastSessionTuning
    ? compactInstructionsForFastSession(codexInstructions, isTrivialTurn)
    : codexInstructions;

  // Filter input array
  if (body.input) {
    // Apply fast-session input trimming
    if (fastSession) {
      body.input = trimInputForFastSession(
        body.input,
        fastSessionMaxInputItems,
        { preferLatestUserOnly: isTrivialTurn }
      );
    }
    // Remove item_reference (AI SDK construct, not supported by Codex)
    // and strip IDs from all items (stateless mode)
    body.input = filterInput(body.input);
    // Add bridge/tool-remap message
    if (codexMode) {
      body.input = await filterHostSystemPrompts(body.input);
      body.input = addCodexBridgeMessage(body.input, !!body.tools);
    } else {
      body.input = addToolRemapMessage(body.input, !!body.tools);
    }
    // Handle orphaned tool outputs
    body.input = normalizeOrphanedToolOutputs(body.input);
    body.input = injectMissingToolOutputs(body.input);
  }

  // Configure reasoning
  const reasoningConfig = resolveReasoningConfig(normalizedModel, modelConfig, body);
  body.reasoning = {
    ...body.reasoning,
    ...reasoningConfig,
  };

  // Configure text verbosity. Must run BEFORE the fast-session overrides:
  // previously the overrides ran first, so (a) `body.text.verbosity = "low"`
  // could throw when the incoming request had no `text` object, and (b) this
  // assignment then clobbered the "low" override anyway.
  body.text = {
    ...body.text,
    verbosity: resolveTextVerbosity(modelConfig, body),
  };

  // Fast-session overrides — applied last so they win over resolved config.
  if (fastSession && shouldApplyFastSessionTuning) {
    body.reasoning.effort = "none"; // or "low" for Codex models
    body.reasoning.summary = "auto";
    body.text.verbosity = "low";
  }

  // Add include for encrypted reasoning content.
  // Always includes "reasoning.encrypted_content" for stateless continuity.
  body.include = resolveInclude(modelConfig, body);

  // Remove unsupported parameters
  body.max_output_tokens = undefined;
  body.max_completion_tokens = undefined;

  return body;
}