A child workflow is a workflow triggered from inside a running task. The parent task waits for the child to complete and receives its output, or it can fire-and-forget if the result is not needed.
Child workflows are useful when you need to:
Break a large workload into independently observable units
Fan out to a dynamic number of parallel workers
Reuse a workflow definition across multiple callers
Apply separate concurrency or retry policies to a sub-unit of work
Define a child and a parent
Any task or workflow can be used as a child. Define it the same way as any other task, then call it from the parent with `await child_task.aio_run(input=...)`.

from pydantic import BaseModel
from hatchet_sdk import Context, Hatchet

hatchet = Hatchet(debug=True)


class SimpleInput(BaseModel):
    # Message to be transformed by the child workflow.
    message: str


class SimpleOutput(BaseModel):
    # Upper-cased copy of the input message.
    transformed_message: str


# The child workflow definition; input is validated against SimpleInput.
child_task = hatchet.workflow(name="SimpleWorkflow", input_validator=SimpleInput)


@child_task.task(name="step1")
def step1(input: SimpleInput, ctx: Context) -> SimpleOutput:
    """Upper-case the incoming message and return it as the child's output."""
    print("executed step1: ", input.message)
    return SimpleOutput(transformed_message=input.message.upper())
Define the child as a regular task. Call `child.run(input)` from the parent's `fn` to spawn it and await its result.

import { hatchet } from './hatchet-client';
type ChildInput = { N: number };

// Child task: echoes its numeric input back as `Value`.
export const child = hatchet.task({
  name: 'child',
  fn: (input: ChildInput) => ({
    Value: input.N,
  }),
});
type ParentInput = { N: number };

// Parent task: fans out N child runs in parallel and sums their outputs.
export const parent = hatchet.task({
  name: 'parent',
  fn: async (input: ParentInput, ctx) => {
    const n = input.N;
    const promises = [];
    for (let i = 0; i < n; i++) {
      // Each child.run(...) returns a promise; collect them without awaiting
      // so the children execute concurrently.
      promises.push(child.run({ N: i }));
    }
    const childRes = await Promise.all(promises);
    const sum = childRes.reduce((acc, curr) => acc + curr.Value, 0);
    return { Result: sum };
  },
});
Define child and parent as standalone tasks. Call `childTask.Run(ctx, input)` from inside the parent handler.

package main
import (
" fmt "
" log "
hatchet " github.com/hatchet-dev/hatchet/sdks/go "
)
type ChildInput struct {
Value int `json:"value"`
}
type ChildOutput struct {
Result int `json:"result"`
}
func Child ( client * hatchet . Client ) * hatchet . StandaloneTask {
return client . NewStandaloneTask ( "child-task" ,
func ( ctx hatchet . Context , input ChildInput ) ( ChildOutput , error ) {
return ChildOutput { Result : input . Value * 2 }, nil
},
)
}
Spawn a child workflow from a task
Call `await child_task.aio_run(input=...)` from inside any async task function. The call returns the child's typed output.

from typing import Any
from hatchet_sdk import Context, Hatchet
from hatchet_sdk.runnables.types import EmptyModel
from worker import SimpleInput, child_task

hatchet = Hatchet(debug=True)


@hatchet.task(name="SpawnTask")
async def spawn(input: EmptyModel, ctx: Context) -> dict[str, Any]:
    """Spawn one child workflow and block until its typed output is available."""
    result = await child_task.aio_run(
        input=SimpleInput(message="Hello, World!"),
    )
    return {"results": result}
For a single child with a blocking call:

from worker import SimpleInput, child_task
# Synchronous — blocks until the child completes
result = child_task.run(SimpleInput(message="Hello, World!"))
Call child.run(input) from inside the parent’s fn. TypeScript’s await naturally waits for the child to finish. export const parentSingleChild = hatchet . task ({
name: 'parent-single-child' ,
fn : async () => {
const childRes = await child . run ({ N: 1 });
return {
Result: childRes . Value ,
};
},
});
Call `childWorkflow.Run(ctx, input)` where `ctx` is the `hatchet.Context` passed to the parent task. This schedules the child on the cluster and blocks until it finishes.

// Parent registers a standalone task that spawns one child run,
// decodes its output, and returns the child's result as the sum.
func Parent(client *hatchet.Client) *hatchet.StandaloneTask {
	return client.NewStandaloneTask("parent-task",
		func(ctx hatchet.Context, input ParentInput) (ParentOutput, error) {
			childResult, err := Child(client).Run(ctx, ChildInput{Value: 1})
			if err != nil {
				return ParentOutput{}, fmt.Errorf("child failed: %w", err)
			}

			// Decode the child's raw result into its typed output.
			var childOutput ChildOutput
			if err := childResult.Into(&childOutput); err != nil {
				return ParentOutput{}, fmt.Errorf("failed to parse child result: %w", err)
			}

			return ParentOutput{Sum: childOutput.Result}, nil
		},
	)
}
Fan-out: spawn multiple children in parallel
Fan-out spawns many child workflows concurrently and collects all results before continuing.
Use `asyncio.gather` to run multiple children simultaneously:

import asyncio
from worker import SimpleInput, child_task
result1 = child_task.aio_run(SimpleInput( message = "Hello, World!" ))
result2 = child_task.aio_run(SimpleInput( message = "Hello, Moon!" ))
results = await asyncio.gather(result1, result2)
print (results[ 0 ][ "transformed_message" ])
print (results[ 1 ][ "transformed_message" ])
Or submit a batch in one call with `aio_run_many`:

greetings = ["Hello, World!", "Hello, Moon!", "Hello, Mars!"]
results = await child_task.aio_run_many(
[
child_task.create_bulk_run_item(
input = SimpleInput( message = greeting),
)
for greeting in greetings
]
)
print (results)
Collect child.run(...) calls into an array and resolve them with Promise.all: export const parent = hatchet . task ({
name: 'parent' ,
fn : async ( input : ParentInput , ctx ) => {
const n = input . N ;
const promises = [];
for ( let i = 0 ; i < n ; i ++ ) {
promises . push ( child . run ({ N: i }));
}
const childRes = await Promise . all ( promises );
const sum = childRes . reduce (( acc , curr ) => acc + curr . Value , 0 );
return { Result: sum };
},
});
Use goroutines and a `sync.WaitGroup` to run children in parallel:

import "sync"
// Inside a parent task handler:
n := 5

var wg sync.WaitGroup
var mu sync.Mutex
results := make([]*ChildOutput, 0, n)

wg.Add(n)
for i := 0; i < n; i++ {
	go func(index int) {
		defer wg.Done()

		result, err := childWorkflow.Run(hCtx, ChildInput{Value: index})
		if err != nil {
			// Failed children are skipped; results holds successes only.
			return
		}

		var childOutput ChildOutput
		if err := result.Into(&childOutput); err != nil {
			return
		}

		// results is shared across goroutines; guard the append.
		mu.Lock()
		results = append(results, &childOutput)
		mu.Unlock()
	}(i)
}
wg.Wait()
Register child and parent on the same worker
Both the child and parent must be registered on a worker for the workflow to execute. The worker needs enough slots to run children concurrently.
from hatchet_sdk import Hatchet
from worker import child_task

hatchet = Hatchet(debug=True)


def main() -> None:
    """Start a worker that can execute the child workflow."""
    worker = hatchet.worker("test-worker", slots=1, workflows=[child_task])
    worker.start()


if __name__ == "__main__":
    main()
import { hatchet } from './hatchet-client';
import { parent, child } from './workflow';

async function main() {
  // Register parent and child on the same worker; 100 slots leaves
  // plenty of room for children spawned while the parent holds a slot.
  const worker = await hatchet.worker('child-workflow-worker', {
    workflows: [parent, child],
    slots: 100,
  });

  await worker.start();
}

if (require.main === module) {
  main();
}
// Register both workflows on one worker with enough slots for children.
worker, err := client.NewWorker(
	"child-workflow-worker",
	hatchet.WithWorkflows(childWorkflow, parentWorkflow),
	hatchet.WithSlots(10),
)
if err != nil {
	log.Fatalf("failed to create worker: %v", err)
}

// NOTE(review): cmdutils is presumably a Hatchet examples helper providing
// a signal-aware context — confirm the import in the full example.
interruptCtx, cancel := cmdutils.NewInterruptContext()
defer cancel()

if err := worker.StartBlocking(interruptCtx); err != nil {
	log.Fatalf("failed to start worker: %v", err)
}
Trigger the parent
from worker import SimpleInput, child_task

# Trigger the workflow directly; blocks until it completes.
child_task.run(SimpleInput(message="Hello, World!"))
import { parent } from './workflow';

async function main() {
  // Run the parent, which fans out to 10 children.
  const res = await parent.run({ N: 10 });
  console.log(res.Result);
}

if (require.main === module) {
  main()
    .then(() => process.exit(0))
    .catch((error) => {
      console.error('Error:', error);
      process.exit(1);
    });
}
// NOTE(review): ParentInput here uses a Count field, while earlier examples
// use N/Value — confirm against the full example source.
_, err := parentWorkflow.Run(context.Background(), ParentInput{
	Count: 5,
})
if err != nil {
	log.Printf("failed to run parent workflow: %v", err)
}
If the parent and child are on the same worker with limited slots, a deadlock can occur: the parent holds a slot and waits for a child that cannot start because all slots are occupied. Increase slots or run child workflows on a separate worker to avoid this.
Next steps
Durable execution Spawn children from durable tasks that survive restarts.
DAG workflows Build task graphs with explicit dependencies.