Aeolos implements a two-tier memory management system consisting of a Physical Memory Manager (PMM) and a Virtual Memory Manager (VMM). The PMM tracks physical page frames, while the VMM manages virtual address spaces using x86_64 paging.
Memory Layout
Aeolos uses a higher-half kernel design with the following virtual address layout:
0x0000000000000000 - 0x00007FFFFFFFFFFF User space (128 TB)
0xFFFF800000000000 - 0xFFFFFFFFFFFFFFFF Kernel space (128 TB)
0xFFFF800000000000 - ... Physical memory mapping
0xFFFFFFFF80000000 - 0xFFFFFFFFFFFFFFFF Kernel code/data (2 GB)
Key address translation macros from kernel/mm/vmm.h:19:
/* Base of the higher-half direct map: physical address P is visible to the
 * kernel at virtual address P + MEM_VIRT_OFFSET. */
#define MEM_VIRT_OFFSET 0xffff800000000000UL
/* Translate between a physical address and its direct-map virtual alias.
 * Only valid for addresses inside the direct-mapped region. */
#define VIRT_TO_PHYS(a) (((uint64_t)(a)) - MEM_VIRT_OFFSET)
#define PHYS_TO_VIRT(a) (((uint64_t)(a)) + MEM_VIRT_OFFSET)
Physical Memory Manager (PMM)
The PMM uses a bitmap to track the allocation status of physical page frames. Each bit represents one 4 KiB page.
Data Structures
From kernel/mm/pmm.h:13:
/* Snapshot of physical-memory statistics maintained by the PMM. */
typedef struct {
    uint64_t phys_limit; /* exclusive upper bound of tracked RAM (max base+length) */
    uint64_t total_mem;  /* bytes of usable memory reported by the bootloader */
    uint64_t free_mem;   /* bytes currently unallocated */
} mem_info;
Initialization
The PMM is initialized in kernel/mm/pmm.c:86 using the memory map from the bootloader:
/*
 * Initialize the physical memory manager from the bootloader memory map.
 * Computes memory statistics, places the allocation bitmap (one bit per
 * 4 KiB page; set = free, clear = used) in the first suitable USABLE
 * region, and releases all USABLE pages into the allocator.
 * Everything below 1 MiB is ignored (legacy/BIOS area).
 */
void pmm_init(stv2_struct_tag_mmap *map)
{
    mmap = map;

    /* Pass 1: derive phys_limit (exclusive top of RAM) and total usable
     * memory from the bootloader-provided map. */
    klog_info("bootloader provided memory map:\n");
    for (size_t i = 0; i < map->entries; i++) {
        struct stivale2_mmap_entry entry = map->memmap[i];
        if (entry.base + entry.length <= 0x100000)
            continue;
        uint64_t newphyslimit = entry.base + entry.length;
        if (newphyslimit > memstats.phys_limit)
            memstats.phys_limit = newphyslimit;
        klog_printf("  base: %x, length: %x (%d KiB), type: %d\n",
                    entry.base, entry.length, entry.length / 1024, entry.type);
        if (entry.type == STIVALE2_MMAP_USABLE ||
            entry.type == STIVALE2_MMAP_BOOTLOADER_RECLAIMABLE ||
            entry.type == STIVALE2_MMAP_ACPI_RECLAIMABLE ||
            entry.type == STIVALE2_MMAP_KERNEL_AND_MODULES)
            memstats.total_mem += entry.length;
    }

    /* Pass 2: find a USABLE region big enough for the bitmap.
     * Round the size up: the old truncating division (phys_limit /
     * (PAGE_SIZE * 8)) dropped the final partial byte, leaving the pages
     * at the very top of RAM untracked by the allocator. */
    uint64_t bm_pages = (memstats.phys_limit + PAGE_SIZE - 1) / PAGE_SIZE;
    uint64_t bm_size = (bm_pages + 7) / 8;
    for (size_t i = 0; i < map->entries; i++) {
        struct stivale2_mmap_entry entry = map->memmap[i];
        if (entry.base + entry.length <= 0x100000)
            continue;
        if (entry.length >= bm_size && entry.type == STIVALE2_MMAP_USABLE) {
            bitmap = (uint8_t *)PHYS_TO_VIRT(entry.base);
            break;
        }
    }
    if (!bitmap)
        kernel_panic("no memory region large enough for the PMM bitmap");

    /* Zeroing marks every page "used"; only USABLE ranges are freed below. */
    memset(bitmap, 0, bm_size);

    /* Pass 3: release every USABLE page into the allocator. */
    for (size_t i = 0; i < map->entries; i++) {
        struct stivale2_mmap_entry entry = map->memmap[i];
        if (entry.base + entry.length <= 0x100000)
            continue;
        if (entry.type == STIVALE2_MMAP_USABLE)
            pmm_free(entry.base, NUM_PAGES(entry.length));
    }

    /* The bitmap itself lives inside one of those USABLE regions: reserve
     * it so it can never be handed out and overwritten. */
    pmm_alloc(VIRT_TO_PHYS(bitmap), NUM_PAGES(bm_size));
}
The PMM bitmap is placed in the first suitable memory region found, then marked as allocated to prevent overwriting itself.
Memory Allocation
The pmm_get() function allocates physical pages at kernel/mm/pmm.c:64:
/*
 * Allocate `numpages` contiguous physical pages and return the physical
 * address of the first one.  Uses a next-fit scan: resume from just past
 * the previous allocation, then wrap around once before giving up.
 * Panics if no suitable run of pages exists.
 */
uint64_t pmm_get(uint64_t numpages)
{
    static uint64_t lastusedpage;
    uint64_t span = numpages * PAGE_SIZE;

    /* The whole candidate run must fit below phys_limit; the old bound
     * only checked the first page, so a multi-page request near the top
     * of RAM probed bitmap bits for pages that do not exist. */
    for (uint64_t i = lastusedpage; i + span <= memstats.phys_limit; i += PAGE_SIZE) {
        if (pmm_alloc(i, numpages)) {
            lastusedpage = i + span;
            return i;
        }
    }
    /* Wrap around and retry the region skipped by the first pass. */
    for (uint64_t i = 0; i < lastusedpage && i + span <= memstats.phys_limit; i += PAGE_SIZE) {
        if (pmm_alloc(i, numpages)) {
            lastusedpage = i + span;
            return i;
        }
    }
    kernel_panic("Out of Physical Memory");
    return 0;
}
The allocator uses a “next-fit” strategy: each search resumes from just past the previous allocation, so successive requests avoid rescanning the heavily-used start of memory and typically succeed quickly.
Page Operations
pmm_alloc Marks specific physical pages as allocated. Returns false if already in use.
pmm_free Marks physical pages as free and available for reuse.
pmm_get Finds and allocates contiguous physical pages, returns physical address.
pmm_reclaim_bootloader_mem Frees memory regions marked as bootloader-reclaimable.
Bitmap Operations
Internal helpers manipulate individual bitmap bits — one bit per page, where a set bit marks the page free and a clear bit marks it allocated — at kernel/mm/pmm.c:23:
/* Clear the bitmap bit of every page in [addr, addr + numpages*PAGE_SIZE)
 * (a clear bit means the page is allocated). */
static void bmp_markused(uint64_t addr, uint64_t numpages)
{
    uint64_t end = addr + numpages * PAGE_SIZE;
    for (uint64_t cur = addr; cur < end; cur += PAGE_SIZE) {
        uint64_t page = cur / PAGE_SIZE;
        bitmap[page / 8] &= ~(1 << (page % 8));
    }
}
/* Return true iff every page in [addr, addr + numpages*PAGE_SIZE) has its
 * bitmap bit set (set bit == free page).  An empty range is "free". */
static bool bmp_isfree(uint64_t addr, uint64_t numpages)
{
    uint64_t end = addr + numpages * PAGE_SIZE;
    for (uint64_t cur = addr; cur < end; cur += PAGE_SIZE) {
        uint64_t page = cur / PAGE_SIZE;
        if (!(bitmap[page / 8] & (1 << (page % 8))))
            return false;
    }
    return true;
}
Virtual Memory Manager (VMM)
The VMM manages virtual address spaces using x86_64 4-level paging (PML4 → PDPT → PD → PT).
Address Space Structure
From kernel/mm/vmm.h:22:
/* One virtual address space: the root paging structure plus a lock.
 * PML4 is the direct-map (kernel-virtual) pointer to the top-level table;
 * callers translate with VIRT_TO_PHYS before loading it into CR3.
 * NOTE(review): the lock is declared but not taken in the visible
 * map/unmap paths -- confirm callers acquire it. */
typedef struct {
    uint64_t *PML4; /* kernel-virtual pointer to the 512-entry PML4 */
    lock_t lock;    /* intended to guard concurrent access */
} addrspace_t;
VMM Initialization
The kernel creates its own page tables in kernel/mm/vmm.c:122:
void vmm_init ()
{
// create the kernel address space
kaddrspace . PML4 = kmalloc (PAGE_SIZE);
memset ( kaddrspace . PML4 , 0 , PAGE_SIZE);
vmm_map ( & kaddrspace, 0x ffffffff80000000 , 0 , NUM_PAGES ( 0x 80000000 ), VMM_FLAGS_DEFAULT);
klog_info ( "mapped lower 2GB to 0xFFFFFFFF80000000 \n " );
vmm_map ( & kaddrspace, 0x ffff800000000000 , 0 , NUM_PAGES ( pmm_getstats ()-> phys_limit ), VMM_FLAGS_DEFAULT);
klog_info ( "mapped all memory to 0xFFFF800000000000 \n " );
write_cr ( "cr3" , VIRT_TO_PHYS ( kaddrspace . PML4 ));
klog_ok ( "done \n " );
}
The kernel maps all physical memory into the virtual address space at 0xFFFF800000000000. This allows direct access to any physical address but requires sufficient virtual address space.
Page Mapping
The map_page() function at kernel/mm/vmm.c:12 handles the complex process of mapping a virtual page to a physical page:
/*
 * Install a single 4 KiB mapping vaddr -> paddr in `addrspace` with the
 * given PTE flags, allocating any missing intermediate tables on demand.
 * Intermediate tables are created with usermode-permissive flags; the
 * final PTE alone carries the requested protection.
 */
static void map_page(addrspace_t *addrspace, uint64_t vaddr, uint64_t paddr, uint64_t flags)
{
    /* Split the virtual address into its four 9-bit table indices. */
    uint16_t pml4e = (vaddr >> 39) & 0x1ff;
    uint16_t pdpe  = (vaddr >> 30) & 0x1ff;
    uint16_t pde   = (vaddr >> 21) & 0x1ff;
    uint16_t pte   = (vaddr >> 12) & 0x1ff;

    uint64_t *pml4 = addrspace->PML4;

    /* Walk (and extend on demand) PML4 -> PDPT -> PD -> PT.  Each level:
     * create a zeroed table if the entry is not present, then follow it
     * through the direct map. */
    if (!(pml4[pml4e] & VMM_FLAG_PRESENT)) {
        uint64_t *newtab = (uint64_t *)PHYS_TO_VIRT(pmm_get(1));
        memset(newtab, 0, PAGE_SIZE);
        pml4[pml4e] = MAKE_TABLE_ENTRY(VIRT_TO_PHYS(newtab), VMM_FLAGS_USERMODE);
    }
    uint64_t *pdpt = (uint64_t *)PHYS_TO_VIRT(pml4[pml4e] & ~(0xfff));

    if (!(pdpt[pdpe] & VMM_FLAG_PRESENT)) {
        uint64_t *newtab = (uint64_t *)PHYS_TO_VIRT(pmm_get(1));
        memset(newtab, 0, PAGE_SIZE);
        pdpt[pdpe] = MAKE_TABLE_ENTRY(VIRT_TO_PHYS(newtab), VMM_FLAGS_USERMODE);
    }
    uint64_t *pd = (uint64_t *)PHYS_TO_VIRT(pdpt[pdpe] & ~(0xfff));

    if (!(pd[pde] & VMM_FLAG_PRESENT)) {
        uint64_t *newtab = (uint64_t *)PHYS_TO_VIRT(pmm_get(1));
        memset(newtab, 0, PAGE_SIZE);
        pd[pde] = MAKE_TABLE_ENTRY(VIRT_TO_PHYS(newtab), VMM_FLAGS_USERMODE);
    }
    uint64_t *pt = (uint64_t *)PHYS_TO_VIRT(pd[pde] & ~(0xfff));

    pt[pte] = MAKE_TABLE_ENTRY(paddr & ~(0xfff), flags);

    /* Flush the stale translation if this address space is currently loaded. */
    uint64_t cr3val;
    read_cr("cr3", &cr3val);
    if (cr3val == (uint64_t)VIRT_TO_PHYS(addrspace->PML4))
        asm volatile("invlpg (%0)" ::"r"(vaddr));
}
The mapping process:
Extracts page table indices from virtual address
Walks through PML4 → PDPT → PD → PT hierarchy
Allocates missing page table levels on demand
Sets the final page table entry with physical address and flags
Invalidates TLB if mapping affects the current address space
Page Unmapping
The unmap_page() function at kernel/mm/vmm.c:54 removes mappings and frees unused page tables:
/* Return true iff all 512 entries of a paging table are zero. */
static bool table_is_empty(uint64_t *table)
{
    for (int i = 0; i < 512; i++)
        if (table[i] != 0)
            return false;
    return true;
}

/*
 * Remove the mapping for one 4 KiB page at `vaddr`, flushing the TLB
 * entry if this address space is active, and free any page-table pages
 * that become completely empty as a result (walking back up the
 * PT -> PD -> PDPT hierarchy).
 */
static void unmap_page(addrspace_t *addrspace, uint64_t vaddr)
{
    uint16_t pml4e = (vaddr >> 39) & 0x1ff;
    uint16_t pdpe  = (vaddr >> 30) & 0x1ff;
    uint16_t pde   = (vaddr >> 21) & 0x1ff;
    uint16_t pte   = (vaddr >> 12) & 0x1ff;

    /* Walk down; if any level is absent, the page was never mapped. */
    uint64_t *pml4 = addrspace->PML4;
    if (!(pml4[pml4e] & VMM_FLAG_PRESENT))
        return;
    uint64_t *pdpt = (uint64_t *)PHYS_TO_VIRT(pml4[pml4e] & ~(0xfff));
    if (!(pdpt[pdpe] & VMM_FLAG_PRESENT))
        return;
    uint64_t *pd = (uint64_t *)PHYS_TO_VIRT(pdpt[pdpe] & ~(0xfff));
    if (!(pd[pde] & VMM_FLAG_PRESENT))
        return;
    uint64_t *pt = (uint64_t *)PHYS_TO_VIRT(pd[pde] & ~(0xfff));
    if (!(pt[pte] & VMM_FLAG_PRESENT))
        return;

    pt[pte] = 0;

    /* Flush the stale TLB entry if this space is currently loaded. */
    uint64_t cr3val;
    read_cr("cr3", &cr3val);
    if (cr3val == (uint64_t)VIRT_TO_PHYS(addrspace->PML4))
        asm volatile("invlpg (%0)" ::"r"(vaddr));

    /* Reclaim now-empty table pages bottom-up, stopping at the first
     * level that is still in use. */
    if (!table_is_empty(pt))
        return;
    pd[pde] = 0;
    pmm_free(VIRT_TO_PHYS(pt), 1);

    if (!table_is_empty(pd))
        return;
    pdpt[pdpe] = 0;
    pmm_free(VIRT_TO_PHYS(pd), 1);

    if (!table_is_empty(pdpt))
        return;
    pml4[pml4e] = 0;
    pmm_free(VIRT_TO_PHYS(pdpt), 1);
}
The unmapping code automatically reclaims page table pages that become empty, preventing memory leaks.
Page Flags
Page table entries can have various flags defined in kernel/mm/vmm.h:8:
/* x86_64 page-table entry flag bits.  Each expansion is parenthesized so
 * the macros compose safely inside larger expressions (the unparenthesized
 * `1 << n` form breaks under operators of higher precedence, e.g. `*`). */
#define VMM_FLAG_PRESENT       (1 << 0) /* page is present in memory */
#define VMM_FLAG_READWRITE     (1 << 1) /* page is writable */
#define VMM_FLAG_USER          (1 << 2) /* usermode can access */
#define VMM_FLAG_WRITETHROUGH  (1 << 3) /* write-through caching */
#define VMM_FLAG_CACHE_DISABLE (1 << 4) /* disable caching */
#define VMM_FLAG_WRITECOMBINE  (1 << 7) /* write-combining (for framebuffer) */

/* Common flag combinations. */
#define VMM_FLAGS_DEFAULT  (VMM_FLAG_PRESENT | VMM_FLAG_READWRITE)
#define VMM_FLAGS_MMIO     (VMM_FLAGS_DEFAULT | VMM_FLAG_CACHE_DISABLE)
#define VMM_FLAGS_USERMODE (VMM_FLAGS_DEFAULT | VMM_FLAG_USER)
Public API
The VMM exposes two main functions:
/*
 * Map `np` consecutive pages: vaddr+i*PAGE_SIZE -> paddr+i*PAGE_SIZE with
 * the given flags.  Passing NULL for addrspace targets the kernel space.
 */
void vmm_map(addrspace_t *addrspace, uint64_t vaddr, uint64_t paddr, uint64_t np, uint64_t flags)
{
    addrspace_t *as = addrspace ? addrspace : &kaddrspace;
    for (uint64_t page = 0; page < np; page++) {
        uint64_t off = page * PAGE_SIZE;
        map_page(as, vaddr + off, paddr + off, flags);
    }
}
/*
 * Unmap `np` consecutive pages starting at vaddr.  Passing NULL for
 * addrspace targets the kernel address space.
 */
void vmm_unmap(addrspace_t *addrspace, uint64_t vaddr, uint64_t np)
{
    addrspace_t *as = addrspace ? addrspace : &kaddrspace;
    for (uint64_t page = 0; page < np; page++)
        unmap_page(as, vaddr + page * PAGE_SIZE);
}
Passing NULL for addrspace operates on the kernel address space.
Memory Statistics
The PMM tracks memory usage statistics that can be retrieved with pmm_getstats() or displayed with pmm_dumpstats() at kernel/mm/pmm.c:162:
/* Log a human-readable summary of the PMM statistics (total/free/used
 * memory and the highest available physical address) via klog. */
void pmm_dumpstats()
{
    uint64_t total   = memstats.total_mem;
    uint64_t freemem = memstats.free_mem;
    uint64_t used    = total - freemem;
    uint64_t high    = memstats.phys_limit;

    klog_info("\n");
    klog_printf("\t\tTotal: %d KiB (%d MiB)\n", total / 1024, total / (1024 * 1024));
    klog_printf("\t\tFree: %d KiB (%d MiB)\n", freemem / 1024, freemem / (1024 * 1024));
    klog_printf("\t\tUsed: %d KiB (%d MiB)\n", used / 1024, used / (1024 * 1024));
    klog_printf("\t\tThe highest available physical address is %x.\n\n", high);
}