Skip to main content
Aeolos implements a two-tier memory management system: the Physical Memory Manager (PMM) handles allocation of physical memory frames, while the Virtual Memory Manager (VMM) manages virtual address spaces and paging structures.

Physical Memory Manager (PMM)

The PMM tracks available physical memory using a bitmap allocator. Each bit in the bitmap represents one physical page (4096 bytes).

Initialization

The PMM is initialized early in the boot process with the memory map from the bootloader:
kernel/mm/pmm.c
/*
 * Initialize the physical memory manager from the bootloader-provided
 * memory map: record the map for later reclamation, compute the highest
 * physical address (memstats.phys_limit) and the total usable memory
 * (memstats.total_mem), and log every entry.
 *
 * NOTE: this listing is truncated here; bitmap placement follows below.
 */
void pmm_init(stv2_struct_tag_mmap* map)
{
    mmap = map;  // saved for pmm_reclaim_bootloader_mem()

    // calculate memory statistics
    klog_info("bootloader provided memory map: \n");
    for (size_t i = 0; i < map->entries; i++) {
        struct stivale2_mmap_entry entry = map->memmap[i];

        // skip regions entirely below 1 MiB (legacy/real-mode area)
        if (entry.base + entry.length <= 0x100000)
            continue;

        // track the highest physical address seen across all entries
        uint64_t newphyslimit = entry.base + entry.length;
        if (newphyslimit > memstats.phys_limit)
            memstats.phys_limit = newphyslimit;

        klog_printf("  base: %x, length: %x (%d KiB), type: %d\n", 
                    entry.base, entry.length, entry.length/1024, entry.type);
        // count memory that is usable now or becomes usable after reclamation
        if (entry.type == STIVALE2_MMAP_USABLE || 
            entry.type == STIVALE2_MMAP_BOOTLOADER_RECLAIMABLE ||
            entry.type == STIVALE2_MMAP_ACPI_RECLAIMABLE || 
            entry.type == STIVALE2_MMAP_KERNEL_AND_MODULES)
            memstats.total_mem += entry.length;
    }

Bitmap Allocation

The PMM stores its bitmap in the first available usable memory region:
kernel/mm/pmm.c
// look for a good place to keep our bitmap
// One bit per page, so the bitmap needs phys_limit / (PAGE_SIZE * 8) bytes.
// NOTE(review): integer division rounds DOWN -- if phys_limit is not a
// multiple of PAGE_SIZE * 8 the last few pages have no bitmap bit; confirm
// callers never allocate up to the exact limit, or round up instead.
uint64_t bm_size = memstats.phys_limit / (PAGE_SIZE * 8);
for (size_t i = 0; i < map->entries; i++) {
    struct stivale2_mmap_entry entry = map->memmap[i];

    // same low-memory filter as the statistics pass above
    if (entry.base + entry.length <= 0x100000)
        continue;

    // first usable region large enough to hold the whole bitmap wins;
    // accessed through the direct map
    if (entry.length >= bm_size && entry.type == STIVALE2_MMAP_USABLE) {
        bitmap = (uint8_t*)PHYS_TO_VIRT(entry.base);
        break;
    }
}
// zero it out
// NOTE(review): if no suitable region was found, `bitmap` keeps its prior
// value and this memset writes through it -- a panic on "not found" would
// be safer.
memset(bitmap, 0, bm_size);
Each byte in the bitmap represents 8 pages. For a system with 4GB of RAM, the bitmap itself occupies approximately 128KB.

Core PMM Functions

/*
 * Allocate `numpages` contiguous physical pages and return the physical
 * base address of the run. Uses a next-fit strategy: scanning resumes at
 * the cursor left by the previous successful allocation, wraps around to
 * the bottom of memory once, and panics if both passes fail.
 */
uint64_t pmm_get(uint64_t numpages)
{
    static uint64_t lastusedpage;

    // First pass: from the allocation cursor up to the physical limit.
    uint64_t addr = lastusedpage;
    while (addr < memstats.phys_limit) {
        if (pmm_alloc(addr, numpages)) {
            lastusedpage = addr + numpages * PAGE_SIZE;
            return addr;
        }
        addr += PAGE_SIZE;
    }

    // Second pass: wrap around and retry from address zero up to the cursor.
    addr = 0;
    while (addr < lastusedpage) {
        if (pmm_alloc(addr, numpages)) {
            lastusedpage = addr + numpages * PAGE_SIZE;
            return addr;
        }
        addr += PAGE_SIZE;
    }

    kernel_panic("Out of Physical Memory");
    return 0;  // unreachable; keeps the compiler happy
}

Memory Statistics

The PMM tracks memory usage through the mem_info structure:
kernel/mm/pmm.h
/* Kernel-wide physical memory statistics maintained by the PMM. */
typedef struct {
    uint64_t phys_limit;  // Highest physical address
    uint64_t total_mem;   // Total usable memory
    uint64_t free_mem;    // Currently free memory
} mem_info;
Statistics can be dumped at runtime:
kernel/mm/pmm.c
/* Log a human-readable summary of the PMM's memory statistics. */
void pmm_dumpstats() {
    uint64_t total   = memstats.total_mem;
    uint64_t freemem = memstats.free_mem;
    uint64_t used    = total - freemem;
    uint64_t highest = memstats.phys_limit;

    klog_info("\n");
    klog_printf(" \t \tTotal: %d KiB (%d MiB)\n", total / 1024, total / (1024 * 1024));
    klog_printf(" \t \tFree:  %d KiB (%d MiB)\n", freemem / 1024, freemem / (1024 * 1024));
    klog_printf(" \t \tUsed:  %d KiB (%d MiB)\n", used / 1024, used / (1024 * 1024));
    klog_printf(" \t \tThe highest available physical address is %x.\n\n", highest);
}

Reclaiming Bootloader Memory

After initialization, the kernel reclaims memory used by the bootloader:
kernel/mm/pmm.c
void pmm_reclaim_bootloader_mem()
{
    for (size_t i = 0; i < mmap->entries; i++) {
        struct stivale2_mmap_entry entry = mmap->memmap[i];

        if (entry.type == STIVALE2_MMAP_BOOTLOADER_RECLAIMABLE)
            pmm_free(entry.base, NUM_PAGES(entry.length));
    }
}

Virtual Memory Manager (VMM)

The VMM implements x86_64 4-level paging with support for multiple address spaces.

Address Space Structure

kernel/mm/vmm.h
/* One virtual address space: the root page table plus a lock guarding
 * all modifications to the paging structures it owns. */
typedef struct {
    uint64_t* PML4;  // Page Map Level 4 (root page table)
    lock_t lock;     // Synchronization lock
} addrspace_t;

Memory Layout

Aeolos uses these virtual memory regions:
kernel/mm/vmm.h
// Higher-half virtual bases used by the kernel.
#define MEM_VIRT_OFFSET 0xffff800000000000     // Physical memory base
#define HIGHERHALF_OFFSET 0xffffffff80000000   // Kernel code/data base

// Translate between a physical address and its direct-map virtual address.
// Only valid for addresses that fall inside the direct-mapped region.
#define VIRT_TO_PHYS(a) (((uint64_t)(a)) - MEM_VIRT_OFFSET)
#define PHYS_TO_VIRT(a) (((uint64_t)(a)) + MEM_VIRT_OFFSET)
Virtual Address Range | Purpose
0x0000000000000000 - 0x00007FFFFFFFFFFF | User space
0xFFFF800000000000 - 0xFFFF8FFFFFFFFFFF | All physical memory (direct map)
0xFFFFFFFF80000000 - 0xFFFFFFFFFFFFFFFF | Kernel code and data (2GB)

VMM Initialization

The VMM creates new paging structures to replace the bootloader’s mappings:
kernel/mm/vmm.c
/*
 * Build the kernel's own paging structures and switch CR3 to them,
 * replacing the bootloader-provided page tables.
 *
 * NOTE(review): assumes kmalloc returns PAGE_SIZE-aligned memory whose
 * virtual address lies in the direct map (so VIRT_TO_PHYS is valid for
 * it) -- confirm against the kmalloc implementation.
 */
void vmm_init()
{
    // create the kernel address space
    kaddrspace.PML4 = kmalloc(PAGE_SIZE);
    memset(kaddrspace.PML4, 0, PAGE_SIZE);

    // higher-half kernel image: map the first 2 GiB of physical memory
    vmm_map(&kaddrspace, 0xffffffff80000000, 0, NUM_PAGES(0x80000000), VMM_FLAGS_DEFAULT);
    klog_info("mapped lower 2GB to 0xFFFFFFFF80000000\n");

    // direct map: every physical address up to the PMM's phys_limit
    vmm_map(&kaddrspace, 0xffff800000000000, 0, NUM_PAGES(pmm_getstats()->phys_limit), VMM_FLAGS_DEFAULT);
    klog_info("mapped all memory to 0xFFFF800000000000\n");

    // load the new root table; the bootloader's tables are unused from here on
    write_cr("cr3", VIRT_TO_PHYS(kaddrspace.PML4));
    klog_ok("done\n");
}
The VMM maps all physical memory to the virtual address space. In the future, this should skip large reserved zones to reduce page table overhead.

Page Mapping

The core mapping function walks the 4-level page table hierarchy:
kernel/mm/vmm.c
/*
 * Map one 4 KiB page: install a PTE translating `vaddr` -> `paddr` in
 * `addrspace`, allocating and zeroing any missing intermediate tables on
 * the way down. `flags` is applied to the leaf entry only.
 */
static void map_page(addrspace_t* addrspace, uint64_t vaddr, uint64_t paddr, uint64_t flags)
{
    // split the virtual address into its four 9-bit table indices
    uint16_t pte = (vaddr >> 12) & 0x1ff;
    uint16_t pde = (vaddr >> 21) & 0x1ff;
    uint16_t pdpe = (vaddr >> 30) & 0x1ff;
    uint16_t pml4e = (vaddr >> 39) & 0x1ff;

    uint64_t* pml4 = addrspace->PML4;
    uint64_t* pdpt;
    uint64_t* pd;
    uint64_t* pt;

    // At each level: derive the next table's direct-map address from the
    // entry (the value computed here is only meaningful if the entry is
    // present), otherwise allocate a fresh zeroed table and install it.
    // New intermediate entries use VMM_FLAGS_USERMODE so upper levels never
    // restrict access; the leaf entry's `flags` carry the effective
    // per-page permissions.
    // NOTE(review): assumes no huge-page (PS) mappings exist at these
    // levels -- a present PS entry would be misread as a table pointer.
    pdpt = (uint64_t*)PHYS_TO_VIRT(pml4[pml4e] & ~(0xfff));
    if (!(pml4[pml4e] & VMM_FLAG_PRESENT)) {
        pdpt = (uint64_t*)PHYS_TO_VIRT(pmm_get(1));
        memset(pdpt, 0, PAGE_SIZE);
        pml4[pml4e] = MAKE_TABLE_ENTRY(VIRT_TO_PHYS(pdpt), VMM_FLAGS_USERMODE);
    }

    pd = (uint64_t*)PHYS_TO_VIRT(pdpt[pdpe] & ~(0xfff));
    if (!(pdpt[pdpe] & VMM_FLAG_PRESENT)) {
        pd = (uint64_t*)PHYS_TO_VIRT(pmm_get(1));
        memset(pd, 0, PAGE_SIZE);
        pdpt[pdpe] = MAKE_TABLE_ENTRY(VIRT_TO_PHYS(pd), VMM_FLAGS_USERMODE);
    }

    pt = (uint64_t*)PHYS_TO_VIRT(pd[pde] & ~(0xfff));
    if (!(pd[pde] & VMM_FLAG_PRESENT)) {
        pt = (uint64_t*)PHYS_TO_VIRT(pmm_get(1));
        memset(pt, 0, PAGE_SIZE);
        pd[pde] = MAKE_TABLE_ENTRY(VIRT_TO_PHYS(pt), VMM_FLAGS_USERMODE);
    }

    // install the leaf entry: page-aligned physical address plus caller flags
    pt[pte] = MAKE_TABLE_ENTRY(paddr & ~(0xfff), flags);

    // TODO: shootdown?
    // Invalidate the stale TLB entry, but only if this address space is the
    // one currently loaded in CR3; other CPUs are not notified.
    uint64_t cr3val;
    read_cr("cr3", &cr3val);
    if (cr3val == (uint64_t)(VIRT_TO_PHYS(addrspace->PML4)))
        asm volatile("invlpg (%0)" ::"r"(vaddr));
}

Page Table Hierarchy

The x86_64 paging structure has four levels:
1. PML4 (Page Map Level 4) — Root table with 512 entries. Bits 39-47 of the virtual address index into PML4.

2. PDPT (Page Directory Pointer Table) — 512 entries per PML4 entry. Bits 30-38 index into PDPT.

3. PD (Page Directory) — 512 entries per PDPT entry. Bits 21-29 index into PD.

4. PT (Page Table) — 512 entries per PD entry. Bits 12-20 index into PT. Each entry points to a 4KB physical page.

Page Unmapping

Unmapping includes automatic page table cleanup:
kernel/mm/vmm.c
/*
 * Unmap one 4 KiB page at `vaddr` in `addrspace`, then free any page
 * tables that became completely empty, cascading bottom-up
 * (PT -> PD -> PDPT entries). Missing mappings are ignored.
 */
static void unmap_page(addrspace_t* addrspace, uint64_t vaddr)
{
    // split the virtual address into its four 9-bit table indices
    uint16_t pte = (vaddr >> 12) & 0x1ff;
    uint16_t pde = (vaddr >> 21) & 0x1ff;
    uint16_t pdpe = (vaddr >> 30) & 0x1ff;
    uint16_t pml4e = (vaddr >> 39) & 0x1ff;

    // walk down the hierarchy; bail out silently if any level is absent
    uint64_t* pml4 = addrspace->PML4;
    if (!(pml4[pml4e] & VMM_FLAG_PRESENT))
        return;

    uint64_t* pdpt = (uint64_t*)PHYS_TO_VIRT(pml4[pml4e] & ~(0xfff));
    if (!(pdpt[pdpe] & VMM_FLAG_PRESENT))
        return;

    uint64_t* pd = (uint64_t*)PHYS_TO_VIRT(pdpt[pdpe] & ~(0xfff));
    if (!(pd[pde] & VMM_FLAG_PRESENT))
        return;

    uint64_t* pt = (uint64_t*)PHYS_TO_VIRT(pd[pde] & ~(0xfff));
    if (!(pt[pte] & VMM_FLAG_PRESENT))
        return;

    // clear the leaf entry
    pt[pte] = 0;

    // TODO: shootdown
    // Invalidate the TLB entry only if this address space is active on
    // this CPU; no cross-CPU shootdown yet.
    uint64_t cr3val;
    read_cr("cr3", &cr3val);
    if (cr3val == (uint64_t)(VIRT_TO_PHYS(addrspace->PML4)))
        asm volatile("invlpg (%0)" ::"r"(vaddr));

    // Cascade: any nonzero entry (not just present ones) keeps a table
    // alive. Each stage clears the parent entry first, then frees the
    // now-orphaned table's physical page.
    for (int i = 0; i < 512; i++)
        if (pt[i] != 0)
            goto done;
    pd[pde] = 0;
    pmm_free(VIRT_TO_PHYS(pt), 1);

    for (int i = 0; i < 512; i++)
        if (pd[i] != 0)
            goto done;
    pdpt[pdpe] = 0;
    pmm_free(VIRT_TO_PHYS(pd), 1);

    for (int i = 0; i < 512; i++)
        if (pdpt[i] != 0)
            goto done;
    pml4[pml4e] = 0;
    pmm_free(VIRT_TO_PHYS(pdpt), 1);

done:
    return;
}
When unmapping, the VMM checks if page tables become empty and frees them automatically to prevent memory leaks.

Page Flags

The VMM supports various page attributes:
kernel/mm/vmm.h
// Page-table entry attribute bits (x86_64 PTE format). Each shift is fully
// parenthesized: the bare form `1 << 2` breaks inside surrounding
// arithmetic (e.g. `VMM_FLAG_USER - 1` would expand to `1 << 2 - 1` == 2
// because `-` binds tighter than `<<`).
#define VMM_FLAG_PRESENT       (1 << 0)  // Page is present in memory
#define VMM_FLAG_READWRITE     (1 << 1)  // Page is writable
#define VMM_FLAG_USER          (1 << 2)  // Page is accessible from user mode
#define VMM_FLAG_WRITETHROUGH  (1 << 3)  // Write-through caching
#define VMM_FLAG_CACHE_DISABLE (1 << 4)  // Disable caching
#define VMM_FLAG_WRITECOMBINE  (1 << 7)  // Write combining (for framebuffers)

// Common flag combinations.
#define VMM_FLAGS_DEFAULT  (VMM_FLAG_PRESENT | VMM_FLAG_READWRITE)
#define VMM_FLAGS_MMIO     (VMM_FLAGS_DEFAULT | VMM_FLAG_CACHE_DISABLE)
#define VMM_FLAGS_USERMODE (VMM_FLAGS_DEFAULT | VMM_FLAG_USER)

Public API

/*
 * Map `np` consecutive pages starting at `vaddr` -> `paddr` with the
 * given flags. A NULL `addrspace` means the kernel address space.
 */
void vmm_map(addrspace_t* addrspace, uint64_t vaddr, uint64_t paddr, uint64_t np, uint64_t flags)
{
    addrspace_t* target = addrspace ? addrspace : &kaddrspace;
    for (uint64_t page = 0; page < np; page++) {
        uint64_t off = page * PAGE_SIZE;
        map_page(target, vaddr + off, paddr + off, flags);
    }
}

Helper Macros

kernel/mm/pmm.h
// Size of one physical/virtual page in bytes.
#define PAGE_SIZE 4096
// Number of pages needed to hold `num` bytes, rounding up.
#define NUM_PAGES(num) (((num) + PAGE_SIZE - 1) / PAGE_SIZE)
// `num` rounded up to the next page boundary.
#define PAGE_ALIGN_UP(num) (NUM_PAGES(num) * PAGE_SIZE)

TLB Management

Both mapping and unmapping operations invalidate TLB entries when modifying the current address space:
uint64_t cr3val;
read_cr("cr3", &cr3val);
if (cr3val == (uint64_t)(VIRT_TO_PHYS(addrspace->PML4)))
    asm volatile("invlpg (%0)" ::"r"(vaddr));
TODO: TLB shootdown is not yet implemented for SMP systems. Modifying page tables used by other CPUs requires inter-processor interrupts.

Memory Management Summary

  • Bitmap-based physical memory allocator
  • Tracks 4KB pages with one bit per page
  • First-fit allocation strategy with cursor optimization
  • Supports reclaimable memory regions
  • 4-level x86_64 paging structure
  • Higher-half kernel at 0xFFFFFFFF80000000
  • Direct map of all physical memory at 0xFFFF800000000000
  • Automatic page table cleanup on unmap
  • VMM depends on PMM for page table allocation
  • PMM initialized first during kernel startup
  • Both use locks for SMP safety (TODO: verify implementation)

Next Steps

Boot Process

Learn how memory managers are initialized

Process Management

See how tasks use virtual memory

Build docs developers (and LLMs) love