// SPDX-License-Identifier: GPL-3.0-or-later
// Copyright (c) 2026 0xKSor

#include
#include
#include
#include
#include
#include
#include "../Common/bootinfo.h"

static const UInt64 kPTEAddressMask = 0x0000FFFFFFFFF000ULL;

static inline Address GetPTEAddress(UInt64 entry) { return entry & kPTEAddressMask; }

static inline UInt16 GetL0Index(Address virt) { return (virt >> 39) & 0x1FF; }
static inline UInt16 GetL1Index(Address virt) { return (virt >> 30) & 0x1FF; }
static inline UInt16 GetL2Index(Address virt) { return (virt >> 21) & 0x1FF; }
static inline UInt16 GetL3Index(Address virt) { return (virt >> 12) & 0x1FF; }
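// A hedged worked example of the split above, using a purely illustrative
// address: for virt = 0x4025F000,
//   GetL0Index -> 0, GetL1Index -> 1, GetL2Index -> 1, GetL3Index -> 0x5F,
// and the low 12 bits (0x000 here) are the byte offset within the 4 KiB page.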
static Boolean isInitialized = false;

Address* gVMKernelL0Table = nullptr;
Address gVMKernelL0Physical = 0;

extern char _kernelStart[];
extern char _kernelEnd[];

static Address* GetVirtualTable(Address phys)
{
    if (isInitialized) return (Address*)VMPhysToHHDM(phys);
    return (Address*)phys;
}

static inline Address* GetOrAllocateTable(Address* parentTable, Size index, UInt64 flags, UInt64 directoryFlags)
{
    if (!(parentTable[index] & kPTEValid)) {
        Pointer newTable = PMMAllocatePage();
        if (!newTable) return nullptr;

        Address* newTableVirt = GetVirtualTable((Address)newTable);
        MemorySet(newTableVirt, 0, kVMPageSize);

        parentTable[index] = (Address)newTable | directoryFlags;
        return newTableVirt;
    }

    // if user access requested, clear APTable bit to allow EL0 table walk.
    // otherwise leave APTable as-is (kernel-only tables keep kPTETableNoEL0).
    if (flags & kPTEUser) {
        parentTable[index] &= ~kPTETableNoEL0;
    }

    Address physAddress = GetPTEAddress(parentTable[index]);
    return GetVirtualTable(physAddress);
}

static Address GetMappedPhysicalAddress(Address* l0Table, Address virt)
{
    // A little bit of Monica in my life
    UInt16 l0Index = GetL0Index(virt);
    // A little bit of Erica by my side
    UInt16 l1Index = GetL1Index(virt);
    // A little bit of Rita's all I need
    UInt16 l2Index = GetL2Index(virt);
    // A little bit of Tina's what I see
    UInt16 l3Index = GetL3Index(virt);

    // A little bit of Sandra in the sun
    Address* l0Virt = l0Table;
    if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);
    if (!(l0Virt[l0Index] & kPTEValid)) return 0;

    // A little bit of Mary all night long...
    Address* l1Virt = GetVirtualTable(GetPTEAddress(l0Virt[l0Index]));
    if (!(l1Virt[l1Index] & kPTEValid)) return 0;

    // A little bit of Jessica, here I am!
    Address* l2Virt = GetVirtualTable(GetPTEAddress(l1Virt[l1Index]));
    if (!(l2Virt[l2Index] & kPTEValid)) return 0;

    // A little bit of you makes me your man
    Address* l3Virt = GetVirtualTable(GetPTEAddress(l2Virt[l2Index]));
    if (!(l3Virt[l3Index] & kPTEValid)) return 0;

    return GetPTEAddress(l3Virt[l3Index]);
}

Address* VMMMapPage(Address* l0Table, Address phys, Address virt, UInt64 flags)
{
    UInt16 l0Index = GetL0Index(virt);
    UInt16 l1Index = GetL1Index(virt);
    UInt16 l2Index = GetL2Index(virt);
    UInt16 l3Index = GetL3Index(virt);

    Address* l0Virt = l0Table;
    if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);

    // build directory flags for table descriptors
    // APTable=01 (kPTETableNoEL0) blocks EL0 entirely - used for kernel-only subtrees.
    // APTable=00 allows EL0 access by leaf page permissions - used for user mappings.
    UInt64 directoryFlags = kPTEValid | kPTETable;
    if (!(flags & kPTEUser)) {
        directoryFlags |= kPTETableNoEL0;
    }

    Address* l1Virt = GetOrAllocateTable(l0Virt, l0Index, flags, directoryFlags);
    if (!l1Virt) return nullptr;
    Address* l2Virt = GetOrAllocateTable(l1Virt, l1Index, flags, directoryFlags);
    if (!l2Virt) return nullptr;
    Address* l3Virt = GetOrAllocateTable(l2Virt, l2Index, flags, directoryFlags);
    if (!l3Virt) return nullptr;

    l3Virt[l3Index] = phys | flags | kPTEPage | kPTEAccessFlag | kPTEValid;

    if (isInitialized) CPUInvalidateTLB(virt);
    return l3Virt;
}

void VMMUnmapPage(Address* l0Table, Address virt)
{
    UInt16 l0Index = GetL0Index(virt);
    UInt16 l1Index = GetL1Index(virt);
    UInt16 l2Index = GetL2Index(virt);
    UInt16 l3Index = GetL3Index(virt);

    Address* l0Virt = l0Table;
    if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);
    if (!(l0Virt[l0Index] & kPTEValid)) return;

    Address* l1Virt = GetVirtualTable(GetPTEAddress(l0Virt[l0Index]));
    if (!(l1Virt[l1Index] & kPTEValid)) return;

    Address* l2Virt = GetVirtualTable(GetPTEAddress(l1Virt[l1Index]));
    if (!(l2Virt[l2Index] & kPTEValid)) return;

    Address* l3Virt = GetVirtualTable(GetPTEAddress(l2Virt[l2Index]));
    l3Virt[l3Index] = 0;

    CPUInvalidateTLB(virt);
}

Pointer VMMGetOrAllocatePage(Address* l0Table, Address virt, UInt64 flags)
{
    Address existingPhys = GetMappedPhysicalAddress(l0Table, virt);
    if (existingPhys) return (Pointer)GetVirtualTable(existingPhys);

    Pointer newPhys = PMMAllocatePage();
    if (!newPhys) return nullptr; // OOM

    Address* mappedVirt = VMMMapPage(l0Table, (Address)newPhys, virt, flags);
    if (!mappedVirt) return nullptr;

    Pointer finalVirtAddress = (Pointer)GetVirtualTable((Address)newPhys);
    MemorySet(finalVirtAddress, 0, kVMPageSize);
    return finalVirtAddress;
}
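// Usage sketch for the mapping entry points above. The userL0 table and the
// addresses are hypothetical; the kPTE* flags are the ones used elsewhere in
// this file:
//
//   // Map one user-accessible, non-executable-at-EL1 data page:
//   Address phys = (Address)PMMAllocatePage();
//   VMMMapPage(userL0, phys, 0x0000000040000000ULL,
//              kPTENormalMem | kPTEAccessRW | kPTEUser | kPTEPrivNX);
//
//   // Or let the VMM allocate, map, and zero the backing page on demand:
//   Pointer page = VMMGetOrAllocatePage(userL0, 0x0000000040001000ULL,
//                                       kPTENormalMem | kPTEAccessRW | kPTEUser);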
void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info)
{
    gVMKernelL0Physical = (Address)PMMAllocatePage();
    gVMKernelL0Table = (Address*)gVMKernelL0Physical;
    if (!gVMKernelL0Physical) OSPanic("Failed to allocate kernel L0 table");
    MemorySet(gVMKernelL0Table, 0, kVMPageSize);

    OSLog("Mapping RAM.. Can take a while\n");
    Size totalRAM = bootMap->totalRAM.size;
    Size ramEnd = bootMap->totalRAM.base + totalRAM;
    for (Address phys = bootMap->totalRAM.base; phys < ramEnd; phys += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table, phys, VMPhysToHHDM(phys),
            kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX
        );
    }
    OSLog("RAM mapped\n");

    Size totalPages = bootMap->totalRAM.size / kVMPageSize;
    Size pmmBitmapSize = (totalPages + kVMBlocksPerByte - 1) / kVMBlocksPerByte;
    Size kernelSize = ((Address)_kernelEnd - (Address)_kernelStart) + pmmBitmapSize;
    kernelSize = (kernelSize + kVMPageSize - 1) & ~(kVMPageSize - 1);

    Address kernelPhysStart = kKernelPhysBase;
    for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
        Address phys = kernelPhysStart + offset;
        Address virt = (Address)_kernelStart + offset;
        VMMMapPage(gVMKernelL0Table, phys, virt, kPTENormalMem | kPTEAccessRW);
    }
    OSLog("Kernel mapped to HHDM\n");

    for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
        VMMMapPage(gVMKernelL0Table, kernelPhysStart + offset, kernelPhysStart + offset,
                   kPTENormalMem | kPTEAccessRW);
    }
    OSLog("Kernel Identity mapped\n");

    Address fbPhys = (Address)info->framebuffer.base;
    Size fbSize = info->framebuffer.baseSize;
    for (Address offset = 0; offset < fbSize; offset += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table, fbPhys + offset, kVMFbVirtBase + offset,
            kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
        );
    }
    OSLog("Framebuffer mapped\n");

    Address UARTPhys = bootMap->UART.base;
    if (!UARTPhys) UARTPhys = 0x09000000;
    VMMMapPage(
        gVMKernelL0Table, UARTPhys, VMPhysToHHDM(UARTPhys),
        kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
    );
    VMMMapPage(
        gVMKernelL0Table, UARTPhys, UARTPhys,
        kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
    );
    OSLog("UART mapped\n");

    Address gicdPhys = bootMap->GIC.GICD.base;
    Size gicdSize = bootMap->GIC.GICD.size;
    if (!gicdPhys) {
        gicdPhys = 0x08000000; // QEMU fallback
        gicdSize = 0x10000;
    }
    for (Address offset = 0; offset < gicdSize; offset += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table, gicdPhys + offset, VMPhysToHHDM(gicdPhys + offset),
            kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
        );
    }
    OSLog("GICD mapped\n");

    Address giccPhys = bootMap->GIC.GICC.base;
    Size giccSize = bootMap->GIC.GICC.size;
    if (!giccPhys) {
        // QEMU fallback: on the virt board the GICv2 CPU interface sits at
        // 0x08010000, immediately after the 64 KiB distributor window.
        giccPhys = 0x08010000;
        giccSize = 0x10000;
    }
    for (Address offset = 0; offset < giccSize; offset += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table, giccPhys + offset, VMPhysToHHDM(giccPhys + offset),
            kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
        );
    }
    OSLog("GICC mapped\n");

    info->framebuffer.base = (BIUInt32*)kVMFbVirtBase;

    OSLog("Enabling MMU...\n");
    CPUEnableMMU(gVMKernelL0Physical);
    isInitialized = true;
}
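// After CPUEnableMMU the HHDM is live and isInitialized is set, so later MMIO
// mappings follow the same pattern as the boot-time ones above, and VMMMapPage
// invalidates the stale TLB entry itself. A minimal sketch, assuming a
// hypothetical device register page at devPhys:
//
//   VMMMapPage(
//       gVMKernelL0Table, devPhys, VMPhysToHHDM(devPhys),
//       kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
//   );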