fix: use rounded bitmap size in VMM kernel mapping calculation

This commit is contained in:
karina
2026-05-02 23:17:09 +04:00
parent 5673c44a99
commit 7ff9f4ad4c
+27 -26
View File
@@ -8,9 +8,9 @@
// Mask selecting the physical-address field (bits [47:12]) of a page-table
// descriptor; low bits hold attribute/permission flags, high bits are unused.
static const UInt64 kPTEAddressMask = 0x0000FFFFFFFFF000ULL;
// Strip attribute bits from a descriptor, leaving the next-level table or
// mapped-page physical address.
static inline Address GetPTEAddress(UInt64 entry) { return entry & kPTEAddressMask; }
// Per-level table indices for a 4-level, 4 KiB-granule translation scheme:
// each level consumes 9 bits of the virtual address (512 entries per table).
// Presumably AArch64 VMSAv8-64, given the GIC/UART mappings elsewhere in this
// file — TODO confirm against the target architecture.
static inline UInt16 GetL0Index(Address virt) { return (virt >> 39) & 0x1FF; }
static inline UInt16 GetL1Index(Address virt) { return (virt >> 30) & 0x1FF; }
static inline UInt16 GetL2Index(Address virt) { return (virt >> 21) & 0x1FF; }
// NOTE(review): the three helpers below repeat the three above verbatim. This
// page is a diff view with the +/- markers stripped, so old and new sides of
// the hunk both appear; in the actual source each helper is defined once.
static inline UInt16 GetL0Index(Address virt) { return (virt >> 39) & 0x1FF; }
static inline UInt16 GetL1Index(Address virt) { return (virt >> 30) & 0x1FF; }
static inline UInt16 GetL2Index(Address virt) { return (virt >> 21) & 0x1FF; }
static inline UInt16 GetL3Index(Address virt) { return (virt >> 12) & 0x1FF; }
// Set to true only after CPUEnableMMU() runs (see VMMInitialize); the mapping
// routines use it to decide whether table pointers must be translated through
// VMPhysToHHDM before dereferencing.
static Boolean isInitialized = false;
@@ -33,13 +33,13 @@ static inline Address* GetOrAllocateTable(Address* parentTable, Size index, UInt
Address* newTableVirt = GetVirtualTable((Address)newTable);
MemorySet(newTableVirt, 0, kVMPageSize);
parentTable[index] = (Address)newTable | directoryFlags;
return newTableVirt;
}
parentTable[index] |= (flags & kPTEUser);
Address physAddress = GetPTEAddress(parentTable[index]);
return GetVirtualTable(physAddress);
}
@@ -58,11 +58,11 @@ static Address GetMappedPhysicalAddress(Address* l0Table, Address virt) {
Address* l0Virt = l0Table;
if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);
if (!(l0Virt[l0Index] & kPTEValid)) return 0;
// A little bit of Mary all night long...
Address* l1Virt = GetVirtualTable(GetPTEAddress(l0Virt[l0Index]));
if (!(l1Virt[l1Index] & kPTEValid)) return 0;
// A little bit of Jessica, here I am!
Address* l2Virt = GetVirtualTable(GetPTEAddress(l1Virt[l1Index]));
if (!(l2Virt[l2Index] & kPTEValid)) return 0;
@@ -70,7 +70,7 @@ static Address GetMappedPhysicalAddress(Address* l0Table, Address virt) {
// A little bit of you makes me your man
Address* l3Virt = GetVirtualTable(GetPTEAddress(l2Virt[l2Index]));
if (!(l3Virt[l3Index] & kPTEValid)) return 0;
return GetPTEAddress(l3Virt[l3Index]);
}
@@ -84,7 +84,7 @@ Address* VMMMapPage(Address* l0Table, Address phys, Address virt, UInt64 flags)
if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);
UInt64 directoryFlags = kPTEValid | kPTETable | (flags & kPTEUser);
Address* l1Virt = GetOrAllocateTable(l0Virt, l0Index, flags, directoryFlags);
if (!l1Virt) return nullptr;
@@ -108,7 +108,7 @@ void VMMUnmapPage(Address* l0Table, Address virt) {
Address* l0Virt = l0Table;
if (isInitialized) l0Virt = (Address*)VMPhysToHHDM((Address)l0Table);
if (!(l0Virt[l0Index] & kPTEValid)) return;
Address* l1Virt = GetVirtualTable(GetPTEAddress(l0Virt[l0Index]));
if (!(l1Virt[l1Index] & kPTEValid)) return;
@@ -149,15 +149,16 @@ void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
for (Address phys = bootMap->totalRAM.base; phys < ramEnd; phys += kVMPageSize) {
VMMMapPage(
gVMKernelL0Table,
phys, VMPhysToHHDM(phys),
phys, VMPhysToHHDM(phys),
kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX
);
}
OSLog("RAM mapped\n");
Size pmmBitmapSize = (bootMap->totalRAM.size / kVMPageSize) / 8;
Size totalPages = bootMap->totalRAM.size / kVMPageSize;
Size pmmBitmapSize = (totalPages + kVMBlocksPerByte - 1) / kVMBlocksPerByte;
Size kernelSize = ((Address)_kernelEnd - (Address)_kernelStart) + pmmBitmapSize;
kernelSize = (kernelSize + kVMPageSize - 1) & ~(kVMPageSize - 1);
kernelSize = (kernelSize + kVMPageSize - 1) & ~(kVMPageSize - 1);
Address kernelPhysStart = kKernelPhysBase;
@@ -177,8 +178,8 @@ void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
Size fbSize = info->framebuffer.baseSize;
for (Address offset = 0; offset < fbSize; offset += kVMPageSize) {
VMMMapPage(
gVMKernelL0Table, fbPhys + offset,
kVMFbVirtBase + offset,
gVMKernelL0Table, fbPhys + offset,
kVMFbVirtBase + offset,
kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
);
}
@@ -188,26 +189,26 @@ void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
if (!UARTPhys) UARTPhys = 0x09000000;
VMMMapPage(
gVMKernelL0Table, UARTPhys, VMPhysToHHDM(UARTPhys),
gVMKernelL0Table, UARTPhys, VMPhysToHHDM(UARTPhys),
kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
);
VMMMapPage(
gVMKernelL0Table, UARTPhys, UARTPhys,
gVMKernelL0Table, UARTPhys, UARTPhys,
kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
);
OSLog("UART mapped\n");
Address gicdPhys = bootMap->GIC.GICD.base;
Size gicdSize = bootMap->GIC.GICD.size;
if (!gicdPhys) {
if (!gicdPhys) {
gicdPhys = 0x08000000; // QEMU fallback
gicdSize = 0x10000;
}
for (Address offset = 0; offset < gicdSize; offset += kVMPageSize) {
VMMMapPage(
gVMKernelL0Table, gicdPhys + offset,
VMPhysToHHDM(gicdPhys + offset),
gVMKernelL0Table, gicdPhys + offset,
VMPhysToHHDM(gicdPhys + offset),
kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
);
}
@@ -215,15 +216,15 @@ void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
Address giccPhys = bootMap->GIC.GICC.base;
Size giccSize = bootMap->GIC.GICC.size;
if (!giccPhys) {
if (!giccPhys) {
giccPhys = 0x08001000; // QEMU fallback
giccSize = 0x10000;
}
for (Address offset = 0; offset < giccSize; offset += kVMPageSize) {
VMMMapPage(
gVMKernelL0Table, giccPhys + offset,
VMPhysToHHDM(giccPhys + offset),
gVMKernelL0Table, giccPhys + offset,
VMPhysToHHDM(giccPhys + offset),
kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
);
}
@@ -233,4 +234,4 @@ void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
OSLog("Enabling MMU...\n");
CPUEnableMMU(gVMKernelL0Physical);
isInitialized = true;
}
}