feat(arm64): higher-half kernel, early MMU in boot, and VMM
This commit is contained in:
+18
-23
@@ -1,19 +1,21 @@
|
||||
#include <VM/PMM.h>
|
||||
#include <Lib/String.h>
|
||||
#include "../Common/bootinfo.h"
|
||||
|
||||
extern char _kernelStart[];
|
||||
extern char _kernelEnd[];
|
||||
|
||||
static Address sPMMRamBase = 0;
|
||||
static MemoryPointer sPMMBitmap;
|
||||
static Size sPMMBitmapSize;
|
||||
static Size sPMMTotalPages;
|
||||
|
||||
// Byte index into the PMM bitmap for the page containing `address`.
// Addresses are rebased against the start of managed RAM so bit 0 of
// the bitmap corresponds to the first page at `sPMMRamBase`.
// (Fix: the un-rebased duplicate return was unreachable dead code.)
static inline Size BitmapGetByteIndex(Address address) {
    return ((address - sPMMRamBase) / kVMPageSize) / kVMBlocksPerByte;
}
|
||||
|
||||
// Bit offset (0..kVMBlocksPerByte-1) within that byte for the page
// containing `address`. Rebased against `sPMMRamBase` so it pairs
// correctly with BitmapGetByteIndex — without the rebase, a non-zero
// RAM base would index the wrong bit.
static inline UInt8 BitmapGetBitOffset(Address address) {
    return (UInt8)(((address - sPMMRamBase) / kVMPageSize) % kVMBlocksPerByte);
}
|
||||
|
||||
// True when the page containing `address` is marked as used in `bitmap`.
// (Fix: removed an unreachable second return of the wrong type that had
// been duplicated into this function.)
static inline Boolean BitmapTest(const MemoryPointer bitmap, Address address) {
    return (bitmap[BitmapGetByteIndex(address)] & (1U << BitmapGetBitOffset(address))) != 0;
}
|
||||
|
||||
static inline void BitmapSet(MemoryPointer bitmap, Address address) {
|
||||
@@ -24,27 +26,18 @@ static inline void BitmapUnset(MemoryPointer bitmap, Address address) {
|
||||
bitmap[BitmapGetByteIndex(address)] &= ~(1U << BitmapGetBitOffset(address));
|
||||
}
|
||||
|
||||
static MemoryPointer sPMMBitmap;
|
||||
static Size sPMMBitmapSize;
|
||||
static Size sPMMTotalPages;
|
||||
void PMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
|
||||
sPMMRamBase = bootMap->totalRAM.base;
|
||||
sPMMTotalPages = bootMap->totalRAM.size / kVMPageSize;
|
||||
sPMMBitmapSize = sPMMTotalPages / kVMBlocksPerByte;
|
||||
sPMMBitmap = (MemoryPointer)_kernelEnd;
|
||||
MemorySet(sPMMBitmap, 0, sPMMBitmapSize);
|
||||
|
||||
void PMMInitialize(VMBootMemoryMap* bootMap) {
|
||||
UInt32 vIndex = bootMap->reservedCount;
|
||||
bootMap->reserved[vIndex].base = 0x0;
|
||||
bootMap->reserved[vIndex].size = bootMap->totalRAM.base;
|
||||
bootMap->reservedCount++;
|
||||
|
||||
UInt32 kIndex = bootMap->reservedCount;
|
||||
bootMap->reserved[kIndex].base = (Address)_kernelStart;
|
||||
bootMap->reserved[kIndex].size = (Address)_kernelEnd - (Address)_kernelStart;
|
||||
bootMap->reservedCount++;
|
||||
|
||||
sPMMTotalPages = bootMap->totalRAM.size / kVMPageSize;
|
||||
sPMMBitmapSize = sPMMTotalPages / kVMBlocksPerByte;
|
||||
sPMMBitmap = (MemoryPointer)_kernelEnd;
|
||||
|
||||
StringSet(sPMMBitmap, 0, sPMMBitmapSize);
|
||||
|
||||
UInt32 bIndex = bootMap->reservedCount;
|
||||
bootMap->reserved[bIndex].base = (Address)sPMMBitmap;
|
||||
bootMap->reserved[bIndex].size = sPMMBitmapSize;
|
||||
@@ -58,7 +51,9 @@ void PMMInitialize(VMBootMemoryMap* bootMap) {
|
||||
|
||||
for (Size p = 0; p < pagesToReserve; p++) {
|
||||
Address pageAdress = regionBase + (p * kVMPageSize);
|
||||
BitmapSet(sPMMBitmap, pageAdress);
|
||||
if (pageAdress >= sPMMRamBase && pageAdress < (sPMMRamBase + bootMap->totalRAM.size)) {
|
||||
BitmapSet(sPMMBitmap, pageAdress);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -68,7 +63,7 @@ Pointer PMMAllocatePage() {
|
||||
if (sPMMBitmap[i] == 0xFF) continue;
|
||||
for (Size bit = 0; bit < kVMBlocksPerByte; bit++) {
|
||||
if ((sPMMBitmap[i] & (1 << bit)) == 0) {
|
||||
Address address = (i * kVMBlocksPerByte + bit) * kVMPageSize;
|
||||
Address address = sPMMRamBase + (i * kVMBlocksPerByte + bit) * kVMPageSize;
|
||||
BitmapSet(sPMMBitmap, address);
|
||||
return (Pointer)address;
|
||||
}
|
||||
|
||||
@@ -0,0 +1,183 @@
|
||||
#include <VM/VMM.h>
|
||||
#include <VM/PMM.h>
|
||||
#include <Lib/String.h>
|
||||
#include <Arch/CPU.h>
|
||||
#include <OS/Panic.h>
|
||||
#include "../Common/bootinfo.h"
|
||||
|
||||
static const UInt64 kPTEAddressMask = 0x0000FFFFFFFFF000ULL;
|
||||
static inline Address GetPTEAddress(UInt64 entry) { return entry & kPTEAddressMask; }
|
||||
static inline UInt64 GetPTEFlags(UInt64 entry) { return entry & ~kPTEAddressMask; }
|
||||
static inline UInt16 GetL0Index(Address virt) { return (virt >> 39) & 0x1FF; }
|
||||
static inline UInt16 GetL1Index(Address virt) { return (virt >> 30) & 0x1FF; }
|
||||
static inline UInt16 GetL2Index(Address virt) { return (virt >> 21) & 0x1FF; }
|
||||
static inline UInt16 GetL3Index(Address virt) { return (virt >> 12) & 0x1FF; }
|
||||
|
||||
static Boolean isInitialized = false;
|
||||
|
||||
Address* gVMKernelL0Table = nullptr;
|
||||
Address gVMKernelL0Physical = 0;
|
||||
|
||||
// Turn a physical table address into a pointer the kernel can
// dereference: identity before the MMU is enabled, higher-half direct
// map afterwards.
static Address* GetVirtualTable(Address phys) {
    return isInitialized ? (Address*)VMPhysToHHDM(phys) : (Address*)phys;
}
|
||||
|
||||
// Return the (virtual) next-level table behind parentTable[index],
// allocating and zeroing a fresh one when the slot is empty.
// Returns nullptr when physical allocation fails.
static inline Address* GetOrAllocateTable(Address* parentTable, Size index, UInt64 flags, UInt64 directoryFlags) {
    UInt64 entry = parentTable[index];

    if (entry & kPTEValid) {
        // Slot already populated: widen it with the user bit when the
        // new leaf mapping needs EL0 access, then descend.
        parentTable[index] = entry | (flags & kPTEUser);
        return GetVirtualTable(GetPTEAddress(parentTable[index]));
    }

    Pointer tablePhys = PMMAllocatePage();
    if (!tablePhys) return nullptr;

    Address* tableVirt = GetVirtualTable((Address)tablePhys);
    MemorySet(tableVirt, 0, kVMPageSize);

    parentTable[index] = (Address)tablePhys | directoryFlags;
    return tableVirt;
}
|
||||
|
||||
// Walk all four table levels for `virt` and return the mapped physical
// address, or 0 when any level of the walk is absent.
static Address GetMappedPhysicalAddress(Address* l0Table, Address virt) {
    Address* table = l0Table;
    if (isInitialized) table = (Address*)VMPhysToHHDM((Address)l0Table);

    const UInt16 indices[4] = {
        GetL0Index(virt), GetL1Index(virt), GetL2Index(virt), GetL3Index(virt)
    };

    for (Size level = 0; level < 4; level++) {
        Address entry = table[indices[level]];
        if (!(entry & kPTEValid)) return 0;
        if (level == 3) return GetPTEAddress(entry); // leaf reached
        table = GetVirtualTable(GetPTEAddress(entry));
    }

    return 0; // not reachable
}
|
||||
|
||||
// Map one 4 KiB page virt -> phys in the table tree rooted at l0Table,
// allocating intermediate tables on demand. Returns the (virtual) L3
// table holding the new entry, or nullptr on allocation failure.
Address* VMMMapPage(Address* l0Table, Address phys, Address virt, UInt64 flags) {
    Address* root = l0Table;
    if (isInitialized) root = (Address*)VMPhysToHHDM((Address)l0Table);

    // Intermediate levels are plain table descriptors; only the user
    // bit from the leaf flags propagates upward.
    const UInt64 tableFlags = kPTEValid | kPTETable | (flags & kPTEUser);

    Address* l1 = GetOrAllocateTable(root, GetL0Index(virt), flags, tableFlags);
    if (!l1) return nullptr;

    Address* l2 = GetOrAllocateTable(l1, GetL1Index(virt), flags, tableFlags);
    if (!l2) return nullptr;

    Address* l3 = GetOrAllocateTable(l2, GetL2Index(virt), flags, tableFlags);
    if (!l3) return nullptr;

    l3[GetL3Index(virt)] = phys | flags | kPTEPage | kPTEAccessFlag | kPTEValid;
    CPUInvalidateTLB(virt);
    return l3;
}
|
||||
|
||||
// Clear the L3 mapping for `virt` (if every level of the walk exists)
// and invalidate the TLB entry. Intermediate tables are kept even when
// they become empty.
void VMMUnmapPage(Address* l0Table, Address virt) {
    Address* table = l0Table;
    if (isInitialized) table = (Address*)VMPhysToHHDM((Address)l0Table);

    const UInt16 indices[3] = {
        GetL0Index(virt), GetL1Index(virt), GetL2Index(virt)
    };

    // Descend L0 -> L2; a missing level means nothing is mapped.
    for (Size level = 0; level < 3; level++) {
        Address entry = table[indices[level]];
        if (!(entry & kPTEValid)) return;
        table = GetVirtualTable(GetPTEAddress(entry));
    }

    table[GetL3Index(virt)] = 0;
    CPUInvalidateTLB(virt);
}
|
||||
|
||||
// Return a kernel-usable pointer to the page backing `virt`, mapping a
// freshly allocated, zeroed physical page there when nothing is mapped
// yet. Returns nullptr on out-of-memory or mapping failure.
Pointer VMMGetOrAllocatePage(Address* l0Table, Address virt, UInt64 flags) {
    Address mappedPhys = GetMappedPhysicalAddress(l0Table, virt);
    if (mappedPhys) return (Pointer)GetVirtualTable(mappedPhys);

    Pointer page = PMMAllocatePage();
    if (!page) return nullptr; // OOM

    if (!VMMMapPage(l0Table, (Address)page, virt, flags)) {
        // NOTE(review): `page` is leaked on this path — no PMM free
        // routine is visible here to return it with; confirm and plug.
        return nullptr;
    }

    Pointer pageVirt = (Pointer)GetVirtualTable((Address)page);
    MemorySet(pageVirt, 0, kVMPageSize);
    return pageVirt;
}
|
||||
|
||||
// Build the kernel address space and turn the MMU on.
// Mappings created, in order:
//   1. all of RAM direct-mapped into the higher half (HHDM), RW, NX
//   2. the kernel image at its higher-half VMA
//   3. the framebuffer as device memory at kVMFbVirtBase
//   4. the UART MMIO page through the HHDM
//   5. an identity map of the kernel, so the instruction stream
//      survives the instant the MMU is enabled (we are still executing
//      from physical addresses at that moment)
// Fixes: panic on allocation failure BEFORE deriving gVMKernelL0Table
// from it; `ramEnd` is an Address, not a Size; stray `;;` removed.
void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
    gVMKernelL0Physical = (Address)PMMAllocatePage();
    if (!gVMKernelL0Physical) OSPanic("Failed to allocate kernel L0 table");
    gVMKernelL0Table = (Address*)gVMKernelL0Physical;

    // 1) HHDM: every physical page of RAM, readable/writable, never executable.
    Size totalRAM = bootMap->totalRAM.size;
    Address ramEnd = bootMap->totalRAM.base + totalRAM;
    for (Address phys = bootMap->totalRAM.base; phys < ramEnd; phys += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table,
            phys, VMPhysToHHDM(phys),
            kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX
        );
    }

    // 2) Kernel image at its link-time higher-half VMA.
    Address kernelPhysStart = (Address)info->kernelInfo.kernelAddress;
    Size kernelSize = info->kernelInfo.kernelSize;
    for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
        VMMMapPage(gVMKernelL0Table, kernelPhysStart + offset, kVMKernelVMA + offset,
                   kPTENormalMem | kPTEAccessRW);
    }

    // 3) Framebuffer: device memory at its fixed virtual window.
    Address fbPhys = (Address)info->framebuffer.base;
    Size fbSize = info->framebuffer.baseSize;
    for (Address offset = 0; offset < fbSize; offset += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table, fbPhys + offset,
            kVMFbVirtBase + offset,
            kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
        );
    }

    // 4) UART MMIO through the HHDM.
    Address UARTPhys = bootMap->UART.base;
    VMMMapPage(
        gVMKernelL0Table, UARTPhys, VMPhysToHHDM(UARTPhys),
        kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
    );

    // 5) Transitional identity mapping of the kernel image.
    for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
        VMMMapPage(gVMKernelL0Table, kernelPhysStart + offset, kernelPhysStart + offset,
                   kPTENormalMem | kPTEAccessRW);
    }

    // Repoint the framebuffer at its virtual address before enabling
    // translation, since the physical alias disappears for callers.
    info->framebuffer.base = (BIUInt32*)kVMFbVirtBase;
    CPUEnableMMU(gVMKernelL0Physical);
    isInitialized = true;
}
|
||||
Reference in New Issue
Block a user