fix(vmm): make MMU bring-up and kernel mappings reliable
This commit is contained in:
@@ -2,6 +2,7 @@
|
||||
#include <Arch/DTB.h>
|
||||
#include <VM/PMM.h>
|
||||
#include <VM/VMM.h>
|
||||
#include <VM/Heap.h>
|
||||
#include <OS/Log.h>
|
||||
#include <OS/Panic.h>
|
||||
|
||||
@@ -14,8 +15,9 @@ void KernelMain(Bootinfo* bootinfo) {
|
||||
VMBootMemoryMap bootMap = {0};
|
||||
bootMap.reservedCount = 0;
|
||||
DTBParse(bootinfo->dtb, &bootMap);
|
||||
PMMInitialize(&bootMap, bootinfo);
|
||||
PMMInitialize(&bootMap);
|
||||
VMMInitialize(&bootMap, bootinfo);
|
||||
HeapInitialize();
|
||||
|
||||
OSLog("Kernel initialized.\n");
|
||||
}
|
||||
@@ -0,0 +1,101 @@
|
||||
#include <VM/Heap.h>
|
||||
#include <VM/PMM.h>
|
||||
#include <VM/VMM.h>
|
||||
#include <Lib/String.h>
|
||||
#include <OS/Panic.h>
|
||||
|
||||
static VMHeapBlockHeader* sVMHeapListHead = nullptr;
|
||||
|
||||
// Merge `current` with its immediate successor when that successor is free.
// The guard re-checks both conditions so a stray call is harmless.
static void CombineForward(VMHeapBlockHeader* current) {
    VMHeapBlockHeader* follower = current->next;
    if (!follower || !follower->isFree) return;

    // Absorb the follower's header and payload into this block.
    current->size += sizeof(VMHeapBlockHeader) + follower->size;

    // Unlink the follower and repair the back-pointer of the new neighbour.
    current->next = follower->next;
    if (current->next) current->next->previous = current;
}
||||
|
||||
void HeapInitialize() {
|
||||
Address heapStart = kKernelHeapStart;
|
||||
|
||||
for (UInt64 i = 0; i < kHeapSizePages; i++) {
|
||||
Address physical = (Address)PMMAllocatePage();
|
||||
if (!physical) OSPanic("OOM during heap init");
|
||||
|
||||
Address virtual = heapStart + (i * kVMPageSize);
|
||||
VMMMapPage(gVMKernelL0Table, physical, virtual, kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX);
|
||||
}
|
||||
|
||||
sVMHeapListHead = (VMHeapBlockHeader*)heapStart;
|
||||
sVMHeapListHead->magic = kHeapBlockHeaderMagic;
|
||||
sVMHeapListHead->size = (kHeapSizePages * kVMPageSize) - sizeof(VMHeapBlockHeader);
|
||||
sVMHeapListHead->isFree = true;
|
||||
sVMHeapListHead->next = nullptr;
|
||||
sVMHeapListHead->previous = nullptr;
|
||||
}
|
||||
|
||||
// First-fit allocator over the kernel heap free list.
// Returns a pointer to a payload of at least `size` bytes (rounded up to a
// 16-byte multiple), or nullptr if size is 0 or no block is large enough.
Pointer HeapAllocate(Size size) {
    if (size == 0) return nullptr;
    // Reject requests whose 16-byte round-up would wrap: the wrapped value
    // would be tiny and the caller would get less memory than asked for.
    if (size > ((Size)-1) - 15) return nullptr;
    Size alignedSize = (size + 15) & ~(Size)15;

    VMHeapBlockHeader* current = sVMHeapListHead;
    while (current) {
        if (current->isFree && current->size >= alignedSize) {
            // Split only when the remainder can hold a header plus a
            // minimal (16-byte) payload; otherwise hand out the whole block.
            if (current->size > alignedSize + sizeof(VMHeapBlockHeader) + 16) {
                VMHeapBlockHeader* new_block = (VMHeapBlockHeader*)((Address)current + sizeof(VMHeapBlockHeader) + alignedSize);
                new_block->size = current->size - alignedSize - sizeof(VMHeapBlockHeader);
                new_block->isFree = true;
                new_block->next = current->next;
                new_block->previous = current;
                new_block->magic = kHeapBlockHeaderMagic;

                if (current->next) current->next->previous = new_block;
                current->next = new_block;
                current->size = alignedSize;
            }
            current->isFree = false;
            // Payload begins immediately after the header.
            return (Pointer)((Address)current + sizeof(VMHeapBlockHeader));
        }
        current = current->next;
    }

    // No block satisfies the request.
    return nullptr;
}
|
||||
|
||||
// Return an allocation to the free list, coalescing with free neighbours.
// nullptr and pointers whose header fails the magic check are ignored.
void HeapFree(Pointer pointer) {
    if (!pointer) return;

    VMHeapBlockHeader* block =
        (VMHeapBlockHeader*)((Address)pointer - sizeof(VMHeapBlockHeader));
    // A bad magic means this was never a heap allocation (or the header was
    // corrupted); touching the list would make things worse, so bail out.
    if (block->magic != kHeapBlockHeaderMagic) return;

    block->isFree = true;

    // Merge forward first, then let a free predecessor absorb this block.
    if (block->next && block->next->isFree) CombineForward(block);
    if (block->previous && block->previous->isFree) CombineForward(block->previous);
}
|
||||
|
||||
// Resize an allocation with realloc-like semantics: a nullptr pointer acts
// as HeapAllocate, newSize 0 frees and returns nullptr. On failure the
// original allocation is left valid and nullptr is returned.
Pointer HeapResize(Pointer pointer, Size newSize) {
    if (!pointer) return HeapAllocate(newSize);
    if (newSize == 0) {
        HeapFree(pointer);
        return nullptr;
    }

    // Reject sizes whose 16-byte round-up would wrap around.
    if (newSize > ((Size)-1) - 15) return nullptr;
    Size alignedSize = (newSize + 15) & ~(Size)15;

    VMHeapBlockHeader* current = (VMHeapBlockHeader*)((Address)pointer - sizeof(VMHeapBlockHeader));
    // Validate the header before trusting its fields (matches HeapFree).
    if (current->magic != kHeapBlockHeaderMagic) return nullptr;

    // Already big enough: shrinking in place is a no-op.
    if (current->size >= alignedSize) {
        return pointer;
    }

    // Grow in place by absorbing a free successor when that suffices.
    if (current->next && current->next->isFree &&
        (current->size + sizeof(VMHeapBlockHeader) + current->next->size) >= alignedSize) {
        CombineForward(current);
        return pointer;
    }

    // Fall back to allocate-copy-free. Here current->size < alignedSize and
    // the new block holds at least alignedSize bytes, so the copy fits.
    Pointer newPointer = HeapAllocate(newSize);
    if (newPointer) {
        MemoryCopy(newPointer, pointer, current->size);
        HeapFree(pointer);
    }

    return newPointer;
}
|
||||
@@ -1,6 +1,5 @@
|
||||
#include <VM/PMM.h>
|
||||
#include <Lib/String.h>
|
||||
#include "../Common/bootinfo.h"
|
||||
|
||||
extern char _kernelStart[];
|
||||
extern char _kernelEnd[];
|
||||
@@ -26,13 +25,18 @@ static inline void BitmapUnset(MemoryPointer bitmap, Address address) {
|
||||
bitmap[BitmapGetByteIndex(address)] &= ~(1U << BitmapGetBitOffset(address));
|
||||
}
|
||||
|
||||
void PMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
|
||||
void PMMInitialize(VMBootMemoryMap* bootMap) {
|
||||
sPMMRamBase = bootMap->totalRAM.base;
|
||||
sPMMTotalPages = bootMap->totalRAM.size / kVMPageSize;
|
||||
sPMMBitmapSize = sPMMTotalPages / kVMBlocksPerByte;
|
||||
sPMMBitmap = (MemoryPointer)_kernelEnd;
|
||||
MemorySet(sPMMBitmap, 0, sPMMBitmapSize);
|
||||
|
||||
UInt32 safeIndex = bootMap->reservedCount;
|
||||
bootMap->reserved[safeIndex].base = sPMMRamBase;
|
||||
bootMap->reserved[safeIndex].size = 16 * 1024 * 1024; // 16 Mb
|
||||
bootMap->reservedCount++;
|
||||
|
||||
UInt32 kIndex = bootMap->reservedCount;
|
||||
bootMap->reserved[kIndex].base = (Address)_kernelStart;
|
||||
bootMap->reserved[kIndex].size = (Address)_kernelEnd - (Address)_kernelStart;
|
||||
|
||||
+30
-8
@@ -4,6 +4,7 @@
|
||||
#include <Arch/CPU.h>
|
||||
#include <OS/Panic.h>
|
||||
#include "../Common/bootinfo.h"
|
||||
#include "OS/Log.h"
|
||||
|
||||
static const UInt64 kPTEAddressMask = 0x0000FFFFFFFFF000ULL;
|
||||
// Mask off the flag bits of a page-table entry, leaving the physical
// address field (bits 12..47, per kPTEAddressMask above).
static inline Address GetPTEAddress(UInt64 entry) { return entry & kPTEAddressMask; }
|
||||
@@ -18,6 +19,9 @@ static Boolean isInitialized = false;
|
||||
Address* gVMKernelL0Table = nullptr;
|
||||
Address gVMKernelL0Physical = 0;
|
||||
|
||||
extern char _kernelStart[];
|
||||
extern char _kernelEnd[];
|
||||
|
||||
static Address* GetVirtualTable(Address phys) {
|
||||
if (isInitialized) return (Address*)VMPhysToHHDM(phys);
|
||||
return (Address*)phys;
|
||||
@@ -92,7 +96,7 @@ Address* VMMMapPage(Address* l0Table, Address phys, Address virt, UInt64 flags)
|
||||
if (!l3Virt) return nullptr;
|
||||
|
||||
l3Virt[l3Index] = phys | flags | kPTEPage | kPTEAccessFlag | kPTEValid;
|
||||
CPUInvalidateTLB(virt);
|
||||
if (isInitialized) CPUInvalidateTLB(virt);
|
||||
return l3Virt;
|
||||
}
|
||||
|
||||
@@ -138,7 +142,9 @@ void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
|
||||
gVMKernelL0Physical = (Address)PMMAllocatePage();
|
||||
gVMKernelL0Table = (Address*)gVMKernelL0Physical;
|
||||
if (!gVMKernelL0Physical) OSPanic("Failed to allocate kernel L0 table");
|
||||
MemorySet(gVMKernelL0Table, 0, kVMPageSize);
|
||||
|
||||
OSLog("Mapping RAM.. Can take a while\n");
|
||||
Size totalRAM = bootMap->totalRAM.size;
|
||||
Size ramEnd = bootMap->totalRAM.base + totalRAM;
|
||||
for (Address phys = bootMap->totalRAM.base; phys < ramEnd; phys += kVMPageSize) {
|
||||
@@ -148,14 +154,25 @@ void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
|
||||
kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX
|
||||
);
|
||||
}
|
||||
OSLog("RAM mapped\n");
|
||||
|
||||
Size pmmBitmapSize = (bootMap->totalRAM.size / kVMPageSize) / 8;
|
||||
Size kernelSize = ((Address)_kernelEnd - (Address)_kernelStart) + pmmBitmapSize;
|
||||
kernelSize = (kernelSize + kVMPageSize - 1) & ~(kVMPageSize - 1);
|
||||
|
||||
Address kernelPhysStart = 0x40100000; // TODO: hardcode is awful
|
||||
|
||||
Address kernelPhysStart = (Address)info->kernelInfo.kernelAddress;
|
||||
Size kernelSize = info->kernelInfo.kernelSize;
|
||||
for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
|
||||
Address phys = kernelPhysStart + offset;
|
||||
Address virt = kVMKernelVMA + offset;
|
||||
Address virt = (Address)_kernelStart + offset;
|
||||
VMMMapPage(gVMKernelL0Table, phys, virt, kPTENormalMem | kPTEAccessRW);
|
||||
}
|
||||
OSLog("Kernel mapped to HHDM\n");
|
||||
|
||||
for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
|
||||
VMMMapPage(gVMKernelL0Table, kernelPhysStart + offset, kernelPhysStart + offset, kPTENormalMem | kPTEAccessRW);
|
||||
}
|
||||
OSLog("Kernel Identity mapped\n");
|
||||
|
||||
Address fbPhys = (Address)info->framebuffer.base;
|
||||
Size fbSize = info->framebuffer.baseSize;
|
||||
@@ -166,18 +183,23 @@ void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
|
||||
kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
|
||||
);
|
||||
}
|
||||
OSLog("Framebuffer mapped\n");
|
||||
|
||||
Address UARTPhys = bootMap->UART.base;
|
||||
if (!UARTPhys) UARTPhys = 0x09000000;
|
||||
|
||||
VMMMapPage(
|
||||
gVMKernelL0Table, UARTPhys, VMPhysToHHDM(UARTPhys),
|
||||
kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
|
||||
);
|
||||
VMMMapPage(
|
||||
gVMKernelL0Table, UARTPhys, UARTPhys,
|
||||
kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
|
||||
);
|
||||
OSLog("UART mapped\n");
|
||||
|
||||
for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
|
||||
VMMMapPage(gVMKernelL0Table, kernelPhysStart + offset, kernelPhysStart + offset, kPTENormalMem | kPTEAccessRW);
|
||||
}
|
||||
|
||||
info->framebuffer.base = (BIUInt32*)kVMFbVirtBase;;
|
||||
OSLog("Enabling MMU...\n");
|
||||
CPUEnableMMU(gVMKernelL0Physical);
|
||||
isInitialized = true;
|
||||
}
|
||||
Reference in New Issue
Block a user