feat(arm64): higher-half kernel, early MMU in boot, and VMM

This commit is contained in:
karina
2026-04-26 21:47:41 +04:00
parent 3a55665bd7
commit b56b55e4b3
14 changed files with 485 additions and 55 deletions
+5 -1
View File
@@ -1,4 +1,3 @@
#include "Types.h"
#include <Arch/DTB.h>
#include <OS/Panic.h>
#include <OS/Log.h>
@@ -73,6 +72,11 @@ void DTBParse(Pointer dtb, VMBootMemoryMap* bootMap) {
bootMap->reserved[index].size = size;
bootMap->reservedCount++;
}
else if (StringStartsWith(currentNode, "pl011")) {
UInt32* cells = (UInt32*)structs;
bootMap->UART.base = Merge32To64(BytesSwap32(cells[1]), BytesSwap32(cells[0]));
bootMap->UART.size = Merge32To64(BytesSwap32(cells[3]), BytesSwap32(cells[2]));
}
}
structs += propertyLength;
+116 -5
View File
@@ -1,10 +1,121 @@
.section .text.boot, "ax"
.global _start

// Entry from the loader: x0 = physical address of the Bootinfo structure.
// Builds the early page tables (TTBR0 = identity map of the low 512 GB,
// TTBR1 = higher-half window at 0xFFFFFFFF80000000), enables the MMU, jumps
// into the higher half, clears .bss, and calls KernelMain(bootinfo).
_start:
    // disable interrupts (IRQ + FIQ)
    msr daifset, #3
    // preserve the physical Bootinfo* across table setup and the MMU switch;
    // x20 is callee-saved by convention and untouched below
    mov x20, x0
    // get phys addr of the first table; all four are contiguous in .data
    adrp x0, early_ttbr0_l0
    // zero the tables (4 tables * 4 KB = 16 KB)
    mov x2, #16384
    mov x3, x0
1:  str xzr, [x3], #8
    subs x2, x2, #8
    b.ne 1b

    // ---- set up ttbr0 (identity map of 512 GB) ----
    adrp x0, early_ttbr0_l0
    adrp x1, early_ttbr0_l1
    // early_ttbr0_l0[0] -> early_ttbr0_l1 (Valid + Table = 0x3)
    ldr x2, =0x3
    orr x3, x1, x2
    str x3, [x0, #0]
    // fill the L1 table with 512 entries (1 GB blocks)
    // flags 0x701 = valid + block + access flag + inner shareable + normal RAM
    mov x2, xzr
    mov x3, #512
    ldr x4, =0x701
    mov x6, #(1 << 30)
2:  orr x5, x2, x4
    str x5, [x1], #8            // early_ttbr0_l1[i] = base | flags
    add x2, x2, x6
    subs x3, x3, #1
    b.ne 2b

    // ---- set up ttbr1 (HH: 0xFFFFFFFF80000000) ----
    adrp x0, early_ttbr1_l0
    adrp x1, early_ttbr1_l1
    // early_ttbr1_l0[511] -> early_ttbr1_l1 (Valid + Table = 0x3)
    ldr x2, =0x3
    orr x3, x1, x2
    mov x4, #(511 * 8)
    str x3, [x0, x4]
    // determine which 1 GB physical frame the kernel is running in
    adr x2, _start              // current pc as an absolute address
    lsr x2, x2, #30             // keep only the gigabyte number
    lsl x2, x2, #30             // back to an absolute, 1 GB-aligned address
    // map that gig at L1 index 510 (virt 0xFFFFFFFF80000000)
    ldr x3, =0x701              // same block flags as the identity map
    orr x2, x2, x3
    mov x4, #(510 * 8)
    str x2, [x1, x4]            // early_ttbr1_l1[510] = base | flags

    // enable MMU (MAIR, TCR, SCTLR)
    // see Kernel/Include/Arch/CPU.h for the field-by-field explanation
    ldr x2, =((0xFF << 0) | (0x00 << 8))
    msr mair_el1, x2
    ldr x2, =((16 << 0) | (16 << 16) | (0 << 14) | (2 << 30) | (3 << 28) | (3 << 12) | (5 << 32))
    msr tcr_el1, x2
    adrp x0, early_ttbr0_l0
    adrp x1, early_ttbr1_l0
    msr ttbr0_el1, x0
    msr ttbr1_el1, x1
    dsb ish
    isb
    mrs x2, sctlr_el1
    ldr x3, =0x1005             // M (MMU) | C (D-cache) | I (I-cache)
    orr x2, x2, x3
    msr sctlr_el1, x2
    isb
    // absolute (literal-pool) jump so the pc moves to the higher-half alias
    ldr x2, =higher_half_jump
    br x2

higher_half_jump:
    // switch to the kernel's own higher-half boot stack
    ldr x3, =_boot_stack_top
    mov sp, x3
    // clean .bss
    ldr x1, =__bss_start
    ldr x2, =__bss_end
    cbz x1, 4f
    cmp x1, x2
    b.eq 4f
3:  str xzr, [x1], #8
    cmp x1, x2
    b.lt 3b
4:
    bl ExceptionsVectorsInit
    mov x0, x20                 // pass the phys Bootinfo* saved in x20 at entry
    bl KernelMain
    b .

halt:
    wfi
    b halt

.section .data
.align 12
early_ttbr0_l0: .fill 4096, 1, 0
early_ttbr0_l1: .fill 4096, 1, 0
early_ttbr1_l0: .fill 4096, 1, 0
early_ttbr1_l1: .fill 4096, 1, 0

.section .bss
.align 16
.skip 16384
_boot_stack_top:
+6 -2
View File
@@ -1,6 +1,7 @@
#include "../Common/bootinfo.h"
#include <VM/PMM.h>
#include <Arch/DTB.h>
#include <VM/PMM.h>
#include <VM/VMM.h>
#include <OS/Log.h>
#include <OS/Panic.h>
@@ -13,5 +14,8 @@ void KernelMain(Bootinfo* bootinfo) {
VMBootMemoryMap bootMap = {0};
bootMap.reservedCount = 0;
DTBParse(bootinfo->dtb, &bootMap);
PMMInitialize(&bootMap);
PMMInitialize(&bootMap, bootinfo);
VMMInitialize(&bootMap, bootinfo);
OSLog("Kernel initialized.\n");
}
+5 -5
View File
@@ -8,15 +8,15 @@ static void BufferAdd(ASCII* buffer, Size bufferSize, Size* written, ASCII chara
(*written)++;
}
void* StringSet(BytePointer destination, ASCII value, Size count) {
BytePointer savedDestination = destination;
Pointer MemorySet(Pointer destination, ASCII value, Size count) {
BytePointer savedDestination = (BytePointer) destination;
while (count--) {
*destination++ = (UInt8) value;
*savedDestination++ = (UInt8) value;
}
return savedDestination;
return destination;
}
void* MemoryCopy(void* destination, const void* source, Size count) {
Pointer MemoryCopy(Pointer destination, const Pointer source, Size count) {
BytePointer destinationBuffer = (BytePointer) destination;
const UInt8* sourceBuffer = (const UInt8*) source;
+11 -2
View File
@@ -2,5 +2,14 @@
#include <Lib/String.h>
void* memset(void* destination, int value, Size count) {
return StringSet(destination, value, count);
}
return MemorySet(destination, value, count);
}
// A little bit of Monica in my life
// A little bit of Erica by my side
// A little bit of Rita's all I need
// A little bit of Tina's what I see
// A little bit of Sandra in the sun
// A little bit of Mary all night long
// A little bit of Jessica, here I am
// A little bit of you makes me your man
+1 -1
View File
@@ -21,7 +21,7 @@ static const ASCII* GetExceptionClassString(UInt32 class) {
}
__attribute__((noreturn)) static void Halt() {
while (1) {
loop {
CPUDisableInterrupts();
CPUWaitForInterrupt();
}
+18 -23
View File
@@ -1,19 +1,21 @@
#include <VM/PMM.h>
#include <Lib/String.h>
#include "../Common/bootinfo.h"
extern char _kernelStart[];
extern char _kernelEnd[];
static Address sPMMRamBase = 0;
static MemoryPointer sPMMBitmap;
static Size sPMMBitmapSize;
static Size sPMMTotalPages;
static inline Size BitmapGetByteIndex(Address address) {
return (address / kVMPageSize) / kVMBlocksPerByte;
return ((address - sPMMRamBase) / kVMPageSize) / kVMBlocksPerByte;
}
static inline UInt8 BitmapGetBitOffset(Address address) {
return (UInt8)((address / kVMPageSize) % kVMBlocksPerByte);
}
static inline Boolean BitmapTest(const MemoryPointer bitmap, Address address) {
return (bitmap[BitmapGetByteIndex(address)] & (1U << BitmapGetBitOffset(address))) != 0;
return (UInt8)(((address - sPMMRamBase) / kVMPageSize) % kVMBlocksPerByte);
}
static inline void BitmapSet(MemoryPointer bitmap, Address address) {
@@ -24,27 +26,18 @@ static inline void BitmapUnset(MemoryPointer bitmap, Address address) {
bitmap[BitmapGetByteIndex(address)] &= ~(1U << BitmapGetBitOffset(address));
}
static MemoryPointer sPMMBitmap;
static Size sPMMBitmapSize;
static Size sPMMTotalPages;
void PMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
sPMMRamBase = bootMap->totalRAM.base;
sPMMTotalPages = bootMap->totalRAM.size / kVMPageSize;
sPMMBitmapSize = sPMMTotalPages / kVMBlocksPerByte;
sPMMBitmap = (MemoryPointer)_kernelEnd;
MemorySet(sPMMBitmap, 0, sPMMBitmapSize);
void PMMInitialize(VMBootMemoryMap* bootMap) {
UInt32 vIndex = bootMap->reservedCount;
bootMap->reserved[vIndex].base = 0x0;
bootMap->reserved[vIndex].size = bootMap->totalRAM.base;
bootMap->reservedCount++;
UInt32 kIndex = bootMap->reservedCount;
bootMap->reserved[kIndex].base = (Address)_kernelStart;
bootMap->reserved[kIndex].size = (Address)_kernelEnd - (Address)_kernelStart;
bootMap->reservedCount++;
sPMMTotalPages = bootMap->totalRAM.size / kVMPageSize;
sPMMBitmapSize = sPMMTotalPages / kVMBlocksPerByte;
sPMMBitmap = (MemoryPointer)_kernelEnd;
StringSet(sPMMBitmap, 0, sPMMBitmapSize);
UInt32 bIndex = bootMap->reservedCount;
bootMap->reserved[bIndex].base = (Address)sPMMBitmap;
bootMap->reserved[bIndex].size = sPMMBitmapSize;
@@ -58,7 +51,9 @@ void PMMInitialize(VMBootMemoryMap* bootMap) {
for (Size p = 0; p < pagesToReserve; p++) {
Address pageAdress = regionBase + (p * kVMPageSize);
BitmapSet(sPMMBitmap, pageAdress);
if (pageAdress >= sPMMRamBase && pageAdress < (sPMMRamBase + bootMap->totalRAM.size)) {
BitmapSet(sPMMBitmap, pageAdress);
}
}
}
}
@@ -68,7 +63,7 @@ Pointer PMMAllocatePage() {
if (sPMMBitmap[i] == 0xFF) continue;
for (Size bit = 0; bit < kVMBlocksPerByte; bit++) {
if ((sPMMBitmap[i] & (1 << bit)) == 0) {
Address address = (i * kVMBlocksPerByte + bit) * kVMPageSize;
Address address = sPMMRamBase + (i * kVMBlocksPerByte + bit) * kVMPageSize;
BitmapSet(sPMMBitmap, address);
return (Pointer)address;
}
+183
View File
@@ -0,0 +1,183 @@
#include <VM/VMM.h>
#include <VM/PMM.h>
#include <Lib/String.h>
#include <Arch/CPU.h>
#include <OS/Panic.h>
#include "../Common/bootinfo.h"
// Bits 47:12 of a descriptor hold the page-aligned physical address.
static const UInt64 kPTEAddressMask = 0x0000FFFFFFFFF000ULL;

// Split a page-table entry into its address and flag portions.
static inline Address GetPTEAddress(UInt64 entry) { return entry & kPTEAddressMask; }
static inline UInt64 GetPTEFlags(UInt64 entry) { return entry & ~kPTEAddressMask; }

// Each translation level consumes 9 bits of the virtual address:
// L0 = bits 47:39, L1 = 38:30, L2 = 29:21, L3 = 20:12.
static inline UInt16 GetLevelIndex(Address virt, unsigned shift) { return (virt >> shift) & 0x1FF; }
static inline UInt16 GetL0Index(Address virt) { return GetLevelIndex(virt, 39); }
static inline UInt16 GetL1Index(Address virt) { return GetLevelIndex(virt, 30); }
static inline UInt16 GetL2Index(Address virt) { return GetLevelIndex(virt, 21); }
static inline UInt16 GetL3Index(Address virt) { return GetLevelIndex(virt, 12); }
// Set once VMMInitialize has switched to the kernel page tables; before that
// we run identity-mapped and physical addresses are directly dereferenceable.
static Boolean isInitialized = false;
Address* gVMKernelL0Table = nullptr;
Address gVMKernelL0Physical = 0;

// Turn a table's physical address into a pointer we can actually read:
// identity address before init, HHDM alias afterwards.
static Address* GetVirtualTable(Address phys) {
    if (!isInitialized) {
        return (Address*)phys;
    }
    return (Address*)VMPhysToHHDM(phys);
}
// Return the next-level table behind parentTable[index], allocating and
// zeroing a fresh one when the slot is empty. An already-present directory
// entry is widened with the user bit if the new mapping requests it.
// Returns a dereferenceable (virtual) pointer, or nullptr on OOM.
static inline Address* GetOrAllocateTable(Address* parentTable, Size index, UInt64 flags, UInt64 directoryFlags) {
    UInt64 entry = parentTable[index];
    if (entry & kPTEValid) {
        parentTable[index] = entry | (flags & kPTEUser);
        return GetVirtualTable(GetPTEAddress(parentTable[index]));
    }
    Pointer newTable = PMMAllocatePage();
    if (!newTable) return nullptr;
    Address* newTableVirt = GetVirtualTable((Address)newTable);
    MemorySet(newTableVirt, 0, kVMPageSize);
    parentTable[index] = (Address)newTable | directoryFlags;
    return newTableVirt;
}
// Walk the four-level tree rooted at l0Table for `virt` and return the mapped
// physical page address, or 0 if any level of the walk is non-present.
static Address GetMappedPhysicalAddress(Address* l0Table, Address virt) {
    Address* table = l0Table;
    if (isInitialized) table = (Address*)VMPhysToHHDM((Address)l0Table);
    UInt16 indices[4] = {
        GetL0Index(virt), GetL1Index(virt), GetL2Index(virt), GetL3Index(virt)
    };
    // Descend L0 -> L1 -> L2 through table descriptors.
    for (Size level = 0; level < 3; level++) {
        UInt64 entry = table[indices[level]];
        if (!(entry & kPTEValid)) return 0;
        table = GetVirtualTable(GetPTEAddress(entry));
    }
    // Final level: the leaf entry holds the physical page address.
    UInt64 leaf = table[indices[3]];
    if (!(leaf & kPTEValid)) return 0;
    return GetPTEAddress(leaf);
}
// Map one 4 KB page phys -> virt in the tree rooted at l0Table, allocating
// intermediate tables on demand. Directory entries inherit only the user bit
// from `flags`; the leaf gets flags | page | access-flag | valid. Returns the
// (virtual) L3 table on success, nullptr on allocation failure. The TLB entry
// for virt is invalidated after the write.
Address* VMMMapPage(Address* l0Table, Address phys, Address virt, UInt64 flags) {
    Address* root = l0Table;
    if (isInitialized) root = (Address*)VMPhysToHHDM((Address)l0Table);
    UInt64 directoryFlags = kPTEValid | kPTETable | (flags & kPTEUser);
    Address* l1 = GetOrAllocateTable(root, GetL0Index(virt), flags, directoryFlags);
    if (!l1) return nullptr;
    Address* l2 = GetOrAllocateTable(l1, GetL1Index(virt), flags, directoryFlags);
    if (!l2) return nullptr;
    Address* l3 = GetOrAllocateTable(l2, GetL2Index(virt), flags, directoryFlags);
    if (!l3) return nullptr;
    l3[GetL3Index(virt)] = phys | flags | kPTEPage | kPTEAccessFlag | kPTEValid;
    CPUInvalidateTLB(virt);
    return l3;
}
// Remove the leaf mapping for virt (intermediate tables are kept) and
// invalidate its TLB entry. A walk that hits a non-present directory entry
// returns silently — nothing was mapped.
void VMMUnmapPage(Address* l0Table, Address virt) {
    Address* table = l0Table;
    if (isInitialized) table = (Address*)VMPhysToHHDM((Address)l0Table);
    UInt16 indices[3] = { GetL0Index(virt), GetL1Index(virt), GetL2Index(virt) };
    for (Size level = 0; level < 3; level++) {
        UInt64 entry = table[indices[level]];
        if (!(entry & kPTEValid)) return;
        table = GetVirtualTable(GetPTEAddress(entry));
    }
    table[GetL3Index(virt)] = 0;
    CPUInvalidateTLB(virt);
}
// Return a usable (virtual) pointer for the page backing virt. If nothing is
// mapped there yet, allocate a physical page, map it with `flags`, zero it,
// and return it. Returns nullptr on out-of-memory.
Pointer VMMGetOrAllocatePage(Address* l0Table, Address virt, UInt64 flags) {
    Address mappedPhys = GetMappedPhysicalAddress(l0Table, virt);
    if (mappedPhys) {
        return (Pointer)GetVirtualTable(mappedPhys);
    }
    Pointer page = PMMAllocatePage();
    if (!page) return nullptr; // OOM
    // NOTE(review): if VMMMapPage fails here, the page just taken from
    // PMMAllocatePage is never returned — confirm whether the PMM exposes
    // a free routine to release it on this path.
    if (!VMMMapPage(l0Table, (Address)page, virt, flags)) return nullptr;
    Pointer pageVirt = (Pointer)GetVirtualTable((Address)page);
    MemorySet(pageVirt, 0, kVMPageSize);
    return pageVirt;
}
// Build the final kernel address space and switch the CPU onto it:
//  1. all of RAM mapped into the higher-half direct map (HHDM), no-exec;
//  2. the kernel image at its link-time VMA (kVMKernelVMA);
//  3. the framebuffer as device memory at kVMFbVirtBase;
//  4. the UART registers through the HHDM;
//  5. an identity map of the kernel image, presumably so the instruction
//     stream stays reachable across the CPUEnableMMU table switch —
//     TODO(review): confirm this is still required and unmap it afterwards.
// Panics if the root table cannot be allocated.
void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info) {
    gVMKernelL0Physical = (Address)PMMAllocatePage();
    if (!gVMKernelL0Physical) OSPanic("Failed to allocate kernel L0 table");
    gVMKernelL0Table = (Address*)gVMKernelL0Physical;
    // PMMAllocatePage does not zero pages; stale bits in the root would be
    // read as valid descriptors by the walkers, so clear it explicitly
    // (GetOrAllocateTable already does this for every lower-level table).
    MemorySet(gVMKernelL0Table, 0, kVMPageSize);
    // 1) HHDM: every physical page of RAM, read/write, never executable.
    Size totalRAM = bootMap->totalRAM.size;
    Address ramEnd = bootMap->totalRAM.base + totalRAM;
    for (Address phys = bootMap->totalRAM.base; phys < ramEnd; phys += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table,
            phys, VMPhysToHHDM(phys),
            kPTENormalMem | kPTEAccessRW | kPTEPrivNX | kPTEUserNX
        );
    }
    // 2) Kernel image at its higher-half VMA.
    Address kernelPhysStart = (Address)info->kernelInfo.kernelAddress;
    Size kernelSize = info->kernelInfo.kernelSize;
    for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
        Address phys = kernelPhysStart + offset;
        Address virt = kVMKernelVMA + offset;
        VMMMapPage(gVMKernelL0Table, phys, virt, kPTENormalMem | kPTEAccessRW);
    }
    // 3) Framebuffer as device memory at its dedicated virtual window.
    Address fbPhys = (Address)info->framebuffer.base;
    Size fbSize = info->framebuffer.baseSize;
    for (Address offset = 0; offset < fbSize; offset += kVMPageSize) {
        VMMMapPage(
            gVMKernelL0Table, fbPhys + offset,
            kVMFbVirtBase + offset,
            kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
        );
    }
    // 4) UART registers through the HHDM.
    // NOTE(review): only a single page is mapped here — confirm that
    // bootMap->UART.size never exceeds kVMPageSize.
    Address UARTPhys = bootMap->UART.base;
    VMMMapPage(
        gVMKernelL0Table, UARTPhys, VMPhysToHHDM(UARTPhys),
        kPTEDeviceMem | kPTEAccessRW | kPTEUserNX | kPTEPrivNX
    );
    // 5) Identity map of the kernel image (see header comment).
    for (Address offset = 0; offset < kernelSize; offset += kVMPageSize) {
        VMMMapPage(gVMKernelL0Table, kernelPhysStart + offset, kernelPhysStart + offset, kPTENormalMem | kPTEAccessRW);
    }
    // From here on the framebuffer must be addressed via its virtual window.
    info->framebuffer.base = (BIUInt32*)kVMFbVirtBase;
    CPUEnableMMU(gVMKernelL0Physical);
    isInitialized = true;
}