feat(arm64): higher-half kernel, early MMU in boot, and VMM

This commit is contained in:
karina
2026-04-26 21:47:41 +04:00
parent 3a55665bd7
commit b56b55e4b3
14 changed files with 485 additions and 55 deletions
+50
View File
@@ -21,4 +21,54 @@ static inline UInt64 CPUGetFAR() {
UInt64 far;
__asm__ volatile ("mrs %0, far_el1" : "=r" (far));
return far;
}
static inline void CPUInvalidateTLB(Address virt) {
    // Invalidate the TLB entry for one virtual address on every core in the
    // Inner Shareable domain, across all ASIDs (vaae1is = "VA, All ASIDs,
    // EL1, Inner Shareable"). tlbi takes VA[55:12] in Xt[43:0], hence >> 12.
    __asm__ volatile(
        "dsb ishst\n"          // make prior page-table stores visible first
        "tlbi vaae1is, %0\n"   // broadcast invalidate for this page
        "dsb ish\n"            // wait for the invalidate to complete everywhere
        "isb\n"                // resynchronize the instruction stream
        :: "r" (virt >> 12) : "memory"
    );
}
static inline void CPUEnableMMU(Address l0PhysicalAddress) {
    // Program the EL1 translation registers and turn the MMU (plus caches) on.
    // l0PhysicalAddress: physical address of the root (L0) translation table.
    // It is installed as the root for BOTH TTBR0 (user half) and TTBR1
    // (kernel half) — early boot shares one table for both address halves.

    // MAIR_EL1 (Memory Attribute Indirection Register)
    // kPTENormalMem is index 0 and kPTEDeviceMem is index 1
    // 0xFF = Normal, 0x00 = Device
    UInt64 mair = (0xFFULL << 0) | (0x00ULL << 8);

    // TCR_EL1 (Translation Control Register)
    // configures the mmu for 4kb pages and 48bit virtual addresses
    // t0sz/t1sz = 16 (64-48 = 16)
    // tg0/tg1 = 4kb granule
    // NOTE(review): IRGN0/ORGN0/IRGN1/ORGN1 are left 0, i.e. non-cacheable
    // table walks; setting them to 1 (write-back/write-allocate) would make
    // walks cacheable — confirm whether this is intentional for early boot.
    UInt64 tcr = (16ULL << 0) |  // T0SZ (userspace size)
        (16ULL << 16) |          // T1SZ (kernelspace size)
        (0ULL << 14) |           // TG0 (User 4KB granule; field value 0)
        (2ULL << 30) |           // TG1 (Kernel 4KB granule; field value 2)
        (3ULL << 28) |           // SH1 (Inner Shareable)
        (3ULL << 12) |           // SH0 (Inner Shareable)
        (5ULL << 32);            // IPS (intermediate physical address size)
    __asm__ volatile (
        "msr mair_el1, %0\n"
        "msr tcr_el1, %1\n"
        "msr ttbr0_el1, %2\n" // set userspace root
        "msr ttbr1_el1, %2\n" // set kernelspace root (same table, see above)
        "isb\n" // Instruction Synchronization Barrier
        :: "r"(mair), "r"(tcr), "r"(l0PhysicalAddress) : "memory"
    );

    // turn on the MMU in SCTLR_EL1 (System Control Register)
    // Bit 0 = M (MMU Enable), Bit 2 = C (Data Cache Enable), Bit 12 = I (Instruction Cache Enable)
    // NOTE(review): no TLB invalidate (tlbi vmalle1) is done before enabling —
    // fine if this runs exactly once from reset with clean TLBs; confirm.
    UInt64 sctlr;
    UInt64 sctlr_flags = 0x1005; // set bits 0 (M), 2 (C), and 12 (I)
    __asm__ volatile (
        "mrs %0, sctlr_el1\n"    // read-modify-write so RES1/reset bits survive
        "orr %0, %0, %1\n"
        "msr sctlr_el1, %0\n"
        "isb\n"                  // subsequent fetches use the new translation
        : "=r"(sctlr) : "r"(sctlr_flags) : "memory"
    );
}
+2 -2
View File
@@ -2,8 +2,8 @@
#include <Types.h>
#include <Lib/VAArgs.h>
// Freestanding string/memory primitives (replacements for <string.h>).
void* StringSet(BytePointer destination, ASCII value, Size count);
void* MemoryCopy(void* destination, const void* source, Size count);
// NOTE(review): the two MemoryCopy declarations conflict (`const void*` vs
// `const Pointer`, i.e. `void *const`). This looks like a diff artifact where
// the void*-based prototypes were replaced by the Pointer-based ones; only
// one pair should remain in the header.
Pointer MemorySet(Pointer destination, ASCII value, Size count);
Pointer MemoryCopy(Pointer destination, const Pointer source, Size count);
// Presumably strcmp-like ordering results (<0 / 0 / >0) — confirm in the .c.
Int32 StringCompare(const ASCII* firstString, const ASCII* secondString);
// Compares at most `limit` characters (strncmp-style) — confirm in the .c.
Int32 StringCompareWithLimit(const ASCII* firstString, const ASCII* secondString, Size limit);
+4 -1
View File
@@ -1,5 +1,6 @@
#pragma once
#include <Types.h>
#include "../Common/bootinfo.h"
enum {
kVMPageSize = 4096,
@@ -12,12 +13,14 @@ typedef struct {
Size size;
} VMMemoryRegion;
// Physical-memory layout handed to the PMM at boot.
typedef struct {
    VMMemoryRegion totalRAM;                        // span of usable physical RAM
    VMMemoryRegion reserved[kVMMaxReservedRegions]; // regions the PMM must not hand out
    UInt32 reservedCount;                           // number of valid entries in reserved[]
    VMMemoryRegion UART;                            // MMIO range of the UART — presumably kept out of the allocator; confirm
} VMBootMemoryMap;
void PMMInitialize(VMBootMemoryMap* bootMap);
// NOTE(review): duplicate declaration — the diff replaced the one-argument
// form above with the two-argument form below; only the latter should remain
// in the header.
void PMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info);
// Returns a 4 KiB page, or presumably NULL on exhaustion — confirm in the .c.
// NOTE(review): prefer `(void)` in C to declare "takes no parameters".
Pointer PMMAllocatePage();
// Returns the page at `address` to the free pool.
void PMMFreePage(Address address);
+49
View File
@@ -0,0 +1,49 @@
#pragma once
#include <Types.h>
#include <VM/PMM.h>
#include "../Common/bootinfo.h"
// ARM64 page-table entry flag bits. The AttrIndx values (bits [4:2]) index
// MAIR_EL1 as programmed in CPUEnableMMU (index 0 = Normal, index 1 = Device).
// NOTE(review): enumerators above (1ULL << 31) exceed the range of int, which
// ISO C requires for enum constants (C11 6.7.2.2); this relies on the
// GCC/Clang extension that widens the enum type — confirm that is acceptable.
enum VMPTEFlags {
    kPTEValid = (1ULL << 0),       // 1 = Present (Will page fault if 0)
    kPTETable = (1ULL << 1),       // 1 = Valid for L0/L1/L2 Directory
    kPTEPage = (1ULL << 1),        // 1 = Valid for L3 Page (Same bit)
    kPTENormalMem = (0ULL << 2),   // AttrIndx 0: Cached, Normal RAM
    kPTEDeviceMem = (1ULL << 2),   // AttrIndx 1: Uncached, MMIO Device
    kPTEAccessRW = (0ULL << 6),    // Read/Write
    kPTEAccessRO = (1ULL << 6),    // Read-Only
    kPTEUser = (1ULL << 7),        // 1 = EL0, 0 = EL1
    kPTEInnerShare = (3ULL << 8),  // Inner Shareable (SMP safe)
    kPTEAccessFlag = (1ULL << 10), // CPU access tracking (MUST be 1 to avoid faults)
    kPTEPrivNX = (1ULL << 53),     // PXN: Privileged Execute Never
    kPTEUserNX = (1ULL << 54)      // UXN: Unprivileged Execute Never
};
// Fixed virtual-address-space layout constants.
//
// These were enumerators, but values such as 0xFFFFFFFF80000000 do not fit
// in an int, which ISO C requires for enum constants (C11 6.7.2.2p2) — the
// old form compiled only as a compiler extension. Macros with explicit ULL
// suffixes are portable and remain usable in constant expressions; all
// existing uses (arithmetic against Address values) are unaffected.
#define kVMKernelVMA  0xFFFFFFFF80000000ULL // kernel image link address (see VMKernelVirtToPhys)
#define kHHDMOffset   0xFFFF888000000000ULL // base of the higher-half direct map of physical RAM
#define kVMFbVirtBase 0xFFFFFFFFFC000000ULL // virtual base — presumably the framebuffer mapping; confirm
static inline Address VMKernelVirtToPhys(Address virt) {
    // The kernel image is linked at kVMKernelVMA; subtracting that base
    // recovers the physical address the image was loaded at.
    Address physical = virt - kVMKernelVMA;
    return physical;
}
static inline Address VMPhysToHHDM(Address phys) {
    // Translate a physical address into the higher-half direct map, which
    // mirrors physical memory starting at kHHDMOffset.
    Address mapped = phys + kHHDMOffset;
    return mapped;
}
static inline Address VMHHDMToPhys(Address virt) {
    // Inverse of VMPhysToHHDM: strip the direct-map base to get back the
    // underlying physical address.
    Address physical = virt - kHHDMOffset;
    return physical;
}
// Kernel root (L0) page table: pointer usable by the kernel, plus its
// physical address (the value loaded into TTBR1 — see CPUEnableMMU).
extern Address* gVMKernelL0Table;
extern Address gVMKernelL0Physical;
// Map the 4 KiB page at `phys` to `virt` in `l0Table` with VMPTEFlags
// `flags`. Return value is presumably the affected L3 entry — confirm.
Address* VMMMapPage(Address* l0Table, Address phys, Address virt, UInt64 flags);
// Remove the mapping for `virt` from `l0Table`.
void VMMUnmapPage(Address* l0Table, Address virt);
// Presumably returns the page mapped at `virt`, allocating and mapping a
// fresh one with `flags` if absent — confirm in the .c.
Pointer VMMGetOrAllocatePage(Address* l0Table, Address virt, UInt64 flags);
// Build the kernel address space from the boot memory map and boot info.
void VMMInitialize(VMBootMemoryMap* bootMap, Bootinfo* info);