/*
 * ksOS/Kernel/Include/Arch/CPU.h
 * AArch64 (ARMv8-A) inline CPU control primitives for the ksOS kernel.
 * (File metadata from listing: 2026-04-29 17:00:11 +04:00, 88 lines, 2.9 KiB)
 */
#pragma once
#include <Types.h>
// Hint to the core that it is in a spin-wait loop (AArch64 YIELD).
// On SMT/virtualized systems this lets the hardware/hypervisor deprioritize us.
static inline void CPUYield() {
__asm__ volatile ("yield" ::: "memory");
}
// Put the core into a low-power state until an interrupt (or other wakeup
// event) arrives (AArch64 WFI). Typically used in the idle loop.
static inline void CPUWaitForInterrupt() {
__asm__ volatile ("wfi" ::: "memory");
}
// Mask IRQ and FIQ on the current core by setting DAIF bits I and F
// (the #3 immediate of `msr daifset` maps to the I|F mask bits).
// Debug (D) and SError (A) masks are left unchanged.
static inline void CPUDisableInterrupts() {
__asm__ volatile ("msr daifset, #3" ::: "memory");
}
// Unmask IRQ and FIQ on the current core by clearing DAIF bits I and F.
// Mirror of CPUDisableInterrupts().
static inline void CPUEnableInterrupts() {
__asm__ volatile ("msr daifclr, #3" ::: "memory");
}
// Read FAR_EL1, the Fault Address Register: the virtual address that caused
// the most recent synchronous abort (e.g. a page fault). Only meaningful
// inside an exception handler whose syndrome indicates FAR is valid.
static inline UInt64 CPUGetFAR() {
UInt64 far;
__asm__ volatile ("mrs %0, far_el1" : "=r" (far));
return far;
}
// Invalidate all TLB entries for one virtual page, across all ASIDs and all
// cores in the Inner Shareable domain (TLBI VAAE1IS).
// The TLBI operand carries the page number in VA[55:12], hence `virt >> 12`.
static inline void CPUInvalidateTLB(Address virt) {
__asm__ volatile(
"dsb ishst\n"
"tlbi vaae1is, %0\n"
"dsb ish\n"
"isb\n"
:: "r" (virt >> 12) : "memory"
);
}
// One-time MMU bring-up for EL1: program memory attributes (MAIR_EL1),
// translation geometry (TCR_EL1), both translation-table roots (TTBR0/1_EL1),
// flush stale TLB entries, then flip on MMU + caches in SCTLR_EL1.
// `l0PhysicalAddress` is the physical address of the level-0 page table,
// installed as the root for BOTH user (TTBR0) and kernel (TTBR1) halves.
static inline void CPUEnableMMU(Address l0PhysicalAddress) {
// MAIR_EL1 (Memory Attribute Indirection Register)
// kPTENormalMem is index 0 and kPTEDeviceMem is index 1
// 0xFF = Normal (write-back cacheable), 0x00 = Device-nGnRnE
UInt64 mair = (0xFFULL << 0) | (0x00ULL << 8);
// TCR_EL1 (Translation Control Register)
// configures the mmu for 4kb pages and 48bit virtual addresses
// t0sz/t1sz = 16 (64-48 = 16)
// tg0/tg1 = 4kb granule (encodings differ: TG0=0b00 is 4KB, TG1=0b10 is 4KB)
// NOTE(review): IRGN0/1 and ORGN0/1 are left 0, which makes hardware table
// walks Non-cacheable; consider 0b01 (write-back) for performance — confirm.
UInt64 tcr = (16ULL << 0) | // T0SZ (userspace size)
(16ULL << 16) | // T1SZ (kernelspace size)
(0ULL << 14) | // TG0 (User 4KB)
(2ULL << 30) | // TG1 (Kernel 4KB)
(3ULL << 28) | // SH1 (Inner Shareable)
(3ULL << 12) | // SH0 (Inner Shareable)
(5ULL << 32); // IPS (48-bit physical address space)
__asm__ volatile (
"msr mair_el1, %0\n"
"msr tcr_el1, %1\n"
"msr ttbr0_el1, %2\n" // set userspace root
"msr ttbr1_el1, %2\n" // set kernelspace root
"tlbi vmalle1is\n" // invalidate all stale EL1 TLB entries
"dsb ish\n" // FIX: TLBI must complete before the ISB/MMU enable
// (matches the sequence in CPUSwitchAddressSpace)
"isb\n" // Instruction Synchronization Barrier
:: "r"(mair), "r"(tcr), "r"(l0PhysicalAddress) : "memory"
);
// turn on the MMU in SCTLR_EL1 (System Control Register)
// Bit 0 = M (MMU Enable), Bit 2 = C (Data Cache Enable), Bit 12 = I (Instruction Cache Enable)
UInt64 sctlr;
UInt64 sctlrFlags = (1ULL << 0) | (1ULL << 2) | (1ULL << 12); // M | C | I (was magic 0x1005)
__asm__ volatile (
"mrs %0, sctlr_el1\n"
"orr %0, %0, %1\n" // read-modify-write: preserve reserved/RES1 bits
"msr sctlr_el1, %0\n"
"isb\n" // refetch instruction stream with MMU + caches live
: "=r"(sctlr) : "r"(sctlrFlags) : "memory"
);
}
// Switch the active userspace address space by installing a new level-0
// table root in TTBR0_EL1. The kernel half (TTBR1_EL1) is untouched.
// NOTE(review): `tlbi vmalle1is` flushes ALL EL1 TLB entries on every switch
// — correct, but ASIDs would avoid the full flush; confirm if intended.
static inline void CPUSwitchAddressSpace(Address l0Physical) {
__asm__ volatile(
"dsb ishst\n" // wait till all previous writes are finished physically
"msr ttbr0_el1, %0\n" // Update TTBR0_EL1 (userspace)
"tlbi vmalle1is\n" // Reset TLB cache
"dsb ish\n" // wait for tlb cache to reset
"isb\n" // Clear instruction pipeline
:: "r" (l0Physical) : "memory"
);
}
// Raise a synchronous supervisor-call exception with immediate `number`
// (must be a compile-time constant — it is encoded into the SVC instruction).
#define CPUException(number) __asm__ volatile ("svc %0" :: "i" (number) : "memory")