From 5ef70cc72d7338db14452773ccc903da1df6ec90 Mon Sep 17 00:00:00 2001
From: karina
Date: Sun, 3 May 2026 12:27:47 +0400
Subject: [PATCH] feat: CPUCleanAndInvalidateCode for future EL0 support

---
 Kernel/Include/Arch/CPU.h | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/Kernel/Include/Arch/CPU.h b/Kernel/Include/Arch/CPU.h
index db026f2..eebc81d 100644
--- a/Kernel/Include/Arch/CPU.h
+++ b/Kernel/Include/Arch/CPU.h
@@ -33,6 +33,29 @@ static inline void CPUInvalidateTLB(Address virt) {
     );
 }
 
+static inline void CPUCleanAndInvalidateCode(Pointer codeVirt, Size size) {
+    // CTR_EL0: IminLine is bits [3:0], DminLine is bits [19:16], both log2(words)
+    UInt64 ctr;
+    __asm__ volatile ("mrs %0, ctr_el0" : "=r" (ctr));
+    UInt64 dcacheLineSize = 4ULL << ((ctr >> 16) & 0xF);
+    UInt64 icacheLineSize = 4ULL << (ctr & 0xF);
+
+    Address addr = (Address)codeVirt;
+    Address end = addr + size;
+
+    // Clean D-cache to PoU; align start down so an unaligned range covers every line
+    for (Address va = addr & ~(dcacheLineSize - 1); va < end; va += dcacheLineSize) {
+        __asm__ volatile ("dc cvau, %0" :: "r" (va) : "memory");
+    }
+    __asm__ volatile ("dsb ish" ::: "memory");
+
+    // Invalidate I-cache to PoU, likewise aligned down to its own line size
+    for (Address va = addr & ~(icacheLineSize - 1); va < end; va += icacheLineSize) {
+        __asm__ volatile ("ic ivau, %0" :: "r" (va) : "memory");
+    }
+    __asm__ volatile ("dsb ish\nisb" ::: "memory");
+}
+
 static inline void CPUEnableMMU(Address l0PhysicalAddress) {
     // MAIR_EL1 (Memory Attribute Indirection Register)
     // kPTENormalMem is index 0 and kPTEDeviceMem is index 1