feat: CPUCleanAndInvalidateCode for future EL0 support
This commit is contained in:
@@ -33,6 +33,29 @@ static inline void CPUInvalidateTLB(Address virt) {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Make freshly written code visible to instruction fetch (ARMv8-A
// self-modifying-code sequence): clean the D-cache to the Point of
// Unification over the range, then invalidate the I-cache over the same
// range, with the barriers the architecture requires. Call this after
// writing instructions (e.g. loading an EL0 program image) and before
// branching to them.
//
// codeVirt: virtual address of the first byte of the written code
// size:     number of bytes to synchronize (size == 0 is a no-op)
static inline void CPUCleanAndInvalidateCode(Pointer codeVirt, Size size) {
    // Read cache line sizes from CTR_EL0: DminLine is bits [19:16],
    // IminLine is bits [3:0]; each is log2 of the line size in 4-byte words.
    UInt64 ctr;
    __asm__ volatile ("mrs %0, ctr_el0" : "=r" (ctr));
    UInt64 dcacheLineSize = 4ULL << ((ctr >> 16) & 0xF);
    UInt64 icacheLineSize = 4ULL << (ctr & 0xF);

    Address addr = (Address)codeVirt;
    Address end = addr + size;

    // Clean D-cache to PoU (Point of Unification).
    // Align the start down to a line boundary: "dc cvau" acts on the whole
    // line containing the address, and stepping by one line size from an
    // unaligned start would skip the final line of the range.
    for (Address va = addr & ~(Address)(dcacheLineSize - 1); va < end; va += dcacheLineSize) {
        __asm__ volatile ("dc cvau, %0" :: "r" (va) : "memory");
    }
    // Ensure all cleans complete before the I-cache invalidates begin.
    __asm__ volatile ("dsb ish" ::: "memory");

    // Invalidate I-cache to PoU (same start-alignment rule as above).
    for (Address va = addr & ~(Address)(icacheLineSize - 1); va < end; va += icacheLineSize) {
        __asm__ volatile ("ic ivau, %0" :: "r" (va) : "memory");
    }
    // dsb: complete the invalidates across the Inner Shareable domain;
    // isb: flush this core's pipeline so subsequent fetches see new code.
    __asm__ volatile ("dsb ish\nisb" ::: "memory");
}
|
||||||
|
|
||||||
static inline void CPUEnableMMU(Address l0PhysicalAddress) {
|
static inline void CPUEnableMMU(Address l0PhysicalAddress) {
|
||||||
// MAIR_EL1 (Memory Attribute Indirection Register)
|
// MAIR_EL1 (Memory Attribute Indirection Register)
|
||||||
// kPTENormalMem is index 0 and kPTEDeviceMem is index 1
|
// kPTENormalMem is index 0 and kPTEDeviceMem is index 1
|
||||||
|
|||||||
Reference in New Issue
Block a user