The previous page table mode detection performed a risky operation: it cleared the page table in memory before disabling the MMU via the satp CSR. This approach works fine on systems such as QEMU, SPIKE, or most CPUs, which have a TLB that caches PTEs. The TLB retains the cached address translation state until it is explicitly flushed by the sfence.vma instruction.
However, on systems without a TLB, such as OpenXiangShan/NEMU, clearing the page table immediately affects virtual address translation, causing the system to trap into an endless page fault exception.
This patch modifies the detection procedure, inspired by Linux: it clears the satp register immediately after setting it. This patch also removes some redundant page table clearing operations.
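For context, the detection now follows a set-then-clear probe pattern. A minimal sketch of the idea (using the csr_write()/csr_swap()/__sfence_vma_all() helpers and SATP_*/PGTBL_* macros already present in this file; probe_satp_mode() itself is only an illustration, not part of the patch):

  /* Return true if the hardware accepts the given satp translation mode. */
  static bool probe_satp_mode(arch_pte_t *pgtbl, unsigned long mode)
  {
          unsigned long satp, hw_satp;

          satp = (unsigned long)pgtbl >> PGTBL_PAGE_SIZE_SHIFT;
          satp |= mode << SATP_MODE_SHIFT;

          __sfence_vma_all();
          csr_write(CSR_SATP, satp);
          /* Read back and disable the MMU in a single swap, so address
           * translation is never active while the page table is later
           * cleared in memory. */
          hw_satp = csr_swap(CSR_SATP, 0);
          __sfence_vma_all();

          return (hw_satp >> SATP_MODE_SHIFT) == mode;
  }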
Signed-off-by: "Xu, Zefan" <ceba_...@outlook.com>
---
.../riscv/cpu/generic/cpu_mmu_initial_pgtbl.c | 27 +++++++++++--------
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/arch/riscv/cpu/generic/cpu_mmu_initial_pgtbl.c b/arch/riscv/cpu/generic/cpu_mmu_initial_pgtbl.c
index 4b471736..64e646da 100644
--- a/arch/riscv/cpu/generic/cpu_mmu_initial_pgtbl.c
+++ b/arch/riscv/cpu/generic/cpu_mmu_initial_pgtbl.c
@@ -258,7 +258,7 @@ void __attribute__ ((section(".entry")))
{
#ifdef CONFIG_64BIT
u32 i, index;
- unsigned long satp;
+ unsigned long satp, hw_satp;
arch_pte_t *pgtbl =
(arch_pte_t *)to_load_pa((virtual_addr_t)&stage1_pgtbl_root);
@@ -280,20 +280,18 @@ void __attribute__ ((section(".entry")))
pgtbl[index] |= PGTBL_PTE_VALID_MASK;
satp = (unsigned long)pgtbl >> PGTBL_PAGE_SIZE_SHIFT;
satp |= SATP_MODE_SV57 << SATP_MODE_SHIFT;
+
+ /* Set and clear satp */
__sfence_vma_all();
csr_write(CSR_SATP, satp);
- if ((csr_read(CSR_SATP) >> SATP_MODE_SHIFT) == SATP_MODE_SV57) {
+ hw_satp = csr_swap(CSR_SATP, 0);
+ __sfence_vma_all();
+
+ if ((hw_satp >> SATP_MODE_SHIFT) == SATP_MODE_SV57) {
riscv_stage1_mode = SATP_MODE_SV57;
goto skip_sv48_test;
}
- /* Cleanup and disable MMU */
- for (i = 0; i < PGTBL_ROOT_ENTCNT; i++) {
- pgtbl[i] = 0x0ULL;
- }
- csr_write(CSR_SATP, 0);
- __sfence_vma_all();
-
/* Clear page table memory */
for (i = 0; i < PGTBL_ROOT_ENTCNT; i++) {
pgtbl[i] = 0x0ULL;
@@ -312,17 +310,24 @@ void __attribute__ ((section(".entry")))
pgtbl[index] |= PGTBL_PTE_VALID_MASK;
satp = (unsigned long)pgtbl >> PGTBL_PAGE_SIZE_SHIFT;
satp |= SATP_MODE_SV48 << SATP_MODE_SHIFT;
+
+ /* Set and clear satp */
__sfence_vma_all();
csr_write(CSR_SATP, satp);
- if ((csr_read(CSR_SATP) >> SATP_MODE_SHIFT) == SATP_MODE_SV48) {
+ hw_satp = csr_swap(CSR_SATP, 0);
+ __sfence_vma_all();
+
+ if ((hw_satp >> SATP_MODE_SHIFT) == SATP_MODE_SV48) {
riscv_stage1_mode = SATP_MODE_SV48;
}
skip_sv48_test:
- /* Cleanup and disable MMU */
+ /* Clear page table memory */
for (i = 0; i < PGTBL_ROOT_ENTCNT; i++) {
pgtbl[i] = 0x0ULL;
}
+
+ /* Disable MMU */
csr_write(CSR_SATP, 0);
__sfence_vma_all();
#endif
--
2.43.0