[PATCH 1/3] riscv: mm: Rename new_vmalloc into new_valid_map_cpus

0 views
Skip to first unread message

Vivian Wang

unread,
Mar 1, 2026, 9:21:51 PM, Mar 1
to Paul Walmsley, Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Alexander Potapenko, Marco Elver, Dmitry Vyukov, linux...@lists.infradead.org, linux-...@vger.kernel.org, kasa...@googlegroups.com, Palmer Dabbelt, Vivian Wang
In preparation for a future patch using this mechanism for non-vmalloc
mappings, rename new_vmalloc to new_valid_map_cpus to avoid misleading
readers.

No functional change intended.

Signed-off-by: Vivian Wang <wangr...@iscas.ac.cn>
---
arch/riscv/include/asm/cacheflush.h | 6 +++---
arch/riscv/kernel/entry.S | 38 ++++++++++++++++++-------------------
arch/riscv/mm/init.c | 2 +-
3 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 0092513c3376..b6d1a5eb7564 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -41,7 +41,7 @@ do { \
} while (0)

#ifdef CONFIG_64BIT
-extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+extern u64 new_valid_map_cpus[NR_CPUS / sizeof(u64) + 1];
extern char _end[];
#define flush_cache_vmap flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
@@ -54,8 +54,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
* the only place this can happen is in handle_exception() where
* an sfence.vma is emitted.
*/
- for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i)
- new_vmalloc[i] = -1ULL;
+ for (i = 0; i < ARRAY_SIZE(new_valid_map_cpus); ++i)
+ new_valid_map_cpus[i] = -1ULL;
}
}
#define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 60eb221296a6..e57a0f550860 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -20,44 +20,44 @@

.section .irqentry.text, "ax"

-.macro new_vmalloc_check
+.macro new_valid_map_cpus_check
REG_S a0, TASK_TI_A0(tp)
csrr a0, CSR_CAUSE
/* Exclude IRQs */
- blt a0, zero, .Lnew_vmalloc_restore_context_a0
+ blt a0, zero, .Lnew_valid_map_cpus_restore_context_a0

REG_S a1, TASK_TI_A1(tp)
- /* Only check new_vmalloc if we are in page/protection fault */
+ /* Only check new_valid_map_cpus if we are in page/protection fault */
li a1, EXC_LOAD_PAGE_FAULT
- beq a0, a1, .Lnew_vmalloc_kernel_address
+ beq a0, a1, .Lnew_valid_map_cpus_kernel_address
li a1, EXC_STORE_PAGE_FAULT
- beq a0, a1, .Lnew_vmalloc_kernel_address
+ beq a0, a1, .Lnew_valid_map_cpus_kernel_address
li a1, EXC_INST_PAGE_FAULT
- bne a0, a1, .Lnew_vmalloc_restore_context_a1
+ bne a0, a1, .Lnew_valid_map_cpus_restore_context_a1

-.Lnew_vmalloc_kernel_address:
+.Lnew_valid_map_cpus_kernel_address:
/* Is it a kernel address? */
csrr a0, CSR_TVAL
- bge a0, zero, .Lnew_vmalloc_restore_context_a1
+ bge a0, zero, .Lnew_valid_map_cpus_restore_context_a1

/* Check if a new vmalloc mapping appeared that could explain the trap */
REG_S a2, TASK_TI_A2(tp)
/*
* Computes:
- * a0 = &new_vmalloc[BIT_WORD(cpu)]
+ * a0 = &new_valid_map_cpus[BIT_WORD(cpu)]
* a1 = BIT_MASK(cpu)
*/
lw a2, TASK_TI_CPU(tp)
/*
- * Compute the new_vmalloc element position:
+ * Compute the new_valid_map_cpus element position:
* (cpu / 64) * 8 = (cpu >> 6) << 3
*/
srli a1, a2, 6
slli a1, a1, 3
- la a0, new_vmalloc
+ la a0, new_valid_map_cpus
add a0, a0, a1
/*
- * Compute the bit position in the new_vmalloc element:
+ * Compute the bit position in the new_valid_map_cpus element:
* bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6
* = cpu - ((cpu >> 6) << 3) << 3
*/
@@ -67,12 +67,12 @@
li a2, 1
sll a1, a2, a1

- /* Check the value of new_vmalloc for this cpu */
+ /* Check the value of new_valid_map_cpus for this cpu */
REG_L a2, 0(a0)
and a2, a2, a1
- beq a2, zero, .Lnew_vmalloc_restore_context
+ beq a2, zero, .Lnew_valid_map_cpus_restore_context

- /* Atomically reset the current cpu bit in new_vmalloc */
+ /* Atomically reset the current cpu bit in new_valid_map_cpus */
amoxor.d a0, a1, (a0)

/* Only emit a sfence.vma if the uarch caches invalid entries */
@@ -84,11 +84,11 @@
csrw CSR_SCRATCH, x0
sret

-.Lnew_vmalloc_restore_context:
+.Lnew_valid_map_cpus_restore_context:
REG_L a2, TASK_TI_A2(tp)
-.Lnew_vmalloc_restore_context_a1:
+.Lnew_valid_map_cpus_restore_context_a1:
REG_L a1, TASK_TI_A1(tp)
-.Lnew_vmalloc_restore_context_a0:
+.Lnew_valid_map_cpus_restore_context_a0:
REG_L a0, TASK_TI_A0(tp)
.endm

@@ -144,7 +144,7 @@ SYM_CODE_START(handle_exception)
* could "miss" the new mapping and traps: in that case, we only need
* to retry the access, no sfence.vma is required.
*/
- new_vmalloc_check
+ new_valid_map_cpus_check
#endif

REG_S sp, TASK_TI_KERNEL_SP(tp)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 811e03786c56..9922c22a2a5f 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -37,7 +37,7 @@

#include "../kernel/head.h"

-u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
+u64 new_valid_map_cpus[NR_CPUS / sizeof(u64) + 1];

struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);

--
2.52.0

Alexander Potapenko

unread,
Mar 2, 2026, 10:41:46 AM, Mar 2
to Vivian Wang, Paul Walmsley, Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Marco Elver, Dmitry Vyukov, linux...@lists.infradead.org, linux-...@vger.kernel.org, kasa...@googlegroups.com, Palmer Dabbelt
On Mon, Mar 2, 2026 at 3:21 AM Vivian Wang <wangr...@iscas.ac.cn> wrote:
>
> In preparation of a future patch using this mechanism for non-vmalloc
> mappings, rename new_vmalloc into new_valid_map_cpus to avoid misleading
> readers.
>
> No functional change intended.
>
> Signed-off-by: Vivian Wang <wangr...@iscas.ac.cn>
> ---
> arch/riscv/include/asm/cacheflush.h | 6 +++---
> arch/riscv/kernel/entry.S | 38 ++++++++++++++++++-------------------
> arch/riscv/mm/init.c | 2 +-
> 3 files changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
> index 0092513c3376..b6d1a5eb7564 100644
> --- a/arch/riscv/include/asm/cacheflush.h
> +++ b/arch/riscv/include/asm/cacheflush.h
> @@ -41,7 +41,7 @@ do { \
> } while (0)
>
> #ifdef CONFIG_64BIT
> -extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1];
> +extern u64 new_valid_map_cpus[NR_CPUS / sizeof(u64) + 1];

new_valid_map_cpus is a bitmap, right? If so, you are allocating 8x
more memory than needed.
Can we use DECLARE_BITMAP instead?

Vivian Wang

unread,
Mar 2, 2026, 9:11:33 PM, Mar 2
to Alexander Potapenko, Paul Walmsley, Palmer Dabbelt, Albert Ou, Alexandre Ghiti, Marco Elver, Dmitry Vyukov, linux...@lists.infradead.org, linux-...@vger.kernel.org, kasa...@googlegroups.com, Palmer Dabbelt
I hadn't considered changing it since this series is supposed to be just a
fix, but that is a good point.

I'll reorganize this in v2 to include a fix to use DECLARE_BITMAP, and
also use bitmap operations for the marking operation. But I'll leave
that out of consideration for stable backport, maybe along with the
renaming.

Thanks,
Vivian "dramforever" Wang

Reply all
Reply to author
Forward
0 new messages