[PATCH 0/2] kasan: cleanups for kasan_enabled() checks

This patch series is a continuation of the previous discussion [1] on
KASAN internal refactoring.

Here we remove kasan_enabled() checks that are duplicated by higher-level
callers. This deduplication is also related to the separate patch series [2].

[1] https://lore.kernel.org/all/CA+fCnZce3AR+pUesbDkKMtMJ+iR8eDrcjFTbVpAcwjBoZ=gJ...@mail.gmail.com/
[2] https://lore.kernel.org/all/aNTfPjS2buXMI46D@MiWiFi-R3L-srv/

* Altered functions:

check_page_allocation
Delete the check because its callers are already gated by the
kasan_enabled() checks in their wrappers in include/linux/kasan.h:
  __kasan_kfree_large
  __kasan_mempool_poison_pages
  __kasan_mempool_poison_object

kasan_populate_vmalloc, kasan_release_vmalloc
Add wrappers in include/linux/kasan.h and rename the implementations to
__kasan_populate_vmalloc / __kasan_release_vmalloc, since these functions
are called externally from mm/vmalloc.c.

__kasan_unpoison_vmalloc, __kasan_poison_vmalloc
Delete the checks because the respective wrappers in include/linux/kasan.h
already perform kasan_enabled() checks (a sketch of this wrapper convention
follows the list).

release_free_meta -- Delete the check because the higher-level caller path
already has it. See the call chain:

__kasan_slab_free              -- has the check already
__kasan_mempool_poison_object  -- has the check already
  poison_slab_object
    kasan_save_free_info
      release_free_meta
        kasan_enabled()        -- delete here
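
For reference, the wrapper convention looks roughly like the minimal sketch
below. The names kasan_do_something()/__kasan_do_something() are hypothetical
stand-ins, not literal tree code; the real wrappers in include/linux/kasan.h
follow the same shape, with the single kasan_enabled() gate in the public
entry point:

	/* Hypothetical names; a sketch of the pattern, not literal tree code. */
	void __kasan_do_something(const void *addr, size_t size);

	static __always_inline void kasan_do_something(const void *addr, size_t size)
	{
		/* The only kasan_enabled() check on this path lives here. */
		if (kasan_enabled())
			__kasan_do_something(addr, size);
	}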

* Other mm/kasan/* functions with kasan_enabled() checks, whose callers are
declared in the internal mm/kasan/kasan.h:

mm/kasan/generic.c:
kasan_check_range
check_region_inline
kasan_byte_accessible

mm/kasan/shadow.c:
kasan_poison
kasan_poison_last_granule

mm/kasan/kasan_test_c.c:
kasan_suite_init

== Tests:

* ARCH=um defconfig (-e KASAN, selects ARCH_DEFER_KASAN)
Compiled and ran ./linux with no issues

* ARCH=powerpc ppc64le_defconfig (-e KASAN, selects ARCH_DEFER_KASAN)
Compiled and ran under qemu-system-ppc64 with no issues

* ARCH=arm64 defconfig (-e KASAN_GENERIC) and KUnit tests:

[ 4.065375] # kasan: pass:61 fail:1 skip:14 total:76
[ 4.065529] # Totals: pass:61 fail:1 skip:14 total:76
[ 4.065682] not ok 1 kasan

One test fails:

[ 3.772739] # kasan_strings: EXPECTATION FAILED at mm/kasan/kasan_test_c.c:1700
[ 3.772739] KASAN failure expected in "strscpy(ptr, src + KASAN_GRANULE_SIZE, KASAN_GRANULE_SIZE)", but none occurred

which is also reproducible on the mainline tree.

Sabyrzhan Tasbolatov (2):
kasan: remove __kasan_save_free_info wrapper
kasan: cleanup of kasan_enabled() checks

include/linux/kasan.h | 20 ++++++++++++++++++--
mm/kasan/common.c | 3 ---
mm/kasan/generic.c | 5 +----
mm/kasan/kasan.h | 7 +------
mm/kasan/shadow.c | 20 ++++----------------
mm/kasan/tags.c | 2 +-
6 files changed, 25 insertions(+), 32 deletions(-)

--
2.34.1

[PATCH 1/2] kasan: remove __kasan_save_free_info wrapper

We don't need a kasan_enabled() check in kasan_save_free_info() at all:
both higher-level paths (kasan_slab_free and kasan_mempool_poison_object)
already contain this check. Therefore, remove the __kasan_save_free_info()
wrapper.
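
As an illustration, the gating happens once in the public wrapper; a
simplified sketch (the exact parameter list varies across kernel versions)
of kasan_slab_free() from include/linux/kasan.h:

	static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						    void *object, bool init)
	{
		/*
		 * Gated here, so the internal chain __kasan_slab_free() ->
		 * poison_slab_object() -> kasan_save_free_info() never runs
		 * with KASAN disabled and needs no further checks.
		 */
		if (kasan_enabled())
			return __kasan_slab_free(s, object, init);
		return false;
	}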

Signed-off-by: Sabyrzhan Tasbolatov <snov...@gmail.com>
Fixes: 1e338f4d99e6 ("kasan: introduce ARCH_DEFER_KASAN and unify static key across modes")
---
mm/kasan/generic.c | 2 +-
mm/kasan/kasan.h | 7 +------
mm/kasan/tags.c | 2 +-
3 files changed, 3 insertions(+), 8 deletions(-)

diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index b413c46b3e0..516b49accc4 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -573,7 +573,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
kasan_save_track(&alloc_meta->alloc_track, flags);
}

-void __kasan_save_free_info(struct kmem_cache *cache, void *object)
+void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
struct kasan_free_meta *free_meta;

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 07fa7375a84..fc9169a5476 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -399,12 +399,7 @@ void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack);
void kasan_save_track(struct kasan_track *track, gfp_t flags);
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);

-void __kasan_save_free_info(struct kmem_cache *cache, void *object);
-static inline void kasan_save_free_info(struct kmem_cache *cache, void *object)
-{
- if (kasan_enabled())
- __kasan_save_free_info(cache, object);
-}
+void kasan_save_free_info(struct kmem_cache *cache, void *object);

#ifdef CONFIG_KASAN_GENERIC
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index b9f31293622..d65d48b85f9 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -142,7 +142,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
save_stack_info(cache, object, flags, false);
}

-void __kasan_save_free_info(struct kmem_cache *cache, void *object)
+void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
save_stack_info(cache, object, 0, true);
}
--
2.34.1

[PATCH 2/2] kasan: cleanup of kasan_enabled() checks

Deduplicate kasan_enabled() checks that are already performed by callers.

* Altered functions:

check_page_allocation
Delete the check because its callers are already gated by the
kasan_enabled() checks in their wrappers in include/linux/kasan.h:
  __kasan_kfree_large
  __kasan_mempool_poison_pages
  __kasan_mempool_poison_object

kasan_populate_vmalloc, kasan_release_vmalloc
Add wrappers in include/linux/kasan.h and rename the implementations to
__kasan_populate_vmalloc / __kasan_release_vmalloc, since these functions
are called externally from mm/vmalloc.c.

__kasan_unpoison_vmalloc, __kasan_poison_vmalloc
Delete the checks because the respective wrappers in include/linux/kasan.h
already perform kasan_enabled() checks.

release_free_meta -- Delete the check because the higher-level caller path
already has it. See the call chain:

__kasan_slab_free              -- has the check already
__kasan_mempool_poison_object  -- has the check already
  poison_slab_object
    kasan_save_free_info
      release_free_meta
        kasan_enabled()        -- delete here

Signed-off-by: Sabyrzhan Tasbolatov <snov...@gmail.com>
---
include/linux/kasan.h | 20 ++++++++++++++++++--
mm/kasan/common.c | 3 ---
mm/kasan/generic.c | 3 ---
mm/kasan/shadow.c | 20 ++++----------------
4 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d12e1a5f5a9..f335c1d7b61 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -571,11 +571,27 @@ static inline void kasan_init_hw_tags(void) { }
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+static inline int kasan_populate_vmalloc(unsigned long addr,
+ unsigned long size, gfp_t gfp_mask)
+{
+ if (kasan_enabled())
+ return __kasan_populate_vmalloc(addr, size, gfp_mask);
+ return 0;
+}
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end,
unsigned long flags);
+static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end,
+ unsigned long flags)
+{
+ if (kasan_enabled())
+ return __kasan_release_vmalloc(start, end, free_region_start,
+ free_region_end, flags);
+}

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d4c14359fea..22e5d67ff06 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -305,9 +305,6 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,

static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
- if (!kasan_enabled())
- return false;
-
if (ptr != page_address(virt_to_head_page(ptr))) {
kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
return true;
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 516b49accc4..2b8e73f5f6a 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -506,9 +506,6 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
- if (!kasan_enabled())
- return;
-
/* Check if free meta is valid. */
if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
return;
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 5d2a876035d..cf842b620a2 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -354,7 +354,7 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask
return 0;
}

-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
+static int __kasan_populate_vmalloc_do(unsigned long start, unsigned long end, gfp_t gfp_mask)
{
unsigned long nr_pages, nr_total = PFN_UP(end - start);
struct vmalloc_populate_data data;
@@ -403,14 +403,11 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_
return ret;
}

-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
{
unsigned long shadow_start, shadow_end;
int ret;

- if (!kasan_enabled())
- return 0;
-
if (!is_vmalloc_or_module_addr((void *)addr))
return 0;

@@ -432,7 +429,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mas
shadow_start = PAGE_ALIGN_DOWN(shadow_start);
shadow_end = PAGE_ALIGN(shadow_end);

- ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
+ ret = __kasan_populate_vmalloc_do(shadow_start, shadow_end, gfp_mask);
if (ret)
return ret;

@@ -574,7 +571,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
* pages entirely covered by the free region, we will not run in to any
* trouble - any simultaneous allocations will be for disjoint regions.
*/
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
unsigned long free_region_end,
unsigned long flags)
@@ -583,9 +580,6 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long region_start, region_end;
unsigned long size;

- if (!kasan_enabled())
- return;
-
region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

@@ -634,9 +628,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
* with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
*/

- if (!kasan_enabled())
- return (void *)start;
-
if (!is_vmalloc_or_module_addr(start))
return (void *)start;

@@ -659,9 +650,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
*/
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
- if (!kasan_enabled())
- return;
-
if (!is_vmalloc_or_module_addr(start))
return;

--
2.34.1
