The quilt patch titled
Subject: kasan: make kasan_record_aux_stack_noalloc() the default behaviour
has been removed from the -mm tree. Its filename was
kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour.patch
This patch was dropped because it was merged into the mm-stable branch
of git://
git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: Peter Zijlstra <
pet...@infradead.org>
Subject: kasan: make kasan_record_aux_stack_noalloc() the default behaviour
Date: Fri, 22 Nov 2024 16:54:51 +0100
kasan_record_aux_stack_noalloc() was introduced to record a stack trace
without allocating memory in the process. It has been added to callers
which were invoked while a raw_spinlock_t was held. More and more callers
were identified and changed over time. Is it a good thing to have this
while functions try their best to do a lockless setup? The only
downside of having kasan_record_aux_stack() not allocate any memory is
that we end up without a stacktrace if stackdepot runs out of memory and
at the same time the stacktrace was not recorded before. To quote Marco Elver from
https://lore.kernel.org/all/CANpmjNPmQYJ7pv1N3cuU8cP1...@mail.gmail.com/
| I'd be in favor, it simplifies things. And stack depot should be
| able to replenish its pool sufficiently in the "non-aux" cases
| i.e. regular allocations. Worst case we fail to record some
| aux stacks, but I think that's only really bad if there's a bug
| around one of these allocations. In general the probabilities
| of this being a regression are extremely small [...]
Make the kasan_record_aux_stack_noalloc() behaviour default as
kasan_record_aux_stack().
[big...@linutronix.de: dressed the diff as a patch]
Link:
https://lkml.kernel.org/r/20241122155...@linutronix.de
Fixes: 7cb3007ce2da ("kasan: generic: introduce kasan_record_aux_stack_noalloc()")
Signed-off-by: Peter Zijlstra (Intel) <
pet...@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <
big...@linutronix.de>
Reported-by:
syzbot+39f85d...@syzkaller.appspotmail.com
Closes:
https://lore.kernel.org/all/67275485.050a022...@google.com
Reviewed-by: Andrey Konovalov <
andre...@gmail.com>
Reviewed-by: Marco Elver <
el...@google.com>
Reviewed-by: Waiman Long <
lon...@redhat.com>
Cc: Alexander Potapenko <
gli...@google.com>
Cc: Andrey Ryabinin <
ryabin...@gmail.com>
Cc: Ben Segall <
bse...@google.com>
Cc: Boqun Feng <
boqun...@gmail.com>
Cc: Christoph Lameter <
c...@linux.com>
Cc: David Rientjes <
rien...@google.com>
Cc: Dietmar Eggemann <
dietmar....@arm.com>
Cc: Dmitry Vyukov <
dvy...@google.com>
Cc: Frederic Weisbecker <
fred...@kernel.org>
Cc: Hyeonggon Yoo <
42.h...@gmail.com>
Cc: Ingo Molnar <
mi...@redhat.com>
Cc: Jann Horn <
ja...@google.com>
Cc: Joel Fernandes (Google) <
jo...@joelfernandes.org>
Cc: Joonsoo Kim <
iamjoon...@lge.com>
Cc: Josh Triplett <
jo...@joshtriplett.org>
Cc: Juri Lelli <
juri....@redhat.com>
Cc: <
kasa...@googlegroups.com>
Cc: Lai Jiangshan <
jiangs...@gmail.com>
Cc: Liam R. Howlett <Liam.H...@Oracle.com>
Cc: Lorenzo Stoakes <
lorenzo...@oracle.com>
Cc: Mathieu Desnoyers <
mathieu....@efficios.com>
Cc: Mel Gorman <
mgo...@suse.de>
Cc: Neeraj Upadhyay <
neeraj....@kernel.org>
Cc: Paul E. McKenney <
pau...@kernel.org>
Cc: Pekka Enberg <
pen...@kernel.org>
Cc: Roman Gushchin <
roman.g...@linux.dev>
Cc: Steven Rostedt <
ros...@goodmis.org>
Cc:
syzkall...@googlegroups.com
Cc: Tejun Heo <
t...@kernel.org>
Cc: Thomas Gleixner <
tg...@linutronix.de>
Cc: Uladzislau Rezki (Sony) <
ure...@gmail.com>
Cc: Valentin Schneider <
vsch...@redhat.com>
Cc: Vincent Guittot <
vincent...@linaro.org>
Cc: Vincenzo Frascino <
vincenzo...@arm.com>
Cc: Vlastimil Babka <
vba...@suse.cz>
Cc: Zqiang <
qiang.z...@gmail.com>
Signed-off-by: Andrew Morton <
ak...@linux-foundation.org>
---
include/linux/kasan.h | 2 --
include/linux/task_work.h | 3 ---
kernel/irq_work.c | 2 +-
kernel/rcu/tiny.c | 2 +-
kernel/rcu/tree.c | 4 ++--
kernel/sched/core.c | 2 +-
kernel/task_work.c | 14 +-------------
kernel/workqueue.c | 2 +-
mm/kasan/generic.c | 18 ++++++------------
mm/slub.c | 2 +-
10 files changed, 14 insertions(+), 37 deletions(-)
--- a/include/linux/kasan.h~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/include/linux/kasan.h
@@ -491,7 +491,6 @@ void kasan_cache_create(struct kmem_cach
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
-void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
@@ -509,7 +508,6 @@ static inline void kasan_cache_create(st
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
-static inline void kasan_record_aux_stack_noalloc(void *ptr) {}
#endif /* CONFIG_KASAN_GENERIC */
--- a/include/linux/task_work.h~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/include/linux/task_work.h
@@ -19,9 +19,6 @@ enum task_work_notify_mode {
TWA_SIGNAL,
TWA_SIGNAL_NO_IPI,
TWA_NMI_CURRENT,
-
- TWA_FLAGS = 0xff00,
- TWAF_NO_ALLOC = 0x0100,
};
static inline bool task_work_pending(struct task_struct *task)
--- a/kernel/irq_work.c~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/kernel/irq_work.c
@@ -147,7 +147,7 @@ bool irq_work_queue_on(struct irq_work *
if (!irq_work_claim(work))
return false;
- kasan_record_aux_stack_noalloc(work);
+ kasan_record_aux_stack(work);
preempt_disable();
if (cpu != smp_processor_id()) {
--- a/kernel/rcu/tiny.c~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/kernel/rcu/tiny.c
@@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize
void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
if (head)
- kasan_record_aux_stack_noalloc(ptr);
+ kasan_record_aux_stack(ptr);
__kvfree_call_rcu(head, ptr);
}
--- a/kernel/rcu/tree.c~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/kernel/rcu/tree.c
@@ -3083,7 +3083,7 @@ __call_rcu_common(struct rcu_head *head,
}
head->func = func;
head->next = NULL;
- kasan_record_aux_stack_noalloc(head);
+ kasan_record_aux_stack(head);
local_irq_save(flags);
rdp = this_cpu_ptr(&rcu_data);
lazy = lazy_in && !rcu_async_should_hurry();
@@ -3817,7 +3817,7 @@ void kvfree_call_rcu(struct rcu_head *he
return;
}
- kasan_record_aux_stack_noalloc(ptr);
+ kasan_record_aux_stack(ptr);
success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
if (!success) {
run_page_cache_worker(krcp);
--- a/kernel/sched/core.c~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/kernel/sched/core.c
@@ -10590,7 +10590,7 @@ void task_tick_mm_cid(struct rq *rq, str
return;
/* No page allocation under rq lock */
- task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
+ task_work_add(curr, work, TWA_RESUME);
}
void sched_mm_cid_exit_signals(struct task_struct *t)
--- a/kernel/task_work.c~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/kernel/task_work.c
@@ -55,26 +55,14 @@ int task_work_add(struct task_struct *ta
enum task_work_notify_mode notify)
{
struct callback_head *head;
- int flags = notify & TWA_FLAGS;
- notify &= ~TWA_FLAGS;
if (notify == TWA_NMI_CURRENT) {
if (WARN_ON_ONCE(task != current))
return -EINVAL;
if (!IS_ENABLED(CONFIG_IRQ_WORK))
return -EINVAL;
} else {
- /*
- * Record the work call stack in order to print it in KASAN
- * reports.
- *
- * Note that stack allocation can fail if TWAF_NO_ALLOC flag
- * is set and new page is needed to expand the stack buffer.
- */
- if (flags & TWAF_NO_ALLOC)
- kasan_record_aux_stack_noalloc(work);
- else
- kasan_record_aux_stack(work);
+ kasan_record_aux_stack(work);
}
head = READ_ONCE(task->task_works);
--- a/kernel/workqueue.c~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/kernel/workqueue.c
@@ -2180,7 +2180,7 @@ static void insert_work(struct pool_work
debug_work_activate(work);
/* record the work call stack in order to print it in KASAN reports */
- kasan_record_aux_stack_noalloc(work);
+ kasan_record_aux_stack(work);
/* we own @work, set data and link */
set_work_pwq(work, pwq, extra_flags);
--- a/mm/kasan/generic.c~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/mm/kasan/generic.c
@@ -524,7 +524,11 @@ size_t kasan_metadata_size(struct kmem_c
sizeof(struct kasan_free_meta) : 0);
}
-static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
+/*
+ * This function avoids dynamic memory allocations and thus can be called from
+ * contexts that do not allow allocating memory.
+ */
+void kasan_record_aux_stack(void *addr)
{
struct slab *slab = kasan_addr_to_slab(addr);
struct kmem_cache *cache;
@@ -541,17 +545,7 @@ static void __kasan_record_aux_stack(voi
return;
alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
- alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
-}
-
-void kasan_record_aux_stack(void *addr)
-{
- return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
-}
-
-void kasan_record_aux_stack_noalloc(void *addr)
-{
- return __kasan_record_aux_stack(addr, 0);
+ alloc_meta->aux_stack[0] = kasan_save_stack(0, 0);
}
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
--- a/mm/slub.c~kasan-make-kasan_record_aux_stack_noalloc-the-default-behaviour
+++ a/mm/slub.c
@@ -2311,7 +2311,7 @@ bool slab_free_hook(struct kmem_cache *s
* We have to do this manually because the rcu_head is
* not located inside the object.
*/
- kasan_record_aux_stack_noalloc(x);
+ kasan_record_aux_stack(x);
delayed_free->object = x;
call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
_
Patches currently in -mm which might be from
pet...@infradead.org are
x86-disable-execmem_rox-support.patch