INFO: rcu detected stall in iterate_cleanup_work (2)


syzbot

Jul 7, 2020, 3:23:21 AM
to linux-...@vger.kernel.org, syzkall...@googlegroups.com, tg...@linutronix.de
Hello,

syzbot found the following crash on:

HEAD commit: cd77006e Merge tag 'hyperv-fixes-signed' of git://git.kern..
git tree: upstream
console output: https://syzkaller.appspot.com/x/log.txt?x=103ff25b100000
kernel config: https://syzkaller.appspot.com/x/.config?x=7be693511b29b338
dashboard link: https://syzkaller.appspot.com/bug?extid=cc8495ea4052b9b79b72
compiler: gcc (GCC) 10.1.0-syz 20200507
syz repro: https://syzkaller.appspot.com/x/repro.syz?x=12e567e5100000
C reproducer: https://syzkaller.appspot.com/x/repro.c?x=126ffccd100000

IMPORTANT: if you fix the bug, please add the following tag to the commit:
Reported-by: syzbot+cc8495...@syzkaller.appspotmail.com

rcu: INFO: rcu_preempt detected stalls on CPUs/tasks:
rcu: 0-...!: (2 GPs behind) idle=89e/1/0x4000000000000000 softirq=9541/9542 fqs=10
(detected by 1, t=10502 jiffies, g=8501, q=157)
Sending NMI from CPU 1 to CPUs 0:
NMI backtrace for cpu 0
CPU: 0 PID: 3843 Comm: kworker/0:10 Not tainted 5.8.0-rc3-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Workqueue: events iterate_cleanup_work
RIP: 0010:check_memory_region_inline mm/kasan/generic.c:172 [inline]
RIP: 0010:check_memory_region+0x9/0x180 mm/kasan/generic.c:192
Code: 00 48 89 f2 be f5 00 00 00 e9 43 25 ff 01 0f 1f 00 48 89 f2 be f8 00 00 00 e9 33 25 ff 01 0f 1f 00 48 85 f6 0f 84 70 01 00 00 <49> 89 f9 41 54 44 0f b6 c2 49 01 f1 55 53 0f 82 18 01 00 00 48 b8
RSP: 0018:ffffc90000007b30 EFLAGS: 00000002
RAX: 0000000000000001 RBX: 0000000000000054 RCX: ffffffff8159836c
RDX: 0000000000000000 RSI: 0000000000000008 RDI: ffffffff8c58fa28
RBP: ffff8880995340c0 R08: 0000000000000000 R09: ffffffff8c58fa2f
R10: fffffbfff18b1f45 R11: 0000000000000001 R12: 0000000000000000
R13: ffff888099534a2a R14: ffff888099534a08 R15: 0000000000000003
FS: 0000000000000000(0000) GS:ffff8880ae600000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00000000004c9088 CR3: 0000000009a79000 CR4: 00000000001406f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
<IRQ>
instrument_atomic_read include/linux/instrumented.h:56 [inline]
test_bit include/asm-generic/bitops/instrumented-non-atomic.h:110 [inline]
hlock_class kernel/locking/lockdep.c:179 [inline]
mark_lock+0xbc/0x1710 kernel/locking/lockdep.c:3937
mark_usage kernel/locking/lockdep.c:3856 [inline]
__lock_acquire+0x1422/0x56e0 kernel/locking/lockdep.c:4334
lock_acquire+0x1f1/0xad0 kernel/locking/lockdep.c:4959
__raw_spin_lock_irq include/linux/spinlock_api_smp.h:128 [inline]
_raw_spin_lock_irq+0x5b/0x80 kernel/locking/spinlock.c:167
__run_hrtimer kernel/time/hrtimer.c:1524 [inline]
__hrtimer_run_queues+0x252/0xfc0 kernel/time/hrtimer.c:1584
hrtimer_interrupt+0x32a/0x930 kernel/time/hrtimer.c:1646
local_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1080 [inline]
__sysvec_apic_timer_interrupt+0x142/0x5e0 arch/x86/kernel/apic/apic.c:1097
asm_call_on_stack+0xf/0x20 arch/x86/entry/entry_64.S:711
</IRQ>
__run_on_irqstack arch/x86/include/asm/irq_stack.h:22 [inline]
run_on_irqstack_cond arch/x86/include/asm/irq_stack.h:48 [inline]
sysvec_apic_timer_interrupt+0xe0/0x120 arch/x86/kernel/apic/apic.c:1091
asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:596
RIP: 0010:__sanitizer_cov_trace_pc+0x55/0x60 kernel/kcov.c:207
Code: 82 e0 13 00 00 83 f8 02 75 20 48 8b 8a e8 13 00 00 8b 92 e4 13 00 00 48 8b 01 48 83 c0 01 48 39 c2 76 07 48 89 34 c1 48 89 01 <c3> 66 2e 0f 1f 84 00 00 00 00 00 41 55 41 54 49 89 fc 55 48 bd eb
RSP: 0018:ffffc90004a3fbc0 EFLAGS: 00000293
RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffffffff866f7422
RDX: ffff8880995340c0 RSI: ffffffff866f742f RDI: 0000000000000001
RBP: dffffc0000000000 R08: 0000000000000001 R09: 0000000000000003
R10: 0000000000000000 R11: 0000000000000000 R12: ffffffff89a6fc80
R13: 0000000000007f07 R14: 0000000000000000 R15: 0000000000000001
get_next_corpse net/netfilter/nf_conntrack_core.c:2215 [inline]
nf_ct_iterate_cleanup+0x11f/0x330 net/netfilter/nf_conntrack_core.c:2259
nf_ct_iterate_cleanup_net net/netfilter/nf_conntrack_core.c:2344 [inline]
nf_ct_iterate_cleanup_net+0x113/0x170 net/netfilter/nf_conntrack_core.c:2329
iterate_cleanup_work+0x45/0x130 net/netfilter/nf_nat_masquerade.c:216
process_one_work+0x94c/0x1670 kernel/workqueue.c:2269
worker_thread+0x64c/0x1120 kernel/workqueue.c:2415
kthread+0x3b5/0x4a0 kernel/kthread.c:291
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:293
INFO: NMI handler (nmi_cpu_backtrace_handler) took too long to run: 0.000 msecs
rcu: rcu_preempt kthread starved for 10482 jiffies! g8501 f0x0 RCU_GP_WAIT_FQS(5) ->state=0x402 ->cpu=1
rcu: Unless rcu_preempt kthread gets sufficient CPU time, OOM is now expected behavior.
rcu: RCU grace-period kthread stack dump:
rcu_preempt I29664 10 2 0x00004000
Call Trace:
context_switch kernel/sched/core.c:3453 [inline]
__schedule+0x8e1/0x1eb0 kernel/sched/core.c:4178
schedule+0xd0/0x2a0 kernel/sched/core.c:4253
schedule_timeout+0x148/0x250 kernel/time/timer.c:1897
rcu_gp_fqs_loop kernel/rcu/tree.c:1874 [inline]
rcu_gp_kthread+0xae5/0x1b50 kernel/rcu/tree.c:2044
kthread+0x3b5/0x4a0 kernel/kthread.c:291
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:293


---
This bug is generated by a bot. It may contain errors.
See https://goo.gl/tpsmEJ for more information about syzbot.
syzbot engineers can be reached at syzk...@googlegroups.com.

syzbot will keep track of this bug report. See:
https://goo.gl/tpsmEJ#status for how to communicate with syzbot.
syzbot can test patches for this bug, for details see:
https://goo.gl/tpsmEJ#testing-patches

Hillf Danton

Jul 7, 2020, 10:13:48 AM
to syzbot, linux-...@vger.kernel.org, syzkall...@googlegroups.com, tg...@linutronix.de, Markus Elfring, Hillf Danton

Tue, 07 Jul 2020 00:23:19 -0700
CPU0 is spinning on a spinlock with BH disabled.

> nf_ct_iterate_cleanup+0x11f/0x330 net/netfilter/nf_conntrack_core.c:2259
> nf_ct_iterate_cleanup_net net/netfilter/nf_conntrack_core.c:2344 [inline]
> nf_ct_iterate_cleanup_net+0x113/0x170 net/netfilter/nf_conntrack_core.c:2329
> iterate_cleanup_work+0x45/0x130 net/netfilter/nf_nat_masquerade.c:216
> process_one_work+0x94c/0x1670 kernel/workqueue.c:2269
> worker_thread+0x64c/0x1120 kernel/workqueue.c:2415
> kthread+0x3b5/0x4a0 kernel/kthread.c:291
> ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:293
> INFO: NMI handler (nmi_cpu_backtrace_handler) took too long to run: 0.000 msecs
> rcu: rcu_preempt kthread starved for 10482 jiffies! g8501 f0x0 RCU_GP_WAIT_FQS(5) ->state=0x402 ->cpu=1
> rcu: Unless rcu_preempt kthread gets sufficient CPU time, OOM is now expected behavior.
> rcu: RCU grace-period kthread stack dump:
> rcu_preempt I29664 10 2 0x00004000
> Call Trace:
> context_switch kernel/sched/core.c:3453 [inline]
> __schedule+0x8e1/0x1eb0 kernel/sched/core.c:4178
> schedule+0xd0/0x2a0 kernel/sched/core.c:4253
> schedule_timeout+0x148/0x250 kernel/time/timer.c:1897

I'm having a difficult time working out why the local APIC timer did not
fire on CPU1. Any light on that is appreciated.
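
For reference, the stalled path is the bucket walk in get_next_corpse();
a simplified sketch of the pattern (paraphrased, not the literal kernel
code):

	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
		local_bh_disable();
		/* Can spin indefinitely here while another CPU holds
		 * nf_conntrack_locks_all_lock; with BH off, this CPU
		 * reports no quiescent state, so RCU stalls.
		 */
		nf_conntrack_lock(lockp);
		/* ... walk the hlist_nulls chain for this bucket ... */
		spin_unlock(lockp);
		local_bh_enable();
		cond_resched();	/* never reached while spinning above */
	}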

syzbot

Jul 19, 2020, 7:18:08 AM
to Markus....@web.de, da...@davemloft.net, hda...@sina.com, j...@mojatatu.com, ji...@resnulli.us, linux-...@vger.kernel.org, net...@vger.kernel.org, syzkall...@googlegroups.com, tg...@linutronix.de, viniciu...@intel.com, xiyou.w...@gmail.com
syzbot has bisected this issue to:

commit 5a781ccbd19e4664babcbe4b4ead7aa2b9283d22
Author: Vinicius Costa Gomes <viniciu...@intel.com>
Date: Sat Sep 29 00:59:43 2018 +0000

tc: Add support for configuring the taprio scheduler

bisection log: https://syzkaller.appspot.com/x/bisect.txt?x=141ed087100000
start commit: cd77006e Merge tag 'hyperv-fixes-signed' of git://git.kern..
git tree: upstream
console output: https://syzkaller.appspot.com/x/log.txt?x=121ed087100000
Reported-by: syzbot+cc8495...@syzkaller.appspotmail.com
Fixes: 5a781ccbd19e ("tc: Add support for configuring the taprio scheduler")

For information about bisection process see: https://goo.gl/tpsmEJ#bisection

Hillf Danton

Jul 19, 2020, 9:47:50 AM
to syzbot, Markus....@web.de, da...@davemloft.net, hda...@sina.com, j...@mojatatu.com, ji...@resnulli.us, linux-...@vger.kernel.org, net...@vger.kernel.org, syzkall...@googlegroups.com, tg...@linutronix.de, viniciu...@intel.com, xiyou.w...@gmail.com

Sun, 19 Jul 2020 04:18:07 -0700
If it's unlikely that any changes can be made on the writer side, add a
spin trylock on the seqcount's reader side to avoid the livelock.

--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1985,10 +1985,30 @@ static bool nf_conntrack_get_tuple_skb(s
 	return true;
 }
 
+static bool __nf_conntrack_lock(spinlock_t *lock, unsigned int seq)
+{
+	if (read_seqcount_retry(&nf_conntrack_generation, seq))
+		return false;
+	if (nf_conntrack_locks_all != false) {
+lock_all:
+		if (!spin_trylock(&nf_conntrack_locks_all_lock))
+			return false;
+		spin_lock(lock);
+		spin_unlock(&nf_conntrack_locks_all_lock);
+		return true;
+	} else {
+		spin_lock(lock);
+		if (nf_conntrack_locks_all == false)
+			return true;
+		spin_unlock(lock);
+		goto lock_all;
+	}
+}
+
 /* Bring out ya dead! */
 static struct nf_conn *
 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
-		void *data, unsigned int *bucket)
+		void *data, unsigned int *bucket, unsigned int seq)
 {
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
@@ -1998,7 +2018,8 @@ get_next_corpse(int (*iter)(struct nf_co
 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
 		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
 		local_bh_disable();
-		nf_conntrack_lock(lockp);
+		if (!__nf_conntrack_lock(lockp, seq))
+			goto next_bkt;
 		if (*bucket < nf_conntrack_htable_size) {
 			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
 				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
@@ -2009,8 +2030,11 @@ get_next_corpse(int (*iter)(struct nf_co
 			}
 		}
 		spin_unlock(lockp);
+next_bkt:
 		local_bh_enable();
 		cond_resched();
+		if (read_seqcount_retry(&nf_conntrack_generation, seq))
+			return NULL;
 	}
 
 	return NULL;
@@ -2032,7 +2056,7 @@ static void nf_ct_iterate_cleanup(int (*
 	for (;;) {
 		sequence = read_seqcount_begin(&nf_conntrack_generation);
 
-		while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+		while ((ct = get_next_corpse(iter, data, &bucket, sequence)) != NULL) {
 			/* Time to push up daises... */
 
 			nf_ct_delete(ct, portid, report);
@@ -2043,6 +2067,7 @@ static void nf_ct_iterate_cleanup(int (*
 		if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
 			break;
 		bucket = 0;
+		cond_resched();
 	}
 }

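For context, the patch leans on the standard seqcount read-retry idiom:
instead of blocking, the reader bails out and retries when a writer has
bumped nf_conntrack_generation. Roughly (illustrative sketch, not part
of the patch itself):

	unsigned int seq;

	do {
		seq = read_seqcount_begin(&nf_conntrack_generation);
		/* Read-side work: give up early (trylock, seq check)
		 * rather than spin if a writer is in flight.
		 */
	} while (read_seqcount_retry(&nf_conntrack_generation, seq));

With the patch, a failed spin_trylock() on nf_conntrack_locks_all_lock
or a stale sequence makes get_next_corpse() drop out, re-enable BH, and
let the outer loop in nf_ct_iterate_cleanup() restart the scan, so CPU0
no longer spins with BH disabled.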
