panic: pool_cache_item_magic_check: mbufpl cpu free list modified (interleaved with "WARNING: SPL NOT LOWERED ON SYSCALL ..." console output)

syzbot

Dec 26, 2019, 10:57:12 PM
to syzkaller-o...@googlegroups.com
Hello,

syzbot found the following crash on:

HEAD commit: fa51c99a Pass correct value into iterator callback for tim..
git tree: openbsd
console output: https://syzkaller.appspot.com/x/log.txt?x=141aba49e00000
kernel config: https://syzkaller.appspot.com/x/.config?x=bf87b6915a88cd0d
dashboard link: https://syzkaller.appspot.com/bug?extid=2c737d948aa3d0cc0003

Unfortunately, I don't have any reproducer for this crash yet.

IMPORTANT: if you fix the bug, please add the following tag to the commit:
Reported-by: syzbot+2c737d...@syzkaller.appspotmail.com

panic: pool_cache_item_magic_check: mbufpl cpu free list modified
(console output interleaved with a concurrent "WARNING: SPL NOT LOWERED ON SYSCALL ... EXIT ..." message)
Stopped at savectx+0xb1: movl $0,%gs:0x530
TID PID UID PRFLAGS PFLAGS CPU COMMAND
*300051 42796 0 0x2 0 0 syz-executor.1
118095 43878 0 0x12 0 1 sshd
savectx() at savectx+0xb1
end of kernel
end trace frame: 0x7f7ffffc5530, count: 14
https://www.openbsd.org/ddb.html describes the minimum info required in bug
reports. Insufficient info makes it difficult to find and fix bugs.
ddb{0}>
ddb{0}> set $lines = 0
ddb{0}> set $maxwidth = 0
ddb{0}> show panic
pool_cache_item_magic_check: mbufpl cpu free list modified: item addr
0xfffffd8064bbbf00+16 0x0!=0x90f3a74d470844a2
ddb{0}> trace
savectx() at savectx+0xb1
end of kernel
end trace frame: 0x7f7ffffc5530, count: -1
ddb{0}> show registers
rdi 0
rsi 0
rbp 0xffff800022da91c0
rbx 0
rdx 0xffff800020aa0018
rcx 0
rax 0x3b
r8 0xffffffff8162ae5f kprintf+0x16f
r9 0x1
r10 0x25
r11 0x9f00815c5e61d645
r12 0
r13 0
r14 0xffff800020aa0018
r15 0
rip 0xffffffff820853f1 savectx+0xb1
cs 0x8
rflags 0x46
rsp 0xffff800022da9140
ss 0x10
savectx+0xb1: movl $0,%gs:0x530
ddb{0}> show proc
PROC (syz-executor.1) pid=300051 stat=onproc
flags process=2<EXEC> proc=0
pri=81, usrpri=81, nice=20
forw=0xffffffffffffffff, list=0xffff800020aa0c70,0xffff800020aa1170
process=0xffff800020aa2e30 user=0xffff800022da4000, vmspace=0xfffffd806e7885c8
estcpu=36, cpticks=1, pctcpu=1.21
user=0, sys=1, intr=0
ddb{0}> ps
PID TID PPID UID S FLAGS WAIT COMMAND
36998 374812 42796 0 2 0 syz-executor.1
20898 349739 80627 0 3 0x2 biowait syz-executor.0
14903 510880 0 0 3 0x14200 bored sosplice
*42796 300051 80627 0 7 0x2 syz-executor.1
80627 204901 62171 0 3 0x82 thrsleep syz-fuzzer
80627 266116 62171 0 3 0x4000082 nanosleep syz-fuzzer
80627 82646 62171 0 3 0x4000082 thrsleep syz-fuzzer
80627 189096 62171 0 3 0x4000082 thrsleep syz-fuzzer
80627 491447 62171 0 3 0x4000082 thrsleep syz-fuzzer
80627 133505 62171 0 3 0x4000082 kqread syz-fuzzer
80627 39285 62171 0 3 0x4000082 thrsleep syz-fuzzer
80627 19457 62171 0 3 0x4000082 thrsleep syz-fuzzer
80627 451305 62171 0 3 0x4000082 nanosleep syz-fuzzer
80627 57884 62171 0 3 0x4000082 thrsleep syz-fuzzer
62171 15190 43878 0 3 0x10008a pause ksh
43878 118095 19185 0 7 0x12 sshd
90915 361885 1 0 3 0x100083 ttyin getty
19185 410781 1 0 3 0x80 select sshd
69789 486971 27464 74 3 0x100092 bpf pflogd
27464 487334 1 0 3 0x80 netio pflogd
33137 294248 50303 73 3 0x100090 kqread syslogd
50303 347176 1 0 3 0x100082 netio syslogd
65384 316226 1 77 3 0x100090 poll dhclient
75343 429494 1 0 3 0x80 poll dhclient
20138 469736 0 0 3 0x14200 pgzero zerothread
45071 324233 0 0 3 0x14200 aiodoned aiodoned
96140 190635 0 0 3 0x14200 syncer update
46290 410261 0 0 3 0x14200 cleaner cleaner
91880 263297 0 0 3 0x14200 reaper reaper
43463 373145 0 0 3 0x14200 pgdaemon pagedaemon
97514 33686 0 0 3 0x14200 bored crynlk
95218 505715 0 0 3 0x14200 bored crypto
9520 135446 0 0 3 0x40014200 acpi0 acpi0
54702 462034 0 0 3 0x40014200 idle1
61172 442000 0 0 3 0x14200 bored softnet
61513 422971 0 0 3 0x14200 bored systqmp
72903 30635 0 0 3 0x14200 bored systq
98955 318542 0 0 3 0x40014200 bored softclock
56973 197462 0 0 3 0x40014200 idle0
56643 43436 0 0 3 0x14200 bored smr
1 366343 0 0 3 0x82 wait init
0 0 -1 0 3 0x10200 scheduler swapper
ddb{0}> show all locks
CPU 0:
exclusive mutex vmmpekpl r = 0 (0xffffffff8264d468)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 mtx_enter_try+0x102
#2 mtx_enter+0x4b sys/kern/kern_lock.c:266
#3 pool_get+0xbf sys/kern/subr_pool.c:578
#4 uvm_mapent_alloc+0x394 sys/uvm/uvm_map.c:1741
#5 uvm_map+0x2a5 sys/uvm/uvm_map.c:1224
#6 uvm_km_kmemalloc_pla+0x11d sys/uvm/uvm_km.c:334
#7 uvm_uarea_alloc+0x51 sys/uvm/uvm_glue.c:274
#8 fork1+0x295 sys/kern/kern_fork.c:366
#9 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#9 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:555
#10 Xsyscall+0x128
Process 20898 (syz-executor.0) thread 0xffff800020aa09f8 (349739)
exclusive rrwlock inode r = 0 (0xfffffd806eb17a28)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 rw_enter+0x453 sys/kern/kern_rwlock.c:309
#2 rrw_enter+0x88 sys/kern/kern_rwlock.c:453
#3 VOP_LOCK+0xf9 sys/kern/vfs_vops.c:615
#4 vn_lock+0x81 sys/kern/vfs_vnops.c:571
#5 vget+0x1c8 sys/kern/vfs_subr.c:672
#6 ufs_ihashget+0x141 sys/ufs/ufs/ufs_ihash.c:119
#7 ffs_vget+0x74 sys/ufs/ffs/ffs_vfsops.c:1323
#8 ufs_lookup+0x14b4 sys/ufs/ufs/ufs_lookup.c:487
#9 VOP_LOOKUP+0x5b sys/kern/vfs_vops.c:91
#10 vfs_lookup+0x7a6 sys/kern/vfs_lookup.c:568
#11 namei+0x63c sys/kern/vfs_lookup.c:249
#12 dounlinkat+0x99 sys/kern/vfs_syscalls.c:1776
#13 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#13 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:555
#14 Xsyscall+0x128
exclusive rrwlock inode r = 0 (0xfffffd806d8d8928)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 rw_enter+0x453 sys/kern/kern_rwlock.c:309
#2 rrw_enter+0x88 sys/kern/kern_rwlock.c:453
#3 VOP_LOCK+0xf9 sys/kern/vfs_vops.c:615
#4 vn_lock+0x81 sys/kern/vfs_vnops.c:571
#5 vfs_lookup+0xe6 sys/kern/vfs_lookup.c:419
#6 namei+0x63c sys/kern/vfs_lookup.c:249
#7 dounlinkat+0x99 sys/kern/vfs_syscalls.c:1776
#8 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#8 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:555
#9 Xsyscall+0x128
Process 42796 (syz-executor.1) thread 0xffff800020aa0018 (300051)
exclusive kernel_lock &kernel_lock r = 0 (0xffffffff82633630)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 syscall+0x400 mi_syscall sys/sys/syscall_mi.h:83 [inline]
#1 syscall+0x400 sys/arch/amd64/amd64/trap.c:555
#2 Xsyscall+0x128
exclusive mutex vmmpekpl r = 0 (0xffffffff8264d468)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 mtx_enter_try+0x102
#2 mtx_enter+0x4b sys/kern/kern_lock.c:266
#3 pool_get+0xbf sys/kern/subr_pool.c:578
#4 uvm_mapent_alloc+0x394 sys/uvm/uvm_map.c:1741
#5 uvm_map+0x2a5 sys/uvm/uvm_map.c:1224
#6 uvm_km_kmemalloc_pla+0x11d sys/uvm/uvm_km.c:334
#7 uvm_uarea_alloc+0x51 sys/uvm/uvm_glue.c:274
#8 fork1+0x295 sys/kern/kern_fork.c:366
#9 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#9 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:555
#10 Xsyscall+0x128
ddb{0}> show malloc
Type InUse MemUse HighUse Limit Requests Type Lim
devbuf 9523 7060K 7574K 78643K 11462 0
pcb 13 8K 8K 78643K 100 0
rtable 108 4K 4K 78643K 406 0
ifaddr 81 16K 17K 78643K 142 0
counters 39 33K 33K 78643K 39 0
ioctlops 0 0K 4K 78643K 1491 0
iov 0 0K 48K 78643K 83 0
mount 1 1K 1K 78643K 1 0
vnodes 1217 76K 77K 78643K 1652 0
UFS quota 1 32K 32K 78643K 1 0
UFS mount 5 36K 36K 78643K 5 0
shm 2 1K 5K 78643K 7 0
VM map 2 1K 1K 78643K 2 0
sem 12 0K 1K 78643K 102 0
dirhash 12 2K 2K 78643K 12 0
ACPI 1809 196K 290K 78643K 12766 0
file desc 5 13K 25K 78643K 451 0
sigio 0 0K 0K 78643K 34 0
proc 61 63K 83K 78643K 592 0
subproc 32 2K 2K 78643K 68 0
NFS srvsock 1 0K 0K 78643K 1 0
NFS daemon 1 16K 16K 78643K 1 0
ip_moptions 0 0K 0K 78643K 20 0
in_multi 71 3K 3K 78643K 107 0
ether_multi 1 0K 0K 78643K 4 0
mrt 0 0K 0K 78643K 6 0
ISOFS mount 1 32K 32K 78643K 1 0
MSDOSFS mount 1 16K 16K 78643K 1 0
ttys 67 307K 307K 78643K 67 0
exec 0 0K 1K 78643K 286 0
pagedep 1 8K 8K 78643K 1 0
inodedep 1 32K 32K 78643K 1 0
newblk 1 0K 0K 78643K 1 0
VM swap 7 26K 26K 78643K 7 0
UVM amap 126 55K 55K 78643K 2707 0
UVM aobj 44 2K 2K 78643K 46 0
memdesc 1 4K 4K 78643K 1 0
crypto data 1 1K 1K 78643K 1 0
ip6_options 0 0K 0K 78643K 102 0
NDP 12 0K 0K 78643K 31 0
temp 135 3027K 3658K 78643K 6042 0
kqueue 0 0K 0K 78643K 4 0
SYN cache 2 16K 16K 78643K 2 0
ddb{0}> show all pools
Name Size Requests Fail Releases Pgreq Pgrel Npage Hiwat Minpg Maxpg Idle
arp 64 13 0 7 1 0 1 1 0 8 0
plcache 128 20 0 0 1 0 1 1 0 8 0
rtpcb 80 52 0 50 1 0 1 1 0 8 0
rtentry 112 81 0 37 2 0 2 2 0 8 0
unpcb 120 299 0 289 1 0 1 1 0 8 0
syncache 264 6 0 6 2 2 0 1 0 8 0
tcpqe 32 77 0 77 1 1 0 1 0 8 0
tcpcb 544 177 0 173 1 0 1 1 0 8 0
inpcb 280 685 0 678 7 5 2 3 0 8 1
rttmr 72 3 0 3 2 1 1 1 0 8 1
nd6 48 9 0 5 1 0 1 1 0 8 0
pkpcb 40 1 0 1 1 1 0 1 0 8 0
ppxss 1128 3 0 3 3 2 1 1 0 8 1
pffrag 232 10 0 10 4 4 0 1 0 482 0
pffrnode 88 10 0 10 4 4 0 1 0 8 0
pffrent 40 365 0 365 4 4 0 1 0 8 0
pfosfp 40 846 0 423 5 0 5 5 0 8 0
pfosfpen 112 1428 0 714 21 0 21 21 0 8 0
pfstitem 24 49 0 29 1 0 1 1 0 8 0
pfstkey 112 49 0 29 1 0 1 1 0 8 0
pfstate 328 49 0 27 3 0 3 3 0 8 0
pfrule 1360 21 0 16 2 1 1 2 0 8 0
art_heap8 4096 1 0 0 1 0 1 1 0 8 0
art_heap4 256 391 0 174 17 3 14 15 0 8 0
art_table 32 392 0 174 2 0 2 2 0 8 0
art_node 16 80 0 40 1 0 1 1 0 8 0
sysvmsgpl 40 60 0 37 1 0 1 1 0 8 0
semupl 112 3 0 3 1 1 0 1 0 8 0
semapl 112 100 0 90 1 0 1 1 0 8 0
shmpl 112 44 0 2 2 0 2 2 0 8 0
dirhash 1024 17 0 0 3 0 3 3 0 8 0
dino1pl 128 2238 0 832 46 0 46 46 0 8 0
ffsino 272 2238 0 832 94 0 94 94 0 8 0
nchpl 144 3117 0 1511 61 0 61 61 0 8 0
uvmvnodes 72 2518 0 0 46 0 46 46 0 8 0
vnodes 208 2518 0 0 133 0 133 133 0 8 0
namei 1024 10303 0 10303 2 1 1 1 0 8 1
percpumem 16 30 0 0 1 0 1 1 0 8 0
vcpupl 1984 4 0 0 1 0 1 1 0 8 0
vmpool 560 6 0 2 1 0 1 1 0 8 0
scxspl 192 8605 0 8604 12 11 1 7 0 8 0
plimitpl 152 47 0 39 1 0 1 1 0 8 0
sigapl 432 644 0 629 3 1 2 3 0 8 0
futexpl 56 10622 0 10622 1 0 1 1 0 8 1
knotepl 112 129 0 110 1 0 1 1 0 8 0
kqueuepl 104 93 0 91 1 0 1 1 0 8 0
pipepl 112 398 0 379 2 1 1 2 0 8 0
fdescpl 488 645 0 629 3 0 3 3 0 8 0
filepl 152 5668 0 5568 14 8 6 7 0 8 2
lockfpl 104 116 0 115 1 0 1 1 0 8 0
lockfspl 48 47 0 46 1 0 1 1 0 8 0
sessionpl 112 20 0 9 1 0 1 1 0 8 0
pgrppl 48 24 0 13 1 0 1 1 0 8 0
ucredpl 96 1483 0 1474 1 0 1 1 0 8 0
zombiepl 144 629 0 629 1 0 1 1 0 8 1
processpl 904 661 0 629 4 0 4 4 0 8 0
procpl 632 1832 0 1791 6 1 5 5 0 8 1
srpgc 64 2 0 2 1 1 0 1 0 8 0
sosppl 128 7 0 7 2 2 0 1 0 8 0
sockpl 384 1065 0 1046 11 7 4 6 0 8 2
mcl64k 65536 258 0 0 33 0 33 33 0 8 1
mcl16k 16384 3 0 0 1 0 1 1 0 8 0
mcl12k 12288 2 0 0 1 0 1 1 0 8 0
mcl9k 9216 3 0 0 1 0 1 1 0 8 0
mcl8k 8192 9 0 0 2 0 2 2 0 8 0
mcl4k 4096 15 0 0 2 0 2 2 0 8 0
mcl2k2 2112 3 0 0 1 0 1 1 0 8 0
mcl2k 2048 171 0 0 21 0 21 21 0 8 0
mtagpl 80 23 0 0 1 0 1 1 0 8 0
mbufpl 256 421 0 0 24 0 24 24 0 8 0
bufpl 280 7386 0 1354 431 0 431 431 0 8 0
anonpl 16 89020 0 69701 96 16 80 95 0 125 0
amapchunkpl 152 4618 0 4460 18 10 8 14 0 158 0
amappl16 192 3372 0 2322 77 23 54 65 0 8 1
amappl15 184 156 0 152 1 0 1 1 0 8 0
amappl14 176 39 0 37 1 0 1 1 0 8 0
amappl12 160 118 0 111 1 0 1 1 0 8 0
amappl11 152 263 0 245 1 0 1 1 0 8 0
amappl10 144 18 0 13 1 0 1 1 0 8 0
amappl9 136 629 0 626 1 0 1 1 0 8 0
amappl8 128 142 0 115 1 0 1 1 0 8 0
amappl7 120 123 0 111 1 0 1 1 0 8 0
amappl6 112 247 0 241 1 0 1 1 0 8 0
amappl5 104 255 0 239 1 0 1 1 0 8 0
amappl4 96 875 0 845 1 0 1 1 0 8 0
amappl3 88 167 0 159 1 0 1 1 0 8 0
amappl2 80 4208 0 4143 3 1 2 3 0 8 0
amappl1 72 24582 0 24140 25 14 11 21 0 8 0
amappl 80 2117 0 2068 2 0 2 2 0 84 0
dma4096 4096 1 0 1 1 1 0 1 0 8 0
dma256 256 6 0 6 1 1 0 1 0 8 0
dma128 128 253 0 253 1 1 0 1 0 8 0
dma64 64 6 0 6 1 1 0 1 0 8 0
dma32 32 7 0 7 1 1 0 1 0 8 0
dma16 16 18 0 17 1 0 1 1 0 8 0
aobjpl 64 45 0 2 1 0 1 1 0 8 0
uaddrrnd 24 651 0 631 1 0 1 1 0 8 0
uaddrbest 32 2 0 0 1 0 1 1 0 8 0
uaddr 24 651 0 631 1 0 1 1 0 8 0
vmmpekpl 168 9542 0 9507 2 0 2 2 0 8 0
vmmpepl 168 89561 0 87344 171 52 119 140 0 357 14
vmsppl 368 650 0 631 2 0 2 2 0 8 0
pdppl 4096 1309 0 1266 6 0 6 6 0 8 0
pvpl 32 260477 0 238030 223 34 189 221 0 265 3
pmappl 232 650 0 631 3 1 2 2 0 8 0
extentpl 40 46 0 29 1 0 1 1 0 8 0
phpool 112 217 0 7 6 0 6 6 0 8 0


---
This bug is generated by a bot. It may contain errors.
See https://goo.gl/tpsmEJ for more information about syzbot.
syzbot engineers can be reached at syzk...@googlegroups.com.

syzbot will keep track of this bug report. See:
https://goo.gl/tpsmEJ#status for how to communicate with syzbot.
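
For context on the check that fired: OpenBSD's pool cache keeps freed items on per-CPU free lists and stamps each free item with "magic" words derived from a per-pool secret; pool_cache_item_magic_check() recomputes those words when an item is handed out again and panics on a mismatch. Here the word at offset +16 reads 0x0 instead of the expected 0x90f3a74d470844a2, which points at something writing to the mbuf after it was freed. The sketch below is a minimal userland illustration of that technique, not the actual sys/kern/subr_pool.c code; the struct layout, the names item_magic_set/item_magic_check and the secret constants are invented for the example and a 64-bit unsigned long is assumed.

/*
 * Illustrative sketch of a per-CPU free-list magic check, in the spirit
 * of the check that produced the panic above.  Not OpenBSD code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache_item {
	struct cache_item *ci_next;     /* free-list linkage */
	unsigned long      ci_magic[2]; /* poison words, valid while free */
};

/* Per-pool secret; the real kernel picks this randomly at pool init. */
static unsigned long pool_magic[2] = {
	0x9e3779b97f4a7c15UL, 0xc2b2ae3d27d4eb4fUL
};

/* Called when an item is put on the per-CPU free list. */
static void
item_magic_set(struct cache_item *ci)
{
	/* Tie the poison to the item address and to its linkage. */
	ci->ci_magic[0] = pool_magic[0] ^ (unsigned long)ci;
	ci->ci_magic[1] = pool_magic[1] ^ (unsigned long)ci->ci_next;
}

/* Called when the item is taken off the free list again. */
static void
item_magic_check(const char *pool_name, struct cache_item *ci)
{
	unsigned long want[2];
	int i;

	want[0] = pool_magic[0] ^ (unsigned long)ci;
	want[1] = pool_magic[1] ^ (unsigned long)ci->ci_next;

	for (i = 0; i < 2; i++) {
		if (ci->ci_magic[i] == want[i])
			continue;
		/* Same shape as the panic string in the report. */
		fprintf(stderr,
		    "%s cpu free list modified: item addr %p+%ld 0x%lx!=0x%lx\n",
		    pool_name, (void *)ci,
		    (long)((char *)&ci->ci_magic[i] - (char *)ci),
		    ci->ci_magic[i], want[i]);
		abort();
	}
}

int
main(void)
{
	struct cache_item *ci = calloc(1, sizeof(*ci));

	if (ci == NULL)
		return 1;
	item_magic_set(ci);                              /* "free" the item */
	memset(&ci->ci_magic[1], 0, sizeof(unsigned long)); /* stray write after free */
	item_magic_check("mbufpl", ci);                  /* detection fires here */
	return 0;
}

Tying the poison to both the item address and the free-list linkage means a corrupted next pointer is also caught before the allocator follows it; a zeroed word in an otherwise free mbufpl item, as in this report, is the typical signature of a use-after-free write.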

Greg Steuck

Dec 26, 2019, 11:41:53 PM
to syzbot, syzkaller-o...@googlegroups.com
#syz dup: pool: cpu free list modifiedmbufpl
