panic: acquiring blockable sleep lock with spinlock or critical section held

syzbot

Jan 18, 2020, 2:07:10 AM
to syzkaller-o...@googlegroups.com
Hello,

syzbot found the following crash on:

HEAD commit: b6410ec9 Fix usage: domain is not optional with start comm..
git tree: openbsd
console output: https://syzkaller.appspot.com/x/log.txt?x=1760f959e00000
kernel config: https://syzkaller.appspot.com/x/.config?x=bf87b6915a88cd0d
dashboard link: https://syzkaller.appspot.com/bug?extid=6e269594ae7f3818d0e9

Unfortunately, I don't have any reproducer for this crash yet.

IMPORTANT: if you fix the bug, please add the following tag to the commit:
Reported-by: syzbot+6e2695...@syzkaller.appspotmail.com

login: panic: acquiring blockable sleep lock with spinlock or critical section held
WARNING: SPL NOT LOWERED ON SYSCALL ... EXIT ...
Stopped at savectx+0xb1: movl $0,%gs:0x530
TID PID UID PRFLAGS PFLAGS CPU COMMAND
*210227 3166 0 0x2 0 0 syz-executor.1
348508 13235 0 0x12 0 1 sshd
savectx() at savectx+0xb1
end of kernel
end trace frame: 0x7f7ffffc6a50, count: 14
https://www.openbsd.org/ddb.html describes the minimum info required in bug
reports. Insufficient info makes it difficult to find and fix bugs.
ddb{0}>
ddb{0}> set $lines = 0
ddb{0}> set $maxwidth = 0
ddb{0}> show panic
acquiring blockable sleep lock with spinlock or critical section held (kernel_lock) &kernel_lock
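For context, not part of the console output: witness(4) raises this panic when the kernel tries to acquire a lock it classifies as a blockable sleep lock, here the kernel_lock, while a spinning mutex or other critical section is still held. In this trace the faulting thread already holds the sched_lock spinlock and the uvm.fpageqlock mutex taken under uvm_pmr_getpages() (see "show all locks" below). A minimal, hypothetical sketch of that pattern follows; mtx_enter/mtx_leave and KERNEL_LOCK/KERNEL_UNLOCK are the real OpenBSD primitives, while the mutex and function names are made up and this is not the exact code path from the trace.

/*
 * Sketch of the lock-ordering class witness flags, not the code
 * path that crashed here: acquiring a sleepable lock while a
 * spinning mutex is held.
 */
struct mutex example_mtx = MUTEX_INITIALIZER(IPL_VM);	/* hypothetical */

void
example_bad_path(void)
{
	mtx_enter(&example_mtx);	/* spinning mutex held from here on */

	/*
	 * KERNEL_LOCK() may sleep, so with witness enabled this
	 * acquire panics with "acquiring blockable sleep lock with
	 * spinlock or critical section held".
	 */
	KERNEL_LOCK();
	KERNEL_UNLOCK();

	mtx_leave(&example_mtx);
}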
ddb{0}> trace
savectx() at savectx+0xb1
end of kernel
end trace frame: 0x7f7ffffc6a50, count: -1
ddb{0}> show registers
rdi 0
rsi 0
rbp 0xffff800022da9db0
rbx 0
rdx 0xffff800020ad69f8
rcx 0
rax 0x3a
r8 0xffffffff818f616f kprintf+0x16f
r9 0x1
r10 0x25
r11 0xfc4633967e4c1b70
r12 0
r13 0
r14 0xffff800020ad69f8
r15 0
rip 0xffffffff81eea3f1 savectx+0xb1
cs 0x8
rflags 0x46
rsp 0xffff800022da9d30
ss 0x10
savectx+0xb1: movl $0,%gs:0x530
ddb{0}> show proc
PROC (syz-executor.1) pid=210227 stat=onproc
flags process=2<EXEC> proc=0
pri=57, usrpri=57, nice=20
forw=0xffffffffffffffff, list=0xffff800020ad6290,0xffff800020ad6ef8
process=0xffff800020af4380 user=0xffff800022da4000, vmspace=0xfffffd807f000a10
estcpu=36, cpticks=1, pctcpu=0.4
user=0, sys=1, intr=0
ddb{0}> ps
PID TID PPID UID S FLAGS WAIT COMMAND
76774 234294 3166 0 2 0 syz-executor.1
* 3166 210227 31580 0 7 0x2 syz-executor.1
57348 136453 31580 0 3 0x2 biowait syz-executor.0
31580 461086 45755 0 3 0x82 thrsleep syz-fuzzer
31580 49968 45755 0 3 0x4000082 nanosleep syz-fuzzer
31580 272184 45755 0 3 0x4000082 thrsleep syz-fuzzer
31580 230596 45755 0 3 0x4000082 thrsleep syz-fuzzer
31580 39504 45755 0 3 0x4000082 thrsleep syz-fuzzer
31580 86352 45755 0 2 0x4000082 syz-fuzzer
31580 188532 45755 0 3 0x4000082 thrsleep syz-fuzzer
31580 261832 45755 0 3 0x4000082 thrsleep syz-fuzzer
31580 147840 45755 0 3 0x4000082 thrsleep syz-fuzzer
31580 18987 45755 0 2 0x4000002 syz-fuzzer
45755 92764 13235 0 3 0x10008a pause ksh
13235 348508 73319 0 7 0x12 sshd
37576 167007 1 0 3 0x100083 ttyin getty
73319 63108 1 0 3 0x80 select sshd
93397 150514 74699 74 3 0x100092 bpf pflogd
74699 149558 1 0 3 0x80 netio pflogd
29636 72074 28747 73 3 0x100090 kqread syslogd
28747 119773 1 0 3 0x100082 netio syslogd
98942 434619 1 77 3 0x100090 poll dhclient
79894 313183 1 0 3 0x80 poll dhclient
67970 414989 0 0 2 0x14200 zerothread
3171 9150 0 0 3 0x14200 aiodoned aiodoned
72129 199524 0 0 3 0x14200 syncer update
25412 165980 0 0 3 0x14200 cleaner cleaner
61707 38587 0 0 3 0x14200 reaper reaper
93768 391269 0 0 3 0x14200 pgdaemon pagedaemon
95484 163400 0 0 3 0x14200 bored crynlk
46082 385368 0 0 3 0x14200 bored crypto
38622 120205 0 0 3 0x40014200 acpi0 acpi0
13123 190228 0 0 3 0x40014200 idle1
37826 311148 0 0 2 0x14200 softnet
52772 128701 0 0 3 0x14200 bored systqmp
51482 359344 0 0 3 0x14200 bored systq
29868 19403 0 0 3 0x40014200 bored softclock
43009 404929 0 0 3 0x40014200 idle0
54686 258696 0 0 3 0x14200 bored smr
1 386027 0 0 3 0x82 wait init
0 0 -1 0 3 0x10200 scheduler swapper
ddb{0}> show all locks
CPU 0:
exclusive sched_lock &sched_lock r = 1 (0xffffffff826567a8)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 wakeup_n+0x37 sys/kern/kern_synch.c:569
#2 uvm_pmr_getpages+0xd80 sys/uvm/uvm_pmemrange.c:1172
#3 uvm_pglistalloc+0x362 sys/uvm/uvm_page.c:790
#4 uvm_km_kmemalloc_pla+0x238 sys/uvm/uvm_km.c:366
#5 uvm_uarea_alloc+0x51 sys/uvm/uvm_glue.c:274
#6 fork1+0x295 sys/kern/kern_fork.c:365
#7 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#7 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:570
#8 Xsyscall+0x128
exclusive mutex &uvm.fpageqlock r = 0 (0xffffffff82628fd0)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 mtx_enter_try+0x102
#2 mtx_enter+0x4b sys/kern/kern_lock.c:266
#3 uvm_pmr_getpages+0xd2c sys/uvm/uvm_pmemrange.c:1170
#4 uvm_pglistalloc+0x362 sys/uvm/uvm_page.c:790
#5 uvm_km_kmemalloc_pla+0x238 sys/uvm/uvm_km.c:366
#6 uvm_uarea_alloc+0x51 sys/uvm/uvm_glue.c:274
#7 fork1+0x295 sys/kern/kern_fork.c:365
#8 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#8 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:570
#9 Xsyscall+0x128
CPU 1:
exclusive mutex mbufpl r = 0 (0xffffffff82652d78)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 mtx_enter_try+0x102
#2 pool_cache_get+0xa3 pl_enter_try sys/kern/subr_pool.c:109 [inline]
#2 pool_cache_get+0xa3 pool_list_enter sys/kern/subr_pool.c:1797 [inline]
#2 pool_cache_get+0xa3 pool_cache_list_alloc sys/kern/subr_pool.c:1814 [inline]
#2 pool_cache_get+0xa3 sys/kern/subr_pool.c:1887
#3 pool_get+0x91 sys/kern/subr_pool.c:572
#4 m_copym+0x174 m_get sys/kern/uipc_mbuf.c:250 [inline]
#4 m_copym+0x174 sys/kern/uipc_mbuf.c:667
#5 tcp_output+0x15ba sys/netinet/tcp_output.c:673
#6 tcp_usrreq+0xa55
#7 sosend+0x671 sys/kern/uipc_socket.c:549
#8 dofilewritev+0x1b7 sys/kern/sys_generic.c:364
#9 sys_write+0x83 sys/kern/sys_generic.c:284
#10 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#10 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:570
#11 Xsyscall+0x128
Process 3166 (syz-executor.1) thread 0xffff800020ad69f8 (210227)
exclusive kernel_lock &kernel_lock r = 0 (0xffffffff82655880)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 syscall+0x400 mi_syscall sys/sys/syscall_mi.h:83 [inline]
#1 syscall+0x400 sys/arch/amd64/amd64/trap.c:570
#2 Xsyscall+0x128
exclusive sched_lock &sched_lock r = 1 (0xffffffff826567a8)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 wakeup_n+0x37 sys/kern/kern_synch.c:569
#2 uvm_pmr_getpages+0xd80 sys/uvm/uvm_pmemrange.c:1172
#3 uvm_pglistalloc+0x362 sys/uvm/uvm_page.c:790
#4 uvm_km_kmemalloc_pla+0x238 sys/uvm/uvm_km.c:366
#5 uvm_uarea_alloc+0x51 sys/uvm/uvm_glue.c:274
#6 fork1+0x295 sys/kern/kern_fork.c:365
#7 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#7 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:570
#8 Xsyscall+0x128
exclusive mutex &uvm.fpageqlock r = 0 (0xffffffff82628fd0)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 mtx_enter_try+0x102
#2 mtx_enter+0x4b sys/kern/kern_lock.c:266
#3 uvm_pmr_getpages+0xd2c sys/uvm/uvm_pmemrange.c:1170
#4 uvm_pglistalloc+0x362 sys/uvm/uvm_page.c:790
#5 uvm_km_kmemalloc_pla+0x238 sys/uvm/uvm_km.c:366
#6 uvm_uarea_alloc+0x51 sys/uvm/uvm_glue.c:274
#7 fork1+0x295 sys/kern/kern_fork.c:365
#8 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#8 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:570
#9 Xsyscall+0x128
Process 57348 (syz-executor.0) thread 0xffff800020ad6290 (136453)
exclusive rrwlock inode r = 0 (0xfffffd80653e6920)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 rw_enter+0x453 sys/kern/kern_rwlock.c:309
#2 rrw_enter+0x88 sys/kern/kern_rwlock.c:453
#3 VOP_LOCK+0xf9 sys/kern/vfs_vops.c:615
#4 vn_lock+0x81 sys/kern/vfs_vnops.c:571
#5 vget+0x1c8 sys/kern/vfs_subr.c:671
#6 ufs_ihashget+0x141 sys/ufs/ufs/ufs_ihash.c:119
#7 ffs_vget+0x74 sys/ufs/ffs/ffs_vfsops.c:1323
#8 ufs_lookup+0x14b7 sys/ufs/ufs/ufs_lookup.c:487
#9 VOP_LOOKUP+0x5b sys/kern/vfs_vops.c:91
#10 vfs_lookup+0x7a6 sys/kern/vfs_lookup.c:568
#11 namei+0x63c sys/kern/vfs_lookup.c:249
#12 dounlinkat+0x99 sys/kern/vfs_syscalls.c:1776
#13 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#13 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:570
#14 Xsyscall+0x128
exclusive rrwlock inode r = 0 (0xfffffd806ac935e8)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 rw_enter+0x453 sys/kern/kern_rwlock.c:309
#2 rrw_enter+0x88 sys/kern/kern_rwlock.c:453
#3 VOP_LOCK+0xf9 sys/kern/vfs_vops.c:615
#4 vn_lock+0x81 sys/kern/vfs_vnops.c:571
#5 vfs_lookup+0xe6 sys/kern/vfs_lookup.c:419
#6 namei+0x63c sys/kern/vfs_lookup.c:249
#7 dounlinkat+0x99 sys/kern/vfs_syscalls.c:1776
#8 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#8 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:570
#9 Xsyscall+0x128
Process 13235 (sshd) thread 0xffff800020ad9b38 (348508)
exclusive rwlock netlock r = 0 (0xffffffff824e9568)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 solock+0x5a sys/kern/uipc_socket2.c:282
#2 sosend+0x559 sys/kern/uipc_socket.c:537
#3 dofilewritev+0x1b7 sys/kern/sys_generic.c:364
#4 sys_write+0x83 sys/kern/sys_generic.c:284
#5 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#5 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:570
#6 Xsyscall+0x128
ddb{0}> show malloc
Type InUse MemUse HighUse Limit Requests Type Lim
devbuf 9478 6401K 6592K 78643K 10629 0
pcb 13 8K 8K 78643K 27 0
rtable 105 3K 3K 78643K 211 0
ifaddr 49 11K 11K 78643K 59 0
counters 39 33K 33K 78643K 39 0
ioctlops 0 0K 4K 78643K 1473 0
iov 0 0K 12K 78643K 8 0
mount 1 1K 1K 78643K 1 0
vnodes 1218 77K 77K 78643K 1236 0
UFS quota 1 32K 32K 78643K 1 0
UFS mount 5 36K 36K 78643K 5 0
shm 2 1K 1K 78643K 2 0
VM map 2 1K 1K 78643K 2 0
sem 5 0K 0K 78643K 6 0
dirhash 12 2K 2K 78643K 12 0
ACPI 1809 196K 290K 78643K 12766 0
file desc 5 13K 25K 78643K 64 0
proc 60 63K 95K 78643K 444 0
subproc 32 2K 2K 78643K 34 0
NFS srvsock 1 0K 0K 78643K 1 0
NFS daemon 1 16K 16K 78643K 1 0
ip_moptions 0 0K 0K 78643K 4 0
in_multi 33 2K 2K 78643K 33 0
ether_multi 1 0K 0K 78643K 1 0
ISOFS mount 1 32K 32K 78643K 1 0
MSDOSFS mount 1 16K 16K 78643K 1 0
ttys 31 148K 148K 78643K 31 0
exec 0 0K 1K 78643K 209 0
pagedep 1 8K 8K 78643K 1 0
inodedep 1 32K 32K 78643K 1 0
newblk 1 0K 0K 78643K 1 0
VM swap 7 26K 26K 78643K 7 0
UVM amap 113 54K 54K 78643K 1127 0
UVM aobj 4 2K 2K 78643K 4 0
memdesc 1 4K 4K 78643K 1 0
crypto data 1 1K 1K 78643K 1 0
NDP 7 0K 0K 78643K 12 0
temp 74 3012K 3076K 78643K 4201 0
kqueue 3 4K 6K 78643K 4 0
SYN cache 2 16K 16K 78643K 2 0
ddb{0}> show all pools
Name Size Requests Fail Releases Pgreq Pgrel Npage Hiwat Minpg Maxpg Idle
arp 64 6 0 0 1 0 1 1 0 8 0
plcache 128 20 0 0 1 0 1 1 0 8 0
rtpcb 80 21 0 19 1 0 1 1 0 8 0
rtentry 112 45 0 1 2 0 2 2 0 8 0
unpcb 120 111 0 101 1 0 1 1 0 8 0
syncache 264 4 0 4 1 1 0 1 0 8 0
tcpqe 32 150 0 150 1 0 1 1 0 8 1
tcpcb 544 12 0 8 1 0 1 1 0 8 0
inpcb 280 98 0 91 2 0 2 2 0 8 1
nd6 48 4 0 0 1 0 1 1 0 8 0
pffrag 232 4 0 4 1 0 1 1 0 482 1
pffrnode 88 4 0 4 1 0 1 1 0 8 1
pffrent 40 54 0 54 1 0 1 1 0 8 1
pfosfp 40 846 0 423 5 0 5 5 0 8 0
pfosfpen 112 1428 0 714 21 0 21 21 0 8 0
pfstitem 24 17 0 0 1 0 1 1 0 8 0
pfstkey 112 17 0 0 1 0 1 1 0 8 0
pfstate 328 17 0 0 2 0 2 2 0 8 0
pfrule 1360 21 0 16 2 1 1 2 0 8 0
art_heap8 4096 1 0 0 1 0 1 1 0 8 0
art_heap4 256 212 0 0 14 0 14 14 0 8 0
art_table 32 213 0 0 2 0 2 2 0 8 0
art_node 16 44 0 4 1 0 1 1 0 8 0
sysvmsgpl 40 2 0 2 1 0 1 1 0 8 1
semupl 112 1 0 1 1 0 1 1 0 8 1
semapl 112 4 0 1 1 0 1 1 0 8 0
shmpl 112 2 0 0 1 0 1 1 0 8 0
dirhash 1024 17 0 0 3 0 3 3 0 8 0
dino1pl 128 1476 0 67 46 0 46 46 0 8 0
ffsino 272 1476 0 67 95 0 95 95 0 8 0
nchpl 144 1752 0 132 61 0 61 61 0 8 0
uvmvnodes 72 1534 0 0 28 0 28 28 0 8 0
vnodes 208 1534 0 0 81 0 81 81 0 8 0
namei 1024 4548 0 4548 1 0 1 1 0 8 1
percpumem 16 30 0 0 1 0 1 1 0 8 0
vcpupl 1984 3 0 0 1 0 1 1 0 8 0
vmpool 560 3 0 0 1 0 1 1 0 8 0
scxspl 192 4955 0 4954 8 1 7 7 0 8 6
plimitpl 152 19 0 11 1 0 1 1 0 8 0
sigapl 432 263 0 248 3 0 3 3 0 8 1
futexpl 56 633 0 633 1 0 1 1 0 8 1
knotepl 112 54 0 35 1 0 1 1 0 8 0
kqueuepl 104 8 0 6 1 0 1 1 0 8 0
pipelkpl 48 88 0 78 1 0 1 1 0 8 0
pipepl 120 176 0 157 1 0 1 1 0 8 0
fdescpl 496 264 0 248 3 0 3 3 0 8 0
filepl 152 1473 0 1373 5 0 5 5 0 8 1
lockfpl 104 23 0 22 1 0 1 1 0 8 0
lockfspl 48 8 0 7 1 0 1 1 0 8 0
sessionpl 112 18 0 7 1 0 1 1 0 8 0
pgrppl 48 18 0 7 1 0 1 1 0 8 0
ucredpl 96 71 0 62 1 0 1 1 0 8 0
zombiepl 144 248 0 248 1 0 1 1 0 8 1
processpl 896 279 0 248 4 0 4 4 0 8 0
procpl 632 363 0 323 5 0 5 5 0 8 1
sockpl 400 231 0 212 5 0 5 5 0 8 3
mcl64k 65536 2 0 0 1 0 1 1 0 8 0
mcl16k 16384 1 0 0 1 0 1 1 0 8 0
mcl8k 8192 3 0 0 1 0 1 1 0 8 0
mcl4k 4096 6 0 0 1 0 1 1 0 8 0
mcl2k2 2112 1 0 0 1 0 1 1 0 8 0
mcl2k 2048 162 0 0 20 0 20 20 0 8 0
mtagpl 80 2 0 0 1 0 1 1 0 8 0
mbufpl 256 196 0 0 12 0 12 12 0 8 0
bufpl 280 6381 0 1340 361 0 361 361 0 8 0
anonpl 16 39525 0 25246 60 1 59 59 0 125 1
amapchunkpl 152 1225 0 1090 7 0 7 7 0 158 1
amappl16 192 991 0 227 40 0 40 40 0 8 1
amappl15 184 58 0 54 1 0 1 1 0 8 0
amappl14 176 30 0 28 1 0 1 1 0 8 0
amappl13 168 11 0 9 1 0 1 1 0 8 0
amappl12 160 13 0 9 2 1 1 1 0 8 0
amappl11 152 84 0 67 1 0 1 1 0 8 0
amappl10 144 17 0 11 1 0 1 1 0 8 0
amappl9 136 619 0 614 1 0 1 1 0 8 0
amappl8 128 129 0 108 1 0 1 1 0 8 0
amappl7 120 106 0 93 1 0 1 1 0 8 0
amappl6 112 69 0 63 1 0 1 1 0 8 0
amappl5 104 133 0 118 1 0 1 1 0 8 0
amappl4 96 509 0 479 1 0 1 1 0 8 0
amappl3 88 113 0 105 1 0 1 1 0 8 0
amappl2 80 1199 0 1133 3 1 2 3 0 8 0
amappl1 72 15966 0 15526 26 13 13 21 0 8 3
amappl 80 609 0 564 2 0 2 2 0 84 0
dma4096 4096 1 0 1 1 1 0 1 0 8 0
dma256 256 6 0 6 1 1 0 1 0 8 0
dma128 128 253 0 253 1 1 0 1 0 8 0
dma64 64 6 0 6 1 1 0 1 0 8 0
dma32 32 7 0 7 1 1 0 1 0 8 0
dma16 16 18 0 17 1 0 1 1 0 8 0
aobjpl 64 3 0 0 1 0 1 1 0 8 0
uaddrrnd 24 267 0 248 1 0 1 1 0 8 0
uaddrbest 32 2 0 0 1 0 1 1 0 8 0
uaddr 24 267 0 248 1 0 1 1 0 8 0
vmmpekpl 168 5985 0 5955 2 0 2 2 0 8 0
vmmpepl 168 39904 0 37983 120 8 112 112 0 357 24
vmsppl 368 266 0 248 2 0 2 2 0 8 0
pdppl 4096 541 0 499 6 0 6 6 0 8 0
pvpl 32 138594 0 121243 150 1 149 149 0 265 7
pmappl 232 266 0 248 2 0 2 2 0 8 0
extentpl 40 46 0 29 1 0 1 1 0 8 0
phpool 112 168 0 3 5 0 5 5 0 8 0


---
This bug is generated by a bot. It may contain errors.
See https://goo.gl/tpsmEJ for more information about syzbot.
syzbot engineers can be reached at syzk...@googlegroups.com.

syzbot will keep track of this bug report. See:
https://goo.gl/tpsmEJ#status for how to communicate with syzbot.

Anton Lindqvist

Jan 18, 2020, 3:34:55 AM
to syzbot, syzkaller-o...@googlegroups.com
#syz invalid