uvm_fault: _bus_dmamap_load_mbuf

syzbot

Dec 16, 2019, 10:46:09 PM
to syzkaller-o...@googlegroups.com
Hello,

syzbot found the following crash on:

HEAD commit: e749f6f8 Remove some more show_*_head() functions. Compile..
git tree: openbsd
console output: https://syzkaller.appspot.com/x/log.txt?x=125569b6e00000
kernel config: https://syzkaller.appspot.com/x/.config?x=26ca0a9c07f16a3a
dashboard link: https://syzkaller.appspot.com/bug?extid=49a5043c95abbd492dff

Unfortunately, I don't have any reproducer for this crash yet.

IMPORTANT: if you fix the bug, please add the following tag to the commit:
Reported-by: syzbot+49a504...@syzkaller.appspotmail.com

" "� ~�t�� π ��H�D^�" "� ~�t�� π ��H�D^�uvm_fault(0xffffffff82531700,
0xfffffd0000000018, 0, 1) -> e
kernel: page fault trap, code=0
Stopped at _bus_dmamap_load_mbuf+0xb5: movl 0x18(%r15),%r12d
ddb{1}>
ddb{1}> set $lines = 0
ddb{1}> set $maxwidth = 0
ddb{1}> show panic
kernel page fault
uvm_fault(0xffffffff82531700, 0xfffffd0000000018, 0, 1) -> e
_bus_dmamap_load_mbuf(ffffffff82468ef0,ffff80000024ec00,fffffd8065e21700,401) at _bus_dmamap_load_mbuf+0xb5 sys/arch/amd64/amd64/bus_dma.c:217
end trace frame: 0xffff800020a475d0, count: 0
ddb{1}> trace
_bus_dmamap_load_mbuf(ffffffff82468ef0,ffff80000024ec00,fffffd8065e21700,401) at _bus_dmamap_load_mbuf+0xb5 sys/arch/amd64/amd64/bus_dma.c:217
vio_start(ffff8000001712a8) at vio_start+0x28e vio_encap sys/dev/pv/if_vio.c:1170 [inline]
vio_start(ffff8000001712a8) at vio_start+0x28e sys/dev/pv/if_vio.c:774
if_qstart_compat(ffff800000171520) at if_qstart_compat+0x36 sys/net/if.c:685
ifq_serialize(ffff800000171520,ffff800000171630) at ifq_serialize+0x173 sys/net/ifq.c:108
taskq_thread(ffff800000023080) at taskq_thread+0x9c sys/kern/kern_task.c:368
end trace frame: 0x0, count: -5
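
The trace shows the softnet task queue running vio_start(), whose inlined vio_encap() hands a dequeued mbuf chain to bus_dmamap_load_mbuf(9), which on amd64 resolves to the _bus_dmamap_load_mbuf() frame seen above. A minimal sketch of that hand-off, assuming the usual bus_dma(9) pattern; the function and parameter names (vio_encap_sketch, dmat, map) are hypothetical, not the actual sys/dev/pv/if_vio.c code:

/*
 * Hypothetical sketch of the encap -> bus_dmamap_load_mbuf(9) hand-off.
 * Only bus_dmamap_load_mbuf() and BUS_DMA_NOWAIT are real API; the rest
 * is illustrative.
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

int
vio_encap_sketch(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
{
	int r;

	/*
	 * bus_dmamap_load_mbuf(9) walks the whole mbuf chain to build
	 * DMA segments, so a chain that was freed or corrupted after
	 * being enqueued is first dereferenced here.
	 */
	r = bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT);
	if (r != 0)
		return (r);	/* e.g. EFBIG if the chain needs too many segments */

	/* ... fill virtqueue descriptors from map->dm_segs ... */
	return (0);
}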
ddb{1}> show registers
rdi 0
rsi 0
rbp 0xffff800020a47520
rbx 0xffff800020a474cc
rdx 0xffff800020a473d0
rcx 0
rax 0
r8 0
r9 0x5
r10 0x8a63a3cab281f0fd
r11 0x75bb286736b62547
r12 0
r13 0
r14 0xffff80000024ec00
r15 0xfffffd0000000000
rip 0xffffffff82186295 _bus_dmamap_load_mbuf+0xb5
cs 0x8
rflags 0x10246 __ALIGN_SIZE+0xf246
rsp 0xffff800020a474b0
ss 0x10
_bus_dmamap_load_mbuf+0xb5: movl 0x18(%r15),%r12d
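
The faulting instruction loads a 32-bit field at offset 0x18 from the pointer in %r15, which on amd64 lines up with the m_len member of the mbuf header (m_next at 0x00, m_nextpkt at 0x08, m_data at 0x10, m_len at 0x18), i.e. the per-mbuf length read of the chain walk. A small userland illustration of the address arithmetic; the struct below is a simplified stand-in for that layout (an assumption), not the real <sys/mbuf.h> definition:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the amd64 mbuf header layout (assumption). */
struct mbuf_sketch {
	struct mbuf_sketch	*m_next;	/* 0x00 */
	struct mbuf_sketch	*m_nextpkt;	/* 0x08 */
	char			*m_data;	/* 0x10 */
	unsigned int		 m_len;		/* 0x18 <- movl 0x18(%r15),%r12d */
};

int
main(void)
{
	uint64_t r15 = 0xfffffd0000000000ULL;	/* %r15 from "show registers" */
	size_t off = offsetof(struct mbuf_sketch, m_len);

	printf("offsetof(m_len) = %#zx\n", off);	/* prints 0x18 */
	printf("r15 + offset    = %#llx\n",		/* prints 0xfffffd0000000018, */
	    (unsigned long long)(r15 + off));		/* the uvm_fault address above */
	return 0;
}

In other words, the chain walk followed a non-NULL but unmapped pointer (0xfffffd0000000000), so a plain NULL check on the next-mbuf pointer could not have caught it.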
ddb{1}> show proc
PROC (softnet) pid=359422 stat=onproc
flags process=14000<NOZOMBIE,SYSTEM> proc=200<SYSTEM>
pri=32, usrpri=51, nice=20
forw=0xffffffffffffffff, list=0xffff800020a20000,0xffff800020a209f0
process=0xffff800020a22710 user=0xffff800020a42000, vmspace=0xffffffff82623bb0
estcpu=1, cpticks=1, pctcpu=0.32
user=0, sys=1, intr=0
ddb{1}> ps
PID TID PPID UID S FLAGS WAIT COMMAND
56096 157992 21050 0 3 0x2 biowait syz-executor.1
21050 324442 58261 0 3 0x82 thrsleep syz-fuzzer
21050 477132 58261 0 3 0x4000082 nanosleep syz-fuzzer
21050 421743 58261 0 3 0x4000082 thrsleep syz-fuzzer
21050 324842 58261 0 3 0x4000082 thrsleep syz-fuzzer
21050 292641 58261 0 3 0x4000082 thrsleep syz-fuzzer
21050 344071 58261 0 3 0x4000082 thrsleep syz-fuzzer
21050 424011 58261 0 3 0x4000082 thrsleep syz-fuzzer
21050 222393 58261 0 3 0x4000082 kqread syz-fuzzer
21050 255361 58261 0 3 0x4000082 thrsleep syz-fuzzer
21050 136042 58261 0 3 0x4000082 thrsleep syz-fuzzer
58261 17175 46089 0 3 0x10008a pause ksh
46089 260396 71042 0 3 0x92 select sshd
56143 118445 1 0 3 0x100083 ttyin getty
71042 470154 1 0 3 0x80 select sshd
51216 174334 13550 74 3 0x100092 bpf pflogd
13550 309016 1 0 3 0x80 netio pflogd
51387 375501 47806 73 3 0x100090 kqread syslogd
47806 54886 1 0 3 0x100082 netio syslogd
65539 249935 1 77 7 0x100010 dhclient
15 288517 1 0 2 0x80 dhclient
64389 278992 0 0 2 0x14200 zerothread
48617 77703 0 0 3 0x14200 aiodoned aiodoned
91392 303668 0 0 3 0x14200 syncer update
25222 69413 0 0 3 0x14200 cleaner cleaner
87258 68917 0 0 3 0x14200 reaper reaper
62126 170393 0 0 3 0x14200 pgdaemon pagedaemon
76503 479322 0 0 3 0x14200 bored crynlk
64858 258394 0 0 3 0x14200 bored crypto
58299 359569 0 0 3 0x14200 bored viomb
6701 344013 0 0 3 0x40014200 acpi0 acpi0
39002 124346 0 0 3 0x40014200 idle1
*82267 359422 0 0 7 0x14200 softnet
38049 452234 0 0 2 0x14200 systqmp
55466 438773 0 0 3 0x14200 bored systq
33850 42751 0 0 3 0x40014200 bored softclock
58317 388377 0 0 3 0x40014200 idle0
14648 197234 0 0 3 0x14200 bored smr
1 299452 0 0 3 0x82 wait init
0 0 -1 0 3 0x10200 scheduler swapper
ddb{1}> show all locks
CPU 1:
exclusive mutex &ifq->ifq_mtx r = 0 (0xffff800000171548)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 mtx_enter_try+0x102
#2 mtx_enter+0x4b sys/kern/kern_lock.c:266
#3 ifq_deq_begin+0x31 sys/net/ifq.c:345
#4 vio_start+0xf6 sys/dev/pv/if_vio.c:735
#5 if_qstart_compat+0x36 sys/net/if.c:685
#6 ifq_serialize+0x173 sys/net/ifq.c:108
#7 taskq_thread+0x9c sys/kern/kern_task.c:368
#8 proc_trampoline+0x1c
Process 56096 (syz-executor.1) thread 0xffff800020a989f8 (157992)
exclusive rrwlock inode r = 0 (0xfffffd8007e19e70)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 rw_enter+0x453 sys/kern/kern_rwlock.c:309
#2 rrw_enter+0x88 sys/kern/kern_rwlock.c:453
#3 ufs_ihashins+0x45 sys/ufs/ufs/ufs_ihash.c:140
#4 ffs_vget+0x13e sys/ufs/ffs/ffs_vfsops.c:1352
#5 ffs_inode_alloc+0x1cf sys/ufs/ffs/ffs_alloc.c:392
#6 ufs_mkdir+0xf4 sys/ufs/ufs/ufs_vnops.c:1164
#7 VOP_MKDIR+0xc6 sys/kern/vfs_vops.c:450
#8 domkdirat+0x121 sys/kern/vfs_syscalls.c:2974
#9 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#9 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:555
#10 Xsyscall+0x128
exclusive rrwlock inode r = 0 (0xfffffd806acb46f8)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 rw_enter+0x453 sys/kern/kern_rwlock.c:309
#2 rrw_enter+0x88 sys/kern/kern_rwlock.c:453
#3 VOP_LOCK+0xf9 sys/kern/vfs_vops.c:615
#4 vn_lock+0x81 sys/kern/vfs_vnops.c:571
#5 vfs_lookup+0xe6 sys/kern/vfs_lookup.c:419
#6 namei+0x63c sys/kern/vfs_lookup.c:249
#7 domkdirat+0x75 sys/kern/vfs_syscalls.c:2959
#8 syscall+0x4a4 mi_syscall sys/sys/syscall_mi.h:92 [inline]
#8 syscall+0x4a4 sys/arch/amd64/amd64/trap.c:555
#9 Xsyscall+0x128
Process 82267 (softnet) thread 0xffff800020a20768 (359422)
exclusive kernel_lock &kernel_lock r = 1 (0xffffffff8265cfc8)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 if_qstart_compat+0x1a sys/net/if.c:683
#2 ifq_serialize+0x173 sys/net/ifq.c:108
#3 taskq_thread+0x9c sys/kern/kern_task.c:368
#4 proc_trampoline+0x1c
shared rwlock softnet r = 0 (0xffff8000000230e0)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 taskq_thread+0x8f sys/kern/kern_task.c:367
#2 proc_trampoline+0x1c
exclusive mutex &ifq->ifq_mtx r = 0 (0xffff800000171548)
#0 witness_lock+0x52e sys/kern/subr_witness.c:1163
#1 mtx_enter_try+0x102
#2 mtx_enter+0x4b sys/kern/kern_lock.c:266
#3 ifq_deq_begin+0x31 sys/net/ifq.c:345
#4 vio_start+0xf6 sys/dev/pv/if_vio.c:735
#5 if_qstart_compat+0x36 sys/net/if.c:685
#6 ifq_serialize+0x173 sys/net/ifq.c:108
#7 taskq_thread+0x9c sys/kern/kern_task.c:368
#8 proc_trampoline+0x1c
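
The lock listing matches the ifq dequeue protocol: ifq_deq_begin() returns the head packet with &ifq->ifq_mtx still held, and the mutex is only dropped by a later ifq_deq_commit() or ifq_deq_rollback(), so the mutex still being held here indicates the fault fired while the dequeued mbuf was being encapsulated. A hedged sketch of that pattern; the ifq_deq_* calls are the real net/ifq.c API, while drv_start_sketch and ring_full are hypothetical placeholders:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>

void
drv_start_sketch(struct ifqueue *ifq)
{
	struct mbuf *m;
	int ring_full = 0;	/* placeholder for a real tx-ring check */

	for (;;) {
		/* ifq_deq_begin() takes ifq_mtx and keeps it held. */
		m = ifq_deq_begin(ifq);
		if (m == NULL)
			break;
		if (ring_full) {
			/* Put the packet back and release ifq_mtx. */
			ifq_deq_rollback(ifq, m);
			break;
		}
		/*
		 * Encapsulation (the DMA load in the trace above) runs
		 * here, while ifq_mtx is still held.
		 */
		/* Accept the packet and release ifq_mtx. */
		ifq_deq_commit(ifq, m);
	}
}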
ddb{1}> show malloc
Type InUse MemUse HighUse Limit Requests Type Lim
devbuf 9507 6552K 8039K 78643K 11059 0
pcb 13 8K 8K 78643K 42 0
rtable 95 4K 4K 78643K 248 0
ifaddr 52 11K 13K 78643K 83 0
counters 39 33K 33K 78643K 39 0
ioctlops 0 0K 4K 78643K 1480 0
iov 0 0K 24K 78643K 43 0
mount 1 1K 1K 78643K 1 0
vnodes 1233 77K 78K 78643K 1376 0
UFS quota 1 32K 32K 78643K 1 0
UFS mount 5 36K 36K 78643K 5 0
shm 2 1K 5K 78643K 3 0
VM map 2 1K 1K 78643K 2 0
sem 12 0K 0K 78643K 32 0
dirhash 12 2K 2K 78643K 12 0
ACPI 1810 197K 290K 78643K 12817 0
file desc 4 9K 25K 78643K 169 0
sigio 0 0K 0K 78643K 6 0
proc 60 63K 95K 78643K 459 0
subproc 23 1K 2K 78643K 34 0
NFS srvsock 1 0K 0K 78643K 1 0
NFS daemon 1 16K 16K 78643K 1 0
ip_moptions 0 0K 0K 78643K 22 0
in_multi 37 2K 2K 78643K 48 0
ether_multi 1 0K 0K 78643K 2 0
mrt 0 0K 0K 78643K 4 0
ISOFS mount 1 32K 32K 78643K 1 0
MSDOSFS mount 1 16K 16K 78643K 1 0
ttys 36 159K 159K 78643K 36 0
exec 0 0K 1K 78643K 217 0
pagedep 1 8K 8K 78643K 1 0
inodedep 1 32K 32K 78643K 1 0
newblk 1 0K 0K 78643K 1 0
VM swap 7 26K 26K 78643K 7 0
UVM amap 120 22K 24K 78643K 1543 0
UVM aobj 20 2K 2K 78643K 20 0
memdesc 1 4K 4K 78643K 1 0
crypto data 1 1K 1K 78643K 1 0
ip6_options 0 0K 0K 78643K 25 0
NDP 8 0K 0K 78643K 16 0
temp 121 3030K 3102K 78643K 17591 0
SYN cache 2 16K 16K 78643K 2 0
ddb{1}> show all pools
Name Size Requests Fail Releases Pgreq Pgrel Npage Hiwat Minpg Maxpg
Idle
arp 64 7 0 3 1 0 1 1 0
8 0
plcache 128 20 0 0 1 0 1 1 0
8 0
rtpcb 80 29 0 27 1 0 1 1 0
8 0
rtentry 112 46 0 10 2 0 2 2 0
8 0
unpcb 120 215 0 205 2 1 1 2 0
8 0
syncache 264 6 0 6 3 3 0 1 0
8 0
tcpqe 32 19 0 19 1 1 0 1 0
8 0
tcpcb 544 144 0 140 4 0 4 4 0
8 3
inpcb 280 315 0 308 4 0 4 4 0
8 3
nd6 48 4 0 2 1 0 1 1 0
8 0
pkpcb 40 2 0 2 1 1 0 1 0
8 0
ppxss 1128 3 0 3 1 0 1 1 0
8 1
pffrag 232 4 0 4 2 1 1 1 0
482 1
pffrnode 88 4 0 4 2 1 1 1 0
8 1
pffrent 40 180 0 180 2 1 1 1 0
8 1
pfosfp 40 846 0 423 5 0 5 5 0
8 0
pfosfpen 112 1428 0 714 21 0 21 21 0
8 0
pfstitem 24 27 0 3 2 1 1 1 0
8 0
pfstkey 112 27 0 3 2 1 1 1 0
8 0
pfstate 328 27 0 3 3 1 2 2 0
8 0
pfrule 1360 21 0 16 2 1 1 2 0
8 0
art_heap8 4096 1 0 0 1 0 1 1 0
8 0
art_heap4 256 212 0 0 14 0 14 14 0
8 0
art_table 32 213 0 0 2 0 2 2 0
8 0
art_node 16 45 0 4 1 0 1 1 0
8 0
sysvmsgpl 40 107 0 80 1 0 1 1 0
8 0
semupl 112 1 0 1 1 1 0 1 0
8 0
semapl 112 30 0 20 1 0 1 1 0
8 0
shmpl 112 18 0 0 1 0 1 1 0
8 0
dirhash 1024 17 0 0 3 0 3 3 0
8 0
dino1pl 128 1681 0 276 46 0 46 46 0
8 0
ffsino 272 1681 0 276 95 0 95 95 0
8 0
nchpl 144 2168 0 549 61 0 61 61 0
8 0
uvmvnodes 72 1833 0 0 34 0 34 34 0
8 0
vnodes 208 1833 0 0 97 0 97 97 0
8 0
namei 1024 6463 0 6462 4 3 1 1 0
8 0
percpumem 16 30 0 0 1 0 1 1 0
8 0
scxspl 192 6663 0 6662 16 13 3 7 0
8 2
plimitpl 152 25 0 17 1 0 1 1 0
8 0
sigapl 432 368 0 354 3 1 2 3 0
8 0
futexpl 56 3998 0 3998 1 0 1 1 0
8 1
knotepl 112 72 0 55 1 0 1 1 0
8 0
kqueuepl 104 34 0 32 1 0 1 1 0
8 0
pipepl 160 252 0 235 4 2 2 2 0
8 1
fdescpl 488 369 0 354 3 0 3 3 0
8 0
filepl 152 2734 0 2645 9 2 7 7 0
8 3
lockfpl 104 76 0 75 1 0 1 1 0
8 0
lockfspl 48 31 0 30 1 0 1 1 0
8 0
sessionpl 112 18 0 7 1 0 1 1 0
8 0
pgrppl 48 26 0 15 1 0 1 1 0
8 0
ucredpl 96 230 0 221 1 0 1 1 0
8 0
zombiepl 144 355 0 354 3 2 1 1 0
8 0
processpl 904 385 0 354 4 0 4 4 0
8 0
procpl 632 747 0 707 5 0 5 5 0
8 1
srpgc 64 2 0 0 1 0 1 1 0
8 0
sockpl 384 564 0 545 10 2 8 8 0
8 5
mcl64k 65536 10 0 0 2 0 2 2 0
8 0
mcl16k 16384 2 0 0 1 0 1 1 0
8 0
mcl12k 12288 2 0 0 1 0 1 1 0
8 0
mcl9k 9216 2 0 0 1 0 1 1 0
8 0
mcl8k 8192 4 0 0 1 0 1 1 0
8 0
mcl4k 4096 4 0 0 1 0 1 1 0
8 0
mcl2k2 2112 1 0 0 1 0 1 1 0
8 0
mcl2k 2048 133 0 0 16 0 16 16 0
8 0
mtagpl 80 16 0 0 1 0 1 1 0
8 0
mbufpl 256 305 0 0 18 0 18 18 0
8 0
bufpl 280 7085 0 1340 411 0 411 411 0
8 0
anonpl 16 59755 0 40558 91 2 89 90 0 125
10
amapchunkpl 152 2184 0 2026 16 2 14 14 0
158 7
amappl16 192 2183 0 1148 62 4 58 62 0
8 6
amappl15 184 58 0 55 1 0 1 1 0
8 0
amappl14 176 93 0 90 2 1 1 1 0
8 0
amappl13 168 73 0 70 1 0 1 1 0
8 0
amappl12 160 4 0 1 2 1 1 1 0
8 0
amappl11 152 53 0 37 1 0 1 1 0
8 0
amappl10 144 13 0 7 1 0 1 1 0
8 0
amappl9 136 270 0 266 1 0 1 1 0
8 0
amappl8 128 144 0 110 2 0 2 2 0
8 0
amappl7 120 104 0 93 1 0 1 1 0
8 0
amappl6 112 59 0 53 1 0 1 1 0
8 0
amappl5 104 808 0 793 1 0 1 1 0
8 0
amappl4 96 638 0 602 2 1 1 2 0
8 0
amappl3 88 250 0 240 1 0 1 1 0
8 0
amappl2 80 2021 0 1953 3 1 2 3 0
8 0
amappl1 72 18055 0 17617 28 18 10 21 0
8 0
amappl 80 1007 0 962 2 0 2 2 0
84 0
dma4096 4096 1 0 1 1 1 0 1 0
8 0
dma1024 1024 1 0 0 1 0 1 1 0
8 0
dma256 256 6 0 6 1 1 0 1 0
8 0
dma128 128 253 0 253 1 1 0 1 0
8 0
dma64 64 6 0 6 1 1 0 1 0
8 0
dma32 32 7 0 7 1 1 0 1 0
8 0
dma16 16 18 0 17 1 0 1 1 0
8 0
aobjpl 64 19 0 0 1 0 1 1 0
8 0
uaddrrnd 24 369 0 354 1 0 1 1 0
8 0
uaddrbest 32 2 0 0 1 0 1 1 0
8 0
uaddr 24 369 0 354 1 0 1 1 0
8 0
vmmpekpl 168 6968 0 6935 2 0 2 2 0
8 0
vmmpepl 168 54030 0 51895 160 42 118 127 0 357
16
vmsppl 368 368 0 354 2 0 2 2 0
8 0
pdppl 4096 745 0 708 7 1 6 6 0
8 1
pvpl 32 181061 0 158783 225 12 213 214 0 265
28
pmappl 232 368 0 354 2 1 1 2 0
8 0
extentpl 40 50 0 32 1 0 1 1 0
8 0
phpool 112 178 0 7 5 0 5 5 0
8 0


---
This bug report was generated by a bot and may contain errors.
See https://goo.gl/tpsmEJ for more information about syzbot.
syzbot engineers can be reached at syzk...@googlegroups.com.

syzbot will keep track of this bug report. See:
https://goo.gl/tpsmEJ#status for how to communicate with syzbot.

syzbot

Mar 15, 2020, 11:46:08 PM
to syzkaller-o...@googlegroups.com
Auto-closing this bug as obsolete.
Crashes did not happen for a while, no reproducer and no activity.