WARNING in io_fill_cqe_aux

132 views
Skip to first unread message

Xingyuan Mo

unread,
Jan 19, 2023, 6:38:02 AM
to ax...@kernel.dk, asml.s...@gmail.com, syzk...@googlegroups.com, io-u...@vger.kernel.org
Hello,

Recently, when using our tool to fuzz kernel, the following bug was
triggered.

HEAD commit: 5dc4c995db9e Linux 6.2-rc4
git tree: mainline
compiler: gcc (Ubuntu 10.3.0-1ubuntu1~20.04) 10.3.0
kernel config: https://drive.google.com/file/d/1anGeZxcTgSKNZX4oywvsSfLqw1tcZSTp/view?usp=share_link
C reproducer: https://drive.google.com/file/d/1DxYuWGnFSBhqve-jjXloYhwKpyUm8nDt/view?usp=share_link

IMPORTANT: if you fix the issue, please add the following tag to the commit:
Reported-by: Xingyuan Mo <hdt...@gmail.com>

------------[ cut here ]------------
WARNING: CPU: 1 PID: 36200 at io_uring/io_uring.h:108 io_get_cqe_overflow root/linux-6.2-rc4/io_uring/io_uring.h:108 [inline]
WARNING: CPU: 1 PID: 36200 at io_uring/io_uring.h:108 io_get_cqe root/linux-6.2-rc4/io_uring/io_uring.h:125 [inline]
WARNING: CPU: 1 PID: 36200 at io_uring/io_uring.h:108 io_fill_cqe_aux+0x69b/0x840 root/linux-6.2-rc4/io_uring/io_uring.c:832
Modules linked in:
CPU: 1 PID: 36200 Comm: syz-executor.0 Not tainted 6.2.0-rc4 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014
RIP: 0010:io_get_cqe_overflow root/linux-6.2-rc4/io_uring/io_uring.h:108 [inline]
RIP: 0010:io_get_cqe root/linux-6.2-rc4/io_uring/io_uring.h:125 [inline]
RIP: 0010:io_fill_cqe_aux+0x69b/0x840 root/linux-6.2-rc4/io_uring/io_uring.c:832
Code: fd 48 8d bb a8 00 00 00 be ff ff ff ff e8 dd 1b 02 06 31 ff 89 c5 89 c6 e8 c2 76 7e fd 85 ed 0f 85 44 fa ff ff e8 05 7a 7e fd <0f> 0b e9 38 fa ff ff e8 f9 79 7e fd 31 ff 89 ee e8 a0 76 7e fd 85
RSP: 0018:ffffc90015747b68 EFLAGS: 00010212
RAX: 000000000000016e RBX: ffff8881245b6000 RCX: ffffc90013881000
RDX: 0000000000040000 RSI: ffffffff8401f31b RDI: 0000000000000005
RBP: 0000000000000000 R08: 0000000000000005 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000001
R13: 0000000000000000 R14: 0000000000000000 R15: ffff8881245b6018
FS: 00007fcf02ab4700(0000) GS:ffff888135c00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000001b2e024000 CR3: 00000001054e6000 CR4: 0000000000752ee0
PKRU: 55555554
Call Trace:
<TASK>
__io_post_aux_cqe root/linux-6.2-rc4/io_uring/io_uring.c:880 [inline]
io_post_aux_cqe+0x3b/0x90 root/linux-6.2-rc4/io_uring/io_uring.c:890
io_msg_ring_data root/linux-6.2-rc4/io_uring/msg_ring.c:74 [inline]
io_msg_ring+0x5b9/0xb70 root/linux-6.2-rc4/io_uring/msg_ring.c:227
io_issue_sqe+0x6c2/0x1210 root/linux-6.2-rc4/io_uring/io_uring.c:1856
io_queue_sqe root/linux-6.2-rc4/io_uring/io_uring.c:2028 [inline]
io_submit_sqe root/linux-6.2-rc4/io_uring/io_uring.c:2286 [inline]
io_submit_sqes+0x96c/0x1e10 root/linux-6.2-rc4/io_uring/io_uring.c:2397
__do_sys_io_uring_enter+0xc20/0x2540 root/linux-6.2-rc4/io_uring/io_uring.c:3345
do_syscall_x64 root/linux-6.2-rc4/arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x39/0xb0 root/linux-6.2-rc4/arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x63/0xcd
RIP: 0033:0x7fcf01c8f6cd
Code: c3 e8 17 32 00 00 0f 1f 80 00 00 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fcf02ab3bf8 EFLAGS: 00000246 ORIG_RAX: 00000000000001aa
RAX: ffffffffffffffda RBX: 00007fcf01dbbf80 RCX: 00007fcf01c8f6cd
RDX: 0000000000000000 RSI: 0000000000007b84 RDI: 0000000000000004
RBP: 00007fcf01cfcb05 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 00007fcf01edfb2f R14: 00007fcf01edfcd0 R15: 00007fcf02ab3d80
</TASK>

Jens Axboe

unread,
Jan 19, 2023, 10:54:59 AM
to Xingyuan Mo, asml.s...@gmail.com, syzk...@googlegroups.com, io-u...@vger.kernel.org
I think this should fix it. Pavel?

diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 2d3cd945a531..c98e9c74054b 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -25,6 +25,28 @@ struct io_msg {
u32 flags;
};

+static void io_double_unlock_ctx(struct io_ring_ctx *octx)
+{
+ mutex_unlock(&octx->uring_lock);
+}
+
+static int io_double_lock_ctx(struct io_ring_ctx *octx,
+ unsigned int issue_flags)
+{
+ /*
+ * To ensure proper ordering between the two ctxs, we can only
+ * attempt a trylock on the target. If that fails and we already have
+ * the source ctx lock, punt to io-wq.
+ */
+ if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+ if (!mutex_trylock(&octx->uring_lock))
+ return -EAGAIN;
+ return 0;
+ }
+ mutex_lock(&octx->uring_lock);
+ return 0;
+}
+
void io_msg_ring_cleanup(struct io_kiocb *req)
{
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
@@ -43,20 +65,25 @@ static void io_msg_tw_complete(struct callback_head *head)
struct io_ring_ctx *target_ctx = req->file->private_data;
int ret = 0;

- if (current->flags & PF_EXITING)
+ if (current->flags & PF_EXITING) {
ret = -EOWNERDEAD;
- else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- ret = -EOVERFLOW;
+ } else {
+ mutex_lock(&target_ctx->uring_lock);
+ if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ ret = -EOVERFLOW;
+ mutex_unlock(&target_ctx->uring_lock);
+ }

if (ret < 0)
req_set_fail(req);
io_req_queue_tw_complete(req, ret);
}

-static int io_msg_ring_data(struct io_kiocb *req)
+static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *target_ctx = req->file->private_data;
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+ int ret;

if (msg->src_fd || msg->dst_fd || msg->flags)
return -EINVAL;
@@ -71,33 +98,14 @@ static int io_msg_ring_data(struct io_kiocb *req)
return IOU_ISSUE_SKIP_COMPLETE;
}

- if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
- return 0;
-
- return -EOVERFLOW;
-}
-
-static void io_double_unlock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
-{
- mutex_unlock(&octx->uring_lock);
-}
+ if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+ return -EAGAIN;

-static int io_double_lock_ctx(struct io_ring_ctx *octx,
- unsigned int issue_flags)
-{
- /*
- * To ensure proper ordering between the two ctxs, we can only
- * attempt a trylock on the target. If that fails and we already have
- * the source ctx lock, punt to io-wq.
- */
- if (!(issue_flags & IO_URING_F_UNLOCKED)) {
- if (!mutex_trylock(&octx->uring_lock))
- return -EAGAIN;
- return 0;
- }
- mutex_lock(&octx->uring_lock);
- return 0;
+ ret = -EOVERFLOW;
+ if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+ ret = 0;
+ io_double_unlock_ctx(target_ctx);
+ return ret;
}

static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
@@ -148,7 +156,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
ret = -EOVERFLOW;
out_unlock:
- io_double_unlock_ctx(target_ctx, issue_flags);
+ io_double_unlock_ctx(target_ctx);
return ret;
}

@@ -224,7 +232,7 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)

switch (msg->cmd) {
case IORING_MSG_DATA:
- ret = io_msg_ring_data(req);
+ ret = io_msg_ring_data(req, issue_flags);
break;
case IORING_MSG_SEND_FD:
ret = io_msg_send_fd(req, issue_flags);

--
Jens Axboe

Pavel Begunkov

unread,
Jan 19, 2023, 12:33:16 PM
to Jens Axboe, Xingyuan Mo, syzk...@googlegroups.com, io-u...@vger.kernel.org
Seeing that you added uring_lock locking — was the target
ring IOPOLL? If so, this sounds right; one comment below.


> diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
> index 2d3cd945a531..c98e9c74054b 100644
> --- a/io_uring/msg_ring.c
> +++ b/io_uring/msg_ring.c
[...]
> +
> void io_msg_ring_cleanup(struct io_kiocb *req)
> {
> struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
> @@ -43,20 +65,25 @@ static void io_msg_tw_complete(struct callback_head *head)
> struct io_ring_ctx *target_ctx = req->file->private_data;
> int ret = 0;
>
> - if (current->flags & PF_EXITING)
> + if (current->flags & PF_EXITING) {
> ret = -EOWNERDEAD;
> - else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
> - ret = -EOVERFLOW;
> + } else {
> + mutex_lock(&target_ctx->uring_lock);

It can be conditional, or could use a comment that it's only
necessary in the case of an IOPOLL ring.


> + if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
> + ret = -EOVERFLOW;
> + mutex_unlock(&target_ctx->uring_lock);
> + }
>
> if (ret < 0)
> req_set_fail(req);
> io_req_queue_tw_complete(req, ret);
> }
>

--
Pavel Begunkov

Jens Axboe

unread,
Jan 19, 2023, 12:36:00 PM
to Pavel Begunkov, Xingyuan Mo, syzk...@googlegroups.com, io-u...@vger.kernel.org
Right, this is for the target ring using IOPOLL.

>> diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
>> index 2d3cd945a531..c98e9c74054b 100644
>> --- a/io_uring/msg_ring.c
>> +++ b/io_uring/msg_ring.c
> [...]
>> +
>> void io_msg_ring_cleanup(struct io_kiocb *req)
>> {
>> struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
>> @@ -43,20 +65,25 @@ static void io_msg_tw_complete(struct callback_head *head)
>> struct io_ring_ctx *target_ctx = req->file->private_data;
>> int ret = 0;
>> - if (current->flags & PF_EXITING)
>> + if (current->flags & PF_EXITING) {
>> ret = -EOWNERDEAD;
>> - else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
>> - ret = -EOVERFLOW;
>> + } else {
>> + mutex_lock(&target_ctx->uring_lock);
>
> It can be conditional, or could use a comment that it's only
> necessary in the case of an IOPOLL ring.

I did mention that in the actual git commit message for it, but let's
add a comment as well. I initially figured it's not worth it making it
conditional, but it probably is since not a lot of folks would be using
IOPOLL anyway and sending messages. I'll make that tweak and post the
two patches I made out of this one.

--
Jens Axboe

Reply all
Reply to author
Forward
0 new messages