Mon, 15 Mar 2021 12:08:40 -0700
[next] io_uring: also reap iopoll events when cancelling from ring exit work.
--- x/fs/io_uring.c
+++ y/fs/io_uring.c
@@ -333,6 +333,7 @@ struct io_ring_ctx {
unsigned int drain_next: 1;
unsigned int eventfd_async: 1;
unsigned int restricted: 1;
+ unsigned int in_exit_work: 1;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -8489,6 +8490,7 @@ static void io_ring_exit_work(struct wor
struct io_tctx_node *node;
int ret;
+ ctx->in_exit_work = 1;
/*
* If we're doing polled IO and end up having requests being
* submitted async (out-of-line), then completions can come in while
@@ -8654,6 +8656,12 @@ static void io_uring_try_cancel_requests
{
struct io_task_cancel cancel = { .task = task, .files = files, };
struct io_uring_task *tctx = task ? task->io_uring : NULL;
+ bool sickle;
+
+	/* SQPOLL polls itself; also reap iopoll events once the ctx enters exit work */
+ sickle = !(ctx->flags & IORING_SETUP_SQPOLL) && !files;
+ sickle |= ctx->sq_data && ctx->sq_data->thread == current;
+ sickle |= ctx->in_exit_work != 0;
while (1) {
enum io_wq_cancel cret;
@@ -8671,9 +8679,7 @@ static void io_uring_try_cancel_requests
ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
}
- /* SQPOLL thread does its own polling */
- if ((!(ctx->flags & IORING_SETUP_SQPOLL) && !files) ||
- (ctx->sq_data && ctx->sq_data->thread == current)) {
+ if (sickle) {
while (!list_empty_careful(&ctx->iopoll_list)) {
io_iopoll_try_reap_events(ctx);
ret = true;