io_move_task_work_from_local() executes requests via the normal
task_work of a possibly alive task, which will then trip the check.
I was thinking of killing the extra step as it doesn't make sense.
Digging through git garbage turned up the patch below, but I don't
remember whether it was ever tested.
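
For reference, the extra step in question is the re-queue loop
currently in io_move_task_work_from_local(), which bounces every
request from the ring's DEFER_TASKRUN list over to the task's normal
task_work. Roughly, this is the pre-patch code that the diff below
removes:

	node = llist_del_all(&ctx->work_llist);
	while (node) {
		/* each llist entry is embedded in a request */
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		node = node->next;
		/* re-queue onto the task's normal task_work */
		io_req_normal_work_add(req);
	}

With the patch, these requests instead go straight through
__io_fallback_tw() to the worker fallback.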
commit 65560732da185c85f472e9c94e6b8ff147fc4b96
Author: Pavel Begunkov <asml.s...@gmail.com>
Date: Fri Jun 7 13:13:06 2024 +0100
io_uring: skip normal tw with DEFER_TASKRUN
DEFER_TASKRUN execution first falls back to normal task_work and only
then, when the task is dying, to workers. It's cleaner to remove the
middle step and use workers as the only fallback. It also detaches
DEFER_TASKRUN and normal task_work handling from each other.
Signed-off-by: Pavel Begunkov <asml.s...@gmail.com>
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 9789cf8c68c1..d9e3661ff93d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1111,9 +1111,8 @@ static inline struct llist_node *io_llist_xchg(struct llist_head *head,
return xchg(&head->first, new);
}
-static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
{
- struct llist_node *node = llist_del_all(&tctx->task_list);
struct io_ring_ctx *last_ctx = NULL;
struct io_kiocb *req;
@@ -1139,6 +1138,13 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
}
}
+static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+{
+ struct llist_node *node = llist_del_all(&tctx->task_list);
+
+ __io_fallback_tw(node, sync);
+}
+
struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
unsigned int max_entries,
unsigned int *count)
@@ -1287,13 +1293,7 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
struct llist_node *node;
node = llist_del_all(&ctx->work_llist);
- while (node) {
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- io_task_work.node);
-
- node = node->next;
- io_req_normal_work_add(req);
- }
+ __io_fallback_tw(node, false);
}
static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index e46d13e8a215..bc0a800b5ae7 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -128,7 +128,7 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
* Not from an SQE, as those cannot be submitted, but via
* updating tagged resources.
*/
- if (ctx->submitter_task->flags & PF_EXITING)
+ if (percpu_ref_is_dying(&ctx->refs))
lockdep_assert(current_work());
else
lockdep_assert(current == ctx->submitter_task);
--
Pavel Begunkov