diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d18985476654..07a5824ad98d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1459,8 +1459,10 @@ static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
 
 void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
 {
-	io_tw_lock(req->ctx, ts);
-	if (unlikely(io_should_terminate_tw()))
+	struct io_ring_ctx *ctx = req->ctx;
+
+	io_tw_lock(ctx, ts);
+	if (unlikely(io_should_terminate_tw(ctx)))
 		io_req_defer_failed(req, -EFAULT);
 	else if (req->flags & REQ_F_FORCE_ASYNC)
 		io_queue_iowq(req);
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 6123159da448..0f6b47f55c24 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -402,9 +402,9 @@ static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
  * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
  *    our fallback task_work.
  */
-static inline bool io_should_terminate_tw(void)
+static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
 {
-	return current->flags & (PF_KTHREAD | PF_EXITING);
+	return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
 }
 
 static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 1884726dac49..b6c8acd8625e 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -258,7 +258,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	int v;
 
-	if (unlikely(io_should_terminate_tw()))
+	if (unlikely(io_should_terminate_tw(req->ctx)))
 		return -ECANCELED;
 
 	do {
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 018c0ceaf717..be2a0f6c209b 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -307,7 +307,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *t
 	int ret = -ENOENT;
 
 	if (prev) {
-		if (!io_should_terminate_tw()) {
+		if (!io_should_terminate_tw(req->ctx)) {
 			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
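
For reference, a condensed sketch of what the helper looks like once these hunks are applied and how the task_work paths consume it. This is assembled from the hunks above rather than copied verbatim from the tree (the long return statement is wrapped here for readability), and the call site shown is just one of the sites touched by this patch:

/* io_uring/io_uring.h after the change: a dying ring also means "terminate" */
static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
{
	return (current->flags & (PF_KTHREAD | PF_EXITING)) ||
	       percpu_ref_is_dying(&ctx->refs);
}

/* typical caller, as in io_poll_check_events(): bail out once the check fires */
if (unlikely(io_should_terminate_tw(req->ctx)))
	return -ECANCELED;

The net effect is that every task_work handler now passes its ring context in, so work queued against a ring whose reference count is already marked dying gets cancelled instead of being processed.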