mirror of git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-23 07:23:12 +02:00

commit 3539b1467e94336d5854ebf976d9627bfb65d6c3 upstream.

When running task_work for an exiting task, the task_work is canceled rather than the issue retry attempt being performed. However, the same isn't done for a ring that has been closed. This can lead to requests completing successfully after the ring has been closed, which is confusing and surprising to an application. Rather than checking only the task exit state, also include the ring ref state when deciding whether or not to terminate a given request run from task_work.

Cc: stable@vger.kernel.org # 6.1+
Link: https://github.com/axboe/liburing/discussions/1459
Reported-by: Benedek Thaler <thaler@thaler.hu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
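The fix lands in the io_should_terminate_tw() check used by io_uring_cmd_work() in the file below. As a minimal sketch of the shape that check takes per the description above — the helper name comes from the file, but the body here is an illustration, not necessarily the verbatim upstream implementation:

static inline bool io_should_terminate_tw(struct io_ring_ctx *ctx)
{
	/* cancel task_work if the originating task is exiting... */
	if (current->flags & (PF_KTHREAD | PF_EXITING))
		return true;
	/* ...or, per this fix, if the ring's refs are already dying */
	return percpu_ref_is_dying(&ctx->refs);
}

When this returns true, io_uring_cmd_work() passes IO_URING_F_TASK_DEAD to the driver callback so the request is terminated rather than retried.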
313 lines
8.4 KiB
C
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

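/*
 * Free one cached struct io_async_cmd: drop its iovec backing store,
 * then the entry itself. Used when entries are evicted from the ctx
 * cmd_cache.
 */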
void io_cmd_cache_free(const void *entry)
{
	struct io_async_cmd *ac = (struct io_async_cmd *)entry;

	io_vec_free(&ac->vec);
	kfree(ac);
}

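/*
 * Drop the request's async command data. With the ring lock held, try
 * to return the entry to the ctx cmd_cache; if the issue is unlocked
 * (IO_URING_F_UNLOCKED), leave it attached so the generic async_data
 * cleanup frees it instead.
 */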
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_async_cmd *ac = req->async_data;
	struct io_uring_cmd_data *cache = &ac->data;

	if (cache->op_data) {
		kfree(cache->op_data);
		cache->op_data = NULL;
	}

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;

	io_alloc_cache_vec_kasan(&ac->vec);
	if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&ac->vec);

	if (io_alloc_cache_put(&req->ctx->cmd_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
	}
}

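/* Core cleanup hook: runs io_req_uring_cleanup() with the ring locked. */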
void io_uring_cmd_cleanup(struct io_kiocb *req)
{
	io_req_uring_cleanup(req, 0);
}

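/*
 * Walk the ring's list of cancelable uring_cmd requests and ask each
 * matching driver to cancel by reissuing ->uring_cmd() with
 * IO_URING_F_CANCEL set. Returns true if any command was asked to
 * cancel. The caller must hold the uring_lock.
 */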
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

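/*
 * Undo io_uring_cmd_mark_cancelable(): clear the flag and unhash the
 * request under the submission lock. Called from io_uring_cmd_done().
 */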
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);

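/*
 * task_work runner for a uring_cmd. Per the fix described in the
 * commit message above, IO_URING_F_TASK_DEAD is set not only when the
 * originating task is exiting but also when the ring itself is going
 * away, so the driver terminates the request instead of retrying it.
 */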
static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (io_should_terminate_tw(req->ctx))
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}

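/*
 * Arrange for @task_work_cb to be invoked from task context via
 * io_uring_cmd_work().
 */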
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

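/*
 * Allocate (or recycle from the cmd_cache) the async command data and
 * stash a stable copy of the SQE in it.
 */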
static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_async_cmd *ac;

	/* see io_uring_cmd_get_async_data() */
	BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);

	ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
	if (!ac)
		return -ENOMEM;
	ac->data.op_data = NULL;

	/*
	 * Unconditionally cache the SQE for now - this is only needed for
	 * requests that go async, but prep handlers must ensure that any
	 * SQE data is stable beyond prep. Since uring_cmd is special in
	 * that it doesn't read in per-op data, play it safe and copy the
	 * whole SQE. This can later get relaxed.
	 */
	memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = ac->sqes;
	return 0;
}

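/* Validate uring_cmd SQE fields and flags, then set up async data. */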
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED)
		req->buf_index = READ_ONCE(sqe->buf_index);

	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}

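/*
 * Issue path: translate ring setup flags into issue_flags and hand the
 * command to the file's ->uring_cmd() handler. -EIOCBQUEUED and -EAGAIN
 * are passed through to the core; any other return completes the
 * request here.
 */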
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (io_is_compat(ctx))
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
		return ret;
	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

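/*
 * Import a registered (fixed) buffer for a driver command; only valid
 * if the SQE carried IORING_URING_CMD_FIXED.
 */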
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
		return -EINVAL;

	return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

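/* Vectored variant: import a user iovec array against registered buffers. */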
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_async_cmd *ac = req->async_data;
	int ret;

	if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
		return -EINVAL;

	ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
	if (ret)
		return ret;

	return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
				 issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);

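/* Punt the command to io-wq so the handler may block. */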
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}