
When the netfs_io_request struct's work item is queued, it must be supplied
with a ref on the request to prevent the request from being deallocated
whilst on the queue or whilst it is being processed.  This is tricky to
manage as we have to get a ref before we try to queue the work item, and
then we may find that it is already queued and thus already holding a ref -
in which case we have to try to get rid of the extra ref again.

The problem comes if we're in BH or IRQ context and need to drop the ref:
if netfs_put_request() reduces the count to 0, we have to do the cleanup -
but the cleanup may need to wait (for instance, for the work item to
finish), which we can't do in that context.

Fix this by adding a new work item to the request, ->cleanup_work, and
dispatching that when the refcount hits zero.  That work item can then
synchronously cancel any outstanding work on the main work item before
doing the cleanup.

Adding a new work item also deals with another problem upstream, where the
put function sometimes changes the work func and requeues the work item -
which has occasionally caused the cleanup to happen incorrectly in the
past.

As a bonus, this allows us to get rid of the 'was_async' parameter from a
bunch of functions.  That parameter indicated whether the put function
might not be permitted to sleep.
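
To illustrate the resulting lifetime scheme, here is a minimal, hypothetical
sketch (not part of the patch; the my_request names are invented and only
the shape of the code mirrors what netfs_put_request() and
netfs_free_request() do): the final put never sleeps itself - it only queues
the dedicated cleanup work item, which runs in process context and can
therefore cancel_work_sync() the main work item before freeing the object.

#include <linux/container_of.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_request {
        refcount_t              ref;
        struct work_struct      work;           /* result collection worker */
        struct work_struct      cleanup_work;   /* runs only after the final put */
};

static void my_request_worker(struct work_struct *work)
{
        /* Result collection would go here; it carries no ref of its own. */
}

static void my_request_cleanup(struct work_struct *work)
{
        struct my_request *req =
                container_of(work, struct my_request, cleanup_work);

        /* Process context: we may sleep, so we can wait for the worker. */
        cancel_work_sync(&req->work);
        kfree(req);
}

static struct my_request *my_request_alloc(void)
{
        struct my_request *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (!req)
                return NULL;
        refcount_set(&req->ref, 1);
        INIT_WORK(&req->work, my_request_worker);
        INIT_WORK(&req->cleanup_work, my_request_cleanup);
        return req;
}

/* Safe in BH/IRQ context: the final put only queues the cleanup work. */
static void my_request_put(struct my_request *req)
{
        if (refcount_dec_and_test(&req->ref))
                queue_work(system_unbound_wq, &req->cleanup_work);
}
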
Fixes: 3d3c950467 ("netfs: Provide readahead and readpage netfs helpers")
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/20250519090707.2848510-4-dhowells@redhat.com
cc: Paulo Alcantara <pc@manguebit.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Steve French <stfrench@microsoft.com>
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

static void netfs_free_request(struct work_struct *work);

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
                                             struct file *file,
                                             loff_t start, size_t len,
                                             enum netfs_io_origin origin)
{
        static atomic_t debug_ids;
        struct inode *inode = file ? file_inode(file) : mapping->host;
        struct netfs_inode *ctx = netfs_inode(inode);
        struct netfs_io_request *rreq;
        mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
        struct kmem_cache *cache = mempool->pool_data;
        int ret;

        for (;;) {
                rreq = mempool_alloc(mempool, GFP_KERNEL);
                if (rreq)
                        break;
                msleep(10);
        }

        memset(rreq, 0, kmem_cache_size(cache));
        INIT_WORK(&rreq->cleanup_work, netfs_free_request);
        rreq->start = start;
        rreq->len = len;
        rreq->origin = origin;
        rreq->netfs_ops = ctx->ops;
        rreq->mapping = mapping;
        rreq->inode = inode;
        rreq->i_size = i_size_read(inode);
        rreq->debug_id = atomic_inc_return(&debug_ids);
        rreq->wsize = INT_MAX;
        rreq->io_streams[0].sreq_max_len = ULONG_MAX;
        rreq->io_streams[0].sreq_max_segs = 0;
        spin_lock_init(&rreq->lock);
        INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
        INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
        init_waitqueue_head(&rreq->waitq);
        refcount_set(&rreq->ref, 2);

        if (origin == NETFS_READAHEAD ||
            origin == NETFS_READPAGE ||
            origin == NETFS_READ_GAPS ||
            origin == NETFS_READ_SINGLE ||
            origin == NETFS_READ_FOR_WRITE ||
            origin == NETFS_DIO_READ) {
                INIT_WORK(&rreq->work, netfs_read_collection_worker);
                rreq->io_streams[0].avail = true;
        } else {
                INIT_WORK(&rreq->work, netfs_write_collection_worker);
        }

        /* The IN_PROGRESS flag comes with a ref. */
        __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);

        if (file && file->f_flags & O_NONBLOCK)
                __set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
        if (rreq->netfs_ops->init_request) {
                ret = rreq->netfs_ops->init_request(rreq, file);
                if (ret < 0) {
                        mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
                        return ERR_PTR(ret);
                }
        }

        atomic_inc(&ctx->io_count);
        trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), netfs_rreq_trace_new);
        netfs_proc_add_rreq(rreq);
        netfs_stat(&netfs_n_rh_rreq);
        return rreq;
}

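/*
 * Get a ref on a request.
 */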
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
        int r;

        __refcount_inc(&rreq->ref, &r);
        trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

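/*
 * Unhook and put all the subrequests attached to the request's I/O streams.
 */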
void netfs_clear_subrequests(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;
        struct netfs_io_stream *stream;
        int s;

        for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
                stream = &rreq->io_streams[s];
                while (!list_empty(&stream->subrequests)) {
                        subreq = list_first_entry(&stream->subrequests,
                                                  struct netfs_io_subrequest, rreq_link);
                        list_del(&subreq->rreq_link);
                        netfs_put_subrequest(subreq, netfs_sreq_trace_put_clear);
                }
        }
}

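/*
 * RCU callback that returns the request's memory to its mempool.
 */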
static void netfs_free_request_rcu(struct rcu_head *rcu)
{
        struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

        mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
        netfs_stat_d(&netfs_n_rh_rreq);
}

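/*
 * Clean up and free a request.  This runs from ->cleanup_work after the last
 * ref has been put, so it is allowed to sleep.
 */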
static void netfs_free_request(struct work_struct *work)
{
        struct netfs_io_request *rreq =
                container_of(work, struct netfs_io_request, cleanup_work);
        struct netfs_inode *ictx = netfs_inode(rreq->inode);
        unsigned int i;

        trace_netfs_rreq(rreq, netfs_rreq_trace_free);

        /* Cancel/flush the result collection worker.  That does not carry a
         * ref of its own, so we must wait for it somewhere.
         */
        cancel_work_sync(&rreq->work);

        netfs_proc_del_rreq(rreq);
        netfs_clear_subrequests(rreq);
        if (rreq->netfs_ops->free_request)
                rreq->netfs_ops->free_request(rreq);
        if (rreq->cache_resources.ops)
                rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
        if (rreq->direct_bv) {
                for (i = 0; i < rreq->direct_bv_count; i++) {
                        if (rreq->direct_bv[i].bv_page) {
                                if (rreq->direct_bv_unpin)
                                        unpin_user_page(rreq->direct_bv[i].bv_page);
                        }
                }
                kvfree(rreq->direct_bv);
        }
        rolling_buffer_clear(&rreq->buffer);

        if (atomic_dec_and_test(&ictx->io_count))
                wake_up_var(&ictx->io_count);
        call_rcu(&rreq->rcu, netfs_free_request_rcu);
}

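/*
 * Drop a ref on a request.  The final put defers the cleanup to
 * ->cleanup_work, so this is safe to call from BH or IRQ context.
 */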
void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
        unsigned int debug_id;
        bool dead;
        int r;

        if (rreq) {
                debug_id = rreq->debug_id;
                dead = __refcount_dec_and_test(&rreq->ref, &r);
                trace_netfs_rreq_ref(debug_id, r - 1, what);
                if (dead)
                        WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work));
        }
}

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;
        mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
        struct kmem_cache *cache = mempool->pool_data;

        for (;;) {
                subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool,
                                       GFP_KERNEL);
                if (subreq)
                        break;
                msleep(10);
        }

        memset(subreq, 0, kmem_cache_size(cache));
        INIT_WORK(&subreq->work, NULL);
        INIT_LIST_HEAD(&subreq->rreq_link);
        refcount_set(&subreq->ref, 2);
        subreq->rreq = rreq;
        subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
        netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
        netfs_stat(&netfs_n_rh_sreq);
        return subreq;
}

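/*
 * Get a ref on a subrequest.
 */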
void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
                          enum netfs_sreq_ref_trace what)
{
        int r;

        __refcount_inc(&subreq->ref, &r);
        trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
                             what);
}

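/*
 * Free a subrequest and drop the ref it holds on its parent request.
 */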
static void netfs_free_subrequest(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_request *rreq = subreq->rreq;

        trace_netfs_sreq(subreq, netfs_sreq_trace_free);
        if (rreq->netfs_ops->free_subrequest)
                rreq->netfs_ops->free_subrequest(subreq);
        mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
        netfs_stat_d(&netfs_n_rh_sreq);
        netfs_put_request(rreq, netfs_rreq_trace_put_subreq);
}

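/*
 * Drop a ref on a subrequest, freeing it if that was the last ref.
 */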
void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
                          enum netfs_sreq_ref_trace what)
{
        unsigned int debug_index = subreq->debug_index;
        unsigned int debug_id = subreq->rreq->debug_id;
        bool dead;
        int r;

        dead = __refcount_dec_and_test(&subreq->ref, &r);
        trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
        if (dead)
                netfs_free_subrequest(subreq);
}