mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-22 23:13:01 +02:00
Merge patch series "netfs: Fix use of fscache with ceph"
David Howells <dhowells@redhat.com> says: Here are a couple of patches that fix the use of fscache with ceph: (1) Fix the read collector to mark the write request that it creates to copy data to the cache with NETFS_RREQ_OFFLOAD_COLLECTION so that it will run the write collector on a workqueue as it's meant to run in the background and the app isn't going to wait for it. (2) Fix the read collector to wake up the copy-to-cache write request after it sets NETFS_RREQ_ALL_QUEUED if the write request doesn't have any subrequests left on it. ALL_QUEUED indicates that there won't be any more subreqs coming and the collector should clean up - except that an event is needed to trigger that, but it only gets events from subreq termination and so the last event can beat us to setting ALL_QUEUED. * patches from https://lore.kernel.org/20250711151005.2956810-1-dhowells@redhat.com: netfs: Fix race between cache write completion and ALL_QUEUED being set netfs: Fix copy-to-cache so that it performs collection with ceph+fscache Link: https://lore.kernel.org/20250711151005.2956810-1-dhowells@redhat.com Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
commit
86ab0c1009
|
@ -110,6 +110,8 @@ static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
|
|||
if (!creq->io_streams[1].avail)
|
||||
goto cancel_put;
|
||||
|
||||
__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &creq->flags);
|
||||
trace_netfs_copy2cache(rreq, creq);
|
||||
trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
|
||||
netfs_stat(&netfs_n_wh_copy_to_cache);
|
||||
rreq->copy_to_cache = creq;
|
||||
|
@ -154,6 +156,9 @@ void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
|
|||
netfs_issue_write(creq, &creq->io_streams[1]);
|
||||
smp_wmb(); /* Write lists before ALL_QUEUED. */
|
||||
set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
|
||||
trace_netfs_rreq(rreq, netfs_rreq_trace_end_copy_to_cache);
|
||||
if (list_empty_careful(&creq->io_streams[1].subrequests))
|
||||
netfs_wake_collector(creq);
|
||||
|
||||
netfs_put_request(creq, netfs_rreq_trace_put_return);
|
||||
creq->copy_to_cache = NULL;
|
||||
|
|
|
@ -55,6 +55,7 @@
|
|||
EM(netfs_rreq_trace_copy, "COPY ") \
|
||||
EM(netfs_rreq_trace_dirty, "DIRTY ") \
|
||||
EM(netfs_rreq_trace_done, "DONE ") \
|
||||
EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \
|
||||
EM(netfs_rreq_trace_free, "FREE ") \
|
||||
EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \
|
||||
EM(netfs_rreq_trace_recollect, "RECLLCT") \
|
||||
|
@ -559,6 +560,35 @@ TRACE_EVENT(netfs_write,
|
|||
__entry->start, __entry->start + __entry->len - 1)
|
||||
);
|
||||
|
||||
TRACE_EVENT(netfs_copy2cache,
|
||||
TP_PROTO(const struct netfs_io_request *rreq,
|
||||
const struct netfs_io_request *creq),
|
||||
|
||||
TP_ARGS(rreq, creq),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(unsigned int, rreq)
|
||||
__field(unsigned int, creq)
|
||||
__field(unsigned int, cookie)
|
||||
__field(unsigned int, ino)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
struct netfs_inode *__ctx = netfs_inode(rreq->inode);
|
||||
struct fscache_cookie *__cookie = netfs_i_cookie(__ctx);
|
||||
__entry->rreq = rreq->debug_id;
|
||||
__entry->creq = creq->debug_id;
|
||||
__entry->cookie = __cookie ? __cookie->debug_id : 0;
|
||||
__entry->ino = rreq->inode->i_ino;
|
||||
),
|
||||
|
||||
TP_printk("R=%08x CR=%08x c=%08x i=%x ",
|
||||
__entry->rreq,
|
||||
__entry->creq,
|
||||
__entry->cookie,
|
||||
__entry->ino)
|
||||
);
|
||||
|
||||
TRACE_EVENT(netfs_collect,
|
||||
TP_PROTO(const struct netfs_io_request *wreq),
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user