iomap: build the writeback code without CONFIG_BLOCK

Allow fuse to use the iomap writeback code even when CONFIG_BLOCK is
not enabled.  Do this with an ifdef instead of a separate file to keep
the iomap_folio_state local to buffered-io.c.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/20250710133343.399917-15-hch@lst.de
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in:
Christoph Hellwig 2025-07-10 15:33:38 +02:00 committed by Christian Brauner
parent c5690dd019
commit 5699b7e21d
No known key found for this signature in database
GPG Key ID: 91C61BC06578DCA2
2 changed files with 64 additions and 55 deletions

View File

@@ -9,9 +9,9 @@ ccflags-y += -I $(src) # needed for trace events
obj-$(CONFIG_FS_IOMAP) += iomap.o
iomap-y += trace.o \
iter.o
iomap-$(CONFIG_BLOCK) += buffered-io.o \
direct-io.o \
iter.o \
buffered-io.o
iomap-$(CONFIG_BLOCK) += direct-io.o \
ioend.o \
fiemap.o \
seek.o

View File

@@ -275,6 +275,46 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
*lenp = plen;
}
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
loff_t pos)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
return srcmap->type != IOMAP_MAPPED ||
(srcmap->flags & IOMAP_F_NEW) ||
pos >= i_size_read(iter->inode);
}
/**
* iomap_read_inline_data - copy inline data into the page cache
* @iter: iteration structure
* @folio: folio to copy to
*
* Copy the inline data in @iter into @folio and zero out the rest of the folio.
* Only a single IOMAP_INLINE extent is allowed at the end of each file.
* Returns zero for success to complete the read, or the usual negative errno.
*/
static int iomap_read_inline_data(const struct iomap_iter *iter,
struct folio *folio)
{
const struct iomap *iomap = iomap_iter_srcmap(iter);
size_t size = i_size_read(iter->inode) - iomap->offset;
size_t offset = offset_in_folio(folio, iomap->offset);
if (folio_test_uptodate(folio))
return 0;
if (WARN_ON_ONCE(size > iomap->length))
return -EIO;
if (offset > 0)
ifs_alloc(iter->inode, folio, iter->flags);
folio_fill_tail(folio, offset, iomap->inline_data, size);
iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
return 0;
}
#ifdef CONFIG_BLOCK
static void iomap_finish_folio_read(struct folio *folio, size_t off,
size_t len, int error)
{
@@ -314,45 +354,6 @@ struct iomap_readpage_ctx {
struct readahead_control *rac;
};
/**
* iomap_read_inline_data - copy inline data into the page cache
* @iter: iteration structure
* @folio: folio to copy to
*
* Copy the inline data in @iter into @folio and zero out the rest of the folio.
* Only a single IOMAP_INLINE extent is allowed at the end of each file.
* Returns zero for success to complete the read, or the usual negative errno.
*/
static int iomap_read_inline_data(const struct iomap_iter *iter,
struct folio *folio)
{
const struct iomap *iomap = iomap_iter_srcmap(iter);
size_t size = i_size_read(iter->inode) - iomap->offset;
size_t offset = offset_in_folio(folio, iomap->offset);
if (folio_test_uptodate(folio))
return 0;
if (WARN_ON_ONCE(size > iomap->length))
return -EIO;
if (offset > 0)
ifs_alloc(iter->inode, folio, iter->flags);
folio_fill_tail(folio, offset, iomap->inline_data, size);
iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
return 0;
}
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
loff_t pos)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
return srcmap->type != IOMAP_MAPPED ||
(srcmap->flags & IOMAP_F_NEW) ||
pos >= i_size_read(iter->inode);
}
static int iomap_readpage_iter(struct iomap_iter *iter,
struct iomap_readpage_ctx *ctx)
{
@@ -545,6 +546,27 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
}
EXPORT_SYMBOL_GPL(iomap_readahead);
static int iomap_read_folio_range(const struct iomap_iter *iter,
struct folio *folio, loff_t pos, size_t len)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
struct bio_vec bvec;
struct bio bio;
bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
return submit_bio_wait(&bio);
}
#else
static int iomap_read_folio_range(const struct iomap_iter *iter,
struct folio *folio, loff_t pos, size_t len)
{
WARN_ON_ONCE(1);
return -EIO;
}
#endif /* CONFIG_BLOCK */
/*
* iomap_is_partially_uptodate checks whether blocks within a folio are
* uptodate or not.
@@ -658,19 +680,6 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
pos + len - 1);
}
static int iomap_read_folio_range(const struct iomap_iter *iter,
struct folio *folio, loff_t pos, size_t len)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
struct bio_vec bvec;
struct bio bio;
bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
return submit_bio_wait(&bio);
}
static int __iomap_write_begin(const struct iomap_iter *iter,
const struct iomap_write_ops *write_ops, size_t len,
struct folio *folio)