blk-mq: move the DMA mapping code to a separate file
While working on the new DMA API I kept getting annoyed by how the DMA mapping code sat right in the middle of the bio splitting code in blk-merge.c. Split it out into a separate file.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250513071433.836797-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7ee4fa04a8
commit b0a4158554
block/Makefile
@@ -5,8 +5,8 @@
 obj-y		:= bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
-			blk-merge.o blk-timeout.o \
-			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
+			blk-merge.o blk-timeout.o blk-lib.o blk-mq.o \
+			blk-mq-tag.o blk-mq-dma.o blk-stat.o \
 			blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
 			genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o \
 			disk-events.o blk-ia-ranges.o early-lookup.o
block/blk-merge.c
@@ -7,7 +7,6 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blk-integrity.h>
-#include <linux/scatterlist.h>
 #include <linux/part_stat.h>
 #include <linux/blk-cgroup.h>
 
@@ -225,27 +224,6 @@ static inline unsigned get_max_io_size(struct bio *bio,
 	return max_sectors & ~(lbs - 1);
 }
 
-/**
- * get_max_segment_size() - maximum number of bytes to add as a single segment
- * @lim: Request queue limits.
- * @paddr: address of the range to add
- * @len: maximum length available to add at @paddr
- *
- * Returns the maximum number of bytes of the range starting at @paddr that can
- * be added to a single segment.
- */
-static inline unsigned get_max_segment_size(const struct queue_limits *lim,
-		phys_addr_t paddr, unsigned int len)
-{
-	/*
-	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
-	 * after having calculated the minimum.
-	 */
-	return min_t(unsigned long, len,
-		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
-		    (unsigned long)lim->max_segment_size - 1) + 1);
-}
-
 /**
  * bvec_split_segs - verify whether or not a bvec should be split in the middle
  * @lim: [in] queue limits to split based on
@@ -473,117 +451,6 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 	return nr_phys_segs;
 }
 
-struct phys_vec {
-	phys_addr_t paddr;
-	u32 len;
-};
-
-static bool blk_map_iter_next(struct request *req,
-		struct req_iterator *iter, struct phys_vec *vec)
-{
-	unsigned int max_size;
-	struct bio_vec bv;
-
-	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-		if (!iter->bio)
-			return false;
-		vec->paddr = bvec_phys(&req->special_vec);
-		vec->len = req->special_vec.bv_len;
-		iter->bio = NULL;
-		return true;
-	}
-
-	if (!iter->iter.bi_size)
-		return false;
-
-	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
-	vec->paddr = bvec_phys(&bv);
-	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
-	bv.bv_len = min(bv.bv_len, max_size);
-	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
-
-	/*
-	 * If we are entirely done with this bi_io_vec entry, check if the next
-	 * one could be merged into it. This typically happens when moving to
-	 * the next bio, but some callers also don't pack bvecs tight.
-	 */
-	while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
-		struct bio_vec next;
-
-		if (!iter->iter.bi_size) {
-			if (!iter->bio->bi_next)
-				break;
-			iter->bio = iter->bio->bi_next;
-			iter->iter = iter->bio->bi_iter;
-		}
-
-		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
-		if (bv.bv_len + next.bv_len > max_size ||
-		    !biovec_phys_mergeable(req->q, &bv, &next))
-			break;
-
-		bv.bv_len += next.bv_len;
-		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
-	}
-
-	vec->len = bv.bv_len;
-	return true;
-}
-
-static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
-		struct scatterlist *sglist)
-{
-	if (!*sg)
-		return sglist;
-
-	/*
-	 * If the driver previously mapped a shorter list, we could see a
-	 * termination bit prematurely unless it fully inits the sg table
-	 * on each mapping. We KNOW that there must be more entries here
-	 * or the driver would be buggy, so force clear the termination bit
-	 * to avoid doing a full sg_init_table() in drivers for each command.
-	 */
-	sg_unmark_end(*sg);
-	return sg_next(*sg);
-}
-
-/*
- * Map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries.
- */
-int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
-		struct scatterlist **last_sg)
-{
-	struct req_iterator iter = {
-		.bio	= rq->bio,
-	};
-	struct phys_vec vec;
-	int nsegs = 0;
-
-	/* the internal flush request may not have bio attached */
-	if (iter.bio)
-		iter.iter = iter.bio->bi_iter;
-
-	while (blk_map_iter_next(rq, &iter, &vec)) {
-		*last_sg = blk_next_sg(last_sg, sglist);
-		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
-				offset_in_page(vec.paddr));
-		nsegs++;
-	}
-
-	if (*last_sg)
-		sg_mark_end(*last_sg);
-
-	/*
-	 * Something must have been wrong if the figured number of
-	 * segment is bigger than number of req's physical segments
-	 */
-	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
-
-	return nsegs;
-}
-EXPORT_SYMBOL(__blk_rq_map_sg);
-
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 		sector_t offset)
 {
block/blk-mq-dma.c (new file, 113 lines)
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "blk.h"
+
+struct phys_vec {
+	phys_addr_t paddr;
+	u32 len;
+};
+
+static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
+		struct phys_vec *vec)
+{
+	unsigned int max_size;
+	struct bio_vec bv;
+
+	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		if (!iter->bio)
+			return false;
+		vec->paddr = bvec_phys(&req->special_vec);
+		vec->len = req->special_vec.bv_len;
+		iter->bio = NULL;
+		return true;
+	}
+
+	if (!iter->iter.bi_size)
+		return false;
+
+	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+	vec->paddr = bvec_phys(&bv);
+	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
+	bv.bv_len = min(bv.bv_len, max_size);
+	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
+
+	/*
+	 * If we are entirely done with this bi_io_vec entry, check if the next
+	 * one could be merged into it. This typically happens when moving to
+	 * the next bio, but some callers also don't pack bvecs tight.
+	 */
+	while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
+		struct bio_vec next;
+
+		if (!iter->iter.bi_size) {
+			if (!iter->bio->bi_next)
+				break;
+			iter->bio = iter->bio->bi_next;
+			iter->iter = iter->bio->bi_iter;
+		}
+
+		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+		if (bv.bv_len + next.bv_len > max_size ||
+		    !biovec_phys_mergeable(req->q, &bv, &next))
+			break;
+
+		bv.bv_len += next.bv_len;
+		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
+	}
+
+	vec->len = bv.bv_len;
+	return true;
+}
+
+static inline struct scatterlist *
+blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
+{
+	if (!*sg)
+		return sglist;
+
+	/*
+	 * If the driver previously mapped a shorter list, we could see a
+	 * termination bit prematurely unless it fully inits the sg table
+	 * on each mapping. We KNOW that there must be more entries here
+	 * or the driver would be buggy, so force clear the termination bit
+	 * to avoid doing a full sg_init_table() in drivers for each command.
+	 */
+	sg_unmark_end(*sg);
+	return sg_next(*sg);
+}
+
+/*
+ * Map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries.
+ */
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+		struct scatterlist **last_sg)
+{
+	struct req_iterator iter = {
+		.bio	= rq->bio,
+	};
+	struct phys_vec vec;
+	int nsegs = 0;
+
+	/* the internal flush request may not have bio attached */
+	if (iter.bio)
+		iter.iter = iter.bio->bi_iter;
+
+	while (blk_map_iter_next(rq, &iter, &vec)) {
+		*last_sg = blk_next_sg(last_sg, sglist);
+		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
+				offset_in_page(vec.paddr));
+		nsegs++;
+	}
+
+	if (*last_sg)
+		sg_mark_end(*last_sg);
+
+	/*
+	 * Something must have been wrong if the figured number of
+	 * segment is bigger than number of req's physical segments
+	 */
+	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
+
+	return nsegs;
+}
+EXPORT_SYMBOL(__blk_rq_map_sg);
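For context, here is a minimal, hypothetical sketch of how a block driver typically consumes __blk_rq_map_sg(): preallocate a scatterlist large enough for rq->nr_phys_segments, initialize it once, then map each request before handing the entries to the DMA API. The names my_cmd, my_map_request() and MY_MAX_SEGS are made up for illustration and are not part of this commit; only __blk_rq_map_sg(), blk_rq_nr_phys_segments() and the scatterlist helpers come from the kernel.

/*
 * Hypothetical driver-side usage sketch (not part of this commit).
 * MY_MAX_SEGS, struct my_cmd and my_map_request() are made-up names.
 */
#include <linux/blk-mq.h>
#include <linux/scatterlist.h>

#define MY_MAX_SEGS	128

struct my_cmd {
	struct scatterlist sgl[MY_MAX_SEGS];
};

static int my_map_request(struct request *rq, struct my_cmd *cmd)
{
	struct scatterlist *last = NULL;
	int nsegs;

	/* The table must be able to hold rq->nr_phys_segments entries. */
	if (blk_rq_nr_phys_segments(rq) > MY_MAX_SEGS)
		return -EINVAL;

	sg_init_table(cmd->sgl, MY_MAX_SEGS);
	nsegs = __blk_rq_map_sg(rq, cmd->sgl, &last);

	/* cmd->sgl[0..nsegs-1] is now ready to be passed to dma_map_sg(). */
	return nsegs;
}

As the comment above __blk_rq_map_sg() states, the caller guarantees the scatterlist can hold rq->nr_phys_segments entries; the sketch checks that explicitly before mapping.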
block/blk.h
@@ -404,6 +404,27 @@ static inline struct bio *__bio_split_to_limits(struct bio *bio,
 	}
 }
 
+/**
+ * get_max_segment_size() - maximum number of bytes to add as a single segment
+ * @lim: Request queue limits.
+ * @paddr: address of the range to add
+ * @len: maximum length available to add at @paddr
+ *
+ * Returns the maximum number of bytes of the range starting at @paddr that can
+ * be added to a single segment.
+ */
+static inline unsigned get_max_segment_size(const struct queue_limits *lim,
+		phys_addr_t paddr, unsigned int len)
+{
+	/*
+	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
+	 * after having calculated the minimum.
+	 */
+	return min_t(unsigned long, len,
+		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
+		    (unsigned long)lim->max_segment_size - 1) + 1);
+}
+
 int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
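To make the arithmetic in get_max_segment_size() concrete, here is a small standalone illustration in plain C (not kernel code) with made-up limits: a 64 KiB segment boundary mask and a 64 KiB maximum segment size. For a physical address 4 KiB below a boundary, only 4 KiB can be added to the current segment.

/*
 * Standalone illustration of the get_max_segment_size() arithmetic.
 * The limits and address below are invented example values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long seg_boundary_mask = 0xffffUL;	/* segments may not cross 64 KiB */
	unsigned long max_segment_size  = 0x10000UL;	/* 64 KiB per segment */
	unsigned long paddr = 0x1000f000UL;		/* 4 KiB below the next boundary */
	unsigned long len   = 0xffffffffUL;		/* UINT_MAX: no caller-imposed limit */

	/* Same formula as the kernel helper: min(len, min(to_boundary, max - 1) + 1) */
	unsigned long to_boundary = seg_boundary_mask - (seg_boundary_mask & paddr);
	unsigned long cap = (to_boundary < max_segment_size - 1 ?
			     to_boundary : max_segment_size - 1) + 1;
	unsigned long max = len < cap ? len : cap;

	/* Prints 4096: only 4 KiB fit before the next 64 KiB boundary. */
	printf("%lu\n", max);
	return 0;
}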