Merge tag 'md-6.16-20250513' of https://git.kernel.org/pub/scm/linux/kernel/git/mdraid/linux into for-6.16/block

Pull MD changes from Yu Kuai:

- Fix normal IO being starved by sync IO, observed when running mkfs on a
  newly created large raid5 array, along with some cleanup patches for the
  bdev inflight counters (a sketch of the resulting throttle policy follows
  the commit metadata below).

* tag 'md-6.16-20250513' of https://git.kernel.org/pub/scm/linux/kernel/git/mdraid/linux:
  md: clean up accounting for issued sync IO
  md: fix is_mddev_idle()
  md: add a new api sync_io_depth
  md: record dm-raid gendisk in mddev
  block: export API to get the number of bdev inflight IO
  block: clean up blk_mq_in_flight_rw()
  block: WARN if bdev inflight counter is negative
  block: reuse part_in_flight_rw for part_in_flight
  blk-mq: remove blk_mq_in_flight()
Merged by Jens Axboe on 2025-05-13 07:10:52 -06:00
commit cf724e5e41
13 changed files with 197 additions and 148 deletions
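A minimal sketch of the throttling policy this series implements, as it would read inside drivers/md/md.c (the wrapper name resync_should_back_off() and the currspeed parameter are invented for illustration; speed_min(), speed_max(), sync_io_within_limit() and is_mddev_idle() are the helpers changed or added in the md.c diff below):

static bool resync_should_back_off(struct mddev *mddev, int currspeed)
{
	/* Below the guaranteed minimum speed: never throttle resync. */
	if (currspeed <= speed_min(mddev))
		return false;

	/* Above the absolute maximum speed: always back off. */
	if (currspeed > speed_max(mddev))
		return true;

	/*
	 * In between: back off only when the array is busy with normal IO
	 * (is_mddev_idle() now checks completed sectors and inflight IO on
	 * the array's own gendisk) and the inflight sync IO already exceeds
	 * the new sync_io_depth budget.
	 */
	return !sync_io_within_limit(mddev) && !is_mddev_idle(mddev, 0);
}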

block/blk-core.c

@@ -1018,7 +1018,7 @@ again:
stamp = READ_ONCE(part->bd_stamp);
if (unlikely(time_after(now, stamp)) &&
likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
(end || part_in_flight(part)))
(end || bdev_count_inflight(part)))
__part_stat_add(part, io_ticks, now - stamp);
if (bdev_is_partition(part)) {

block/blk-mq.c

@@ -89,7 +89,7 @@ struct mq_inflight {
unsigned int inflight[2];
};
static bool blk_mq_check_inflight(struct request *rq, void *priv)
static bool blk_mq_check_in_driver(struct request *rq, void *priv)
{
struct mq_inflight *mi = priv;
@@ -101,24 +101,14 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv)
return true;
}
unsigned int blk_mq_in_flight(struct request_queue *q,
struct block_device *part)
void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2])
{
struct mq_inflight mi = { .part = part };
blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
return mi.inflight[0] + mi.inflight[1];
}
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
unsigned int inflight[2])
{
struct mq_inflight mi = { .part = part };
blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
inflight[0] = mi.inflight[0];
inflight[1] = mi.inflight[1];
blk_mq_queue_tag_busy_iter(bdev_get_queue(part), blk_mq_check_in_driver,
&mi);
inflight[READ] = mi.inflight[READ];
inflight[WRITE] = mi.inflight[WRITE];
}
#ifdef CONFIG_LOCKDEP

block/blk-mq.h

@@ -246,10 +246,7 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
return hctx->nr_ctx && hctx->tags;
}
unsigned int blk_mq_in_flight(struct request_queue *q,
struct block_device *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
unsigned int inflight[2]);
void blk_mq_in_driver_rw(struct block_device *part, unsigned int inflight[2]);
static inline void blk_mq_put_dispatch_budget(struct request_queue *q,
int budget_token)

block/blk.h

@@ -419,7 +419,6 @@ void blk_apply_bdi_limits(struct backing_dev_info *bdi,
int blk_dev_init(void);
void update_io_ticks(struct block_device *part, unsigned long now, bool end);
unsigned int part_in_flight(struct block_device *part);
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{

block/genhd.c

@@ -125,38 +125,47 @@ static void part_stat_read_all(struct block_device *part,
}
}
unsigned int part_in_flight(struct block_device *part)
{
unsigned int inflight = 0;
int cpu;
for_each_possible_cpu(cpu) {
inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
part_stat_local_read_cpu(part, in_flight[1], cpu);
}
if ((int)inflight < 0)
inflight = 0;
return inflight;
}
static void part_in_flight_rw(struct block_device *part,
unsigned int inflight[2])
static void bdev_count_inflight_rw(struct block_device *part,
unsigned int inflight[2], bool mq_driver)
{
int cpu;
inflight[0] = 0;
inflight[1] = 0;
for_each_possible_cpu(cpu) {
inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
if (mq_driver) {
blk_mq_in_driver_rw(part, inflight);
} else {
for_each_possible_cpu(cpu) {
inflight[READ] += part_stat_local_read_cpu(
part, in_flight[READ], cpu);
inflight[WRITE] += part_stat_local_read_cpu(
part, in_flight[WRITE], cpu);
}
}
if ((int)inflight[0] < 0)
inflight[0] = 0;
if ((int)inflight[1] < 0)
inflight[1] = 0;
if (WARN_ON_ONCE((int)inflight[READ] < 0))
inflight[READ] = 0;
if (WARN_ON_ONCE((int)inflight[WRITE] < 0))
inflight[WRITE] = 0;
}
/**
* bdev_count_inflight - get the number of inflight IOs for a block device.
*
* @part: the block device.
*
* Inflight here means started IO accounting, from bdev_start_io_acct() for
* bio-based block device, and from blk_account_io_start() for rq-based block
* device.
*/
unsigned int bdev_count_inflight(struct block_device *part)
{
unsigned int inflight[2] = {0};
bdev_count_inflight_rw(part, inflight, false);
return inflight[READ] + inflight[WRITE];
}
EXPORT_SYMBOL_GPL(bdev_count_inflight);
/*
* Can be deleted altogether. Later.
*
@@ -1053,7 +1062,7 @@ ssize_t part_stat_show(struct device *dev,
struct disk_stats stat;
unsigned int inflight;
inflight = part_in_flight(bdev);
inflight = bdev_count_inflight(bdev);
if (inflight) {
part_stat_lock();
update_io_ticks(bdev, jiffies, true);
@@ -1090,19 +1099,21 @@ ssize_t part_stat_show(struct device *dev,
(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
}
/*
* Show the number of IOs issued to driver.
* For bio-based device, started from bdev_start_io_acct();
* For rq-based device, started from blk_mq_start_request();
*/
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct block_device *bdev = dev_to_bdev(dev);
struct request_queue *q = bdev_get_queue(bdev);
unsigned int inflight[2];
unsigned int inflight[2] = {0};
if (queue_is_mq(q))
blk_mq_in_flight_rw(q, bdev, inflight);
else
part_in_flight_rw(bdev, inflight);
bdev_count_inflight_rw(bdev, inflight, queue_is_mq(q));
return sysfs_emit(buf, "%8u %8u\n", inflight[0], inflight[1]);
return sysfs_emit(buf, "%8u %8u\n", inflight[READ], inflight[WRITE]);
}
static ssize_t disk_capability_show(struct device *dev,
@@ -1355,7 +1366,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
continue;
inflight = part_in_flight(hd);
inflight = bdev_count_inflight(hd);
if (inflight) {
part_stat_lock();
update_io_ticks(hd, jiffies, true);

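bdev_count_inflight() is now exported so code outside the core block layer can read the accounting-level inflight count (md's is_mddev_idle() below is the first user). A hedged usage sketch; disk_has_inflight_io() is a hypothetical helper, not part of the patches:

#include <linux/blkdev.h>
#include <linux/part_stat.h>

/*
 * Hypothetical caller: report whether a whole disk has IO inflight at the
 * accounting level, i.e. started via bdev_start_io_acct() for bio-based
 * devices or blk_account_io_start() for rq-based devices. Note that the
 * sysfs "inflight" attribute now reports something different: the number
 * of IOs issued to the driver.
 */
static bool disk_has_inflight_io(struct gendisk *disk)
{
	return bdev_count_inflight(disk->part0) > 0;
}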
drivers/md/dm-raid.c

@@ -14,6 +14,7 @@
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"
#include "dm-core.h"
#include <linux/device-mapper.h>
@@ -3308,6 +3309,7 @@ size_check:
/* Disable/enable discard support on raid set. */
configure_discard_support(rs);
rs->md.dm_gendisk = ti->table->md->disk;
mddev_unlock(&rs->md);
return 0;
@@ -3327,6 +3329,7 @@ static void raid_dtr(struct dm_target *ti)
mddev_lock_nointr(&rs->md);
md_stop(&rs->md);
rs->md.dm_gendisk = NULL;
mddev_unlock(&rs->md);
if (work_pending(&rs->md.event_work))

drivers/md/md.c

@@ -111,32 +111,48 @@ static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
* Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
* is 1000 KB/sec, so the extra system load does not show up that much.
* Increase it if you want to have more _guaranteed_ speed. Note that
* the RAID driver will use the maximum available bandwidth if the IO
* subsystem is idle. There is also an 'absolute maximum' reconstruction
* speed limit - in case reconstruction slows down your system despite
* idle IO detection.
* Current RAID-1,4,5,6,10 parallel reconstruction 'guaranteed speed limit'
* is sysctl_speed_limit_min, 1000 KB/sec by default, so the extra system load
* does not show up that much. Increase it if you want to have more guaranteed
* speed. Note that the RAID driver will use the maximum bandwidth
* sysctl_speed_limit_max, 200 MB/sec by default, if the IO subsystem is idle.
*
* you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
* or /sys/block/mdX/md/sync_speed_{min,max}
* Background sync IO speed control:
*
* - below speed min:
* no limit;
* - above speed min and below speed max:
* a) if mddev is idle, then no limit;
* b) if mddev is busy handling normal IO, then limit inflight sync IO
* to sync_io_depth;
* - above speed max:
* sync IO can't be issued;
*
* Following configurations can be changed via /proc/sys/dev/raid/ for system
* or /sys/block/mdX/md/ for one array.
*/
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
static int sysctl_sync_io_depth = 32;
static int speed_min(struct mddev *mddev)
{
return mddev->sync_speed_min ?
mddev->sync_speed_min : sysctl_speed_limit_min;
}
static inline int speed_max(struct mddev *mddev)
static int speed_max(struct mddev *mddev)
{
return mddev->sync_speed_max ?
mddev->sync_speed_max : sysctl_speed_limit_max;
}
static int sync_io_depth(struct mddev *mddev)
{
return mddev->sync_io_depth ?
mddev->sync_io_depth : sysctl_sync_io_depth;
}
static void rdev_uninit_serial(struct md_rdev *rdev)
{
if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
@@ -293,14 +309,21 @@ static const struct ctl_table raid_table[] = {
.procname = "speed_limit_min",
.data = &sysctl_speed_limit_min,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "speed_limit_max",
.data = &sysctl_speed_limit_max,
.maxlen = sizeof(int),
.mode = S_IRUGO|S_IWUSR,
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sync_io_depth",
.data = &sysctl_sync_io_depth,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
};
@@ -5091,7 +5114,7 @@ static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_min(mddev),
mddev->sync_speed_min ? "local": "system");
mddev->sync_speed_min ? "local" : "system");
}
static ssize_t
@@ -5100,7 +5123,7 @@ sync_min_store(struct mddev *mddev, const char *buf, size_t len)
unsigned int min;
int rv;
if (strncmp(buf, "system", 6)==0) {
if (strncmp(buf, "system", 6) == 0) {
min = 0;
} else {
rv = kstrtouint(buf, 10, &min);
@@ -5120,7 +5143,7 @@ static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", speed_max(mddev),
mddev->sync_speed_max ? "local": "system");
mddev->sync_speed_max ? "local" : "system");
}
static ssize_t
@@ -5129,7 +5152,7 @@ sync_max_store(struct mddev *mddev, const char *buf, size_t len)
unsigned int max;
int rv;
if (strncmp(buf, "system", 6)==0) {
if (strncmp(buf, "system", 6) == 0) {
max = 0;
} else {
rv = kstrtouint(buf, 10, &max);
@@ -5145,6 +5168,35 @@ sync_max_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
static ssize_t
sync_io_depth_show(struct mddev *mddev, char *page)
{
return sprintf(page, "%d (%s)\n", sync_io_depth(mddev),
mddev->sync_io_depth ? "local" : "system");
}
static ssize_t
sync_io_depth_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int max;
int rv;
if (strncmp(buf, "system", 6) == 0) {
max = 0;
} else {
rv = kstrtouint(buf, 10, &max);
if (rv < 0)
return rv;
if (max == 0)
return -EINVAL;
}
mddev->sync_io_depth = max;
return len;
}
static struct md_sysfs_entry md_sync_io_depth =
__ATTR_RW(sync_io_depth);
static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
@@ -5671,6 +5723,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_mismatches.attr,
&md_sync_min.attr,
&md_sync_max.attr,
&md_sync_io_depth.attr,
&md_sync_speed.attr,
&md_sync_force_parallel.attr,
&md_sync_completed.attr,
@@ -8572,50 +8625,55 @@ void md_cluster_stop(struct mddev *mddev)
put_cluster_ops(mddev);
}
static int is_mddev_idle(struct mddev *mddev, int init)
static bool is_rdev_holder_idle(struct md_rdev *rdev, bool init)
{
unsigned long last_events = rdev->last_events;
if (!bdev_is_partition(rdev->bdev))
return true;
/*
* If rdev is partition, and user doesn't issue IO to the array, the
* array is still not idle if user issues IO to other partitions.
*/
rdev->last_events = part_stat_read_accum(rdev->bdev->bd_disk->part0,
sectors) -
part_stat_read_accum(rdev->bdev, sectors);
return init || rdev->last_events <= last_events;
}
/*
* mddev is idle if following conditions are matched since last check:
* 1) mddev doesn't have normal IO completed;
* 2) mddev doesn't have inflight normal IO;
* 3) if any member disk is partition, and other partitions don't have IO
* completed;
*
* Noted this checking rely on IO accounting is enabled.
*/
static bool is_mddev_idle(struct mddev *mddev, int init)
{
unsigned long last_events = mddev->normal_io_events;
struct gendisk *disk;
struct md_rdev *rdev;
int idle;
int curr_events;
bool idle = true;
disk = mddev_is_dm(mddev) ? mddev->dm_gendisk : mddev->gendisk;
if (!disk)
return true;
mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors);
if (!init && (mddev->normal_io_events > last_events ||
bdev_count_inflight(disk->part0)))
idle = false;
idle = 1;
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev) {
struct gendisk *disk = rdev->bdev->bd_disk;
if (!init && !blk_queue_io_stat(disk->queue))
continue;
curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
atomic_read(&disk->sync_io);
/* sync IO will cause sync_io to increase before the disk_stats
* as sync_io is counted when a request starts, and
* disk_stats is counted when it completes.
* So resync activity will cause curr_events to be smaller than
* when there was no such activity.
* non-sync IO will cause disk_stat to increase without
* increasing sync_io so curr_events will (eventually)
* be larger than it was before. Once it becomes
* substantially larger, the test below will cause
* the array to appear non-idle, and resync will slow
* down.
* If there is a lot of outstanding resync activity when
* we set last_event to curr_events, then all that activity
* completing might cause the array to appear non-idle
* and resync will be slowed down even though there might
* not have been non-resync activity. This will only
* happen once though. 'last_events' will soon reflect
* the state where there is little or no outstanding
* resync requests, and further resync activity will
* always make curr_events less than last_events.
*
*/
if (init || curr_events - rdev->last_events > 64) {
rdev->last_events = curr_events;
idle = 0;
}
}
rdev_for_each_rcu(rdev, mddev)
if (!is_rdev_holder_idle(rdev, init))
idle = false;
rcu_read_unlock();
return idle;
}
@@ -8927,6 +8985,23 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
}
}
static bool sync_io_within_limit(struct mddev *mddev)
{
int io_sectors;
/*
* For raid456, sync IO is stripe(4k) per IO, for other levels, it's
* RESYNC_PAGES(64k) per IO.
*/
if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6)
io_sectors = 8;
else
io_sectors = 128;
return atomic_read(&mddev->recovery_active) <
io_sectors * sync_io_depth(mddev);
}
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
@@ -9195,7 +9270,8 @@ void md_do_sync(struct md_thread *thread)
msleep(500);
goto repeat;
}
if (!is_mddev_idle(mddev, 0)) {
if (!sync_io_within_limit(mddev) &&
!is_mddev_idle(mddev, 0)) {
/*
* Give other IO more of a chance.
* The faster the devices, the less we wait.

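With the default sync_io_depth of 32, the budget checked by sync_io_within_limit() above works out to 8 * 32 = 256 sectors (128 KiB) of inflight sync IO for raid4/5/6 and 128 * 32 = 4096 sectors (2 MiB) for the other levels. The knob can be set system-wide via /proc/sys/dev/raid/sync_io_depth or per array via /sys/block/mdX/md/sync_io_depth; a small userspace sketch, assuming an array named md0 (writing "system" to the per-array file falls back to the system-wide default):

/* Illustration only: raise sync_io_depth for the md0 array. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/md0/md/sync_io_depth", "w");

	if (!f) {
		perror("sync_io_depth");
		return 1;
	}
	fprintf(f, "64\n");	/* allow up to 64 inflight sync IOs */
	return fclose(f) ? 1 : 0;
}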
drivers/md/md.h

@@ -132,7 +132,7 @@ struct md_rdev {
sector_t sectors; /* Device size (in 512bytes sectors) */
struct mddev *mddev; /* RAID array if running */
int last_events; /* IO event timestamp */
unsigned long last_events; /* IO event timestamp */
/*
* If meta_bdev is non-NULL, it means that a separate device is
@@ -404,7 +404,8 @@ struct mddev {
* are happening, so run/
* takeover/stop are not safe
*/
struct gendisk *gendisk;
struct gendisk *gendisk; /* mdraid gendisk */
struct gendisk *dm_gendisk; /* dm-raid gendisk */
struct kobject kobj;
int hold_active;
@@ -483,6 +484,7 @@ struct mddev {
/* if zero, use the system-wide default */
int sync_speed_min;
int sync_speed_max;
int sync_io_depth;
/* resync even though the same disks are shared among md-devices */
int parallel_resync;
@@ -518,6 +520,7 @@ struct mddev {
* adding a spare
*/
unsigned long normal_io_events; /* IO event timestamp */
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
sector_t recovery_cp;
@@ -714,17 +717,6 @@ static inline int mddev_trylock(struct mddev *mddev)
}
extern void mddev_unlock(struct mddev *mddev);
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
if (blk_queue_io_stat(bdev->bd_disk->queue))
atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
}
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
md_sync_acct(bio->bi_bdev, nr_sectors);
}
struct md_personality
{
struct md_submodule_head head;

drivers/md/raid1.c

@@ -2382,7 +2382,6 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
wbio->bi_end_io = end_sync_write;
atomic_inc(&r1_bio->remaining);
md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
submit_bio_noacct(wbio);
}
@@ -3055,7 +3054,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
read_targets--;
md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
submit_bio_noacct(bio);
@@ -3064,7 +3062,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else {
atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk];
md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST;
submit_bio_noacct(bio);

drivers/md/raid10.c

@@ -2426,7 +2426,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST;
@@ -2448,8 +2447,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_copy_data(tbio, fbio);
d = r10_bio->devs[i].devnum;
atomic_inc(&r10_bio->remaining);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(tbio));
submit_bio_noacct(tbio);
}
@@ -2583,13 +2580,10 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[1].devnum;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
submit_bio_noacct(wbio);
}
if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(wbio2));
submit_bio_noacct(wbio2);
}
}
@@ -3757,7 +3751,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sectors = nr_sectors;
if (bio->bi_end_io == end_sync_read) {
md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0;
submit_bio_noacct(bio);
}
@@ -4880,7 +4873,6 @@ read_more:
r10_bio->sectors = nr_sectors;
/* Now submit the read */
md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
submit_bio_noacct(read_bio);
@@ -4940,7 +4932,6 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
continue;
atomic_inc(&rdev->nr_pending);
md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining);
b->bi_next = NULL;
submit_bio_noacct(b);

drivers/md/raid5.c

@@ -1240,10 +1240,6 @@ again:
}
if (rdev) {
if (s->syncing || s->expanding || s->expanded
|| s->replacing)
md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf));
set_bit(STRIPE_IO_STARTED, &sh->state);
bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
@@ -1300,10 +1296,6 @@ again:
submit_bio_noacct(bi);
}
if (rrdev) {
if (s->syncing || s->expanding || s->expanded
|| s->replacing)
md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf));
set_bit(STRIPE_IO_STARTED, &sh->state);
bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);

include/linux/blkdev.h

@@ -182,7 +182,6 @@ struct gendisk {
struct list_head slave_bdevs;
#endif
struct timer_rand_state *random;
atomic_t sync_io; /* RAID */
struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_ZONED

include/linux/part_stat.h

@@ -79,4 +79,6 @@ static inline void part_stat_set_all(struct block_device *part, int value)
#define part_stat_local_read_cpu(part, field, cpu) \
local_read(&(part_stat_get_cpu(part, field, cpu)))
unsigned int bdev_count_inflight(struct block_device *part);
#endif /* _LINUX_PART_STAT_H */