mirror of git://git.yoctoproject.org/linux-yocto.git
blk-mq: fix potential deadlock while nr_requests grown
[ Upstream commit b86433721f ]

Allocating and freeing sched_tags while the queue is frozen can
deadlock [1]. This is a long-standing problem; hence, allocate memory
before freezing the queue and free it after the queue is unfrozen.

[1] https://lore.kernel.org/all/0659ea8d-a463-47c8-9180-43c719e106eb@linux.ibm.com/

Fixes: e3a2b3f931 ("blk-mq: allow changing of queue depth through sysfs")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent b75c7a8020
commit 8d26acf847
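The fix follows a common ordering pattern: any allocation that may be needed while the queue is frozen happens before the freeze, only a cheap pointer swap happens inside the frozen section, and the displaced memory is freed after unfreezing. Below is a minimal userspace sketch of that pattern, not the kernel code itself; the names (fake_queue, resize_tags, a pthread mutex standing in for queue freezing) are illustrative, not block-layer APIs.

/*
 * Sketch of the allocate-before-freeze / free-after-unfreeze pattern.
 * Build with: cc -o resize resize.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_queue {
	pthread_mutex_t freeze_lock;	/* stands in for queue freezing */
	unsigned int *tags;		/* stands in for sched_tags */
	unsigned int nr;
};

static int resize_tags(struct fake_queue *q, unsigned int nr)
{
	unsigned int *new_tags, *old_tags;

	/* Step 1: allocate before "freezing"; may sleep, may fail. */
	new_tags = calloc(nr, sizeof(*new_tags));
	if (!new_tags)
		return -1;

	/* Step 2: only a copy and a pointer swap inside the critical section. */
	pthread_mutex_lock(&q->freeze_lock);
	memcpy(new_tags, q->tags,
	       (nr < q->nr ? nr : q->nr) * sizeof(*new_tags));
	old_tags = q->tags;
	q->tags = new_tags;
	q->nr = nr;
	pthread_mutex_unlock(&q->freeze_lock);

	/* Step 3: free the old memory only after "unfreezing". */
	free(old_tags);
	return 0;
}

int main(void)
{
	struct fake_queue q = {
		.freeze_lock = PTHREAD_MUTEX_INITIALIZER,
		.tags = calloc(4, sizeof(unsigned int)),
		.nr = 4,
	};

	if (resize_tags(&q, 16) == 0)
		printf("grew tags to %u without allocating under the lock\n",
		       q.nr);
	free(q.tags);
	return 0;
}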
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4925,11 +4925,13 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_free_tag_set);
 
-int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
+						struct elevator_tags *et,
+						unsigned int nr)
 {
 	struct blk_mq_tag_set *set = q->tag_set;
+	struct elevator_tags *old_et = NULL;
 	struct blk_mq_hw_ctx *hctx;
-	int ret = 0;
 	unsigned long i;
 
 	blk_mq_quiesce_queue(q);
@@ -4964,24 +4966,18 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 		}
 	} else {
 		/* Non-shared sched tags, and tags grow */
-		queue_for_each_hw_ctx(q, hctx, i) {
-			if (!hctx->sched_tags)
-				continue;
-			ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
-						      nr);
-			if (ret)
-				goto out;
-		}
+		queue_for_each_hw_ctx(q, hctx, i)
+			hctx->sched_tags = et->tags[i];
+		old_et = q->elevator->et;
+		q->elevator->et = et;
 	}
 
 	q->nr_requests = nr;
 	if (q->elevator && q->elevator->type->ops.depth_updated)
 		q->elevator->type->ops.depth_updated(q);
 
-out:
 	blk_mq_unquiesce_queue(q);
-	return ret;
+	return old_et;
 }
 
 /*
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -6,6 +6,7 @@
 #include "blk-stat.h"
 
 struct blk_mq_tag_set;
+struct elevator_tags;
 
 struct blk_mq_ctxs {
 	struct kobject kobj;
@@ -45,7 +46,9 @@ void blk_mq_submit_bio(struct bio *bio);
 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
 		unsigned int flags);
 void blk_mq_exit_queue(struct request_queue *q);
-int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
+		struct elevator_tags *tags,
+		unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
 			     bool);
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -64,11 +64,12 @@ static ssize_t queue_requests_show(struct gendisk *disk, char *page)
 static ssize_t
 queue_requests_store(struct gendisk *disk, const char *page, size_t count)
 {
-	unsigned long nr;
-	int ret, err;
-	unsigned int memflags;
 	struct request_queue *q = disk->queue;
 	struct blk_mq_tag_set *set = q->tag_set;
+	struct elevator_tags *et = NULL;
+	unsigned int memflags;
+	unsigned long nr;
+	int ret;
 
 	if (!queue_is_mq(q))
 		return -EINVAL;
@@ -102,16 +103,28 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
 		goto unlock;
 	}
 
+	if (!blk_mq_is_shared_tags(set->flags) && q->elevator &&
+	    nr > q->elevator->et->nr_requests) {
+		/*
+		 * Tags will grow, allocate memory before freezing queue to
+		 * prevent deadlock.
+		 */
+		et = blk_mq_alloc_sched_tags(set, q->nr_hw_queues, nr);
+		if (!et) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+	}
+
 	memflags = blk_mq_freeze_queue(q);
 	mutex_lock(&q->elevator_lock);
-	err = blk_mq_update_nr_requests(disk->queue, nr);
-	if (err)
-		ret = err;
-
+	et = blk_mq_update_nr_requests(q, et, nr);
 	mutex_unlock(&q->elevator_lock);
 	blk_mq_unfreeze_queue(q, memflags);
 
+	if (et)
+		blk_mq_free_sched_tags(et, set);
+
 unlock:
 	up_write(&set->update_nr_hwq_lock);
 	return ret;
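Note the caller-side pattern in blk-sysfs.c: et does double duty, carrying the pre-allocated tags into blk_mq_update_nr_requests() and receiving the displaced elevator_tags back from it, so the single blk_mq_free_sched_tags() call after blk_mq_unfreeze_queue() releases whichever tags are no longer in use, safely outside the frozen section.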