Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-10-23 07:23:12 +02:00)
io_uring/io-wq: inherit cpuset of cgroup in io worker
commit 84eacf177f upstream.

The io worker threads are userland threads that just never exit to the
userland. By that, they are also assigned to a cgroup (the group of the
creating task).

When creating a new io worker, this worker should inherit the cpuset of
the cgroup.

Fixes: da64d6db3b ("io_uring: One wqe per wq")
Signed-off-by: Felix Moessbauer <felix.moessbauer@siemens.com>
Link: https://lore.kernel.org/r/20240910171157.166423-3-felix.moessbauer@siemens.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent fce514611f
commit 657ca82526
io_uring/io-wq.c
@@ -1157,6 +1157,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 {
 	int ret, node, i;
 	struct io_wq *wq;
+	cpumask_var_t allowed_mask;
 
 	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
 		return ERR_PTR(-EINVAL);
@@ -1176,6 +1177,9 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 	wq->do_work = data->do_work;
 
 	ret = -ENOMEM;
+	if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+		goto err;
+	cpuset_cpus_allowed(current, allowed_mask);
 	for_each_node(node) {
 		struct io_wqe *wqe;
 		int alloc_node = node;
@@ -1188,7 +1192,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
 		wq->wqes[node] = wqe;
 		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
 			goto err;
-		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
+		if (!cpumask_and(wqe->cpu_mask, cpumask_of_node(node), allowed_mask))
+			cpumask_copy(wqe->cpu_mask, allowed_mask);
 		wqe->node = alloc_node;
 		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
 		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
@@ -1222,6 +1227,7 @@ err:
 		free_cpumask_var(wq->wqes[node]->cpu_mask);
 		kfree(wq->wqes[node]);
 	}
+	free_cpumask_var(allowed_mask);
 err_wq:
 	kfree(wq);
 	return ERR_PTR(ret);
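Taken together, the hunks above amount to the following per-node affinity setup. This is only a condensed sketch for readability, assuming the surrounding io_wq_create() context of this tree (wq, node, wqe allocation, and the err unwind label come from that function); it is not the verbatim function body:

/* Condensed sketch of the setup path in io_wq_create() after this patch. */
cpumask_var_t allowed_mask;

if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
	goto err;
/* Start from the cpuset of the task that is creating the io-wq. */
cpuset_cpus_allowed(current, allowed_mask);

for_each_node(node) {
	struct io_wqe *wqe = wq->wqes[node];	/* allocated earlier in the real code */

	/*
	 * Constrain the per-node worker mask to the creator's cpuset.
	 * If the intersection with this node's CPUs is empty, fall
	 * back to the cpuset alone rather than the full node mask.
	 */
	if (!cpumask_and(wqe->cpu_mask, cpumask_of_node(node), allowed_mask))
		cpumask_copy(wqe->cpu_mask, allowed_mask);
}
/* The err: unwind path frees allowed_mask (last hunk above). */

With this in place, io worker threads spawned for a ring created inside a cpuset-restricted cgroup start from a CPU mask that respects that cpuset instead of the full node mask.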