mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-22 15:03:53 +02:00
net: page_pool: don't try to stash the napi id
Page pool tried to cache the NAPI ID in page pool info to avoid
having a dependency on the life cycle of the NAPI instance.
Since commit under Fixes the NAPI ID is not populated until
napi_enable() and there's a good chance that page pool is
created before NAPI gets enabled.
Protect the NAPI pointer with the existing page pool mutex;
the reading path already holds it. napi_id itself we need
to READ_ONCE(); it's protected by netdev_lock(), which we are
not holding in page pool.
Before this patch napi IDs were missing for mlx5:
# ./cli.py --spec netlink/specs/netdev.yaml --dump page-pool-get
[{'id': 144, 'ifindex': 2, 'inflight': 3072, 'inflight-mem': 12582912},
{'id': 143, 'ifindex': 2, 'inflight': 5568, 'inflight-mem': 22806528},
{'id': 142, 'ifindex': 2, 'inflight': 5120, 'inflight-mem': 20971520},
{'id': 141, 'ifindex': 2, 'inflight': 4992, 'inflight-mem': 20447232},
...
After:
[{'id': 144, 'ifindex': 2, 'inflight': 3072, 'inflight-mem': 12582912,
'napi-id': 565},
{'id': 143, 'ifindex': 2, 'inflight': 4224, 'inflight-mem': 17301504,
'napi-id': 525},
{'id': 142, 'ifindex': 2, 'inflight': 4288, 'inflight-mem': 17563648,
'napi-id': 524},
...
Fixes: 86e25f40aa
("net: napi: Add napi_config")
Reviewed-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://patch.msgid.link/20250123231620.1086401-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
6db9d3a536
commit
67e4bb2ced
|
@ -237,7 +237,6 @@ struct page_pool {
|
|||
struct {
|
||||
struct hlist_node list;
|
||||
u64 detach_time;
|
||||
u32 napi_id;
|
||||
u32 id;
|
||||
} user;
|
||||
};
|
||||
|
|
|
@ -6708,7 +6708,7 @@ void napi_resume_irqs(unsigned int napi_id)
|
|||
static void __napi_hash_add_with_id(struct napi_struct *napi,
|
||||
unsigned int napi_id)
|
||||
{
|
||||
napi->napi_id = napi_id;
|
||||
WRITE_ONCE(napi->napi_id, napi_id);
|
||||
hlist_add_head_rcu(&napi->napi_hash_node,
|
||||
&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
|
||||
}
|
||||
|
|
|
@ -1147,7 +1147,9 @@ void page_pool_disable_direct_recycling(struct page_pool *pool)
|
|||
WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
|
||||
WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
|
||||
|
||||
mutex_lock(&page_pools_lock);
|
||||
WRITE_ONCE(pool->p.napi, NULL);
|
||||
mutex_unlock(&page_pools_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(page_pool_disable_direct_recycling);
|
||||
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
|
||||
#include "netmem_priv.h"
|
||||
|
||||
extern struct mutex page_pools_lock;
|
||||
|
||||
s32 page_pool_inflight(const struct page_pool *pool, bool strict);
|
||||
|
||||
int page_pool_list(struct page_pool *pool);
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
#include <linux/mutex.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/xarray.h>
|
||||
#include <net/busy_poll.h>
|
||||
#include <net/net_debug.h>
|
||||
#include <net/netdev_rx_queue.h>
|
||||
#include <net/page_pool/helpers.h>
|
||||
|
@ -14,10 +15,11 @@
|
|||
#include "netdev-genl-gen.h"
|
||||
|
||||
static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1);
|
||||
/* Protects: page_pools, netdevice->page_pools, pool->slow.netdev, pool->user.
|
||||
/* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev,
|
||||
* pool->user.
|
||||
* Ordering: inside rtnl_lock
|
||||
*/
|
||||
static DEFINE_MUTEX(page_pools_lock);
|
||||
DEFINE_MUTEX(page_pools_lock);
|
||||
|
||||
/* Page pools are only reachable from user space (via netlink) if they are
|
||||
* linked to a netdev at creation time. Following page pool "visibility"
|
||||
|
@ -216,6 +218,7 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
|
|||
{
|
||||
struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
|
||||
size_t inflight, refsz;
|
||||
unsigned int napi_id;
|
||||
void *hdr;
|
||||
|
||||
hdr = genlmsg_iput(rsp, info);
|
||||
|
@ -229,8 +232,10 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
|
|||
nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
|
||||
pool->slow.netdev->ifindex))
|
||||
goto err_cancel;
|
||||
if (pool->user.napi_id &&
|
||||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, pool->user.napi_id))
|
||||
|
||||
napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0;
|
||||
if (napi_id >= MIN_NAPI_ID &&
|
||||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id))
|
||||
goto err_cancel;
|
||||
|
||||
inflight = page_pool_inflight(pool, false);
|
||||
|
@ -319,8 +324,6 @@ int page_pool_list(struct page_pool *pool)
|
|||
if (pool->slow.netdev) {
|
||||
hlist_add_head(&pool->user.list,
|
||||
&pool->slow.netdev->page_pools);
|
||||
pool->user.napi_id = pool->p.napi ? pool->p.napi->napi_id : 0;
|
||||
|
||||
netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user