mirror of git://git.yoctoproject.org/linux-yocto.git
tcp: bring back NUMA dispersion in inet_ehash_locks_alloc()
[ Upstream commit f8ece40786 ]
We have platforms with 6 NUMA nodes and 480 cpus.
inet_ehash_locks_alloc() currently allocates a single 64KB page
to hold all ehash spinlocks. This adds more pressure on a single node.
Change inet_ehash_locks_alloc() to use vmalloc() to spread
the spinlocks on all online nodes, driven by NUMA policies.
At boot time, NUMA policy is interleave=all, meaning that
tcp_hashinfo.ehash_locks gets hash dispersion on all nodes.
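A quick stand-alone sketch of the sizing math (hypothetical macro names; it assumes sizeof(spinlock_t) == 4 with lock debugging off, 64-byte cache lines, 4 KB pages, and this machine's 480 possible cpus and 6 online nodes; the ehash_mask clamp is omitted because the default established-hash table is larger than this):

  #include <stdio.h>

  /* Stand-ins for the kernel parameters on the machine described above. */
  #define LOCKSZ           4u     /* sizeof(spinlock_t), no lock debugging */
  #define L1_CACHE_BYTES   64u
  #define PAGE_SIZE        4096u
  #define NR_POSSIBLE_CPUS 480u
  #define NR_ONLINE_NODES  6u

  static unsigned int roundup_pow_of_two(unsigned int n)
  {
          unsigned int p = 1;

          while (p < n)
                  p <<= 1;
          return p;
  }

  int main(void)
  {
          /* Two cache lines' worth of locks (32 here) per possible cpu. */
          unsigned int nblocks = (2u * L1_CACHE_BYTES / LOCKSZ) * NR_POSSIBLE_CPUS;

          /* At least one page of locks per NUMA node. */
          if (nblocks < NR_ONLINE_NODES * PAGE_SIZE / LOCKSZ)
                  nblocks = NR_ONLINE_NODES * PAGE_SIZE / LOCKSZ;

          nblocks = roundup_pow_of_two(nblocks);

          /* Prints: 16384 locks -> 65536 bytes -> 16 pages, i.e. the 64KB
           * table and the pages=16 seen in the vmallocinfo output below.
           */
          printf("%u locks -> %u bytes -> %u pages\n",
                 nblocks, nblocks * LOCKSZ, nblocks * LOCKSZ / PAGE_SIZE);
          return 0;
  }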
Tested:
lack5:~# grep inet_ehash_locks_alloc /proc/vmallocinfo
0x00000000d9aec4d1-0x00000000a828b652 69632 inet_ehash_locks_alloc+0x90/0x100 pages=16 vmalloc N0=2 N1=3 N2=3 N3=3 N4=3 N5=2
lack5:~# echo 8192 >/proc/sys/net/ipv4/tcp_child_ehash_entries
lack5:~# numactl --interleave=all unshare -n bash -c "grep inet_ehash_locks_alloc /proc/vmallocinfo"
0x000000004e99d30c-0x00000000763f3279 36864 inet_ehash_locks_alloc+0x90/0x100 pages=8 vmalloc N0=1 N1=2 N2=2 N3=1 N4=1 N5=1
0x00000000d9aec4d1-0x00000000a828b652 69632 inet_ehash_locks_alloc+0x90/0x100 pages=16 vmalloc N0=2 N1=3 N2=3 N3=3 N4=3 N5=2
lack5:~# numactl --interleave=0,5 unshare -n bash -c "grep inet_ehash_locks_alloc /proc/vmallocinfo"
0x00000000fd73a33e-0x0000000004b9a177 36864 inet_ehash_locks_alloc+0x90/0x100 pages=8 vmalloc N0=4 N5=4
0x00000000d9aec4d1-0x00000000a828b652 69632 inet_ehash_locks_alloc+0x90/0x100 pages=16 vmalloc N0=2 N1=3 N2=3 N3=3 N4=3 N5=2
lack5:~# echo 1024 >/proc/sys/net/ipv4/tcp_child_ehash_entries
lack5:~# numactl --interleave=all unshare -n bash -c "grep inet_ehash_locks_alloc /proc/vmallocinfo"
0x00000000db07d7a2-0x00000000ad697d29 8192 inet_ehash_locks_alloc+0x90/0x100 pages=1 vmalloc N2=1
0x00000000d9aec4d1-0x00000000a828b652 69632 inet_ehash_locks_alloc+0x90/0x100 pages=16 vmalloc N0=2 N1=3 N2=3 N3=3 N4=3 N5=2
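For reading the output above: the size column counts the mapped pages plus vmalloc's one trailing guard page, and the N<node>=<n> fields give per-node page counts that sum to pages=. In the unshare -n runs, the second line is the boot-time tcp_hashinfo table and the first is the new netns's child table, whose lock count is clamped to ehash_mask + 1. A small check of that arithmetic (assuming 4 KB pages and 4-byte spinlocks):

  #include <assert.h>
  #include <stdio.h>

  int main(void)
  {
          const unsigned long page = 4096;

          /* vmallocinfo's size column = mapped pages + one guard page. */
          assert(69632 == (16 + 1) * page); /* boot table: 16384 locks * 4B */
          assert(36864 ==  (8 + 1) * page); /* 8192-entry child: 8192 locks */
          assert( 8192 ==  (1 + 1) * page); /* 1024-entry child: 1024 locks */

          /* Per-node counts add up to pages=: N0=2 N1=3 N2=3 N3=3 N4=3 N5=2. */
          printf("%d pages\n", 2 + 3 + 3 + 3 + 3 + 2); /* 16 pages */
          return 0;
  }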
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Jason Xing <kerneljasonxing@gmail.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Link: https://patch.msgid.link/20250305130550.1865988-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit f7dd2a7290
parent 5cb296e942
@@ -879,22 +879,37 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 {
 	unsigned int locksz = sizeof(spinlock_t);
 	unsigned int i, nblocks = 1;
+	spinlock_t *ptr = NULL;
 
-	if (locksz != 0) {
-		/* allocate 2 cache lines or at least one spinlock per cpu */
-		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
-		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
+	if (locksz == 0)
+		goto set_mask;
 
-		/* no more locks than number of hash buckets */
-		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
+	/* Allocate 2 cache lines or at least one spinlock per cpu. */
+	nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus();
 
-		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
-		if (!hashinfo->ehash_locks)
-			return -ENOMEM;
+	/* At least one page per NUMA node. */
+	nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz);
 
-		for (i = 0; i < nblocks; i++)
-			spin_lock_init(&hashinfo->ehash_locks[i]);
+	nblocks = roundup_pow_of_two(nblocks);
+
+	/* No more locks than number of hash buckets. */
+	nblocks = min(nblocks, hashinfo->ehash_mask + 1);
+
+	if (num_online_nodes() > 1) {
+		/* Use vmalloc() to allow NUMA policy to spread pages
+		 * on all available nodes if desired.
+		 */
+		ptr = vmalloc_array(nblocks, locksz);
+	}
+	if (!ptr) {
+		ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
+		if (!ptr)
+			return -ENOMEM;
 	}
+	for (i = 0; i < nblocks; i++)
+		spin_lock_init(&ptr[i]);
+	hashinfo->ehash_locks = ptr;
+set_mask:
 	hashinfo->ehash_locks_mask = nblocks - 1;
 	return 0;
 }
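One note on the roundup_pow_of_two() step: it exists so that the final hashinfo->ehash_locks_mask = nblocks - 1 assignment yields an all-ones mask. A sketch of the lookup side, modeled on the mainline inet_ehash_lockp() helper in include/net/inet_hashtables.h (not part of this patch):

  /* Because nblocks is a power of two, ehash_locks_mask is all-ones and
   * the per-bucket lock is selected with a single AND, no division needed.
   */
  static inline spinlock_t *ehash_lockp(struct inet_hashinfo *hashinfo,
                                        unsigned int hash)
  {
          return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
  }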