Revert "net: group sk_backlog and sk_receive_queue"
This reverts commit 4effb335b5.

This was a benefit for the UDP flood case, which was later greatly
improved with commits 6471658dc6 ("udp: use skb_attempt_defer_free()")
and b650bf0977 ("udp: remove busylock and add per NUMA queues").

Apparently the blamed commit added a regression for RAW sockets,
possibly because they do not use the dual RX queue strategy that UDP
has. sock_queue_rcv_skb_reason() and RAW recvmsg() compete for
sk_receive_queue and sk_rmem_alloc changes, and having them in the
same cache line reduces performance.

Fixes: 4effb335b5 ("net: group sk_backlog and sk_receive_queue")
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202509281326.f605b4eb-lkp@intel.com
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Cc: David Ahern <dsahern@kernel.org>
Cc: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250929182112.824154-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent 9dd4e022bf
commit 7d452516b6
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -395,6 +395,7 @@ struct sock {
 
 	atomic_t		sk_drops;
 	__s32			sk_peek_off;
+	struct sk_buff_head	sk_error_queue;
 	struct sk_buff_head	sk_receive_queue;
 	/*
 	 * The backlog queue is special, it is always used with
@@ -412,7 +413,6 @@ struct sock {
 	} sk_backlog;
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 
-	struct sk_buff_head	sk_error_queue;
 	__cacheline_group_end(sock_write_rx);
 
 	__cacheline_group_begin(sock_read_rx);
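To make the layout change concrete, below is a minimal sketch of the
affected slice of struct sock after this revert. It is illustrative
only, assuming the mainline field order: unrelated members and the
backlog comment block are omitted, and see include/net/sock.h for the
real definition.

/*
 * Sketch (not the verbatim kernel definition). Moving sk_error_queue
 * back between sk_peek_off and sk_receive_queue shifts
 * sk_receive_queue and sk_backlog within the sock_write_rx group, so
 * sk_receive_queue and sk_backlog.rmem_alloc (aliased as
 * sk_rmem_alloc) are less likely to share the cache line that
 * sock_queue_rcv_skb_reason() and RAW recvmsg() both dirty.
 */
struct sock {
	/* ... */
	atomic_t		sk_drops;
	__s32			sk_peek_off;
	struct sk_buff_head	sk_error_queue;		/* moved back here by the revert */
	struct sk_buff_head	sk_receive_queue;	/* written by enqueue and recvmsg */
	struct {
		atomic_t	rmem_alloc;		/* i.e. sk_rmem_alloc */
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
	/* ... */
};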