net: skbuff: generalize the skb->decrypted bit

The ->decrypted bit can be reused for other crypto protocols.
Remove the direct dependency on TLS, add helpers to clean up
the ifdefs leaking out everywhere.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Jakub Kicinski 2024-04-03 13:21:39 -07:00 committed by David S. Miller
parent 0d875bb4a7
commit 9f06f87fef
8 changed files with 24 additions and 24 deletions

View File

@@ -992,7 +992,7 @@ struct sk_buff {
 #ifdef CONFIG_NETFILTER_SKIP_EGRESS
 	__u8			nf_skip_egress:1;
 #endif
-#ifdef CONFIG_TLS_DEVICE
+#ifdef CONFIG_SKB_DECRYPTED
 	__u8			decrypted:1;
 #endif
 	__u8			slow_gro:1;
@@ -1615,17 +1615,26 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
 				    const struct sk_buff *skb2)
 {
-#ifdef CONFIG_TLS_DEVICE
+#ifdef CONFIG_SKB_DECRYPTED
 	return skb2->decrypted - skb1->decrypted;
 #else
 	return 0;
 #endif
 }
 
+static inline bool skb_is_decrypted(const struct sk_buff *skb)
+{
+#ifdef CONFIG_SKB_DECRYPTED
+	return skb->decrypted;
+#else
+	return false;
+#endif
+}
+
 static inline void skb_copy_decrypted(struct sk_buff *to,
 				      const struct sk_buff *from)
 {
-#ifdef CONFIG_TLS_DEVICE
+#ifdef CONFIG_SKB_DECRYPTED
 	to->decrypted = from->decrypted;
 #endif
 }

View File

@@ -2835,12 +2835,10 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
 	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
 		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
-#ifdef CONFIG_TLS_DEVICE
-	} else if (unlikely(skb->decrypted)) {
+	} else if (unlikely(skb_is_decrypted(skb))) {
 		pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
 		kfree_skb(skb);
 		skb = NULL;
-#endif
 	}
 #endif

View File

@@ -60,6 +60,9 @@ config NET_XGRESS
 config NET_REDIRECT
 	bool
 
+config SKB_DECRYPTED
+	bool
+
 config SKB_EXTENSIONS
 	bool

View File

@@ -2526,13 +2526,12 @@ EXPORT_SYMBOL(skb_set_owner_w);
 static bool can_skb_orphan_partial(const struct sk_buff *skb)
 {
-#ifdef CONFIG_TLS_DEVICE
 	/* Drivers depend on in-order delivery for crypto offload,
 	 * partial orphan breaks out-of-order-OK logic.
 	 */
-	if (skb->decrypted)
+	if (skb_is_decrypted(skb))
 		return false;
-#endif
+
 	return (skb->destructor == sock_wfree ||
 		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
 }

View File

@@ -4805,10 +4805,8 @@ static bool tcp_try_coalesce(struct sock *sk,
 	if (!mptcp_skb_can_collapse(to, from))
 		return false;
 
-#ifdef CONFIG_TLS_DEVICE
-	if (from->decrypted != to->decrypted)
+	if (skb_cmp_decrypted(from, to))
 		return false;
-#endif
 
 	if (!skb_try_coalesce(to, from, fragstolen, &delta))
 		return false;
@@ -5377,9 +5375,7 @@ restart:
 			break;
 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-#ifdef CONFIG_TLS_DEVICE
-		nskb->decrypted = skb->decrypted;
-#endif
+		skb_copy_decrypted(nskb, skb);
 		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
 		if (list)
 			__skb_queue_before(list, skb, nskb);
@@ -5409,10 +5405,8 @@ restart:
 			    !mptcp_skb_can_collapse(nskb, skb) ||
 			    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
 				goto end;
-#ifdef CONFIG_TLS_DEVICE
-			if (skb->decrypted != nskb->decrypted)
+			if (skb_cmp_decrypted(skb, nskb))
 				goto end;
-#endif
 		}
 	}
} }

View File

@@ -2044,10 +2044,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
 	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
 	    ((TCP_SKB_CB(tail)->tcp_flags ^
 	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
-#ifdef CONFIG_TLS_DEVICE
-	    tail->decrypted != skb->decrypted ||
-#endif
 	    !mptcp_skb_can_collapse(tail, skb) ||
+	    skb_cmp_decrypted(tail, skb) ||
 	    thtail->doff != th->doff ||
 	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
 		goto no_coalesce;

View File

@@ -265,9 +265,7 @@ found:
 	flush |= (len - 1) >= mss;
 	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
-#ifdef CONFIG_TLS_DEVICE
-	flush |= p->decrypted ^ skb->decrypted;
-#endif
+	flush |= skb_cmp_decrypted(p, skb);
 
 	if (flush || skb_gro_receive(p, skb)) {
 		mss = 1;

View File

@@ -20,6 +20,7 @@ config TLS
 config TLS_DEVICE
 	bool "Transport Layer Security HW offload"
 	depends on TLS
+	select SKB_DECRYPTED
 	select SOCK_VALIDATE_XMIT
 	select SOCK_RX_QUEUE_MAPPING
 	default n