From 5fc88f93edf2f797f1aa63334cc6c86f9c15d585 Mon Sep 17 00:00:00 2001
From: Paolo Abeni
Date: Wed, 28 Jul 2021 18:23:59 +0200
Subject: sk_buff: introduce 'slow_gro' flags

The new flag tracks whether any state field is set, so that GRO
requires 'unusual'/slow prepare steps. Set the flag when a ct entry
is attached to the skb, and never clear it.

The new bit uses an existing hole in the sk_buff struct.

RFC -> v1:
 - use a single state bit, never clear it
 - avoid moving the _nfct field

Signed-off-by: Paolo Abeni
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f19190820e63..3ff18300d210 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -689,6 +689,7 @@ typedef unsigned char *sk_buff_data_t;
  *		CHECKSUM_UNNECESSARY (max 3)
  *	@dst_pending_confirm: need to confirm neighbour
  *	@decrypted: Decrypted SKB
+ *	@slow_gro: state present at GRO time, slower prepare step required
  *	@napi_id: id of the NAPI struct this skb came from
  *	@sender_cpu: (aka @napi_id) source CPU in XPS
  *	@secmark: security marking
@@ -870,6 +871,7 @@ struct sk_buff {
 #ifdef CONFIG_TLS_DEVICE
 	__u8			decrypted:1;
 #endif
+	__u8			slow_gro:1;
 
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
@@ -4216,6 +4218,7 @@ static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
 static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	skb->slow_gro |= !!nfct;
 	skb->_nfct = nfct;
 #endif
 }
@@ -4375,6 +4378,7 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	nf_conntrack_put(skb_nfct(dst));
 #endif
+	dst->slow_gro = src->slow_gro;
 	__nf_copy(dst, src, true);
 }
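
For illustration, here is a minimal sketch of the cleanup pattern this
bit enables: a GRO completion path can test the single sticky flag and
skip all per-field teardown on the fast path. A later patch in the
series applies this pattern to napi_reuse_skb(); the helper name
demo_gro_cleanup() below is hypothetical, not part of the patch.

	/* Hypothetical helper, for illustration only. */
	static void demo_gro_cleanup(struct sk_buff *skb)
	{
		/* Fast path: no ct (or other tracked state) was ever
		 * attached, so there is nothing to tear down.
		 */
		if (likely(!skb->slow_gro))
			return;

		nf_reset_ct(skb);	/* release the ct entry, if any */
		skb->slow_gro = 0;	/* skb is back on the fast path */
	}
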
From 8a886b142bd03d36612747e9aefdf0282c8b02dd Mon Sep 17 00:00:00 2001
From: Paolo Abeni
Date: Wed, 28 Jul 2021 18:24:00 +0200
Subject: sk_buff: track dst status in slow_gro

Similar to the previous patch, but covering the dst field: the
slow_gro flag is additionally set when a dst is attached to the skb.

RFC -> v1:
 - use the existing flag instead of adding a new one

Signed-off-by: Paolo Abeni
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 2 ++
 include/net/dst.h      | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3ff18300d210..b1e5bbfcc926 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -992,6 +992,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
  */
 static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
 {
+	skb->slow_gro |= !!dst;
 	skb->_skb_refdst = (unsigned long)dst;
 }
 
@@ -1008,6 +1009,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
 static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
 {
 	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+	skb->slow_gro |= !!dst;
 	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
 }
 
diff --git a/include/net/dst.h b/include/net/dst.h
index 75b1e734e9c2..a057319aabef 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -277,6 +277,7 @@ static inline void skb_dst_drop(struct sk_buff *skb)
 
 static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
 {
+	nskb->slow_gro |= !!refdst;
 	nskb->_skb_refdst = refdst;
 	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
 		dst_clone(skb_dst(nskb));
@@ -316,6 +317,7 @@ static inline bool skb_dst_force(struct sk_buff *skb)
 			dst = NULL;
 
 		skb->_skb_refdst = (unsigned long)dst;
+		skb->slow_gro |= !!dst;
 	}
 
 	return skb->_skb_refdst != 0UL;
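
A note on the design choice, with an illustrative sketch: the bit is
OR-ed rather than assigned because state can be attached in any order
and the flag must survive all of it; a plain assignment in
skb_dst_set_noref() could clear a bit previously set by skb_set_nfct().
The helper demo_mark_state() and its call sequence are assumptions for
illustration, not code from this series.

	/* Hypothetical caller showing the sticky-bit semantics. */
	static void demo_mark_state(struct sk_buff *skb,
				    struct dst_entry *dst,
				    unsigned long nfct)
	{
		skb_set_nfct(skb, nfct);	/* slow_gro |= !!nfct */
		skb_dst_set(skb, dst);		/* slow_gro |= !!dst */

		/* Dropping the dst does not clear the flag: clearing
		 * is left to the GRO cleanup paths, keeping all the
		 * setters branch-free.
		 */
		skb_dst_drop(skb);
	}
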
From 5e10da5385d20c4bae587bc2921e5fdd9655d5fc Mon Sep 17 00:00:00 2001
From: Paolo Abeni
Date: Wed, 28 Jul 2021 18:24:03 +0200
Subject: skbuff: allow 'slow_gro' for skb carrying sock reference

This change leverages the infrastructure introduced by the previous
patches to allow soft devices to pass socket-owned skbs to the GRO
engine without impacting the fast path.

It's up to the GRO caller to ensure the slow_gro bit is valid before
invoking the GRO engine. The new helper skb_prepare_for_gro() is
introduced for that goal.

With slow_gro set, skbs are aggregated only when they carry the same
sk. Additionally, the skb truesize on GRO recycle and free is updated
so that the GRO processing does not change the sk wmem accounting.

RFC -> v1:
 - fixed bad truesize on dev_gro_receive NAPI_FREE
 - use the existing state bit

Signed-off-by: Paolo Abeni
Signed-off-by: David S. Miller
---
 include/net/sock.h |  9 +++++++++
 net/core/dev.c     |  2 ++
 net/core/skbuff.c  | 17 +++++++++++++----
 3 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index f23cb259b0e2..ff1be7e7e90b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2249,6 +2249,15 @@ static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struc
 	return false;
 }
 
+static inline void skb_prepare_for_gro(struct sk_buff *skb)
+{
+	if (skb->destructor != sock_wfree) {
+		skb_orphan(skb);
+		return;
+	}
+	skb->slow_gro = 1;
+}
+
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 		    unsigned long expires);
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 19565f7497ee..dcc87fcd64ba 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6034,6 +6034,7 @@ static void gro_list_prepare(const struct list_head *head,
 		struct tc_skb_ext *p_ext;
 #endif
 
+		diffs |= p->sk != skb->sk;
 		diffs |= skb_metadata_dst_cmp(p, skb);
 		diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
 
@@ -6311,6 +6312,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 	skb_shinfo(skb)->gso_type = 0;
 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 	if (unlikely(skb->slow_gro)) {
+		skb_orphan(skb);
 		skb_ext_reset(skb);
 		nf_reset_ct(skb);
 		skb->slow_gro = 0;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04e286149cc..fcbd977186b0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -958,6 +958,7 @@ void napi_skb_free_stolen_head(struct sk_buff *skb)
 		nf_reset_ct(skb);
 		skb_dst_drop(skb);
 		skb_ext_put(skb);
+		skb_orphan(skb);
 		skb->slow_gro = 0;
 	}
 	napi_skb_cache_put(skb);
@@ -3892,6 +3893,9 @@ int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
 	NAPI_GRO_CB(p)->last = skb;
 	NAPI_GRO_CB(p)->count++;
 	p->data_len += skb->len;
+
+	/* sk ownership - if any - completely transferred to the aggregated packet */
+	skb->destructor = NULL;
 	p->truesize += skb->truesize;
 	p->len += skb->len;
 
@@ -4259,6 +4263,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	unsigned int headlen = skb_headlen(skb);
 	unsigned int len = skb_gro_len(skb);
 	unsigned int delta_truesize;
+	unsigned int new_truesize;
 	struct sk_buff *lp;
 
 	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
@@ -4290,10 +4295,10 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 		skb_frag_size_sub(frag, offset);
 
 		/* all fragments truesize : remove (head size + sk_buff) */
-		delta_truesize = skb->truesize -
-				 SKB_TRUESIZE(skb_end_offset(skb));
+		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
+		delta_truesize = skb->truesize - new_truesize;
 
-		skb->truesize -= skb->data_len;
+		skb->truesize = new_truesize;
 		skb->len -= skb->data_len;
 		skb->data_len = 0;
 
@@ -4322,12 +4327,16 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
 		/* We dont need to clear skbinfo->nr_frags here */
 
-		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+		new_truesize = SKB_TRUESIZE(sizeof(struct sk_buff));
+		delta_truesize = skb->truesize - new_truesize;
+		skb->truesize = new_truesize;
 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
 		goto done;
 	}
 
 merge:
+	/* sk ownership - if any - completely transferred to the aggregated packet */
+	skb->destructor = NULL;
 	delta_truesize = skb->truesize;
 	if (offset > headlen) {
 		unsigned int eat = offset - headlen;
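
For context, a hedged sketch of how a soft device might feed
socket-owned skbs to the GRO engine using the new helper; the function
demo_soft_rx() and its NAPI wiring are assumptions for illustration,
not code from this series.

	/* Hypothetical receive path of a soft device. */
	static void demo_soft_rx(struct napi_struct *napi, struct sk_buff *skb)
	{
		/* Keep sock_wfree-owned skbs attached to their socket,
		 * flagging them via slow_gro; orphan any other owned
		 * skb so the GRO fast path stays untouched.
		 */
		skb_prepare_for_gro(skb);
		napi_gro_receive(napi, skb);
	}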