diff options
| author | Eric Dumazet <edumazet@google.com> | 2026-04-04 01:15:39 +0300 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2026-04-08 05:02:13 +0300 |
| commit | 30e02ec3b4b6bd429a4824f125eb843a291dcccf (patch) | |
| tree | ea5b92a2cbea24605d76d075877a33c7f248219a | |
| parent | e65d8b6f3092398efd7c74e722cb7a516d9a0d6d (diff) | |
| download | linux-30e02ec3b4b6bd429a4824f125eb843a291dcccf.tar.xz | |
net: qdisc_pkt_len_segs_init() cleanup
Reduce indentation level by returning early if the transport header
was not set.
Add an unlikely() clause as this is not the common case.
No functional change.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Joe Damato <joe@dama.to>
Link: https://patch.msgid.link/20260403221540.3297753-2-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
| -rw-r--r-- | net/core/dev.c | 62 |
1 files changed, 31 insertions, 31 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 4519f0e59beb..3eb2f50f5165 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4104,6 +4104,7 @@ EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
 static void qdisc_pkt_len_segs_init(struct sk_buff *skb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
+	unsigned int hdr_len;
 	u16 gso_segs;
 
 	qdisc_skb_cb(skb)->pkt_len = skb->len;
@@ -4117,44 +4118,43 @@ static void qdisc_pkt_len_segs_init(struct sk_buff *skb)
 	/* To get more precise estimation of bytes sent on wire,
 	 * we add to pkt_len the headers size of all segments
 	 */
-	if (skb_transport_header_was_set(skb)) {
-		unsigned int hdr_len;
+	if (unlikely(!skb_transport_header_was_set(skb)))
+		return;
 
-		/* mac layer + network layer */
-		if (!skb->encapsulation)
-			hdr_len = skb_transport_offset(skb);
-		else
-			hdr_len = skb_inner_transport_offset(skb);
+	/* mac layer + network layer */
+	if (!skb->encapsulation)
+		hdr_len = skb_transport_offset(skb);
+	else
+		hdr_len = skb_inner_transport_offset(skb);
 
-		/* + transport layer */
-		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
-			const struct tcphdr *th;
-			struct tcphdr _tcphdr;
+	/* + transport layer */
+	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
+		const struct tcphdr *th;
+		struct tcphdr _tcphdr;
 
-			th = skb_header_pointer(skb, hdr_len,
-						sizeof(_tcphdr), &_tcphdr);
-			if (likely(th))
-				hdr_len += __tcp_hdrlen(th);
-		} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
-			struct udphdr _udphdr;
+		th = skb_header_pointer(skb, hdr_len,
+					sizeof(_tcphdr), &_tcphdr);
+		if (likely(th))
+			hdr_len += __tcp_hdrlen(th);
+	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+		struct udphdr _udphdr;
 
-			if (skb_header_pointer(skb, hdr_len,
-					       sizeof(_udphdr), &_udphdr))
-				hdr_len += sizeof(struct udphdr);
-		}
+		if (skb_header_pointer(skb, hdr_len,
+				       sizeof(_udphdr), &_udphdr))
+			hdr_len += sizeof(struct udphdr);
+	}
 
-		if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
-			int payload = skb->len - hdr_len;
+	if (unlikely(shinfo->gso_type & SKB_GSO_DODGY)) {
+		int payload = skb->len - hdr_len;
 
-			/* Malicious packet. */
-			if (payload <= 0)
-				return;
-			gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
-			shinfo->gso_segs = gso_segs;
-			qdisc_skb_cb(skb)->pkt_segs = gso_segs;
-		}
-		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
+		/* Malicious packet. */
+		if (payload <= 0)
+			return;
+		gso_segs = DIV_ROUND_UP(payload, shinfo->gso_size);
+		shinfo->gso_segs = gso_segs;
+		qdisc_skb_cb(skb)->pkt_segs = gso_segs;
 	}
+	qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
 }
 
 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
