diff options
| author | Jakub Kicinski <kuba@kernel.org> | 2026-04-12 19:05:59 +0300 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2026-04-12 19:06:00 +0300 |
| commit | 4431c239a3010c12147d166c409ad6867d08f2f1 (patch) | |
| tree | 27146319a8ee3c7a2124fe91fe783381301a3437 | |
| parent | b258cba1e05df758e4e99a0e374da3e044618475 (diff) | |
| parent | 6dd82499fa6c468237801541589eb83023d9fd46 (diff) | |
| download | linux-4431c239a3010c12147d166c409ad6867d08f2f1.tar.xz | |
Merge branch 'ipvlan-multicast-delivery-changes'
Eric Dumazet says:
====================
ipvlan: multicast delivery changes
As we did recently for macvlan, this series adds some relief
when ipvlan is under multicast storms.
====================
Link: https://patch.msgid.link/20260409085238.1122947-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
| -rw-r--r-- | drivers/net/ipvlan/ipvlan_core.c | 42 |
1 files changed, 23 insertions, 19 deletions
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 0b493a8aa338..1be8620ad397 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -744,34 +744,38 @@ out:
 static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
 						 struct ipvl_port *port)
 {
-	struct sk_buff *skb = *pskb;
+	struct sk_buff *nskb, *skb = *pskb;
 	struct ethhdr *eth = eth_hdr(skb);
-	rx_handler_result_t ret = RX_HANDLER_PASS;
 
 	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
 		return RX_HANDLER_PASS;
 
-	if (is_multicast_ether_addr(eth->h_dest)) {
-		if (ipvlan_external_frame(skb, port)) {
-			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+	/* Perform like l3 mode for non-multicast packet */
+	if (likely(!is_multicast_ether_addr(eth->h_dest)))
+		return ipvlan_handle_mode_l3(pskb, port);
 
-			/* External frames are queued for device local
-			 * distribution, but a copy is given to master
-			 * straight away to avoid sending duplicates later
-			 * when work-queue processes this frame. This is
-			 * achieved by returning RX_HANDLER_PASS.
-			 */
-			if (nskb) {
-				ipvlan_skb_crossing_ns(nskb, NULL);
-				ipvlan_multicast_enqueue(port, nskb, false);
-			}
-		}
+	/* External frames are queued for device local
+	 * distribution, but a copy is given to master
+	 * straight away to avoid sending duplicates later
+	 * when work-queue processes this frame.
+	 * This is achieved by returning RX_HANDLER_PASS.
+	 */
+	if (!ipvlan_external_frame(skb, port))
+		return RX_HANDLER_PASS;
+
+	if (skb_queue_len_lockless(&port->backlog) >= IPVLAN_QBACKLOG_LIMIT)
+		nskb = NULL;
+	else
+		nskb = skb_clone(skb, GFP_ATOMIC);
+
+	if (nskb) {
+		ipvlan_skb_crossing_ns(nskb, NULL);
+		ipvlan_multicast_enqueue(port, nskb, false);
 	} else {
-		/* Perform like l3 mode for non-multicast packet */
-		ret = ipvlan_handle_mode_l3(pskb, port);
+		dev_core_stats_rx_dropped_inc(skb->dev);
 	}
-	return ret;
+	return RX_HANDLER_PASS;
 }
 
 rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
