From 636e8adf7878eab3614250234341bde45537f47a Mon Sep 17 00:00:00 2001
From: Vladimir Oltean
Date: Tue, 14 Mar 2023 20:24:04 +0200
Subject: net: dsa: don't error out when drivers return ETH_DATA_LEN in .port_max_mtu()

Currently, when dsa_slave_change_mtu() is called on a user port where
dev->max_mtu is 1500 (as returned by ds->ops->port_max_mtu()), the code
will stumble upon this check:

	if (new_master_mtu > mtu_limit)
		return -ERANGE;

because new_master_mtu is adjusted for the tagger overhead but mtu_limit
is not.

But it would be good if the logic went through, for example if the DSA
master really depends on an MTU adjustment to accept DSA-tagged frames.

To make the code pass through the check, we need to adjust mtu_limit for
the overhead as well, if the minimum restriction was caused by the DSA
user port's MTU (dev->max_mtu). A DSA user port MTU and a DSA master MTU
are always offset by the protocol overhead.

Currently no drivers return 1500 from .port_max_mtu(), but this is only
temporary and a bug in itself - mv88e6xxx should have done that, but
since commit b9c587fed61c ("dsa: mv88e6xxx: Include tagger overhead when
setting MTU for DSA and CPU ports") it no longer does. This is a
preparation for fixing that.

Fixes: bfcb813203e6 ("net: dsa: configure the MTU for switch ports")
Signed-off-by: Vladimir Oltean
Reviewed-by: Simon Horman
Reviewed-by: Florian Fainelli
Signed-off-by: David S. Miller
---
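As a worked example of the check, with illustrative numbers (the 8-byte
tag overhead and the 9000-byte master max_mtu are assumed here, not
taken from any particular driver): if ds->ops->port_max_mtu() reports
1500 for a user port, the old code computed
mtu_limit = min(9000, 1500) = 1500, while
new_master_mtu = 1500 + 8 = 1508, so the MTU change failed with -ERANGE
even though the master could have absorbed the tagging overhead. With
the limit adjusted by the same overhead,
mtu_limit = min(9000, 1508) = 1508 and the change goes through.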
 net/dsa/slave.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6957971c2db2..cac17183589f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1933,6 +1933,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
 	int new_master_mtu;
 	int old_master_mtu;
 	int mtu_limit;
+	int overhead;
 	int cpu_mtu;
 	int err;
 
@@ -1961,9 +1962,10 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
 			largest_mtu = slave_mtu;
 	}
 
-	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
+	overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
+	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
 	old_master_mtu = master->mtu;
-	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
+	new_master_mtu = largest_mtu + overhead;
 	if (new_master_mtu > mtu_limit)
 		return -ERANGE;
 
@@ -1998,8 +2000,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
 
 out_port_failed:
 	if (new_master_mtu != old_master_mtu)
-		dsa_port_mtu_change(cpu_dp, old_master_mtu -
-				    dsa_tag_protocol_overhead(cpu_dp->tag_ops));
+		dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
 out_cpu_failed:
 	if (new_master_mtu != old_master_mtu)
 		dev_set_mtu(master, old_master_mtu);
--
cgit v1.2.3

From 64fdc5f341db01200e33105265d4b8450122a82e Mon Sep 17 00:00:00 2001
From: Vladimir Oltean
Date: Wed, 29 Mar 2023 18:18:21 +0300
Subject: net: dsa: sync unicast and multicast addresses for VLAN filters too

If certain conditions are met, DSA can install all necessary MAC
addresses on the CPU ports as FDB entries and disable flooding towards
the CPU (we call this RX filtering).

There is one corner case where this does not work:

ip link add br0 type bridge vlan_filtering 1 && ip link set br0 up
ip link set swp0 master br0 && ip link set swp0 up
ip link add link swp0 name swp0.100 type vlan id 100
ip link set swp0.100 up && ip addr add 192.168.100.1/24 dev swp0.100

Traffic through swp0.100 is broken, because the bridge turns on VLAN
filtering on the swp0 port, causing RX packets to be classified to the
FDB database corresponding to the VID from their 802.1Q header. And
although the 8021q module does call dev_uc_add() towards the real
device, that API is VLAN-unaware, so it carries only the MAC address,
not the VID; DSA's current implementation of ndo_set_rx_mode() handles
only VID 0, corresponding to FDB entries installed in an FDB database
which is hit only when the port is VLAN-unaware.

It is interesting to understand why the bridge does not turn on
IFF_PROMISC for its swp0 bridge port, and it may appear at first glance
that this is a regression caused by the logic in commit 2796d0c648c9
("bridge: Automatically manage port promiscuous mode."). After all, a
bridge port needs IFF_PROMISC by its very nature - it needs to receive
and forward frames with a MAC DA different from the bridge ports' MAC
addresses. While that may be true, when the bridge is VLAN-aware *and*
has a single port, there is no real reason to enable promiscuity even
if that is an automatic port with flooding and learning (there is
nowhere for packets to go except to the BR_FDB_LOCAL entries), and this
is how the corner case appears. Adding a second automatic interface to
the bridge would make swp0 promiscuous as well, and would mask the
corner case.

Given that the dev_uc_add() / ndo_set_rx_mode() API is what it is (it
does not pass a VLAN ID), the only way to address this problem is to
install host FDB entries for the Cartesian product of RX filtering MAC
addresses and VLAN RX filters.
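To illustrate that Cartesian product with assumed values: if swp0 has
one synced unicast address 00:01:02:03:04:05, one synced multicast
address 01:00:5e:00:00:6a, and VLAN uppers with VIDs 100 and 200, then
host FDB entries must exist for each address in the VLAN-unaware
database (VID 0) and in the databases for VIDs 100 and 200, i.e.
2 addresses x 3 databases = 6 entries in total.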
Fixes: 7569459a52c9 ("net: dsa: manage flooding on the CPU ports")
Signed-off-by: Vladimir Oltean
Reviewed-by: Simon Horman
Reviewed-by: Florian Fainelli
Link: https://lore.kernel.org/r/20230329151821.745752-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski
---
 net/dsa/slave.c | 121 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 116 insertions(+), 5 deletions(-)

diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index cac17183589f..165bb2cb8431 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -57,6 +57,12 @@ struct dsa_standalone_event_work {
 	u16 vid;
 };
 
+struct dsa_host_vlan_rx_filtering_ctx {
+	struct net_device *dev;
+	const unsigned char *addr;
+	enum dsa_standalone_event event;
+};
+
 static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
 {
 	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
@@ -155,18 +161,37 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev,
 	return 0;
 }
 
+static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid,
+					    void *arg)
+{
+	struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
+
+	return dsa_slave_schedule_standalone_work(ctx->dev, ctx->event,
+						  ctx->addr, vid);
+}
+
 static int dsa_slave_sync_uc(struct net_device *dev,
 			     const unsigned char *addr)
 {
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
+	struct dsa_host_vlan_rx_filtering_ctx ctx = {
+		.dev = dev,
+		.addr = addr,
+		.event = DSA_UC_ADD,
+	};
+	int err;
 
 	dev_uc_add(master, addr);
 
 	if (!dsa_switch_supports_uc_filtering(dp->ds))
 		return 0;
 
-	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
+	err = dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
+	if (err)
+		return err;
+
+	return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
 }
 
 static int dsa_slave_unsync_uc(struct net_device *dev,
@@ -174,13 +199,23 @@ static int dsa_slave_unsync_uc(struct net_device *dev,
 {
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
+	struct dsa_host_vlan_rx_filtering_ctx ctx = {
+		.dev = dev,
+		.addr = addr,
+		.event = DSA_UC_DEL,
+	};
+	int err;
 
 	dev_uc_del(master, addr);
 
 	if (!dsa_switch_supports_uc_filtering(dp->ds))
 		return 0;
 
-	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
+	err = dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
+	if (err)
+		return err;
+
+	return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
 }
 
 static int dsa_slave_sync_mc(struct net_device *dev,
@@ -188,13 +223,23 @@ static int dsa_slave_sync_mc(struct net_device *dev,
 {
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
+	struct dsa_host_vlan_rx_filtering_ctx ctx = {
+		.dev = dev,
+		.addr = addr,
+		.event = DSA_MC_ADD,
+	};
+	int err;
 
 	dev_mc_add(master, addr);
 
 	if (!dsa_switch_supports_mc_filtering(dp->ds))
 		return 0;
 
-	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
+	err = dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
+	if (err)
+		return err;
+
+	return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
 }
 
 static int dsa_slave_unsync_mc(struct net_device *dev,
@@ -202,13 +247,23 @@ static int dsa_slave_unsync_mc(struct net_device *dev,
 {
 	struct net_device *master = dsa_slave_to_master(dev);
 	struct dsa_port *dp = dsa_slave_to_port(dev);
+	struct dsa_host_vlan_rx_filtering_ctx ctx = {
+		.dev = dev,
+		.addr = addr,
+		.event = DSA_MC_DEL,
+	};
+	int err;
 
 	dev_mc_del(master, addr);
 
 	if (!dsa_switch_supports_mc_filtering(dp->ds))
 		return 0;
 
-	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
+	err = dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
+	if (err)
+		return err;
+
+	return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
 }
 
 void dsa_slave_sync_ha(struct net_device *dev)
@@ -1702,6 +1757,8 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
 		.flags = 0,
 	};
 	struct netlink_ext_ack extack = {0};
+	struct dsa_switch *ds = dp->ds;
+	struct netdev_hw_addr *ha;
 	int ret;
 
 	/* User port... */
@@ -1721,6 +1778,30 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
 		return ret;
 	}
 
+	if (!dsa_switch_supports_uc_filtering(ds) &&
+	    !dsa_switch_supports_mc_filtering(ds))
+		return 0;
+
+	netif_addr_lock_bh(dev);
+
+	if (dsa_switch_supports_mc_filtering(ds)) {
+		netdev_for_each_synced_mc_addr(ha, dev) {
+			dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD,
+							   ha->addr, vid);
+		}
+	}
+
+	if (dsa_switch_supports_uc_filtering(ds)) {
+		netdev_for_each_synced_uc_addr(ha, dev) {
+			dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD,
+							   ha->addr, vid);
+		}
+	}
+
+	netif_addr_unlock_bh(dev);
+
+	dsa_flush_workqueue();
+
 	return 0;
 }
 
@@ -1733,13 +1814,43 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
 		/* This API only allows programming tagged, non-PVID VIDs */
 		.flags = 0,
 	};
+	struct dsa_switch *ds = dp->ds;
+	struct netdev_hw_addr *ha;
 	int err;
 
 	err = dsa_port_vlan_del(dp, &vlan);
 	if (err)
 		return err;
 
-	return dsa_port_host_vlan_del(dp, &vlan);
+	err = dsa_port_host_vlan_del(dp, &vlan);
+	if (err)
+		return err;
+
+	if (!dsa_switch_supports_uc_filtering(ds) &&
+	    !dsa_switch_supports_mc_filtering(ds))
+		return 0;
+
+	netif_addr_lock_bh(dev);
+
+	if (dsa_switch_supports_mc_filtering(ds)) {
+		netdev_for_each_synced_mc_addr(ha, dev) {
+			dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL,
+							   ha->addr, vid);
+		}
+	}
+
+	if (dsa_switch_supports_uc_filtering(ds)) {
+		netdev_for_each_synced_uc_addr(ha, dev) {
+			dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL,
+							   ha->addr, vid);
+		}
+	}
+
+	netif_addr_unlock_bh(dev);
+
+	dsa_flush_workqueue();
+
+	return 0;
 }
 
 static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
--
cgit v1.2.3