Diffstat (limited to 'net/dsa')
-rw-r--r--   net/dsa/Kconfig               5
-rw-r--r--   net/dsa/dsa2.c              127
-rw-r--r--   net/dsa/switch.c              2
-rw-r--r--   net/dsa/tag_dsa.c            30
-rw-r--r--   net/dsa/tag_ocelot.c          3
-rw-r--r--   net/dsa/tag_ocelot_8021q.c   42
-rw-r--r--   net/dsa/tag_sja1105.c        43
7 files changed, 199 insertions(+), 53 deletions(-)
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 548285539752..d8ee15f1c7a9 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -101,8 +101,6 @@ config NET_DSA_TAG_RTL4_A
config NET_DSA_TAG_OCELOT
tristate "Tag driver for Ocelot family of switches, using NPI port"
- depends on MSCC_OCELOT_SWITCH_LIB || \
- (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
select PACKING
help
Say Y or M if you want to enable NPI tagging for the Ocelot switches
@@ -114,8 +112,6 @@ config NET_DSA_TAG_OCELOT
config NET_DSA_TAG_OCELOT_8021Q
tristate "Tag driver for Ocelot family of switches, using VLAN"
- depends on MSCC_OCELOT_SWITCH_LIB || \
- (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
help
Say Y or M if you want to enable support for tagging frames with a
custom VLAN-based header. Frames that require timestamping, such as
@@ -138,7 +134,6 @@ config NET_DSA_TAG_LAN9303
config NET_DSA_TAG_SJA1105
tristate "Tag driver for NXP SJA1105 switches"
- depends on NET_DSA_SJA1105 || !NET_DSA_SJA1105
select PACKING
help
Say Y or M if you want to enable support for tagging frames with the
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index eef13cd20f19..e9911b18bdbf 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -170,7 +170,7 @@ void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
/* Check if the bridge is still in use, otherwise it is time
* to clean it up so we can reuse this bridge_num later.
*/
- if (!dsa_bridge_num_find(bridge_dev))
+ if (dsa_bridge_num_find(bridge_dev) < 0)
clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
@@ -429,6 +429,7 @@ static int dsa_port_setup(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
bool dsa_port_link_registered = false;
+ struct dsa_switch *ds = dp->ds;
bool dsa_port_enabled = false;
int err = 0;
@@ -438,6 +439,12 @@ static int dsa_port_setup(struct dsa_port *dp)
INIT_LIST_HEAD(&dp->fdbs);
INIT_LIST_HEAD(&dp->mdbs);
+ if (ds->ops->port_setup) {
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
dsa_port_disable(dp);
@@ -480,8 +487,11 @@ static int dsa_port_setup(struct dsa_port *dp)
dsa_port_disable(dp);
if (err && dsa_port_link_registered)
dsa_port_link_unregister_of(dp);
- if (err)
+ if (err) {
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
return err;
+ }
dp->setup = true;
@@ -533,11 +543,15 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a, *tmp;
if (!dp->setup)
return;
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
+
devlink_port_type_clear(dlp);
switch (dp->type) {
@@ -581,6 +595,36 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
dp->devlink_port_setup = false;
}
+/* Destroy the current devlink port, and create a new one which has the UNUSED
+ * flavour. At this point, any call to ds->ops->port_setup has been already
+ * balanced out by a call to ds->ops->port_teardown, so we know that any
+ * devlink port regions the driver had are now unregistered. We then call its
+ * ds->ops->port_setup again, in order for the driver to re-create them on the
+ * new devlink port.
+ */
+static int dsa_port_reinit_as_unused(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ int err;
+
+ dsa_port_devlink_teardown(dp);
+ dp->type = DSA_PORT_TYPE_UNUSED;
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ return err;
+
+ if (ds->ops->port_setup) {
+ /* On error, leave the devlink port registered,
+ * dsa_switch_teardown will clean it up later.
+ */
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int dsa_devlink_info_get(struct devlink *dl,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
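
The ds->ops->port_setup() / ds->ops->port_teardown() hooks added above, together
with the comment on dsa_port_reinit_as_unused(), assume a driver-side pair that
creates and destroys per-port resources such as devlink port regions. A minimal
sketch of what such a pair could look like in a hypothetical driver (the "foo"
names, region ops, sizes and port count are illustrative and not part of this
patch):

#include <net/dsa.h>
#include <net/devlink.h>

#define FOO_NUM_PORTS		8	/* illustrative */
#define FOO_PORT_REGION_SIZE	4096	/* illustrative */

static const struct devlink_port_region_ops foo_port_region_ops; /* snapshot ops elided */

struct foo_priv {
	struct devlink_region *port_region[FOO_NUM_PORTS];
};

static int foo_port_setup(struct dsa_switch *ds, int port)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct foo_priv *priv = ds->priv;
	struct devlink_region *region;

	/* The devlink_port itself is registered by the DSA core before this
	 * hook runs; the driver only attaches its own regions to it.
	 */
	region = devlink_port_region_create(&dp->devlink_port,
					    &foo_port_region_ops, 1,
					    FOO_PORT_REGION_SIZE);
	if (IS_ERR(region))
		return PTR_ERR(region);

	priv->port_region[port] = region;

	return 0;
}

static void foo_port_teardown(struct dsa_switch *ds, int port)
{
	struct foo_priv *priv = ds->priv;

	devlink_region_destroy(priv->port_region[port]);
	priv->port_region[port] = NULL;
}

static const struct dsa_switch_ops foo_switch_ops = {
	/* ... */
	.port_setup	= foo_port_setup,
	.port_teardown	= foo_port_teardown,
};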
@@ -767,7 +811,9 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
if (!dsa_is_cpu_port(ds, port))
continue;
+ rtnl_lock();
err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
+ rtnl_unlock();
if (err) {
dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
tag_ops->name, ERR_PTR(err));
@@ -836,7 +882,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
devlink_params_publish(ds->devlink);
if (!ds->slave_mii_bus && ds->ops->phy_read) {
- ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus) {
err = -ENOMEM;
goto teardown;
@@ -846,13 +892,16 @@ static int dsa_switch_setup(struct dsa_switch *ds)
err = mdiobus_register(ds->slave_mii_bus);
if (err < 0)
- goto teardown;
+ goto free_slave_mii_bus;
}
ds->setup = true;
return 0;
+free_slave_mii_bus:
+ if (ds->slave_mii_bus && ds->ops->phy_read)
+ mdiobus_free(ds->slave_mii_bus);
teardown:
if (ds->ops->teardown)
ds->ops->teardown(ds);
@@ -877,8 +926,11 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
if (!ds->setup)
return;
- if (ds->slave_mii_bus && ds->ops->phy_read)
+ if (ds->slave_mii_bus && ds->ops->phy_read) {
mdiobus_unregister(ds->slave_mii_bus);
+ mdiobus_free(ds->slave_mii_bus);
+ ds->slave_mii_bus = NULL;
+ }
dsa_switch_unregister_notifier(ds);
@@ -938,12 +990,9 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
list_for_each_entry(dp, &dst->ports, list) {
err = dsa_port_setup(dp);
if (err) {
- dsa_port_devlink_teardown(dp);
- dp->type = DSA_PORT_TYPE_UNUSED;
- err = dsa_port_devlink_setup(dp);
+ err = dsa_port_reinit_as_unused(dp);
if (err)
goto teardown;
- continue;
}
}
@@ -1048,6 +1097,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
teardown_master:
dsa_tree_teardown_master(dst);
teardown_switches:
+ dsa_tree_teardown_ports(dst);
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
@@ -1324,12 +1374,15 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
- if (err)
+ if (err) {
+ of_node_put(port);
goto out_put_node;
+ }
if (reg >= ds->num_ports) {
dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
port, reg, ds->num_ports);
+ of_node_put(port);
err = -EINVAL;
goto out_put_node;
}
@@ -1337,8 +1390,10 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
dp = dsa_to_port(ds, reg);
err = dsa_port_parse_of(dp, port);
- if (err)
+ if (err) {
+ of_node_put(port);
goto out_put_node;
+ }
}
out_put_node:
@@ -1562,3 +1617,53 @@ void dsa_unregister_switch(struct dsa_switch *ds)
mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
+
+/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
+ * blocking that operation from completion, due to the dev_hold taken inside
+ * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
+ * the DSA master, so that the system can reboot successfully.
+ */
+void dsa_switch_shutdown(struct dsa_switch *ds)
+{
+ struct net_device *master, *slave_dev;
+ LIST_HEAD(unregister_list);
+ struct dsa_port *dp;
+
+ mutex_lock(&dsa2_mutex);
+ rtnl_lock();
+
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (dp->ds != ds)
+ continue;
+
+ if (!dsa_port_is_user(dp))
+ continue;
+
+ master = dp->cpu_dp->master;
+ slave_dev = dp->slave;
+
+ netdev_upper_dev_unlink(master, slave_dev);
+ /* Just unlinking ourselves as uppers of the master is not
+ * sufficient. When the master net device unregisters, that will
+ * also call dev_close, which we will catch as NETDEV_GOING_DOWN
+ * and trigger a dev_close on our own devices (dsa_slave_close).
+ * In turn, that will call dev_mc_unsync on the master's net
+ * device. If the master is also a DSA switch port, this will
+ * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
+ * its own master. Lockdep will complain about the fact that
+ * all cascaded masters have the same dsa_master_addr_list_lock_key,
+ * which it normally would not do if the cascaded masters would
+ * be in a proper upper/lower relationship, which we've just
+ * destroyed.
+ * To suppress the lockdep warnings, let's actually unregister
+ * the DSA slave interfaces too, to avoid the nonsensical
+ * multicast address list synchronization on shutdown.
+ */
+ unregister_netdevice_queue(slave_dev, &unregister_list);
+ }
+ unregister_netdevice_many(&unregister_list);
+
+ rtnl_unlock();
+ mutex_unlock(&dsa2_mutex);
+}
+EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
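
dsa_switch_shutdown() is exported so that each switch driver can call it from
the .shutdown handler of the bus device it sits on (platform, SPI, MDIO, ...),
which unlinks and unregisters the user ports early enough for the DSA master to
unregister cleanly. A rough sketch for a hypothetical platform driver follows;
the "foo" names are assumptions, and real drivers adapt this to their own
probe/remove structure:

#include <linux/platform_device.h>
#include <net/dsa.h>

struct foo_priv {
	struct dsa_switch *ds;
	/* ... */
};

static void foo_switch_shutdown(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	/* Nothing to do if probe failed or remove already ran */
	if (!priv)
		return;

	dsa_switch_shutdown(priv->ds);

	platform_set_drvdata(pdev, NULL);
}

static struct platform_driver foo_switch_driver = {
	.probe		= foo_switch_probe,
	.remove		= foo_switch_remove,
	.shutdown	= foo_switch_shutdown,
	.driver = {
		.name	= "foo-switch",
	},
};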
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 1c797ec8e2c2..6466d0539af9 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -168,7 +168,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
if (extack._msg)
dev_err(ds->dev, "port %d: %s\n", info->port,
extack._msg);
- if (err && err != EOPNOTSUPP)
+ if (err && err != -EOPNOTSUPP)
return err;
}
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 77d0ce89ab77..b3da4b2ea11c 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -45,6 +45,7 @@
* 6 6 2 2 4 2 N
*/
+#include <linux/dsa/mv88e6xxx.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -129,12 +130,9 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
u8 tag_dev, tag_port;
enum dsa_cmd cmd;
u8 *dsa_header;
- u16 pvid = 0;
- int err;
if (skb->offload_fwd_mark) {
struct dsa_switch_tree *dst = dp->ds->dst;
- struct net_device *br = dp->bridge_dev;
cmd = DSA_CMD_FORWARD;
@@ -144,19 +142,6 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
*/
tag_dev = dst->last_switch + 1 + dp->bridge_num;
tag_port = 0;
-
- /* If we are offloading forwarding for a VLAN-unaware bridge,
- * inject packets to hardware using the bridge's pvid, since
- * that's where the packets ingressed from.
- */
- if (!br_vlan_enabled(br)) {
- /* Safe because __dev_queue_xmit() runs under
- * rcu_read_lock_bh()
- */
- err = br_vlan_get_pvid_rcu(br, &pvid);
- if (err)
- return NULL;
- }
} else {
cmd = DSA_CMD_FROM_CPU;
tag_dev = dp->ds->index;
@@ -180,16 +165,21 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
dsa_header[2] &= ~0x10;
}
} else {
+ struct net_device *br = dp->bridge_dev;
+ u16 vid;
+
+ vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
+
skb_push(skb, DSA_HLEN + extra);
dsa_alloc_etype_header(skb, DSA_HLEN + extra);
- /* Construct untagged DSA tag. */
+ /* Construct DSA header from untagged frame. */
dsa_header = dsa_etype_header_pos_tx(skb) + extra;
dsa_header[0] = (cmd << 6) | tag_dev;
dsa_header[1] = tag_port << 3;
- dsa_header[2] = pvid >> 8;
- dsa_header[3] = pvid & 0xff;
+ dsa_header[2] = vid >> 8;
+ dsa_header[3] = vid & 0xff;
}
return skb;
@@ -210,7 +200,7 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
cmd = dsa_header[0] >> 6;
switch (cmd) {
case DSA_CMD_FORWARD:
- trunk = !!(dsa_header[1] & 7);
+ trunk = !!(dsa_header[1] & 4);
break;
case DSA_CMD_TO_CPU:
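
In the transmit path earlier in this file, dsa_xmit_ll() now stamps injected
frames with a fixed VID chosen by whether the port is under a bridge, instead
of reading the bridge's pvid under RCU on every transmit (the code removed
above, which could also fail and drop the frame). The constants come from the
newly included <linux/dsa/mv88e6xxx.h>, which is not shown in this diff;
presumably it reserves one VID for standalone ports and one for VLAN-unaware
bridged ports, roughly along these lines:

/* Assumed contents of include/linux/dsa/mv88e6xxx.h; the exact values are an
 * assumption, not quoted from this patch.
 */
#define MV88E6XXX_VID_STANDALONE	0
#define MV88E6XXX_VID_BRIDGED		4095

The mv88e6xxx driver side is then expected to keep these VIDs valid on its
ports so that classification works for both standalone and bridged operation,
which keeps the tagger free of per-packet bridge pvid lookups.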
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index d37ab98e7fe1..605b51ca6921 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -1,8 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
*/
#include <linux/dsa/ocelot.h>
-#include <soc/mscc/ocelot.h>
#include "dsa_priv.h"
static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 3038a257ba05..3412051981d7 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
*
* An implementation of the software-defined tag_8021q.c tagger format, which
* also preserves full functionality under a vlan_filtering bridge. It does
@@ -9,10 +9,32 @@
* that on egress
*/
#include <linux/dsa/8021q.h>
-#include <soc/mscc/ocelot.h>
-#include <soc/mscc/ocelot_ptp.h>
+#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
+static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
+ struct sk_buff *skb)
+{
+ struct felix_deferred_xmit_work *xmit_work;
+ struct felix_port *felix_port = dp->priv;
+
+ xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+ if (!xmit_work)
+ return NULL;
+
+ /* Calls felix_port_deferred_xmit in felix.c */
+ kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
+ /* Increase refcount so the kfree_skb in dsa_slave_xmit
+ * won't really free the packet.
+ */
+ xmit_work->dp = dp;
+ xmit_work->skb = skb_get(skb);
+
+ kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);
+
+ return NULL;
+}
+
static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -20,18 +42,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index);
u16 queue_mapping = skb_get_queue_mapping(skb);
u8 pcp = netdev_txq_to_tc(netdev, queue_mapping);
- struct ocelot *ocelot = dp->ds->priv;
- int port = dp->index;
- u32 rew_op = 0;
+ struct ethhdr *hdr = eth_hdr(skb);
- rew_op = ocelot_ptp_rew_op(skb);
- if (rew_op) {
- if (!ocelot_can_inject(ocelot, 0))
- return NULL;
-
- ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
- return NULL;
- }
+ if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest))
+ return ocelot_defer_xmit(dp, skb);
return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q,
((pcp << VLAN_PRIO_SHIFT) | tx_vid));
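
ocelot_defer_xmit() only hands the skb to a kthread worker; the injection
itself is expected to happen in the felix driver callback installed as
felix_port->xmit_work_fn. A sketch of what that consumer might look like,
reusing the helpers the removed inline code used to call directly; everything
beyond the struct fields and helpers visible in this diff is an assumption
about the driver side, not part of this patch:

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/dsa/ocelot.h>
#include <soc/mscc/ocelot.h>

/* Runs in kthread context, where briefly waiting for a free injection group
 * is acceptable, unlike the ndo_start_xmit path that queued this work.
 */
static void felix_port_deferred_xmit(struct kthread_work *work)
{
	struct felix_deferred_xmit_work *xmit_work =
		container_of(work, struct felix_deferred_xmit_work, work);
	struct dsa_port *dp = xmit_work->dp;
	struct sk_buff *skb = xmit_work->skb;
	struct ocelot *ocelot = dp->ds->priv;
	u32 rew_op = ocelot_ptp_rew_op(skb);
	int port = dp->index;
	int retries = 10;

	while (!ocelot_can_inject(ocelot, 0) && --retries)
		cpu_relax();

	if (!retries) {
		/* Drop the reference taken by skb_get() in the tagger */
		kfree_skb(skb);
		goto out;
	}

	ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
	consume_skb(skb);
out:
	kfree(xmit_work);
}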
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index c054f48541c8..2edede9ddac9 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -4,6 +4,7 @@
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
+#include <linux/skbuff.h>
#include <linux/packing.h>
#include "dsa_priv.h"
@@ -53,6 +54,11 @@
#define SJA1110_TX_TRAILER_LEN 4
#define SJA1110_MAX_PADDING_LEN 15
+enum sja1110_meta_tstamp {
+ SJA1110_META_TSTAMP_TX = 0,
+ SJA1110_META_TSTAMP_RX = 1,
+};
+
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
@@ -520,6 +526,43 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_meta);
}
+static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
+ u8 ts_id, enum sja1110_meta_tstamp dir,
+ u64 tstamp)
+{
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct skb_shared_hwtstamps shwt = {0};
+ struct sja1105_port *sp = dp->priv;
+
+ if (!dsa_port_is_sja1105(dp))
+ return;
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&sp->data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &sp->data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&sp->data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;