Diffstat (limited to 'drivers/net/dsa')
42 files changed, 3416 insertions, 874 deletions
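One change that repeats across most of the drivers touched below is the removal of dsa_switch_alloc(): each driver now allocates struct dsa_switch itself with devm_kzalloc() and fills in ds->dev and ds->num_ports before registering the switch. The sketch below is only an illustration of that pattern under assumed names (example_register_switch() and its parameters are not part of this series):

/*
 * Hypothetical probe-time helper showing the allocation pattern the
 * conversions below switch to. Names are illustrative only.
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <net/dsa.h>

static int example_register_switch(struct device *dev, void *priv,
				   const struct dsa_switch_ops *ops,
				   unsigned int num_ports)
{
	struct dsa_switch *ds;

	/* Previously: ds = dsa_switch_alloc(dev, num_ports); */
	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	/* Fields that dsa_switch_alloc() used to set for the driver */
	ds->dev = dev;
	ds->num_ports = num_ports;

	ds->priv = priv;
	ds->ops = ops;

	return dsa_register_switch(ds);
}

The per-driver hunks below (b53, dsa_loop, lan9303, lantiq_gswip, ksz, mt7530, mv88e6060, mv88e6xxx) all follow this shape, differing only in where the port count comes from.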
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index f6232ce8481f..c7667645f04a 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -52,6 +52,8 @@ source "drivers/net/dsa/microchip/Kconfig" source "drivers/net/dsa/mv88e6xxx/Kconfig" +source "drivers/net/dsa/ocelot/Kconfig" + source "drivers/net/dsa/sja1105/Kconfig" config NET_DSA_QCA8K @@ -77,6 +79,7 @@ config NET_DSA_REALTEK_SMI config NET_DSA_SMSC_LAN9303 tristate select NET_DSA_TAG_LAN9303 + select REGMAP ---help--- This enables support for the SMSC/Microchip LAN9303 3 port ethernet switch chips. diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index ae70b79628d6..9d384a32b3a2 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -20,4 +20,5 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o obj-y += b53/ obj-y += microchip/ obj-y += mv88e6xxx/ +obj-y += ocelot/ obj-y += sja1105/ diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index cc3536315eff..36828f210030 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -524,7 +524,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) if (!dsa_is_user_port(ds, port)) return 0; - cpu_port = ds->ports[port].cpu_dp->index; + cpu_port = dsa_to_port(ds, port)->cpu_dp->index; if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); @@ -1503,11 +1503,25 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, idx = 1; } - memset(&ent, 0, sizeof(ent)); - ent.port = port; + /* For multicast address, the port is a bitmask and the validity + * is determined by having at least one port being still active + */ + if (!is_multicast_ether_addr(addr)) { + ent.port = port; + ent.is_valid = is_valid; + } else { + if (is_valid) + ent.port |= BIT(port); + else + ent.port &= ~BIT(port); + + ent.is_valid = !!(ent.port); + } + ent.is_valid = is_valid; ent.vid = vid; ent.is_static = true; + ent.is_age = false; memcpy(ent.mac, addr, ETH_ALEN); b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); @@ -1626,10 +1640,51 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_fdb_dump); +int b53_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct b53_device *priv = ds->priv; + + /* 5325 and 5365 require some more massaging, but could + * be supported eventually + */ + if (is5325(priv) || is5365(priv)) + return -EOPNOTSUPP; + + return 0; +} +EXPORT_SYMBOL(b53_mdb_prepare); + +void b53_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct b53_device *priv = ds->priv; + int ret; + + ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true); + if (ret) + dev_err(ds->dev, "failed to add MDB entry\n"); +} +EXPORT_SYMBOL(b53_mdb_add); + +int b53_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct b53_device *priv = ds->priv; + int ret; + + ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false); + if (ret) + dev_err(ds->dev, "failed to delete MDB entry\n"); + + return ret; +} +EXPORT_SYMBOL(b53_mdb_del); + int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; - s8 cpu_port = ds->ports[port].cpu_dp->index; + s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; u16 pvlan, reg; unsigned int i; @@ -1675,7 +1730,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) { struct b53_device *dev = ds->priv; struct 
b53_vlan *vl = &dev->vlans[0]; - s8 cpu_port = ds->ports[port].cpu_dp->index; + s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; unsigned int i; u16 pvlan, reg, pvid; @@ -1994,6 +2049,9 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_fdb_del = b53_fdb_del, .port_mirror_add = b53_mirror_add, .port_mirror_del = b53_mirror_del, + .port_mdb_prepare = b53_mdb_prepare, + .port_mdb_add = b53_mdb_add, + .port_mdb_del = b53_mdb_del, }; struct b53_chip_data { @@ -2341,10 +2399,13 @@ struct b53_device *b53_switch_alloc(struct device *base, struct dsa_switch *ds; struct b53_device *dev; - ds = dsa_switch_alloc(base, DSA_MAX_PORTS); + ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL); if (!ds) return NULL; + ds->dev = base; + ds->num_ports = DSA_MAX_PORTS; + dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index a7dd8acc281b..1877acf05081 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -250,7 +250,7 @@ b53_build_op(write48, u64); b53_build_op(write64, u64); struct b53_arl_entry { - u8 port; + u16 port; u8 mac[ETH_ALEN]; u16 vid; u8 is_valid:1; @@ -351,6 +351,12 @@ int b53_fdb_del(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int b53_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); +int b53_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb); +void b53_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb); +int b53_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb); int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 69fc13046ac7..e43040c9f9ee 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -350,6 +350,18 @@ static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) { unsigned int timeout = 1000; u32 reg; + int ret; + + /* The watchdog reset does not work on 7278, we need to hit the + * "external" reset line through the reset controller. + */ + if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) { + ret = reset_control_assert(priv->rcdev); + if (ret) + return ret; + + return reset_control_deassert(priv->rcdev); + } reg = core_readl(priv, CORE_WATCHDOG_CTRL); reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; @@ -381,8 +393,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, struct device_node *dn) { struct device_node *port; - int mode; unsigned int port_num; + phy_interface_t mode; + int err; priv->moca_port = -1; @@ -395,8 +408,8 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, * has completed, since they might be turned off at that * time */ - mode = of_get_phy_mode(port); - if (mode < 0) + err = of_get_phy_mode(port, &mode); + if (err) continue; if (mode == PHY_INTERFACE_MODE_INTERNAL) @@ -668,7 +681,7 @@ static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port, * state machine and make it go in PHY_FORCING state instead. 
*/ if (!status->link) - netif_carrier_off(ds->ports[port].slave); + netif_carrier_off(dsa_to_port(ds, port)->slave); status->duplex = DUPLEX_FULL; } else { status->link = true; @@ -734,7 +747,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->ports[port].cpu_dp->master; + struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol = { }; @@ -758,9 +771,9 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->ports[port].cpu_dp->master; + struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - s8 cpu_port = ds->ports[port].cpu_dp->index; + s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; struct ethtool_wolinfo pwol = { }; if (p->ethtool_ops->get_wol) @@ -974,6 +987,9 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .set_rxnfc = bcm_sf2_set_rxnfc, .port_mirror_add = b53_mirror_add, .port_mirror_del = b53_mirror_del, + .port_mdb_prepare = b53_mdb_prepare, + .port_mdb_add = b53_mdb_add, + .port_mdb_del = b53_mdb_del, }; struct bcm_sf2_of_data { @@ -1088,6 +1104,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) priv->core_reg_align = data->core_reg_align; priv->num_cfp_rules = data->num_cfp_rules; + priv->rcdev = devm_reset_control_get_optional_exclusive(&pdev->dev, + "switch"); + if (PTR_ERR(priv->rcdev) == -EPROBE_DEFER) + return PTR_ERR(priv->rcdev); + /* Auto-detection using standard registers will not work, so * provide an indication of what kind of device we are for * b53_common to work with @@ -1220,6 +1241,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) dsa_unregister_switch(priv->dev->ds); bcm_sf2_cfp_exit(priv->dev->ds); bcm_sf2_mdio_unregister(priv); + if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev)) + reset_control_assert(priv->rcdev); return 0; } diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 1df30ccec42d..de386dd96d66 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -18,6 +18,7 @@ #include <linux/types.h> #include <linux/bitops.h> #include <linux/if_vlan.h> +#include <linux/reset.h> #include <net/dsa.h> @@ -64,6 +65,8 @@ struct bcm_sf2_priv { void __iomem *fcb; void __iomem *acb; + struct reset_control *rcdev; + /* Register offsets indirection tables */ u32 type; const u16 *reg_offsets; diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index d264776a95a3..f3f0c3f07391 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -821,7 +821,7 @@ static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port, struct ethtool_rx_flow_spec *fs) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - s8 cpu_port = ds->ports[port].cpu_dp->index; + s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; __u64 ring_cookie = fs->ring_cookie; unsigned int queue_num, port_num; int ret; @@ -1049,7 +1049,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc, u32 *rule_locs) { - struct net_device *p = ds->ports[port].cpu_dp->master; + struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret = 0; @@ -1092,7 
+1092,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc) { - struct net_device *p = ds->ports[port].cpu_dp->master; + struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); int ret = 0; diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 925ed135a4d9..c8d7ef27fd72 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -286,10 +286,13 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev) dev_info(&mdiodev->dev, "%s: 0x%0x\n", pdata->name, pdata->enabled_ports); - ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); + ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL); if (!ds) return -ENOMEM; + ds->dev = &mdiodev->dev; + ds->num_ports = DSA_MAX_PORTS; + ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL); if (!ps) return -ENOMEM; diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index bbec86b9418e..e3c333a8f45d 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1283,10 +1283,12 @@ static int lan9303_register_switch(struct lan9303 *chip) { int base; - chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS); + chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL); if (!chip->ds) return -ENOMEM; + chip->ds->dev = chip->dev; + chip->ds->num_ports = LAN9303_NUM_PORTS; chip->ds->priv = chip; chip->ds->ops = &lan9303_switch_ops; base = chip->phy_addr_base; diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index a69c9b9878b7..955324968b74 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1854,10 +1854,12 @@ static int gswip_probe(struct platform_device *pdev) if (!priv->hw_info) return -EINVAL; - priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports); + priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL); if (!priv->ds) return -ENOMEM; + priv->ds->dev = dev; + priv->ds->num_ports = priv->hw_info->max_ports; priv->ds->priv = priv; priv->ds->ops = &gswip_switch_ops; priv->dev = dev; diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index fdffd9e0c518..7d050fab0889 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -87,7 +87,6 @@ MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); static struct i2c_driver ksz9477_i2c_driver = { .driver = { .name = "ksz9477-switch", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(ksz9477_dt_ids), }, .probe = ksz9477_i2c_probe, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index fe47180c908b..d8fda4a02640 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -398,10 +398,13 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv) struct dsa_switch *ds; struct ksz_device *swdev; - ds = dsa_switch_alloc(base, DSA_MAX_PORTS); + ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL); if (!ds) return NULL; + ds->dev = base; + ds->num_ports = DSA_MAX_PORTS; + swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL); if (!swdev) return NULL; @@ -419,6 +422,7 @@ EXPORT_SYMBOL(ksz_switch_alloc); int ksz_switch_register(struct ksz_device *dev, const struct ksz_dev_ops *ops) { + phy_interface_t interface; int ret; if (dev->pdata) @@ -453,9 +457,9 @@ int ksz_switch_register(struct ksz_device *dev, * device tree. 
*/ if (dev->dev->of_node) { - ret = of_get_phy_mode(dev->dev->of_node); - if (ret >= 0) - dev->interface = ret; + ret = of_get_phy_mode(dev->dev->of_node, &interface); + if (ret == 0) + dev->interface = interface; dev->synclko_125 = of_property_read_bool(dev->dev->of_node, "microchip,synclko-125"); } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1d8d36de4d20..ed1ec10ec62b 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -862,7 +862,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port) for (i = 0; i < MT7530_NUM_PORTS; i++) { if (dsa_is_user_port(ds, i) && - dsa_port_is_vlan_filtering(&ds->ports[i])) { + dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { all_user_ports_removed = false; break; } @@ -922,7 +922,7 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, * other port is still a VLAN-aware port. */ if (dsa_is_user_port(ds, i) && i != port && - !dsa_port_is_vlan_filtering(&ds->ports[i])) { + !dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) { if (dsa_to_port(ds, i)->bridge_dev != bridge) continue; if (priv->ports[i].enable) @@ -1165,7 +1165,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port, /* The port is kept as VLAN-unaware if bridge with vlan_filtering not * being set. */ - if (!dsa_port_is_vlan_filtering(&ds->ports[port])) + if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) return; mutex_lock(&priv->reg_mutex); @@ -1196,7 +1196,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port, /* The port is kept as VLAN-unaware if bridge with vlan_filtering not * being set. */ - if (!dsa_port_is_vlan_filtering(&ds->ports[port])) + if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) return 0; mutex_lock(&priv->reg_mutex); @@ -1252,7 +1252,7 @@ mt7530_setup(struct dsa_switch *ds) * controller also is the container for two GMACs nodes representing * as two netdev instances. */ - dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent; + dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent; if (priv->id == ID_MT7530) { priv->ethernet = syscon_node_to_regmap(dn); @@ -1340,7 +1340,9 @@ mt7530_setup(struct dsa_switch *ds) if (!dsa_is_unused_port(ds, 5)) { priv->p5_intf_sel = P5_INTF_SEL_GMAC5; - interface = of_get_phy_mode(ds->ports[5].dn); + ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface); + if (ret && ret != -ENODEV) + return ret; } else { /* Scan the ethernet nodes. look for GMAC1, lookup used phy */ for_each_child_of_node(dn, mac_np) { @@ -1354,7 +1356,9 @@ mt7530_setup(struct dsa_switch *ds) phy_node = of_parse_phandle(mac_np, "phy-handle", 0); if (phy_node->parent == priv->dev->of_node->parent) { - interface = of_get_phy_mode(mac_np); + ret = of_get_phy_mode(mac_np, &interface); + if (ret && ret != -ENODEV) + return ret; id = of_mdio_parse_addr(ds->dev, phy_node); if (id == 0) priv->p5_intf_sel = P5_INTF_SEL_PHY_P0; @@ -1632,10 +1636,13 @@ mt7530_probe(struct mdio_device *mdiodev) if (!priv) return -ENOMEM; - priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); + priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL); if (!priv->ds) return -ENOMEM; + priv->ds->dev = &mdiodev->dev; + priv->ds->num_ports = DSA_MAX_PORTS; + /* Use medatek,mcm property to distinguish hardware type that would * casues a little bit differences on power-on sequence. 
*/ diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 2a2489b5196d..a5a37f47b320 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -270,10 +270,12 @@ static int mv88e6060_probe(struct mdio_device *mdiodev) dev_info(dev, "switch %s detected\n", name); - ds = dsa_switch_alloc(dev, MV88E6060_PORTS); + ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); if (!ds) return -ENOMEM; + ds->dev = dev; + ds->num_ports = MV88E6060_PORTS; ds->priv = priv; ds->dev = dev; ds->ops = &mv88e6060_switch_ops; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 6787d560e9e3..3bd988529178 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1057,35 +1057,43 @@ static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port, return 0; } +/* Mask of the local ports allowed to receive frames from a given fabric port */ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) { - struct dsa_switch *ds = NULL; + struct dsa_switch *ds = chip->ds; + struct dsa_switch_tree *dst = ds->dst; struct net_device *br; + struct dsa_port *dp; + bool found = false; u16 pvlan; - int i; - if (dev < DSA_MAX_SWITCHES) - ds = chip->ds->dst->ds[dev]; + list_for_each_entry(dp, &dst->ports, list) { + if (dp->ds->index == dev && dp->index == port) { + found = true; + break; + } + } /* Prevent frames from unknown switch or port */ - if (!ds || port >= ds->num_ports) + if (!found) return 0; /* Frames from DSA links and CPU ports can egress any local port */ - if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) + if (dp->type == DSA_PORT_TYPE_CPU || dp->type == DSA_PORT_TYPE_DSA) return mv88e6xxx_port_mask(chip); - br = ds->ports[port].bridge_dev; + br = dp->bridge_dev; pvlan = 0; /* Frames from user ports can egress any local DSA links and CPU ports, * as well as any local member of their bridge group. 
*/ - for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) - if (dsa_is_cpu_port(chip->ds, i) || - dsa_is_dsa_port(chip->ds, i) || - (br && dsa_to_port(chip->ds, i)->bridge_dev == br)) - pvlan |= BIT(i); + list_for_each_entry(dp, &dst->ports, list) + if (dp->ds == ds && + (dp->type == DSA_PORT_TYPE_CPU || + dp->type == DSA_PORT_TYPE_DSA || + (br && dp->bridge_dev == br))) + pvlan |= BIT(dp->index); return pvlan; } @@ -1135,6 +1143,7 @@ static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip) static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip) { + struct dsa_switch *ds = chip->ds; int target, port; int err; @@ -1143,10 +1152,9 @@ static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip) /* Initialize the routing port to the 32 possible target devices */ for (target = 0; target < 32; target++) { - port = 0x1f; - if (target < DSA_MAX_SWITCHES) - if (chip->ds->rtable[target] != DSA_RTABLE_NONE) - port = chip->ds->rtable[target]; + port = dsa_routing_port(ds, target); + if (port == ds->num_ports) + port = 0x1f; err = mv88e6xxx_g2_device_mapping_write(chip, target, port); if (err) @@ -1253,7 +1261,7 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port) u16 pvlan = 0; if (!mv88e6xxx_has_pvt(chip)) - return -EOPNOTSUPP; + return 0; /* Skip the local source device, which uses in-chip port VLAN */ if (dev != chip->ds->index) @@ -1370,6 +1378,22 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) return mv88e6xxx_g1_atu_flush(chip, *fid, true); } +static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash) +{ + if (chip->info->ops->atu_get_hash) + return chip->info->ops->atu_get_hash(chip, hash); + + return -EOPNOTSUPP; +} + +static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash) +{ + if (chip->info->ops->atu_set_hash) + return chip->info->ops->atu_set_hash(chip, hash); + + return -EOPNOTSUPP; +} + static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, u16 vid_begin, u16 vid_end) { @@ -1402,7 +1426,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) continue; - if (!ds->ports[i].slave) + if (!dsa_to_port(ds, i)->slave) continue; if (vlan.member[i] == @@ -1410,7 +1434,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, continue; if (dsa_to_port(ds, i)->bridge_dev == - ds->ports[port].bridge_dev) + dsa_to_port(ds, port)->bridge_dev) break; /* same bridge, check next VLAN */ if (!dsa_to_port(ds, i)->bridge_dev) @@ -2035,32 +2059,26 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, struct net_device *br) { - struct dsa_switch *ds; - int port; - int dev; + struct dsa_switch *ds = chip->ds; + struct dsa_switch_tree *dst = ds->dst; + struct dsa_port *dp; int err; - /* Remap the Port VLAN of each local bridge group member */ - for (port = 0; port < mv88e6xxx_num_ports(chip); ++port) { - if (chip->ds->ports[port].bridge_dev == br) { - err = mv88e6xxx_port_vlan_map(chip, port); - if (err) - return err; - } - } - - if (!mv88e6xxx_has_pvt(chip)) - return 0; - - /* Remap the Port VLAN of each cross-chip bridge group member */ - for (dev = 0; dev < DSA_MAX_SWITCHES; ++dev) { - ds = chip->ds->dst->ds[dev]; - if (!ds) - break; - - for (port = 0; port < ds->num_ports; ++port) { - if (ds->ports[port].bridge_dev == br) { - err = mv88e6xxx_pvt_map(chip, dev, port); + list_for_each_entry(dp, &dst->ports, list) { + if (dp->bridge_dev 
== br) { + if (dp->ds == ds) { + /* This is a local bridge group member, + * remap its Port VLAN Map. + */ + err = mv88e6xxx_port_vlan_map(chip, dp->index); + if (err) + return err; + } else { + /* This is an external bridge group member, + * remap its cross-chip Port VLAN Table entry. + */ + err = mv88e6xxx_pvt_map(chip, dp->ds->index, + dp->index); if (err) return err; } @@ -2101,9 +2119,6 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev, struct mv88e6xxx_chip *chip = ds->priv; int err; - if (!mv88e6xxx_has_pvt(chip)) - return 0; - mv88e6xxx_reg_lock(chip); err = mv88e6xxx_pvt_map(chip, dev, port); mv88e6xxx_reg_unlock(chip); @@ -2116,9 +2131,6 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds, int dev, { struct mv88e6xxx_chip *chip = ds->priv; - if (!mv88e6xxx_has_pvt(chip)) - return; - mv88e6xxx_reg_lock(chip); if (mv88e6xxx_pvt_map(chip, dev, port)) dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n"); @@ -2378,7 +2390,14 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port) if (chip->info->ops->set_egress_port) { err = chip->info->ops->set_egress_port(chip, - upstream_port); + MV88E6XXX_EGRESS_DIR_INGRESS, + upstream_port); + if (err) + return err; + + err = chip->info->ops->set_egress_port(chip, + MV88E6XXX_EGRESS_DIR_EGRESS, + upstream_port); if (err) return err; } @@ -2641,6 +2660,248 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip) return mv88e6xxx_software_reset(chip); } +enum mv88e6xxx_devlink_param_id { + MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH, +}; + +static int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mv88e6xxx_reg_lock(chip); + + switch (id) { + case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH: + err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8); + break; + default: + err = -EOPNOTSUPP; + break; + } + + mv88e6xxx_reg_unlock(chip); + + return err; +} + +static int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mv88e6xxx_reg_lock(chip); + + switch (id) { + case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH: + err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8); + break; + default: + err = -EOPNOTSUPP; + break; + } + + mv88e6xxx_reg_unlock(chip); + + return err; +} + +static const struct devlink_param mv88e6xxx_devlink_params[] = { + DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH, + "ATU_hash", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_RUNTIME)), +}; + +static int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds) +{ + return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params, + ARRAY_SIZE(mv88e6xxx_devlink_params)); +} + +static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds) +{ + dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params, + ARRAY_SIZE(mv88e6xxx_devlink_params)); +} + +enum mv88e6xxx_devlink_resource_id { + MV88E6XXX_RESOURCE_ID_ATU, + MV88E6XXX_RESOURCE_ID_ATU_BIN_0, + MV88E6XXX_RESOURCE_ID_ATU_BIN_1, + MV88E6XXX_RESOURCE_ID_ATU_BIN_2, + MV88E6XXX_RESOURCE_ID_ATU_BIN_3, +}; + +static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip, + u16 bin) +{ + u16 occupancy = 0; + int err; + + mv88e6xxx_reg_lock(chip); + + err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL, + bin); + if (err) { + dev_err(chip->dev, "failed to set ATU 
stats kind/bin\n"); + goto unlock; + } + + err = mv88e6xxx_g1_atu_get_next(chip, 0); + if (err) { + dev_err(chip->dev, "failed to perform ATU get next\n"); + goto unlock; + } + + err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy); + if (err) { + dev_err(chip->dev, "failed to get ATU stats\n"); + goto unlock; + } + +unlock: + mv88e6xxx_reg_unlock(chip); + + return occupancy; +} + +static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv) +{ + struct mv88e6xxx_chip *chip = priv; + + return mv88e6xxx_devlink_atu_bin_get(chip, + MV88E6XXX_G2_ATU_STATS_BIN_0); +} + +static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv) +{ + struct mv88e6xxx_chip *chip = priv; + + return mv88e6xxx_devlink_atu_bin_get(chip, + MV88E6XXX_G2_ATU_STATS_BIN_1); +} + +static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv) +{ + struct mv88e6xxx_chip *chip = priv; + + return mv88e6xxx_devlink_atu_bin_get(chip, + MV88E6XXX_G2_ATU_STATS_BIN_2); +} + +static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv) +{ + struct mv88e6xxx_chip *chip = priv; + + return mv88e6xxx_devlink_atu_bin_get(chip, + MV88E6XXX_G2_ATU_STATS_BIN_3); +} + +static u64 mv88e6xxx_devlink_atu_get(void *priv) +{ + return mv88e6xxx_devlink_atu_bin_0_get(priv) + + mv88e6xxx_devlink_atu_bin_1_get(priv) + + mv88e6xxx_devlink_atu_bin_2_get(priv) + + mv88e6xxx_devlink_atu_bin_3_get(priv); +} + +static int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds) +{ + struct devlink_resource_size_params size_params; + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + devlink_resource_size_params_init(&size_params, + mv88e6xxx_num_macs(chip), + mv88e6xxx_num_macs(chip), + 1, DEVLINK_RESOURCE_UNIT_ENTRY); + + err = dsa_devlink_resource_register(ds, "ATU", + mv88e6xxx_num_macs(chip), + MV88E6XXX_RESOURCE_ID_ATU, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &size_params); + if (err) + goto out; + + devlink_resource_size_params_init(&size_params, + mv88e6xxx_num_macs(chip) / 4, + mv88e6xxx_num_macs(chip) / 4, + 1, DEVLINK_RESOURCE_UNIT_ENTRY); + + err = dsa_devlink_resource_register(ds, "ATU_bin_0", + mv88e6xxx_num_macs(chip) / 4, + MV88E6XXX_RESOURCE_ID_ATU_BIN_0, + MV88E6XXX_RESOURCE_ID_ATU, + &size_params); + if (err) + goto out; + + err = dsa_devlink_resource_register(ds, "ATU_bin_1", + mv88e6xxx_num_macs(chip) / 4, + MV88E6XXX_RESOURCE_ID_ATU_BIN_1, + MV88E6XXX_RESOURCE_ID_ATU, + &size_params); + if (err) + goto out; + + err = dsa_devlink_resource_register(ds, "ATU_bin_2", + mv88e6xxx_num_macs(chip) / 4, + MV88E6XXX_RESOURCE_ID_ATU_BIN_2, + MV88E6XXX_RESOURCE_ID_ATU, + &size_params); + if (err) + goto out; + + err = dsa_devlink_resource_register(ds, "ATU_bin_3", + mv88e6xxx_num_macs(chip) / 4, + MV88E6XXX_RESOURCE_ID_ATU_BIN_3, + MV88E6XXX_RESOURCE_ID_ATU, + &size_params); + if (err) + goto out; + + dsa_devlink_resource_occ_get_register(ds, + MV88E6XXX_RESOURCE_ID_ATU, + mv88e6xxx_devlink_atu_get, + chip); + + dsa_devlink_resource_occ_get_register(ds, + MV88E6XXX_RESOURCE_ID_ATU_BIN_0, + mv88e6xxx_devlink_atu_bin_0_get, + chip); + + dsa_devlink_resource_occ_get_register(ds, + MV88E6XXX_RESOURCE_ID_ATU_BIN_1, + mv88e6xxx_devlink_atu_bin_1_get, + chip); + + dsa_devlink_resource_occ_get_register(ds, + MV88E6XXX_RESOURCE_ID_ATU_BIN_2, + mv88e6xxx_devlink_atu_bin_2_get, + chip); + + dsa_devlink_resource_occ_get_register(ds, + MV88E6XXX_RESOURCE_ID_ATU_BIN_3, + mv88e6xxx_devlink_atu_bin_3_get, + chip); + + return 0; + +out: + dsa_devlink_resources_unregister(ds); + return err; +} + +static void mv88e6xxx_teardown(struct dsa_switch *ds) +{ + 
mv88e6xxx_teardown_devlink_params(ds); + dsa_devlink_resources_unregister(ds); +} + static int mv88e6xxx_setup(struct dsa_switch *ds) { struct mv88e6xxx_chip *chip = ds->priv; @@ -2757,6 +3018,22 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) unlock: mv88e6xxx_reg_unlock(chip); + if (err) + return err; + + /* Have to be called without holding the register lock, since + * they take the devlink lock, and we later take the locks in + * the reverse order when getting/setting parameters or + * resource occupancy. + */ + err = mv88e6xxx_setup_devlink_resources(ds); + if (err) + return err; + + err = mv88e6xxx_setup_devlink_params(ds); + if (err) + dsa_devlink_resources_unregister(ds); + return err; } @@ -3117,6 +3394,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, @@ -3246,6 +3525,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6165_avb_ops, @@ -3280,6 +3561,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6165_avb_ops, @@ -3322,6 +3605,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, @@ -3366,6 +3651,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_get_lane = mv88e6352_serdes_get_lane, @@ -3409,6 +3696,8 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, @@ -3453,6 +3742,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_get_lane = mv88e6352_serdes_get_lane, @@ -3538,6 +3829,8 @@ 
static const struct mv88e6xxx_ops mv88e6190_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3587,6 +3880,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3635,6 +3930,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3686,6 +3983,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_get_lane = mv88e6352_serdes_get_lane, @@ -3777,6 +4076,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3963,6 +4264,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, @@ -4003,6 +4306,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .avb_ops = &mv88e6352_avb_ops, @@ -4049,6 +4354,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6352_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_get_lane = mv88e6352_serdes_get_lane, @@ -4105,6 +4412,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = 
mv88e6390_serdes_power, @@ -4158,6 +4467,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -4177,6 +4488,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6097, .name = "Marvell 88E6085", .num_databases = 4096, + .num_macs = 8192, .num_ports = 10, .num_internal_phys = 5, .max_vid = 4095, @@ -4199,6 +4511,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6095, .name = "Marvell 88E6095/88E6095F", .num_databases = 256, + .num_macs = 8192, .num_ports = 11, .num_internal_phys = 0, .max_vid = 4095, @@ -4219,6 +4532,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6097, .name = "Marvell 88E6097/88E6097F", .num_databases = 4096, + .num_macs = 8192, .num_ports = 11, .num_internal_phys = 8, .max_vid = 4095, @@ -4241,6 +4555,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6123", .num_databases = 4096, + .num_macs = 1024, .num_ports = 3, .num_internal_phys = 5, .max_vid = 4095, @@ -4263,6 +4578,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6131", .num_databases = 256, + .num_macs = 8192, .num_ports = 8, .num_internal_phys = 0, .max_vid = 4095, @@ -4283,6 +4599,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6341, .name = "Marvell 88E6141", .num_databases = 4096, + .num_macs = 2048, .num_ports = 6, .num_internal_phys = 5, .num_gpio = 11, @@ -4306,6 +4623,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6161", .num_databases = 4096, + .num_macs = 1024, .num_ports = 6, .num_internal_phys = 5, .max_vid = 4095, @@ -4329,6 +4647,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6165, .name = "Marvell 88E6165", .num_databases = 4096, + .num_macs = 8192, .num_ports = 6, .num_internal_phys = 0, .max_vid = 4095, @@ -4352,6 +4671,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6171", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .max_vid = 4095, @@ -4374,6 +4694,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6172", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .num_gpio = 15, @@ -4397,6 +4718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6175", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .max_vid = 4095, @@ -4419,6 +4741,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6176", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .num_gpio = 15, @@ -4442,6 +4765,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6185, .name = "Marvell 88E6185", .num_databases = 256, + .num_macs = 8192, .num_ports = 10, .num_internal_phys = 0, .max_vid = 4095, @@ 
-4462,6 +4786,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6390, .name = "Marvell 88E6190", .num_databases = 4096, + .num_macs = 16384, .num_ports = 11, /* 10 + Z80 */ .num_internal_phys = 9, .num_gpio = 16, @@ -4485,6 +4810,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6390, .name = "Marvell 88E6190X", .num_databases = 4096, + .num_macs = 16384, .num_ports = 11, /* 10 + Z80 */ .num_internal_phys = 9, .num_gpio = 16, @@ -4508,6 +4834,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6390, .name = "Marvell 88E6191", .num_databases = 4096, + .num_macs = 16384, .num_ports = 11, /* 10 + Z80 */ .num_internal_phys = 9, .max_vid = 8191, @@ -4558,6 +4885,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6240", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .num_gpio = 15, @@ -4628,6 +4956,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6320, .name = "Marvell 88E6320", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .num_gpio = 15, @@ -4652,6 +4981,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6320, .name = "Marvell 88E6321", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .num_gpio = 15, @@ -4675,6 +5005,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6341, .name = "Marvell 88E6341", .num_databases = 4096, + .num_macs = 2048, .num_internal_phys = 5, .num_ports = 6, .num_gpio = 11, @@ -4699,6 +5030,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6350", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .max_vid = 4095, @@ -4721,6 +5053,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6351, .name = "Marvell 88E6351", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .max_vid = 4095, @@ -4743,6 +5076,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6352, .name = "Marvell 88E6352", .num_databases = 4096, + .num_macs = 8192, .num_ports = 7, .num_internal_phys = 5, .num_gpio = 15, @@ -4766,6 +5100,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6390, .name = "Marvell 88E6390", .num_databases = 4096, + .num_macs = 16384, .num_ports = 11, /* 10 + Z80 */ .num_internal_phys = 9, .num_gpio = 16, @@ -4789,6 +5124,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .family = MV88E6XXX_FAMILY_6390, .name = "Marvell 88E6390X", .num_databases = 4096, + .num_macs = 16384, .num_ports = 11, /* 10 + Z80 */ .num_internal_phys = 9, .num_gpio = 16, @@ -4914,6 +5250,80 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, return err; } +static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) +{ + enum mv88e6xxx_egress_direction direction = ingress ? + MV88E6XXX_EGRESS_DIR_INGRESS : + MV88E6XXX_EGRESS_DIR_EGRESS; + struct mv88e6xxx_chip *chip = ds->priv; + bool other_mirrors = false; + int i; + int err; + + if (!chip->info->ops->set_egress_port) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + if ((ingress ? 
chip->ingress_dest_port : chip->egress_dest_port) != + mirror->to_local_port) { + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) + other_mirrors |= ingress ? + chip->ports[i].mirror_ingress : + chip->ports[i].mirror_egress; + + /* Can't change egress port when other mirror is active */ + if (other_mirrors) { + err = -EBUSY; + goto out; + } + + err = chip->info->ops->set_egress_port(chip, + direction, + mirror->to_local_port); + if (err) + goto out; + } + + err = mv88e6xxx_port_set_mirror(chip, port, direction, true); +out: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static void mv88e6xxx_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + enum mv88e6xxx_egress_direction direction = mirror->ingress ? + MV88E6XXX_EGRESS_DIR_INGRESS : + MV88E6XXX_EGRESS_DIR_EGRESS; + struct mv88e6xxx_chip *chip = ds->priv; + bool other_mirrors = false; + int i; + + mutex_lock(&chip->reg_lock); + if (mv88e6xxx_port_set_mirror(chip, port, direction, false)) + dev_err(ds->dev, "p%d: failed to disable mirroring\n", port); + + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) + other_mirrors |= mirror->ingress ? + chip->ports[i].mirror_ingress : + chip->ports[i].mirror_egress; + + /* Reset egress port when no other mirror is active */ + if (!other_mirrors) { + if (chip->info->ops->set_egress_port(chip, + direction, + dsa_upstream_port(ds, + port))) + dev_err(ds->dev, "failed to set egress port\n"); + } + + mutex_unlock(&chip->reg_lock); +} + static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port, bool unicast, bool multicast) { @@ -4933,6 +5343,7 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port, static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, + .teardown = mv88e6xxx_teardown, .phylink_validate = mv88e6xxx_validate, .phylink_mac_link_state = mv88e6xxx_link_state, .phylink_mac_config = mv88e6xxx_mac_config, @@ -4968,6 +5379,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_mdb_prepare = mv88e6xxx_port_mdb_prepare, .port_mdb_add = mv88e6xxx_port_mdb_add, .port_mdb_del = mv88e6xxx_port_mdb_del, + .port_mirror_add = mv88e6xxx_port_mirror_add, + .port_mirror_del = mv88e6xxx_port_mirror_del, .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join, .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave, .port_hwtstamp_set = mv88e6xxx_port_hwtstamp_set, @@ -4975,6 +5388,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_txtstamp = mv88e6xxx_port_txtstamp, .port_rxtstamp = mv88e6xxx_port_rxtstamp, .get_ts_info = mv88e6xxx_get_ts_info, + .devlink_param_get = mv88e6xxx_devlink_param_get, + .devlink_param_set = mv88e6xxx_devlink_param_set, }; static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) @@ -4982,10 +5397,12 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) struct device *dev = chip->dev; struct dsa_switch *ds; - ds = dsa_switch_alloc(dev, mv88e6xxx_num_ports(chip)); + ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); if (!ds) return -ENOMEM; + ds->dev = dev; + ds->num_ports = mv88e6xxx_num_ports(chip); ds->priv = chip; ds->dev = dev; ds->ops = &mv88e6xxx_switch_ops; diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index e9b1a1ac9a8e..8a8e38bfb161 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -33,6 +33,11 @@ enum mv88e6xxx_egress_mode { MV88E6XXX_EGRESS_MODE_ETHERTYPE, }; +enum 
mv88e6xxx_egress_direction { + MV88E6XXX_EGRESS_DIR_INGRESS, + MV88E6XXX_EGRESS_DIR_EGRESS, +}; + enum mv88e6xxx_frame_mode { MV88E6XXX_FRAME_MODE_NORMAL, MV88E6XXX_FRAME_MODE_DSA, @@ -94,6 +99,7 @@ struct mv88e6xxx_info { u16 prod_num; const char *name; unsigned int num_databases; + unsigned int num_macs; unsigned int num_ports; unsigned int num_internal_phys; unsigned int num_gpio; @@ -227,6 +233,8 @@ struct mv88e6xxx_port { u64 vtu_member_violation; u64 vtu_miss_violation; u8 cmode; + bool mirror_ingress; + bool mirror_egress; unsigned int serdes_irq; }; @@ -310,6 +318,10 @@ struct mv88e6xxx_chip { u16 evcap_config; u16 enable_count; + /* Current ingress and egress monitor ports */ + int egress_dest_port; + int ingress_dest_port; + /* Per-port timestamping resources. */ struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS]; @@ -464,7 +476,9 @@ struct mv88e6xxx_ops { int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port, uint64_t *data); int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port); - int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port); + int (*set_egress_port)(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port); #define MV88E6XXX_CASCADE_PORT_NONE 0xe #define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf @@ -497,6 +511,10 @@ struct mv88e6xxx_ops { int (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port, uint64_t *data); + /* Address Translation Unit operations */ + int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash); + int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash); + /* VLAN Translation Unit operations */ int (*vtu_getnext)(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_entry *entry); @@ -609,6 +627,11 @@ static inline unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip) return chip->info->num_databases; } +static inline unsigned int mv88e6xxx_num_macs(struct mv88e6xxx_chip *chip) +{ + return chip->info->num_macs; +} + static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip) { return chip->info->num_ports; diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index 25ec4c0ac589..120a65d3e3ef 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -263,8 +263,11 @@ int mv88e6250_g1_ieee_pri_map(struct mv88e6xxx_chip *chip) /* Offset 0x1a: Monitor Control */ /* Offset 0x1a: Monitor & MGMT Control on some devices */ -int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port) +int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port) { + int *dest_port_chip; u16 reg; int err; @@ -272,13 +275,28 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port) if (err) return err; - reg &= ~(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK | - MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK); + switch (direction) { + case MV88E6XXX_EGRESS_DIR_INGRESS: + dest_port_chip = &chip->ingress_dest_port; + reg &= MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK; + reg |= port << + __bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK); + break; + case MV88E6XXX_EGRESS_DIR_EGRESS: + dest_port_chip = &chip->egress_dest_port; + reg &= MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK; + reg |= port << + __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK); + break; + default: + return -EINVAL; + } + + err = mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg); + if (!err) + *dest_port_chip = port; - reg |= port << 
__bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK) | - port << __bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK); - - return mv88e6xxx_g1_write(chip, MV88E6185_G1_MONITOR_CTL, reg); + return err; } /* Older generations also call this the ARP destination. It has been @@ -310,22 +328,32 @@ static int mv88e6390_g1_monitor_write(struct mv88e6xxx_chip *chip, return mv88e6xxx_g1_write(chip, MV88E6390_G1_MONITOR_MGMT_CTL, reg); } -int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port) +int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port) { + int *dest_port_chip; u16 ptr; int err; - ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST; - err = mv88e6390_g1_monitor_write(chip, ptr, port); - if (err) - return err; + switch (direction) { + case MV88E6XXX_EGRESS_DIR_INGRESS: + dest_port_chip = &chip->ingress_dest_port; + ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST; + break; + case MV88E6XXX_EGRESS_DIR_EGRESS: + dest_port_chip = &chip->egress_dest_port; + ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST; + break; + default: + return -EINVAL; + } - ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST; err = mv88e6390_g1_monitor_write(chip, ptr, port); - if (err) - return err; + if (!err) + *dest_port_chip = port; - return 0; + return err; } int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port) diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 0870fcc8bfc8..bc5a6b2bb1e4 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -109,6 +109,7 @@ /* Offset 0x0A: ATU Control Register */ #define MV88E6XXX_G1_ATU_CTL 0x0a #define MV88E6XXX_G1_ATU_CTL_LEARN2ALL 0x0008 +#define MV88E6161_G1_ATU_CTL_HASH_MASK 0x0003 /* Offset 0x0B: ATU Operation Register */ #define MV88E6XXX_G1_ATU_OP 0x0b @@ -287,8 +288,12 @@ int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip); int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip); void mv88e6xxx_g1_stats_read(struct mv88e6xxx_chip *chip, int stat, u32 *val); int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip); -int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port); -int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, int port); +int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port); +int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port); int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); @@ -318,6 +323,8 @@ int mv88e6xxx_g1_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, int port, bool all); int mv88e6xxx_g1_atu_prob_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip); +int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash); +int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash); int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_entry *entry); @@ -338,5 +345,6 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip); int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g1_atu_get_next(struct 
mv88e6xxx_chip *chip, u16 fid); #endif /* _MV88E6XXX_GLOBAL1_H */ diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 792a96ef418f..bdcd25560dd2 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -73,6 +73,38 @@ int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip, return 0; } +int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash) +{ + int err; + u16 val; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val); + if (err) + return err; + + *hash = val & MV88E6161_G1_ATU_CTL_HASH_MASK; + + return 0; +} + +int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash) +{ + int err; + u16 val; + + if (hash & ~MV88E6161_G1_ATU_CTL_HASH_MASK) + return -EINVAL; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL, &val); + if (err) + return err; + + val &= ~MV88E6161_G1_ATU_CTL_HASH_MASK; + val |= hash; + + return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_CTL, val); +} + /* Offset 0x0B: ATU Operation Register */ static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip) @@ -122,6 +154,11 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op) return mv88e6xxx_g1_atu_op_wait(chip); } +int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid) +{ + return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB); +} + /* Offset 0x0C: ATU Data Register */ static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index bdbb72fc20ed..87bfe7c8c9cd 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -280,6 +280,19 @@ int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr) return err; } +/* Offset 0x0E: ATU Statistics */ + +int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_ATU_STATS, + kind | bin); +} + +int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats) +{ + return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_ATU_STATS, stats); +} + /* Offset 0x0F: Priority Override Table */ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer, diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 42da4bca73e8..1f42ee656816 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -113,7 +113,16 @@ #define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK 0x00ff /* Offset 0x0E: ATU Stats Register */ -#define MV88E6XXX_G2_ATU_STATS 0x0e +#define MV88E6XXX_G2_ATU_STATS 0x0e +#define MV88E6XXX_G2_ATU_STATS_BIN_0 (0x0 << 14) +#define MV88E6XXX_G2_ATU_STATS_BIN_1 (0x1 << 14) +#define MV88E6XXX_G2_ATU_STATS_BIN_2 (0x2 << 14) +#define MV88E6XXX_G2_ATU_STATS_BIN_3 (0x3 << 14) +#define MV88E6XXX_G2_ATU_STATS_MODE_ALL (0x0 << 12) +#define MV88E6XXX_G2_ATU_STATS_MODE_ALL_DYNAMIC (0x1 << 12) +#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL (0x2 << 12) +#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL_DYNAMIC (0x3 << 12) +#define MV88E6XXX_G2_ATU_STATS_MASK 0x0fff /* Offset 0x0F: Priority Override Table */ #define MV88E6XXX_G2_PRIO_OVERRIDE 0x0f @@ -353,6 +362,8 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops; int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip, bool external); +int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin); +int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip 
*chip, u16 *stats); #else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ @@ -515,6 +526,18 @@ static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, return -EOPNOTSUPP; } +static inline int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, + u16 kind, u16 bin) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, + u16 *stats) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ #endif /* _MV88E6XXX_GLOBAL2_H */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 15ef81654b67..7fe256c5739d 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -1181,6 +1181,43 @@ int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg); } +int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_egress_direction direction, + bool mirror) +{ + bool *mirror_port; + u16 reg; + u16 bit; + int err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg); + if (err) + return err; + + switch (direction) { + case MV88E6XXX_EGRESS_DIR_INGRESS: + bit = MV88E6XXX_PORT_CTL2_INGRESS_MONITOR; + mirror_port = &chip->ports[port].mirror_ingress; + break; + case MV88E6XXX_EGRESS_DIR_EGRESS: + bit = MV88E6XXX_PORT_CTL2_EGRESS_MONITOR; + mirror_port = &chip->ports[port].mirror_egress; + break; + default: + return -EINVAL; + } + + reg &= ~bit; + if (mirror) + reg |= bit; + + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg); + if (!err) + *mirror_port = mirror; + + return err; +} + int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port, u16 mode) { diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 03a480cd71b9..0ec4327c2b42 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -368,6 +368,9 @@ int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port, int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, int upstream_port); +int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_egress_direction direction, + bool mirror); int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port); int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig new file mode 100644 index 000000000000..0031ca814346 --- /dev/null +++ b/drivers/net/dsa/ocelot/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NET_DSA_MSCC_FELIX + tristate "Ocelot / Felix Ethernet switch support" + depends on NET_DSA && PCI + select MSCC_OCELOT_SWITCH + select NET_DSA_TAG_OCELOT + help + This driver supports the VSC9959 network switch, which is a member of + the Vitesse / Microsemi / Microchip Ocelot family of switching cores. + It is embedded as a PCIe function of the NXP LS1028A ENETC integrated + endpoint.
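The mirroring support added above in global1.c and port.c splits the work in two: mv88e6390_g1_set_egress_port() (or the 6095 variant) selects the single monitor destination for each direction in Global 1, while mv88e6xxx_port_set_mirror() flags the mirrored port itself in its Port Control 2 register. A DSA .port_mirror_add callback would typically combine the two roughly as sketched below. This is only an illustrative sketch based on the declarations visible in this diff: the name mv88e6xxx_port_mirror_add, the use of mv88e6xxx_reg_lock()/mv88e6xxx_reg_unlock(), and calling the 6390 Global 1 helper directly rather than through the chip's ops table are assumptions, not the actual chip.c hunk of this series.

/* Hypothetical glue between DSA's mirror offload and the new helpers.
 * Assumes the usual chip.h/port.h/global1.h includes from this driver.
 */
static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
                                     struct dsa_mall_mirror_tc_entry *mirror,
                                     bool ingress)
{
        enum mv88e6xxx_egress_direction direction = ingress ?
                MV88E6XXX_EGRESS_DIR_INGRESS : MV88E6XXX_EGRESS_DIR_EGRESS;
        struct mv88e6xxx_chip *chip = ds->priv;
        int err;

        mv88e6xxx_reg_lock(chip);

        /* Point this direction's monitor destination at the capture port */
        err = mv88e6390_g1_set_egress_port(chip, direction,
                                           mirror->to_local_port);
        if (err)
                goto out;

        /* Mark the mirrored port itself in Port Control 2 */
        err = mv88e6xxx_port_set_mirror(chip, port, direction, true);
out:
        mv88e6xxx_reg_unlock(chip);

        return err;
}

A matching .port_mirror_del would simply call mv88e6xxx_port_set_mirror(chip, port, direction, false); the per-chip ingress_dest_port/egress_dest_port and per-port mirror_ingress/mirror_egress fields cached by the helpers above make it cheap to check whether any port still references a given monitor destination.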
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile new file mode 100644 index 000000000000..37ad403e0b2a --- /dev/null +++ b/drivers/net/dsa/ocelot/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o + +mscc_felix-objs := \ + felix.o \ + felix_vsc9959.o diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c new file mode 100644 index 000000000000..b7f92464815d --- /dev/null +++ b/drivers/net/dsa/ocelot/felix.c @@ -0,0 +1,530 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright 2019 NXP Semiconductors + */ +#include <uapi/linux/if_bridge.h> +#include <soc/mscc/ocelot.h> +#include <linux/packing.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/of.h> +#include <net/dsa.h> +#include "felix.h" + +static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + return DSA_TAG_PROTO_OCELOT; +} + +static int felix_set_ageing_time(struct dsa_switch *ds, + unsigned int ageing_time) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_set_ageing_time(ocelot, ageing_time); + + return 0; +} + +static void felix_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_adjust_link(ocelot, port, phydev); +} + +static int felix_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_fdb_dump(ocelot, port, cb, data); +} + +static int felix_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct ocelot *ocelot = ds->priv; + bool vlan_aware; + + vlan_aware = dsa_port_is_vlan_filtering(dsa_to_port(ds, port)); + + return ocelot_fdb_add(ocelot, port, addr, vid, vlan_aware); +} + +static int felix_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_fdb_del(ocelot, port, addr, vid); +} + +static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_bridge_stp_state_set(ocelot, port, state); +} + +static int felix_bridge_join(struct dsa_switch *ds, int port, + struct net_device *br) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_bridge_join(ocelot, port, br); +} + +static void felix_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *br) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_bridge_leave(ocelot, port, br); +} + +/* This callback needs to be present */ +static int felix_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + return 0; +} + +static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_vlan_filtering(ocelot, port, enabled); + + return 0; +} + +static void felix_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ocelot *ocelot = ds->priv; + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ocelot_vlan_add(ocelot, port, vid, + vlan->flags & BRIDGE_VLAN_INFO_PVID, + vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (err) { + dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", + vid, port, err); + return; + } + } +} + +static int felix_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ocelot *ocelot = 
ds->priv; + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ocelot_vlan_del(ocelot, port, vid); + if (err) { + dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n", + vid, port, err); + return err; + } + } + return 0; +} + +static int felix_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_enable(ocelot, port, phy); + + return 0; +} + +static void felix_port_disable(struct dsa_switch *ds, int port) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_disable(ocelot, port); +} + +static void felix_get_strings(struct dsa_switch *ds, int port, + u32 stringset, u8 *data) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_get_strings(ocelot, port, stringset, data); +} + +static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_get_ethtool_stats(ocelot, port, data); +} + +static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_get_sset_count(ocelot, port, sset); +} + +static int felix_get_ts_info(struct dsa_switch *ds, int port, + struct ethtool_ts_info *info) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_get_ts_info(ocelot, port, info); +} + +static int felix_init_structs(struct felix *felix, int num_phys_ports) +{ + struct ocelot *ocelot = &felix->ocelot; + resource_size_t base; + int port, i, err; + + ocelot->num_phys_ports = num_phys_ports; + ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports, + sizeof(struct ocelot_port *), GFP_KERNEL); + if (!ocelot->ports) + return -ENOMEM; + + ocelot->map = felix->info->map; + ocelot->stats_layout = felix->info->stats_layout; + ocelot->num_stats = felix->info->num_stats; + ocelot->shared_queue_sz = felix->info->shared_queue_sz; + ocelot->ops = felix->info->ops; + + base = pci_resource_start(felix->pdev, felix->info->pci_bar); + + for (i = 0; i < TARGET_MAX; i++) { + struct regmap *target; + struct resource *res; + + if (!felix->info->target_io_res[i].name) + continue; + + res = &felix->info->target_io_res[i]; + res->flags = IORESOURCE_MEM; + res->start += base; + res->end += base; + + target = ocelot_regmap_init(ocelot, res); + if (IS_ERR(target)) { + dev_err(ocelot->dev, + "Failed to map device memory space\n"); + return PTR_ERR(target); + } + + ocelot->targets[i] = target; + } + + err = ocelot_regfields_init(ocelot, felix->info->regfields); + if (err) { + dev_err(ocelot->dev, "failed to init reg fields map\n"); + return err; + } + + for (port = 0; port < num_phys_ports; port++) { + struct ocelot_port *ocelot_port; + void __iomem *port_regs; + struct resource *res; + + ocelot_port = devm_kzalloc(ocelot->dev, + sizeof(struct ocelot_port), + GFP_KERNEL); + if (!ocelot_port) { + dev_err(ocelot->dev, + "failed to allocate port memory\n"); + return -ENOMEM; + } + + res = &felix->info->port_io_res[port]; + res->flags = IORESOURCE_MEM; + res->start += base; + res->end += base; + + port_regs = devm_ioremap_resource(ocelot->dev, res); + if (IS_ERR(port_regs)) { + dev_err(ocelot->dev, + "failed to map registers for port %d\n", port); + return PTR_ERR(port_regs); + } + + ocelot_port->ocelot = ocelot; + ocelot_port->regs = port_regs; + ocelot->ports[port] = ocelot_port; + } + + return 0; +} + +/* Hardware initialization done here so that we can allocate structures with + * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing + * us to 
allocate structures twice (leak memory) and map PCI memory twice + * (which will not work). + */ +static int felix_setup(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + struct felix *felix = ocelot_to_felix(ocelot); + int port, err; + + err = felix_init_structs(felix, ds->num_ports); + if (err) + return err; + + ocelot_init(ocelot); + + for (port = 0; port < ds->num_ports; port++) { + ocelot_init_port(ocelot, port); + + if (dsa_is_cpu_port(ds, port)) + ocelot_set_cpu_port(ocelot, port, + OCELOT_TAG_PREFIX_NONE, + OCELOT_TAG_PREFIX_LONG); + } + + return 0; +} + +static void felix_teardown(struct dsa_switch *ds) +{ + struct ocelot *ocelot = ds->priv; + + /* stop workqueue thread */ + ocelot_deinit(ocelot); +} + +static int felix_hwtstamp_get(struct dsa_switch *ds, int port, + struct ifreq *ifr) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_hwstamp_get(ocelot, port, ifr); +} + +static int felix_hwtstamp_set(struct dsa_switch *ds, int port, + struct ifreq *ifr) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_hwstamp_set(ocelot, port, ifr); +} + +static bool felix_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct ocelot *ocelot = ds->priv; + u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN; + u32 tstamp_lo, tstamp_hi; + struct timespec64 ts; + u64 tstamp, val; + + ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); + tstamp = ktime_set(ts.tv_sec, ts.tv_nsec); + + packing(extraction, &val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0); + tstamp_lo = (u32)val; + + tstamp_hi = tstamp >> 32; + if ((tstamp & 0xffffffff) < tstamp_lo) + tstamp_hi--; + + tstamp = ((u64)tstamp_hi << 32) | tstamp_lo; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamps->hwtstamp = tstamp; + return false; +} + +static bool felix_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *clone, unsigned int type) +{ + struct ocelot *ocelot = ds->priv; + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + if (!ocelot_port_add_txtstamp_skb(ocelot_port, clone)) + return true; + + return false; +} + +static const struct dsa_switch_ops felix_switch_ops = { + .get_tag_protocol = felix_get_tag_protocol, + .setup = felix_setup, + .teardown = felix_teardown, + .set_ageing_time = felix_set_ageing_time, + .get_strings = felix_get_strings, + .get_ethtool_stats = felix_get_ethtool_stats, + .get_sset_count = felix_get_sset_count, + .get_ts_info = felix_get_ts_info, + .adjust_link = felix_adjust_link, + .port_enable = felix_port_enable, + .port_disable = felix_port_disable, + .port_fdb_dump = felix_fdb_dump, + .port_fdb_add = felix_fdb_add, + .port_fdb_del = felix_fdb_del, + .port_bridge_join = felix_bridge_join, + .port_bridge_leave = felix_bridge_leave, + .port_stp_state_set = felix_bridge_stp_state_set, + .port_vlan_prepare = felix_vlan_prepare, + .port_vlan_filtering = felix_vlan_filtering, + .port_vlan_add = felix_vlan_add, + .port_vlan_del = felix_vlan_del, + .port_hwtstamp_get = felix_hwtstamp_get, + .port_hwtstamp_set = felix_hwtstamp_set, + .port_rxtstamp = felix_rxtstamp, + .port_txtstamp = felix_txtstamp, +}; + +static struct felix_info *felix_instance_tbl[] = { + [FELIX_INSTANCE_VSC9959] = &felix_info_vsc9959, +}; + +static irqreturn_t felix_irq_handler(int irq, void *data) +{ + struct ocelot *ocelot = (struct ocelot *)data; + + /* The INTB interrupt is used for both PTP TX timestamp interrupt + * and preemption status change interrupt on each 
port. + * + * - Get txtstamp if have + * - TODO: handle preemption. Without handling it, driver may get + * interrupt storm. + */ + + ocelot_get_txtstamp(ocelot); + + return IRQ_HANDLED; +} + +static int felix_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + enum felix_instance instance = id->driver_data; + struct dsa_switch *ds; + struct ocelot *ocelot; + struct felix *felix; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + goto err_pci_enable; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + felix = kzalloc(sizeof(struct felix), GFP_KERNEL); + if (!felix) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate driver memory\n"); + goto err_alloc_felix; + } + + pci_set_drvdata(pdev, felix); + ocelot = &felix->ocelot; + ocelot->dev = &pdev->dev; + felix->pdev = pdev; + felix->info = felix_instance_tbl[instance]; + + pci_set_master(pdev); + + err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL, + &felix_irq_handler, IRQF_ONESHOT, + "felix-intb", ocelot); + if (err) { + dev_err(&pdev->dev, "Failed to request irq\n"); + goto err_alloc_irq; + } + + ocelot->ptp = 1; + + ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); + if (!ds) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate DSA switch\n"); + goto err_alloc_ds; + } + + ds->dev = &pdev->dev; + ds->num_ports = felix->info->num_ports; + ds->ops = &felix_switch_ops; + ds->priv = ocelot; + felix->ds = ds; + + err = dsa_register_switch(ds); + if (err) { + dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + goto err_register_ds; + } + + return 0; + +err_register_ds: + kfree(ds); +err_alloc_ds: +err_alloc_irq: +err_alloc_felix: + kfree(felix); +err_dma: + pci_disable_device(pdev); +err_pci_enable: + return err; +} + +static void felix_pci_remove(struct pci_dev *pdev) +{ + struct felix *felix; + + felix = pci_get_drvdata(pdev); + + dsa_unregister_switch(felix->ds); + + kfree(felix->ds); + kfree(felix); + + pci_disable_device(pdev); +} + +static struct pci_device_id felix_ids[] = { + { + /* NXP LS1028A */ + PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0), + .driver_data = FELIX_INSTANCE_VSC9959, + }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, felix_ids); + +static struct pci_driver felix_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = felix_ids, + .probe = felix_pci_probe, + .remove = felix_pci_remove, +}; + +module_pci_driver(felix_pci_driver); + +MODULE_DESCRIPTION("Felix Switch driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h new file mode 100644 index 000000000000..204296e51d0c --- /dev/null +++ b/drivers/net/dsa/ocelot/felix.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright 2019 NXP Semiconductors + */ +#ifndef _MSCC_FELIX_H +#define _MSCC_FELIX_H + +#define ocelot_to_felix(o) container_of((o), struct felix, ocelot) + +/* Platform-specific information */ +struct felix_info { + struct resource *target_io_res; + struct resource *port_io_res; + const struct reg_field *regfields; + const u32 *const *map; + const struct ocelot_ops *ops; + int shared_queue_sz; + const struct ocelot_stat_layout *stats_layout; + unsigned int num_stats; + int num_ports; + int pci_bar; +}; + +extern struct felix_info 
felix_info_vsc9959; + +enum felix_instance { + FELIX_INSTANCE_VSC9959 = 0, +}; + +/* DSA glue / front-end for struct ocelot */ +struct felix { + struct dsa_switch *ds; + struct pci_dev *pdev; + struct felix_info *info; + struct ocelot ocelot; +}; + +#endif diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c new file mode 100644 index 000000000000..b9758b0d18c7 --- /dev/null +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -0,0 +1,583 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright 2017 Microsemi Corporation + * Copyright 2018-2019 NXP Semiconductors + */ +#include <soc/mscc/ocelot_sys.h> +#include <soc/mscc/ocelot.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include "felix.h" + +static const u32 vsc9959_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x0089a0), + REG(ANA_VLANMASK, 0x0089a4), + REG_RESERVED(ANA_PORT_B_DOMAIN), + REG(ANA_ANAGEFIL, 0x0089ac), + REG(ANA_ANEVENTS, 0x0089b0), + REG(ANA_STORMLIMIT_BURST, 0x0089b4), + REG(ANA_STORMLIMIT_CFG, 0x0089b8), + REG(ANA_ISOLATED_PORTS, 0x0089c8), + REG(ANA_COMMUNITY_PORTS, 0x0089cc), + REG(ANA_AUTOAGE, 0x0089d0), + REG(ANA_MACTOPTIONS, 0x0089d4), + REG(ANA_LEARNDISC, 0x0089d8), + REG(ANA_AGENCTRL, 0x0089dc), + REG(ANA_MIRRORPORTS, 0x0089e0), + REG(ANA_EMIRRORPORTS, 0x0089e4), + REG(ANA_FLOODING, 0x0089e8), + REG(ANA_FLOODING_IPMC, 0x008a08), + REG(ANA_SFLOW_CFG, 0x008a0c), + REG(ANA_PORT_MODE, 0x008a28), + REG(ANA_CUT_THRU_CFG, 0x008a48), + REG(ANA_PGID_PGID, 0x008400), + REG(ANA_TABLES_ANMOVED, 0x007f1c), + REG(ANA_TABLES_MACHDATA, 0x007f20), + REG(ANA_TABLES_MACLDATA, 0x007f24), + REG(ANA_TABLES_STREAMDATA, 0x007f28), + REG(ANA_TABLES_MACACCESS, 0x007f2c), + REG(ANA_TABLES_MACTINDX, 0x007f30), + REG(ANA_TABLES_VLANACCESS, 0x007f34), + REG(ANA_TABLES_VLANTIDX, 0x007f38), + REG(ANA_TABLES_ISDXACCESS, 0x007f3c), + REG(ANA_TABLES_ISDXTIDX, 0x007f40), + REG(ANA_TABLES_ENTRYLIM, 0x007f00), + REG(ANA_TABLES_PTP_ID_HIGH, 0x007f44), + REG(ANA_TABLES_PTP_ID_LOW, 0x007f48), + REG(ANA_TABLES_STREAMACCESS, 0x007f4c), + REG(ANA_TABLES_STREAMTIDX, 0x007f50), + REG(ANA_TABLES_SEQ_HISTORY, 0x007f54), + REG(ANA_TABLES_SEQ_MASK, 0x007f58), + REG(ANA_TABLES_SFID_MASK, 0x007f5c), + REG(ANA_TABLES_SFIDACCESS, 0x007f60), + REG(ANA_TABLES_SFIDTIDX, 0x007f64), + REG(ANA_MSTI_STATE, 0x008600), + REG(ANA_OAM_UPM_LM_CNT, 0x008000), + REG(ANA_SG_ACCESS_CTRL, 0x008a64), + REG(ANA_SG_CONFIG_REG_1, 0x007fb0), + REG(ANA_SG_CONFIG_REG_2, 0x007fb4), + REG(ANA_SG_CONFIG_REG_3, 0x007fb8), + REG(ANA_SG_CONFIG_REG_4, 0x007fbc), + REG(ANA_SG_CONFIG_REG_5, 0x007fc0), + REG(ANA_SG_GCL_GS_CONFIG, 0x007f80), + REG(ANA_SG_GCL_TI_CONFIG, 0x007f90), + REG(ANA_SG_STATUS_REG_1, 0x008980), + REG(ANA_SG_STATUS_REG_2, 0x008984), + REG(ANA_SG_STATUS_REG_3, 0x008988), + REG(ANA_PORT_VLAN_CFG, 0x007800), + REG(ANA_PORT_DROP_CFG, 0x007804), + REG(ANA_PORT_QOS_CFG, 0x007808), + REG(ANA_PORT_VCAP_CFG, 0x00780c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007810), + REG(ANA_PORT_VCAP_S2_CFG, 0x00781c), + REG(ANA_PORT_PCP_DEI_MAP, 0x007820), + REG(ANA_PORT_CPU_FWD_CFG, 0x007860), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007864), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007868), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00786c), + REG(ANA_PORT_PORT_CFG, 0x007870), + REG(ANA_PORT_POL_CFG, 0x007874), + REG(ANA_PORT_PTP_CFG, 0x007878), + REG(ANA_PORT_PTP_DLY1_CFG, 0x00787c), + REG(ANA_PORT_PTP_DLY2_CFG, 0x007880), + REG(ANA_PORT_SFID_CFG, 0x007884), + REG(ANA_PFC_PFC_CFG, 0x008800), + REG_RESERVED(ANA_PFC_PFC_TIMER), + REG_RESERVED(ANA_IPT_OAM_MEP_CFG), + 
REG_RESERVED(ANA_IPT_IPT), + REG_RESERVED(ANA_PPT_PPT), + REG_RESERVED(ANA_FID_MAP_FID_MAP), + REG(ANA_AGGR_CFG, 0x008a68), + REG(ANA_CPUQ_CFG, 0x008a6c), + REG_RESERVED(ANA_CPUQ_CFG2), + REG(ANA_CPUQ_8021_CFG, 0x008a74), + REG(ANA_DSCP_CFG, 0x008ab4), + REG(ANA_DSCP_REWR_CFG, 0x008bb4), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x008bf4), + REG(ANA_VCAP_RNG_VAL_CFG, 0x008c14), + REG_RESERVED(ANA_VRAP_CFG), + REG_RESERVED(ANA_VRAP_HDR_DATA), + REG_RESERVED(ANA_VRAP_HDR_MASK), + REG(ANA_DISCARD_CFG, 0x008c40), + REG(ANA_FID_CFG, 0x008c44), + REG(ANA_POL_PIR_CFG, 0x004000), + REG(ANA_POL_CIR_CFG, 0x004004), + REG(ANA_POL_MODE_CFG, 0x004008), + REG(ANA_POL_PIR_STATE, 0x00400c), + REG(ANA_POL_CIR_STATE, 0x004010), + REG_RESERVED(ANA_POL_STATE), + REG(ANA_POL_FLOWC, 0x008c48), + REG(ANA_POL_HYST, 0x008cb4), + REG_RESERVED(ANA_POL_MISC_CFG), +}; + +static const u32 vsc9959_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG_RESERVED(QS_INH_DBG), +}; + +static const u32 vsc9959_s2_regmap[] = { + REG(S2_CORE_UPDATE_CTRL, 0x000000), + REG(S2_CORE_MV_CFG, 0x000004), + REG(S2_CACHE_ENTRY_DAT, 0x000008), + REG(S2_CACHE_MASK_DAT, 0x000108), + REG(S2_CACHE_ACTION_DAT, 0x000208), + REG(S2_CACHE_CNT_DAT, 0x000308), + REG(S2_CACHE_TG_DAT, 0x000388), +}; + +static const u32 vsc9959_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x00f460), + REG(QSYS_SWITCH_PORT_MODE, 0x00f480), + REG(QSYS_STAT_CNT_CFG, 0x00f49c), + REG(QSYS_EEE_CFG, 0x00f4a0), + REG(QSYS_EEE_THRES, 0x00f4b8), + REG(QSYS_IGR_NO_SHARING, 0x00f4bc), + REG(QSYS_EGR_NO_SHARING, 0x00f4c0), + REG(QSYS_SW_STATUS, 0x00f4c4), + REG(QSYS_EXT_CPU_CFG, 0x00f4e0), + REG_RESERVED(QSYS_PAD_CFG), + REG(QSYS_CPU_GROUP_MAP, 0x00f4e8), + REG_RESERVED(QSYS_QMAP), + REG_RESERVED(QSYS_ISDX_SGRP), + REG_RESERVED(QSYS_TIMED_FRAME_ENTRY), + REG(QSYS_TFRM_MISC, 0x00f50c), + REG(QSYS_TFRM_PORT_DLY, 0x00f510), + REG(QSYS_TFRM_TIMER_CFG_1, 0x00f514), + REG(QSYS_TFRM_TIMER_CFG_2, 0x00f518), + REG(QSYS_TFRM_TIMER_CFG_3, 0x00f51c), + REG(QSYS_TFRM_TIMER_CFG_4, 0x00f520), + REG(QSYS_TFRM_TIMER_CFG_5, 0x00f524), + REG(QSYS_TFRM_TIMER_CFG_6, 0x00f528), + REG(QSYS_TFRM_TIMER_CFG_7, 0x00f52c), + REG(QSYS_TFRM_TIMER_CFG_8, 0x00f530), + REG(QSYS_RED_PROFILE, 0x00f534), + REG(QSYS_RES_QOS_MODE, 0x00f574), + REG(QSYS_RES_CFG, 0x00c000), + REG(QSYS_RES_STAT, 0x00c004), + REG(QSYS_EGR_DROP_MODE, 0x00f578), + REG(QSYS_EQ_CTRL, 0x00f57c), + REG_RESERVED(QSYS_EVENTS_CORE), + REG(QSYS_QMAXSDU_CFG_0, 0x00f584), + REG(QSYS_QMAXSDU_CFG_1, 0x00f5a0), + REG(QSYS_QMAXSDU_CFG_2, 0x00f5bc), + REG(QSYS_QMAXSDU_CFG_3, 0x00f5d8), + REG(QSYS_QMAXSDU_CFG_4, 0x00f5f4), + REG(QSYS_QMAXSDU_CFG_5, 0x00f610), + REG(QSYS_QMAXSDU_CFG_6, 0x00f62c), + REG(QSYS_QMAXSDU_CFG_7, 0x00f648), + REG(QSYS_PREEMPTION_CFG, 0x00f664), + REG_RESERVED(QSYS_CIR_CFG), + REG(QSYS_EIR_CFG, 0x000004), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG_RESERVED(QSYS_SE_CONNECT), + REG(QSYS_SE_DLB_SENSE, 0x000040), + REG(QSYS_CIR_STATE, 0x000044), + REG(QSYS_EIR_STATE, 0x000048), + REG_RESERVED(QSYS_SE_STATE), + REG(QSYS_HSCH_MISC_CFG, 0x00f67c), + REG(QSYS_TAG_CONFIG, 0x00f680), + REG(QSYS_TAS_PARAM_CFG_CTRL, 0x00f698), + REG(QSYS_PORT_MAX_SDU, 0x00f69c), + REG(QSYS_PARAM_CFG_REG_1, 0x00f440), + 
REG(QSYS_PARAM_CFG_REG_2, 0x00f444), + REG(QSYS_PARAM_CFG_REG_3, 0x00f448), + REG(QSYS_PARAM_CFG_REG_4, 0x00f44c), + REG(QSYS_PARAM_CFG_REG_5, 0x00f450), + REG(QSYS_GCL_CFG_REG_1, 0x00f454), + REG(QSYS_GCL_CFG_REG_2, 0x00f458), + REG(QSYS_PARAM_STATUS_REG_1, 0x00f400), + REG(QSYS_PARAM_STATUS_REG_2, 0x00f404), + REG(QSYS_PARAM_STATUS_REG_3, 0x00f408), + REG(QSYS_PARAM_STATUS_REG_4, 0x00f40c), + REG(QSYS_PARAM_STATUS_REG_5, 0x00f410), + REG(QSYS_PARAM_STATUS_REG_6, 0x00f414), + REG(QSYS_PARAM_STATUS_REG_7, 0x00f418), + REG(QSYS_PARAM_STATUS_REG_8, 0x00f41c), + REG(QSYS_PARAM_STATUS_REG_9, 0x00f420), + REG(QSYS_GCL_STATUS_REG_1, 0x00f424), + REG(QSYS_GCL_STATUS_REG_2, 0x00f428), +}; + +static const u32 vsc9959_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG(REW_PTP_CFG, 0x000050), + REG(REW_PTP_DLY1_CFG, 0x000054), + REG(REW_RED_TAG_CFG, 0x000058), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000410), + REG(REW_DSCP_REMAP_CFG, 0x000510), + REG_RESERVED(REW_STAT_CFG), + REG_RESERVED(REW_REW_STICKY), + REG_RESERVED(REW_PPT), +}; + +static const u32 vsc9959_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_1023, 0x000030), + REG(SYS_COUNT_RX_1024_1526, 0x000034), + REG(SYS_COUNT_RX_1527_MAX, 0x000038), + REG(SYS_COUNT_RX_LONGS, 0x000044), + REG(SYS_COUNT_TX_OCTETS, 0x000200), + REG(SYS_COUNT_TX_COLLISION, 0x000210), + REG(SYS_COUNT_TX_DROPS, 0x000214), + REG(SYS_COUNT_TX_64, 0x00021c), + REG(SYS_COUNT_TX_65_127, 0x000220), + REG(SYS_COUNT_TX_128_511, 0x000224), + REG(SYS_COUNT_TX_512_1023, 0x000228), + REG(SYS_COUNT_TX_1024_1526, 0x00022c), + REG(SYS_COUNT_TX_1527_MAX, 0x000230), + REG(SYS_COUNT_TX_AGING, 0x000278), + REG(SYS_RESET_CFG, 0x000e00), + REG(SYS_SR_ETYPE_CFG, 0x000e04), + REG(SYS_VLAN_ETYPE_CFG, 0x000e08), + REG(SYS_PORT_MODE, 0x000e0c), + REG(SYS_FRONT_PORT_MODE, 0x000e2c), + REG(SYS_FRM_AGING, 0x000e44), + REG(SYS_STAT_CFG, 0x000e48), + REG(SYS_SW_STATUS, 0x000e4c), + REG_RESERVED(SYS_MISC_CFG), + REG(SYS_REW_MAC_HIGH_CFG, 0x000e6c), + REG(SYS_REW_MAC_LOW_CFG, 0x000e84), + REG(SYS_TIMESTAMP_OFFSET, 0x000e9c), + REG(SYS_PAUSE_CFG, 0x000ea0), + REG(SYS_PAUSE_TOT_CFG, 0x000ebc), + REG(SYS_ATOP, 0x000ec0), + REG(SYS_ATOP_TOT_CFG, 0x000edc), + REG(SYS_MAC_FC_CFG, 0x000ee0), + REG(SYS_MMGT, 0x000ef8), + REG_RESERVED(SYS_MMGT_FAST), + REG_RESERVED(SYS_EVENTS_DIF), + REG_RESERVED(SYS_EVENTS_CORE), + REG_RESERVED(SYS_CNT), + REG(SYS_PTP_STATUS, 0x000f14), + REG(SYS_PTP_TXSTAMP, 0x000f18), + REG(SYS_PTP_NXT, 0x000f1c), + REG(SYS_PTP_CFG, 0x000f20), + REG(SYS_RAM_INIT, 0x000f24), + REG_RESERVED(SYS_CM_ADDR), + REG_RESERVED(SYS_CM_DATA_WR), + REG_RESERVED(SYS_CM_DATA_RD), + REG_RESERVED(SYS_CM_OP), + REG_RESERVED(SYS_CM_DATA), +}; + +static const u32 vsc9959_ptp_regmap[] = { + REG(PTP_PIN_CFG, 0x000000), + REG(PTP_PIN_TOD_SEC_MSB, 0x000004), + REG(PTP_PIN_TOD_SEC_LSB, 0x000008), + REG(PTP_PIN_TOD_NSEC, 0x00000c), + REG(PTP_CFG_MISC, 0x0000a0), + REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), + REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), +}; + +static const u32 vsc9959_gcb_regmap[] = { + REG(GCB_SOFT_RST, 0x000004), +}; + +static const u32 *vsc9959_regmap[] = { + [ANA] = 
vsc9959_ana_regmap, + [QS] = vsc9959_qs_regmap, + [QSYS] = vsc9959_qsys_regmap, + [REW] = vsc9959_rew_regmap, + [SYS] = vsc9959_sys_regmap, + [S2] = vsc9959_s2_regmap, + [PTP] = vsc9959_ptp_regmap, + [GCB] = vsc9959_gcb_regmap, +}; + +/* Addresses are relative to the PCI device's base address and + * will be fixed up at ioremap time. + */ +static struct resource vsc9959_target_io_res[] = { + [ANA] = { + .start = 0x0280000, + .end = 0x028ffff, + .name = "ana", + }, + [QS] = { + .start = 0x0080000, + .end = 0x00800ff, + .name = "qs", + }, + [QSYS] = { + .start = 0x0200000, + .end = 0x021ffff, + .name = "qsys", + }, + [REW] = { + .start = 0x0030000, + .end = 0x003ffff, + .name = "rew", + }, + [SYS] = { + .start = 0x0010000, + .end = 0x001ffff, + .name = "sys", + }, + [S2] = { + .start = 0x0060000, + .end = 0x00603ff, + .name = "s2", + }, + [PTP] = { + .start = 0x0090000, + .end = 0x00900cb, + .name = "ptp", + }, + [GCB] = { + .start = 0x0070000, + .end = 0x00701ff, + .name = "devcpu_gcb", + }, +}; + +static struct resource vsc9959_port_io_res[] = { + { + .start = 0x0100000, + .end = 0x010ffff, + .name = "port0", + }, + { + .start = 0x0110000, + .end = 0x011ffff, + .name = "port1", + }, + { + .start = 0x0120000, + .end = 0x012ffff, + .name = "port2", + }, + { + .start = 0x0130000, + .end = 0x013ffff, + .name = "port3", + }, + { + .start = 0x0140000, + .end = 0x014ffff, + .name = "port4", + }, + { + .start = 0x0150000, + .end = 0x015ffff, + .name = "port5", + }, +}; + +static const struct reg_field vsc9959_regfields[] = { + [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6), + [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5), + [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30), + [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 26, 26), + [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 24, 24), + [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 23, 23), + [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 22, 22), + [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 21, 21), + [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 20, 20), + [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 19, 19), + [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 18, 18), + [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 17, 17), + [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 15, 15), + [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 14, 14), + [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 13, 13), + [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 12, 12), + [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 11, 11), + [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 10, 10), + [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 9, 9), + [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 8, 8), + [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 7, 7), + [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), + [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), + [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 4, 4), + [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 3, 3), + [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 2, 2), + [ANA_ANEVENTS_SEQ_GEN_ERR_0] = REG_FIELD(ANA_ANEVENTS, 1, 1), + [ANA_ANEVENTS_SEQ_GEN_ERR_1] = REG_FIELD(ANA_ANEVENTS, 0, 0), + [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16), + [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12), + [ANA_TABLES_MACTINDX_M_INDEX] = 
REG_FIELD(ANA_TABLES_MACTINDX, 0, 10), + [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0), + [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0), +}; + +static const struct ocelot_stat_layout vsc9959_stats_layout[] = { + { .offset = 0x00, .name = "rx_octets", }, + { .offset = 0x01, .name = "rx_unicast", }, + { .offset = 0x02, .name = "rx_multicast", }, + { .offset = 0x03, .name = "rx_broadcast", }, + { .offset = 0x04, .name = "rx_shorts", }, + { .offset = 0x05, .name = "rx_fragments", }, + { .offset = 0x06, .name = "rx_jabbers", }, + { .offset = 0x07, .name = "rx_crc_align_errs", }, + { .offset = 0x08, .name = "rx_sym_errs", }, + { .offset = 0x09, .name = "rx_frames_below_65_octets", }, + { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", }, + { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", }, + { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", }, + { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", }, + { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", }, + { .offset = 0x0F, .name = "rx_frames_over_1526_octets", }, + { .offset = 0x10, .name = "rx_pause", }, + { .offset = 0x11, .name = "rx_control", }, + { .offset = 0x12, .name = "rx_longs", }, + { .offset = 0x13, .name = "rx_classified_drops", }, + { .offset = 0x14, .name = "rx_red_prio_0", }, + { .offset = 0x15, .name = "rx_red_prio_1", }, + { .offset = 0x16, .name = "rx_red_prio_2", }, + { .offset = 0x17, .name = "rx_red_prio_3", }, + { .offset = 0x18, .name = "rx_red_prio_4", }, + { .offset = 0x19, .name = "rx_red_prio_5", }, + { .offset = 0x1A, .name = "rx_red_prio_6", }, + { .offset = 0x1B, .name = "rx_red_prio_7", }, + { .offset = 0x1C, .name = "rx_yellow_prio_0", }, + { .offset = 0x1D, .name = "rx_yellow_prio_1", }, + { .offset = 0x1E, .name = "rx_yellow_prio_2", }, + { .offset = 0x1F, .name = "rx_yellow_prio_3", }, + { .offset = 0x20, .name = "rx_yellow_prio_4", }, + { .offset = 0x21, .name = "rx_yellow_prio_5", }, + { .offset = 0x22, .name = "rx_yellow_prio_6", }, + { .offset = 0x23, .name = "rx_yellow_prio_7", }, + { .offset = 0x24, .name = "rx_green_prio_0", }, + { .offset = 0x25, .name = "rx_green_prio_1", }, + { .offset = 0x26, .name = "rx_green_prio_2", }, + { .offset = 0x27, .name = "rx_green_prio_3", }, + { .offset = 0x28, .name = "rx_green_prio_4", }, + { .offset = 0x29, .name = "rx_green_prio_5", }, + { .offset = 0x2A, .name = "rx_green_prio_6", }, + { .offset = 0x2B, .name = "rx_green_prio_7", }, + { .offset = 0x80, .name = "tx_octets", }, + { .offset = 0x81, .name = "tx_unicast", }, + { .offset = 0x82, .name = "tx_multicast", }, + { .offset = 0x83, .name = "tx_broadcast", }, + { .offset = 0x84, .name = "tx_collision", }, + { .offset = 0x85, .name = "tx_drops", }, + { .offset = 0x86, .name = "tx_pause", }, + { .offset = 0x87, .name = "tx_frames_below_65_octets", }, + { .offset = 0x88, .name = "tx_frames_65_to_127_octets", }, + { .offset = 0x89, .name = "tx_frames_128_255_octets", }, + { .offset = 0x8B, .name = "tx_frames_256_511_octets", }, + { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", }, + { .offset = 0x8D, .name = "tx_frames_over_1526_octets", }, + { .offset = 0x8E, .name = "tx_yellow_prio_0", }, + { .offset = 0x8F, .name = "tx_yellow_prio_1", }, + { .offset = 0x90, .name = "tx_yellow_prio_2", }, + { .offset = 0x91, .name = "tx_yellow_prio_3", }, + { .offset = 0x92, .name = "tx_yellow_prio_4", }, + { .offset = 0x93, .name = "tx_yellow_prio_5", }, + { .offset = 0x94, .name = "tx_yellow_prio_6", }, + { .offset = 0x95, .name = "tx_yellow_prio_7", }, + { .offset 
= 0x96, .name = "tx_green_prio_0", }, + { .offset = 0x97, .name = "tx_green_prio_1", }, + { .offset = 0x98, .name = "tx_green_prio_2", }, + { .offset = 0x99, .name = "tx_green_prio_3", }, + { .offset = 0x9A, .name = "tx_green_prio_4", }, + { .offset = 0x9B, .name = "tx_green_prio_5", }, + { .offset = 0x9C, .name = "tx_green_prio_6", }, + { .offset = 0x9D, .name = "tx_green_prio_7", }, + { .offset = 0x9E, .name = "tx_aged", }, + { .offset = 0x100, .name = "drop_local", }, + { .offset = 0x101, .name = "drop_tail", }, + { .offset = 0x102, .name = "drop_yellow_prio_0", }, + { .offset = 0x103, .name = "drop_yellow_prio_1", }, + { .offset = 0x104, .name = "drop_yellow_prio_2", }, + { .offset = 0x105, .name = "drop_yellow_prio_3", }, + { .offset = 0x106, .name = "drop_yellow_prio_4", }, + { .offset = 0x107, .name = "drop_yellow_prio_5", }, + { .offset = 0x108, .name = "drop_yellow_prio_6", }, + { .offset = 0x109, .name = "drop_yellow_prio_7", }, + { .offset = 0x10A, .name = "drop_green_prio_0", }, + { .offset = 0x10B, .name = "drop_green_prio_1", }, + { .offset = 0x10C, .name = "drop_green_prio_2", }, + { .offset = 0x10D, .name = "drop_green_prio_3", }, + { .offset = 0x10E, .name = "drop_green_prio_4", }, + { .offset = 0x10F, .name = "drop_green_prio_5", }, + { .offset = 0x110, .name = "drop_green_prio_6", }, + { .offset = 0x111, .name = "drop_green_prio_7", }, +}; + +#define VSC9959_INIT_TIMEOUT 50000 +#define VSC9959_GCB_RST_SLEEP 100 +#define VSC9959_SYS_RAMINIT_SLEEP 80 + +static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot) +{ + int val; + + regmap_field_read(ocelot->regfields[GCB_SOFT_RST_SWC_RST], &val); + + return val; +} + +static int vsc9959_sys_ram_init_status(struct ocelot *ocelot) +{ + return ocelot_read(ocelot, SYS_RAM_INIT); +} + +static int vsc9959_reset(struct ocelot *ocelot) +{ + int val, err; + + /* soft-reset the switch core */ + regmap_field_write(ocelot->regfields[GCB_SOFT_RST_SWC_RST], 1); + + err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val, + VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "timeout: switch core reset\n"); + return err; + } + + /* initialize switch mem ~40us */ + ocelot_write(ocelot, SYS_RAM_INIT_RAM_INIT, SYS_RAM_INIT); + err = readx_poll_timeout(vsc9959_sys_ram_init_status, ocelot, val, !val, + VSC9959_SYS_RAMINIT_SLEEP, + VSC9959_INIT_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "timeout: switch sram init\n"); + return err; + } + + /* enable switch core */ + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1); + + return 0; +} + +static const struct ocelot_ops vsc9959_ops = { + .reset = vsc9959_reset, +}; + +struct felix_info felix_info_vsc9959 = { + .target_io_res = vsc9959_target_io_res, + .port_io_res = vsc9959_port_io_res, + .regfields = vsc9959_regfields, + .map = vsc9959_regmap, + .ops = &vsc9959_ops, + .stats_layout = vsc9959_stats_layout, + .num_stats = ARRAY_SIZE(vsc9959_stats_layout), + .shared_queue_sz = 128 * 1024, + .num_ports = 6, + .pci_bar = 4, +}; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index b00274caae4f..e548289df31e 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -639,7 +639,8 @@ static int qca8k_setup(struct dsa_switch *ds) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - int ret, i, phy_mode = -1; + phy_interface_t phy_mode = PHY_INTERFACE_MODE_NA; + int ret, i; u32 mask; /* Make sure that port 0 is the cpu port */ @@ -661,10 +662,10 @@ qca8k_setup(struct dsa_switch *ds) return ret; /* Initialize 
CPU port pad mode (xMII type, delays...) */ - phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); - if (phy_mode < 0) { + ret = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn, &phy_mode); + if (ret) { pr_err("Can't find phy-mode for master device\n"); - return phy_mode; + return ret; } ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode); if (ret < 0) @@ -1077,10 +1078,13 @@ qca8k_sw_probe(struct mdio_device *mdiodev) if (id != QCA8K_ID_QCA8337) return -ENODEV; - priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS); + priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), + QCA8K_NUM_PORTS); if (!priv->ds) return -ENOMEM; + priv->ds->dev = &mdiodev->dev; + priv->ds->num_ports = QCA8K_NUM_PORTS; priv->ds->priv = priv; priv->ops = qca8k_switch_ops; priv->ds->ops = &priv->ops; diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c index dc0509c02d29..fae188c60191 100644 --- a/drivers/net/dsa/realtek-smi-core.c +++ b/drivers/net/dsa/realtek-smi-core.c @@ -444,9 +444,12 @@ static int realtek_smi_probe(struct platform_device *pdev) return ret; } - smi->ds = dsa_switch_alloc(dev, smi->num_ports); + smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL); if (!smi->ds) return -ENOMEM; + + smi->ds->dev = dev; + smi->ds->num_ports = smi->num_ports; smi->ds->priv = smi; smi->ds->ops = var->ds_ops; diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig index ffac0ea4e8d5..0fe1ae173aa1 100644 --- a/drivers/net/dsa/sja1105/Kconfig +++ b/drivers/net/dsa/sja1105/Kconfig @@ -28,6 +28,7 @@ config NET_DSA_SJA1105_TAS bool "Support for the Time-Aware Scheduler on NXP SJA1105" depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m + depends on NET_DSA_SJA1105_PTP help This enables support for the TTEthernet-based egress scheduling engine in the SJA1105 DSA driver, which is controlled using a diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index fbb564c3beb8..d801fc204d19 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -20,7 +20,13 @@ */ #define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10) +typedef enum { + SPI_READ = 0, + SPI_WRITE = 1, +} sja1105_spi_rw_mode_t; + #include "sja1105_tas.h" +#include "sja1105_ptp.h" /* Keeps the different addresses between E/T and P/Q/R/S */ struct sja1105_regs { @@ -32,9 +38,10 @@ struct sja1105_regs { u64 config; u64 rmii_pll1; u64 ptp_control; - u64 ptpclk; + u64 ptpclkval; u64 ptpclkrate; - u64 ptptsclk; + u64 ptpclkcorp; + u64 ptpschtm; u64 ptpegr_ts[SJA1105_NUM_PORTS]; u64 pad_mii_tx[SJA1105_NUM_PORTS]; u64 pad_mii_id[SJA1105_NUM_PORTS]; @@ -71,14 +78,15 @@ struct sja1105_info { const struct sja1105_dynamic_table_ops *dyn_ops; const struct sja1105_table_ops *static_ops; const struct sja1105_regs *regs; - int (*ptp_cmd)(const void *ctx, const void *data); - int (*reset_cmd)(const void *ctx, const void *data); + int (*reset_cmd)(struct dsa_switch *ds); int (*setup_rgmii_delay)(const void *ctx, int port); /* Prototypes from include/net/dsa.h */ int (*fdb_add_cmd)(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int (*fdb_del_cmd)(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); + void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd, + enum packing_op op); const char *name; }; @@ -91,26 +99,16 @@ struct sja1105_private { struct spi_device *spidev; struct dsa_switch *ds; struct sja1105_port ports[SJA1105_NUM_PORTS]; - struct ptp_clock_info 
ptp_caps; - struct ptp_clock *clock; - /* The cycle counter translates the PTP timestamps (based on - * a free-running counter) into a software time domain. - */ - struct cyclecounter tstamp_cc; - struct timecounter tstamp_tc; - struct delayed_work refresh_work; - /* Serializes all operations on the cycle counter */ - struct mutex ptp_lock; /* Serializes transmission of management frames so that * the switch doesn't confuse them with one another. */ struct mutex mgmt_lock; struct sja1105_tagger_data tagger_data; + struct sja1105_ptp_data ptp_data; struct sja1105_tas_data tas_data; }; #include "sja1105_dynamic_config.h" -#include "sja1105_ptp.h" struct sja1105_spi_message { u64 access; @@ -118,24 +116,27 @@ struct sja1105_spi_message { u64 address; }; -typedef enum { - SPI_READ = 0, - SPI_WRITE = 1, -} sja1105_spi_rw_mode_t; - /* From sja1105_main.c */ -int sja1105_static_config_reload(struct sja1105_private *priv); +enum sja1105_reset_reason { + SJA1105_VLAN_FILTERING = 0, + SJA1105_RX_HWTSTAMPING, + SJA1105_AGEING_TIME, + SJA1105_SCHEDULING, +}; + +int sja1105_static_config_reload(struct sja1105_private *priv, + enum sja1105_reset_reason reason); /* From sja1105_spi.c */ -int sja1105_spi_send_packed_buf(const struct sja1105_private *priv, - sja1105_spi_rw_mode_t rw, u64 reg_addr, - void *packed_buf, size_t size_bytes); -int sja1105_spi_send_int(const struct sja1105_private *priv, - sja1105_spi_rw_mode_t rw, u64 reg_addr, - u64 *value, u64 size_bytes); -int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv, - sja1105_spi_rw_mode_t rw, u64 base_addr, - void *packed_buf, u64 buf_len); +int sja1105_xfer_buf(const struct sja1105_private *priv, + sja1105_spi_rw_mode_t rw, u64 reg_addr, + u8 *buf, size_t len); +int sja1105_xfer_u32(const struct sja1105_private *priv, + sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value, + struct ptp_system_timestamp *ptp_sts); +int sja1105_xfer_u64(const struct sja1105_private *priv, + sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value, + struct ptp_system_timestamp *ptp_sts); int sja1105_static_config_upload(struct sja1105_private *priv); int sja1105_inhibit_tx(const struct sja1105_private *priv, unsigned long port_bitmap, bool tx_inhibited); diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c index 608126a15d72..9082e52b55e9 100644 --- a/drivers/net/dsa/sja1105/sja1105_clocking.c +++ b/drivers/net/dsa/sja1105/sja1105_clocking.c @@ -118,9 +118,8 @@ static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port, idiv.pd = enabled ? 0 : 1; /* Power down? 
*/ sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->cgu_idiv[port], packed_buf, - SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static void @@ -167,9 +166,8 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv, mii_tx_clk.pd = 0; /* Power Down off => enabled */ sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->mii_tx_clk[port], packed_buf, - SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static int @@ -192,9 +190,8 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port) mii_rx_clk.pd = 0; /* Power Down off => enabled */ sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->mii_rx_clk[port], packed_buf, - SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static int @@ -217,9 +214,8 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port) mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->mii_ext_tx_clk[port], - packed_buf, SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static int @@ -242,9 +238,8 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port) mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->mii_ext_rx_clk[port], - packed_buf, SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port, @@ -337,9 +332,8 @@ static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv, txc.pd = 0; sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->rgmii_tx_clk[port], - packed_buf, SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } /* AGU */ @@ -383,9 +377,8 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv, pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */ sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->pad_mii_tx[port], - packed_buf, SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static void @@ -405,7 +398,7 @@ sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd, } /* Valid range in degrees is an integer between 73.8 and 101.7 */ -static inline u64 sja1105_rgmii_delay(u64 phase) +static u64 sja1105_rgmii_delay(u64 phase) { /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9. 
* To avoid floating point operations we'll multiply by 10 @@ -442,9 +435,8 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port) pad_mii_id.txc_pd = 1; sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK); - rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->pad_mii_id[port], - packed_buf, SJA1105_SIZE_CGU_CMD); + rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port], + packed_buf, SJA1105_SIZE_CGU_CMD); if (rc < 0) return rc; @@ -459,9 +451,8 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port) } sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->pad_mii_id[port], - packed_buf, SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port, @@ -547,9 +538,8 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv, ref_clk.pd = 0; /* Power Down off => enabled */ sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->rmii_ref_clk[port], - packed_buf, SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static int @@ -565,9 +555,8 @@ sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port) ext_tx_clk.pd = 0; /* Power Down off => enabled */ sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, - regs->rmii_ext_tx_clk[port], - packed_buf, SJA1105_SIZE_CGU_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port], + packed_buf, SJA1105_SIZE_CGU_CMD); } static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv) @@ -595,8 +584,8 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv) pll.pd = 0x1; sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK); - rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1, - packed_buf, SJA1105_SIZE_CGU_CMD); + rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf, + SJA1105_SIZE_CGU_CMD); if (rc < 0) { dev_err(dev, "failed to configure PLL1 for 50MHz\n"); return rc; @@ -606,8 +595,8 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv) pll.pd = 0x0; sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK); - rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1, - packed_buf, SJA1105_SIZE_CGU_CMD); + rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf, + SJA1105_SIZE_CGU_CMD); if (rc < 0) { dev_err(dev, "failed to enable PLL1\n"); return rc; diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 91da430045ff..25381bd65ed7 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -686,8 +686,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv, ops->entry_packing(packed_buf, entry, PACK); /* Send SPI write operation: read config table entry */ - rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr, - packed_buf, ops->packed_size); + rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, + ops->packed_size); if (rc < 0) return rc; @@ -698,8 +698,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv, memset(packed_buf, 0, ops->packed_size); /* Retrieve the read operation's result */ - rc 
= sja1105_spi_send_packed_buf(priv, SPI_READ, ops->addr, - packed_buf, ops->packed_size); + rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf, + ops->packed_size); if (rc < 0) return rc; @@ -771,8 +771,8 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv, ops->entry_packing(packed_buf, entry, PACK); /* Send SPI write operation: read config table entry */ - rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr, - packed_buf, ops->packed_size); + rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf, + ops->packed_size); if (rc < 0) return rc; diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c index ab581a28cd41..064301cc7d5b 100644 --- a/drivers/net/dsa/sja1105/sja1105_ethtool.c +++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c @@ -167,8 +167,8 @@ static int sja1105_port_status_get_mac(struct sja1105_private *priv, int rc; /* MAC area */ - rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac[port], - packed_buf, SJA1105_SIZE_MAC_AREA); + rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac[port], packed_buf, + SJA1105_SIZE_MAC_AREA); if (rc < 0) return rc; @@ -185,8 +185,8 @@ static int sja1105_port_status_get_hl1(struct sja1105_private *priv, u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0}; int rc; - rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl1[port], - packed_buf, SJA1105_SIZE_HL1_AREA); + rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl1[port], packed_buf, + SJA1105_SIZE_HL1_AREA); if (rc < 0) return rc; @@ -203,8 +203,8 @@ static int sja1105_port_status_get_hl2(struct sja1105_private *priv, u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0}; int rc; - rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl2[port], - packed_buf, SJA1105_SIZE_HL2_AREA); + rc = sja1105_xfer_buf(priv, SPI_READ, regs->mac_hl2[port], packed_buf, + SJA1105_SIZE_HL2_AREA); if (rc < 0) return rc; @@ -215,8 +215,8 @@ static int sja1105_port_status_get_hl2(struct sja1105_private *priv, priv->info->device_id == SJA1105T_DEVICE_ID) return 0; - rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->qlevel[port], - packed_buf, SJA1105_SIZE_QLEVEL_AREA); + rc = sja1105_xfer_buf(priv, SPI_READ, regs->qlevel[port], packed_buf, + SJA1105_SIZE_QLEVEL_AREA); if (rc < 0) return rc; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 7687ddcae159..a51ac088c0bc 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -382,8 +382,8 @@ static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv) static int sja1105_init_general_params(struct sja1105_private *priv) { struct sja1105_general_params_entry default_general_params = { - /* Disallow dynamic changing of the mirror port */ - .mirr_ptacu = 0, + /* Allow dynamic changing of the mirror port */ + .mirr_ptacu = true, .switchid = priv->ds->index, /* Priority queue for link-local management frames * (both ingress to and egress from CPU - PTP, STP etc) @@ -403,8 +403,8 @@ static int sja1105_init_general_params(struct sja1105_private *priv) * by installing a temporary 'management route' */ .host_port = dsa_upstream_port(priv->ds, 0), - /* Same as host port */ - .mirr_port = dsa_upstream_port(priv->ds, 0), + /* Default to an invalid value */ + .mirr_port = SJA1105_NUM_PORTS, /* Link-local traffic received on casc_port will be forwarded * to host_port without embedding the source port and device ID * info in the destination MAC address (presumably because it @@ -458,9 +458,8 @@ 
static int sja1105_init_general_params(struct sja1105_private *priv) #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000) -static inline void -sja1105_setup_policer(struct sja1105_l2_policing_entry *policing, - int index) +static void sja1105_setup_policer(struct sja1105_l2_policing_entry *policing, + int index) { policing[index].sharindx = index; policing[index].smax = 65535; /* Burst size in bytes */ @@ -507,39 +506,6 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv) return 0; } -static int sja1105_init_avb_params(struct sja1105_private *priv, - bool on) -{ - struct sja1105_avb_params_entry *avb; - struct sja1105_table *table; - - table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; - - /* Discard previous AVB Parameters Table */ - if (table->entry_count) { - kfree(table->entries); - table->entry_count = 0; - } - - /* Configure the reception of meta frames only if requested */ - if (!on) - return 0; - - table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT, - table->ops->unpacked_entry_size, GFP_KERNEL); - if (!table->entries) - return -ENOMEM; - - table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT; - - avb = table->entries; - - avb->destmeta = SJA1105_META_DMAC; - avb->srcmeta = SJA1105_META_SMAC; - - return 0; -} - static int sja1105_static_config_load(struct sja1105_private *priv, struct sja1105_dt_port *ports) { @@ -580,9 +546,6 @@ static int sja1105_static_config_load(struct sja1105_private *priv, rc = sja1105_init_general_params(priv); if (rc < 0) return rc; - rc = sja1105_init_avb_params(priv, false); - if (rc < 0) - return rc; /* Send initial configuration to hardware via SPI */ return sja1105_static_config_upload(priv); @@ -594,15 +557,15 @@ static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int i; for (i = 0; i < SJA1105_NUM_PORTS; i++) { - if (ports->role == XMII_MAC) + if (ports[i].role == XMII_MAC) continue; - if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || - ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID || + ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) priv->rgmii_rx_delay[i] = true; - if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || - ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID) + if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID || + ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID) priv->rgmii_tx_delay[i] = true; if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) && @@ -621,8 +584,9 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, for_each_child_of_node(ports_node, child) { struct device_node *phy_node; - int phy_mode; + phy_interface_t phy_mode; u32 index; + int err; /* Get switch port number from DT */ if (of_property_read_u32(child, "reg", &index) < 0) { @@ -633,8 +597,8 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, } /* Get PHY mode from DT */ - phy_mode = of_get_phy_mode(child); - if (phy_mode < 0) { + err = of_get_phy_mode(child, &phy_mode); + if (err) { dev_err(dev, "Failed to read phy-mode or " "phy-interface-type property for port %d\n", index); @@ -951,7 +915,7 @@ sja1105_static_fdb_change(struct sja1105_private *priv, int port, * For the placement of a newly learnt FDB entry, the switch selects the bin * based on a hash function, and the way within that bin incrementally. 
*/ -static inline int sja1105et_fdb_index(int bin, int way) +static int sja1105et_fdb_index(int bin, int way) { return bin * SJA1105ET_FDB_BIN_SIZE + way; } @@ -1095,7 +1059,7 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, l2_lookup.vlanid = vid; l2_lookup.iotag = SJA1105_S_TAG; l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); - if (dsa_port_is_vlan_filtering(&ds->ports[port])) { + if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) { l2_lookup.mask_vlanid = VLAN_VID_MASK; l2_lookup.mask_iotag = BIT(0); } else { @@ -1158,7 +1122,7 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, l2_lookup.vlanid = vid; l2_lookup.iotag = SJA1105_S_TAG; l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); - if (dsa_port_is_vlan_filtering(&ds->ports[port])) { + if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) { l2_lookup.mask_vlanid = VLAN_VID_MASK; l2_lookup.mask_iotag = BIT(0); } else { @@ -1204,7 +1168,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port, * for what gets printed in 'bridge fdb show'. In the case of zero, * no VID gets printed at all. */ - if (!dsa_port_is_vlan_filtering(&ds->ports[port])) + if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) vid = 0; return priv->info->fdb_add_cmd(ds, port, addr, vid); @@ -1215,7 +1179,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; - if (!dsa_port_is_vlan_filtering(&ds->ports[port])) + if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) vid = 0; return priv->info->fdb_del_cmd(ds, port, addr, vid); @@ -1254,7 +1218,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, u64_to_ether_addr(l2_lookup.macaddr, macaddr); /* We need to hide the dsa_8021q VLANs from the user. */ - if (!dsa_port_is_vlan_filtering(&ds->ports[port])) + if (!dsa_port_is_vlan_filtering(dsa_to_port(ds, port))) l2_lookup.vlanid = 0; cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); } @@ -1377,17 +1341,33 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port, sja1105_bridge_member(ds, port, br, false); } +static const char * const sja1105_reset_reasons[] = { + [SJA1105_VLAN_FILTERING] = "VLAN filtering", + [SJA1105_RX_HWTSTAMPING] = "RX timestamping", + [SJA1105_AGEING_TIME] = "Ageing time", + [SJA1105_SCHEDULING] = "Time-aware scheduling", +}; + /* For situations where we need to change a setting at runtime that is only * available through the static configuration, resetting the switch in order * to upload the new static config is unavoidable. Back up the settings we * modify at runtime (currently only MAC) and restore them after uploading, * such that this operation is relatively seamless. 
*/ -int sja1105_static_config_reload(struct sja1105_private *priv) +int sja1105_static_config_reload(struct sja1105_private *priv, + enum sja1105_reset_reason reason) { + struct ptp_system_timestamp ptp_sts_before; + struct ptp_system_timestamp ptp_sts_after; struct sja1105_mac_config_entry *mac; int speed_mbps[SJA1105_NUM_PORTS]; + struct dsa_switch *ds = priv->ds; + s64 t1, t2, t3, t4; + s64 t12, t34; int rc, i; + s64 now; + + mutex_lock(&priv->mgmt_lock); mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; @@ -1401,10 +1381,41 @@ int sja1105_static_config_reload(struct sja1105_private *priv) mac[i].speed = SJA1105_SPEED_AUTO; } + /* No PTP operations can run right now */ + mutex_lock(&priv->ptp_data.lock); + + rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before); + if (rc < 0) + goto out_unlock_ptp; + /* Reset switch and send updated static configuration */ rc = sja1105_static_config_upload(priv); if (rc < 0) - goto out; + goto out_unlock_ptp; + + rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after); + if (rc < 0) + goto out_unlock_ptp; + + t1 = timespec64_to_ns(&ptp_sts_before.pre_ts); + t2 = timespec64_to_ns(&ptp_sts_before.post_ts); + t3 = timespec64_to_ns(&ptp_sts_after.pre_ts); + t4 = timespec64_to_ns(&ptp_sts_after.post_ts); + /* Mid point, corresponds to pre-reset PTPCLKVAL */ + t12 = t1 + (t2 - t1) / 2; + /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */ + t34 = t3 + (t4 - t3) / 2; + /* Advance PTPCLKVAL by the time it took since its readout */ + now += (t34 - t12); + + __sja1105_ptp_adjtime(ds, now); + +out_unlock_ptp: + mutex_unlock(&priv->ptp_data.lock); + + dev_info(priv->ds->dev, + "Reset switch and programmed static config. Reason: %s\n", + sja1105_reset_reasons[reason]); /* Configure the CGU (PLLs) for MII and RMII PHYs. * For these interfaces there is no dynamic configuration @@ -1420,6 +1431,8 @@ int sja1105_static_config_reload(struct sja1105_private *priv) goto out; } out: + mutex_unlock(&priv->mgmt_lock); + return rc; } @@ -1598,7 +1611,7 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) l2_lookup_params = table->entries; l2_lookup_params->shared_learn = !enabled; - rc = sja1105_static_config_reload(priv); + rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING); if (rc) dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); @@ -1687,7 +1700,7 @@ static int sja1105_setup(struct dsa_switch *ds) return rc; } - rc = sja1105_ptp_clock_register(priv); + rc = sja1105_ptp_clock_register(ds); if (rc < 0) { dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc); return rc; @@ -1729,9 +1742,7 @@ static void sja1105_teardown(struct dsa_switch *ds) struct sja1105_private *priv = ds->priv; sja1105_tas_teardown(ds); - cancel_work_sync(&priv->tagger_data.rxtstamp_work); - skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue); - sja1105_ptp_clock_unregister(priv); + sja1105_ptp_clock_unregister(ds); sja1105_static_config_free(&priv->static_config); } @@ -1743,7 +1754,7 @@ static int sja1105_port_enable(struct dsa_switch *ds, int port, if (!dsa_is_user_port(ds, port)) return 0; - slave = ds->ports[port].slave; + slave = dsa_to_port(ds, port)->slave; slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; @@ -1775,7 +1786,7 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, } /* Transfer skb to the host port. 
*/ - dsa_enqueue_skb(skb, ds->ports[port].slave); + dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave); /* Wait until the switch has processed the frame */ do { @@ -1817,11 +1828,8 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port, { struct sja1105_private *priv = ds->priv; struct sja1105_port *sp = &priv->ports[port]; - struct skb_shared_hwtstamps shwt = {0}; int slot = sp->mgmt_slot; struct sk_buff *clone; - u64 now, ts; - int rc; /* The tragic fact about the switch having 4x2 slots for installing * management routes is that all of them except one are actually @@ -1847,27 +1855,8 @@ static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port, if (!clone) goto out; - skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; - - mutex_lock(&priv->ptp_lock); + sja1105_ptp_txtstamp_skb(ds, slot, clone); - now = priv->tstamp_cc.read(&priv->tstamp_cc); - - rc = sja1105_ptpegr_ts_poll(priv, slot, &ts); - if (rc < 0) { - dev_err(ds->dev, "xmit: timed out polling for tstamp\n"); - kfree_skb(clone); - goto out_unlock_ptp; - } - - ts = sja1105_tstamp_reconstruct(priv, now, ts); - ts = timecounter_cyc2time(&priv->tstamp_tc, ts); - - shwt.hwtstamp = ns_to_ktime(ts); - skb_complete_tx_timestamp(clone, &shwt); - -out_unlock_ptp: - mutex_unlock(&priv->ptp_lock); out: mutex_unlock(&priv->mgmt_lock); return NETDEV_TX_OK; @@ -1894,183 +1883,97 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds, l2_lookup_params->maxage = maxage; - return sja1105_static_config_reload(priv); + return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME); } -/* Must be called only with priv->tagger_data.state bit - * SJA1105_HWTS_RX_EN cleared +static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, + enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_TAPRIO: + return sja1105_setup_tc_taprio(ds, port, type_data); + default: + return -EOPNOTSUPP; + } +} + +/* We have a single mirror (@to) port, but can configure ingress and egress + * mirroring on all other (@from) ports. + * We need to allow mirroring rules only as long as the @to port is always the + * same, and we need to unset the @to port from mirr_port only when there is no + * mirroring rule that references it. 
*/ -static int sja1105_change_rxtstamping(struct sja1105_private *priv, - bool on) +static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to, + bool ingress, bool enabled) { struct sja1105_general_params_entry *general_params; + struct sja1105_mac_config_entry *mac; struct sja1105_table *table; + bool already_enabled; + u64 new_mirr_port; int rc; table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; general_params = table->entries; - general_params->send_meta1 = on; - general_params->send_meta0 = on; - rc = sja1105_init_avb_params(priv, on); - if (rc < 0) - return rc; + mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries; - /* Initialize the meta state machine to a known state */ - if (priv->tagger_data.stampable_skb) { - kfree_skb(priv->tagger_data.stampable_skb); - priv->tagger_data.stampable_skb = NULL; + already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS); + if (already_enabled && enabled && general_params->mirr_port != to) { + dev_err(priv->ds->dev, + "Delete mirroring rules towards port %llu first\n", + general_params->mirr_port); + return -EBUSY; } - return sja1105_static_config_reload(priv); -} + new_mirr_port = to; + if (!enabled) { + bool keep = false; + int port; -static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, - struct ifreq *ifr) -{ - struct sja1105_private *priv = ds->priv; - struct hwtstamp_config config; - bool rx_on; - int rc; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - priv->ports[port].hwts_tx_en = false; - break; - case HWTSTAMP_TX_ON: - priv->ports[port].hwts_tx_en = true; - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - rx_on = false; - break; - default: - rx_on = true; - break; + /* Anybody still referencing mirr_port? */ + for (port = 0; port < SJA1105_NUM_PORTS; port++) { + if (mac[port].ing_mirr || mac[port].egr_mirr) { + keep = true; + break; + } + } + /* Unset already_enabled for next time */ + if (!keep) + new_mirr_port = SJA1105_NUM_PORTS; } + if (new_mirr_port != general_params->mirr_port) { + general_params->mirr_port = new_mirr_port; - if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) { - clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); - - rc = sja1105_change_rxtstamping(priv, rx_on); - if (rc < 0) { - dev_err(ds->dev, - "Failed to change RX timestamping: %d\n", rc); + rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS, + 0, general_params, true); + if (rc < 0) return rc; - } - if (rx_on) - set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); } - if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) - return -EFAULT; - return 0; -} - -static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, - struct ifreq *ifr) -{ - struct sja1105_private *priv = ds->priv; - struct hwtstamp_config config; - - config.flags = 0; - if (priv->ports[port].hwts_tx_en) - config.tx_type = HWTSTAMP_TX_ON; + if (ingress) + mac[from].ing_mirr = enabled; else - config.tx_type = HWTSTAMP_TX_OFF; - if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; - else - config.rx_filter = HWTSTAMP_FILTER_NONE; - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; -} - -#define to_tagger(d) \ - container_of((d), struct sja1105_tagger_data, rxtstamp_work) -#define to_sja1105(d) \ - container_of((d), struct sja1105_private, tagger_data) - -static void sja1105_rxtstamp_work(struct work_struct *work) -{ - struct sja1105_tagger_data *data = to_tagger(work); - struct sja1105_private *priv = to_sja1105(data); - struct sk_buff *skb; - u64 now; - - mutex_lock(&priv->ptp_lock); - - while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) { - struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb); - u64 ts; - - now = priv->tstamp_cc.read(&priv->tstamp_cc); - - *shwt = (struct skb_shared_hwtstamps) {0}; + mac[from].egr_mirr = enabled; - ts = SJA1105_SKB_CB(skb)->meta_tstamp; - ts = sja1105_tstamp_reconstruct(priv, now, ts); - ts = timecounter_cyc2time(&priv->tstamp_tc, ts); - - shwt->hwtstamp = ns_to_ktime(ts); - netif_rx_ni(skb); - } - - mutex_unlock(&priv->ptp_lock); + return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from, + &mac[from], true); } -/* Called from dsa_skb_defer_rx_timestamp */ -static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, - struct sk_buff *skb, unsigned int type) +static int sja1105_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) { - struct sja1105_private *priv = ds->priv; - struct sja1105_tagger_data *data = &priv->tagger_data; - - if (!test_bit(SJA1105_HWTS_RX_EN, &data->state)) - return false; - - /* We need to read the full PTP clock to reconstruct the Rx - * timestamp. For that we need a sleepable context. - */ - skb_queue_tail(&data->skb_rxtstamp_queue, skb); - schedule_work(&data->rxtstamp_work); - return true; + return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, + ingress, true); } -/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone - * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit - * callback, where we will timestamp it synchronously. 
- */ -static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port, - struct sk_buff *skb, unsigned int type) +static void sja1105_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) { - struct sja1105_private *priv = ds->priv; - struct sja1105_port *sp = &priv->ports[port]; - - if (!sp->hwts_tx_en) - return false; - - return true; -} - -static int sja1105_port_setup_tc(struct dsa_switch *ds, int port, - enum tc_setup_type type, - void *type_data) -{ - switch (type) { - case TC_SETUP_QDISC_TAPRIO: - return sja1105_setup_tc_taprio(ds, port, type_data); - default: - return -EOPNOTSUPP; - } + sja1105_mirror_apply(ds->priv, port, mirror->to_local_port, + mirror->ingress, false); } static const struct dsa_switch_ops sja1105_switch_ops = { @@ -2106,6 +2009,8 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .port_rxtstamp = sja1105_port_rxtstamp, .port_txtstamp = sja1105_port_txtstamp, .port_setup_tc = sja1105_port_setup_tc, + .port_mirror_add = sja1105_mirror_add, + .port_mirror_del = sja1105_mirror_del, }; static int sja1105_check_device_id(struct sja1105_private *priv) @@ -2113,23 +2018,23 @@ static int sja1105_check_device_id(struct sja1105_private *priv) const struct sja1105_regs *regs = priv->info->regs; u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0}; struct device *dev = &priv->spidev->dev; - u64 device_id; + u32 device_id; u64 part_no; int rc; - rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id, - &device_id, SJA1105_SIZE_DEVICE_ID); + rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id, + NULL); if (rc < 0) return rc; if (device_id != priv->info->device_id) { - dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n", + dev_err(dev, "Expected device ID 0x%llx but read 0x%x\n", priv->info->device_id, device_id); return -ENODEV; } - rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id, - prod_id, SJA1105_SIZE_DEVICE_ID); + rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, + SJA1105_SIZE_DEVICE_ID); if (rc < 0) return rc; @@ -2193,32 +2098,37 @@ static int sja1105_probe(struct spi_device *spi) dev_info(dev, "Probed switch chip: %s\n", priv->info->name); - ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS); + ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); if (!ds) return -ENOMEM; + ds->dev = dev; + ds->num_ports = SJA1105_NUM_PORTS; ds->ops = &sja1105_switch_ops; ds->priv = priv; priv->ds = ds; tagger_data = &priv->tagger_data; - skb_queue_head_init(&tagger_data->skb_rxtstamp_queue); - INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work); - spin_lock_init(&tagger_data->meta_lock); + + mutex_init(&priv->ptp_data.lock); + mutex_init(&priv->mgmt_lock); + + sja1105_tas_setup(ds); + + rc = dsa_register_switch(priv->ds); + if (rc) + return rc; /* Connections between dsa_port and sja1105_port */ for (i = 0; i < SJA1105_NUM_PORTS; i++) { struct sja1105_port *sp = &priv->ports[i]; - ds->ports[i].priv = sp; - sp->dp = &ds->ports[i]; + dsa_to_port(ds, i)->priv = sp; + sp->dp = dsa_to_port(ds, i); sp->data = tagger_data; } - mutex_init(&priv->mgmt_lock); - - sja1105_tas_setup(ds); - return dsa_register_switch(priv->ds); + return 0; } static int sja1105_remove(struct spi_device *spi) diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index d8e8dd59f3d1..54258a25031d 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com> 
*/ +#include <linux/spi/spi.h> #include "sja1105.h" /* The adjfine API clamps ppb between [-32,768,000, 32,768,000], and @@ -13,24 +14,6 @@ #define SJA1105_MAX_ADJ_PPB 32000000 #define SJA1105_SIZE_PTP_CMD 4 -/* Timestamps are in units of 8 ns clock ticks (equivalent to a fixed - * 125 MHz clock) so the scale factor (MULT / SHIFT) needs to be 8. - * Furthermore, wisely pick SHIFT as 28 bits, which translates - * MULT into 2^31 (0x80000000). This is the same value around which - * the hardware PTPCLKRATE is centered, so the same ppb conversion - * arithmetic can be reused. - */ -#define SJA1105_CC_SHIFT 28 -#define SJA1105_CC_MULT (8 << SJA1105_CC_SHIFT) - -/* Having 33 bits of cycle counter left until a 64-bit overflow during delta - * conversion, we multiply this by the 8 ns counter resolution and arrive at - * a comfortable 68.71 second refresh interval until the delta would cause - * an integer overflow, in absence of any other readout. - * Approximate to 1 minute. - */ -#define SJA1105_REFRESH_INTERVAL (HZ * 60) - /* This range is actually +/- SJA1105_MAX_ADJ_PPB * divided by 1000 (ppb -> ppm) and with a 16-bit * "fractional" part (actually fixed point). @@ -41,7 +24,7 @@ * * This forgoes a "ppb" numeric representation (up to NSEC_PER_SEC) * and defines the scaling factor between scaled_ppm and the actual - * frequency adjustments (both cycle counter and hardware). + * frequency adjustments of the PHC. * * ptpclkrate = scaled_ppm * 2^31 / (10^6 * 2^16) * simplifies to @@ -49,22 +32,154 @@ */ #define SJA1105_CC_MULT_NUM (1 << 9) #define SJA1105_CC_MULT_DEM 15625 +#define SJA1105_CC_MULT 0x80000000 -#define ptp_to_sja1105(d) container_of((d), struct sja1105_private, ptp_caps) -#define cc_to_sja1105(d) container_of((d), struct sja1105_private, tstamp_cc) -#define dw_to_sja1105(d) container_of((d), struct sja1105_private, refresh_work) - -struct sja1105_ptp_cmd { - u64 resptp; /* reset */ +enum sja1105_ptp_clk_mode { + PTP_ADD_MODE = 1, + PTP_SET_MODE = 0, }; +#define ptp_caps_to_data(d) \ + container_of((d), struct sja1105_ptp_data, caps) +#define ptp_data_to_sja1105(d) \ + container_of((d), struct sja1105_private, ptp_data) + +static int sja1105_init_avb_params(struct sja1105_private *priv, + bool on) +{ + struct sja1105_avb_params_entry *avb; + struct sja1105_table *table; + + table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS]; + + /* Discard previous AVB Parameters Table */ + if (table->entry_count) { + kfree(table->entries); + table->entry_count = 0; + } + + /* Configure the reception of meta frames only if requested */ + if (!on) + return 0; + + table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT, + table->ops->unpacked_entry_size, GFP_KERNEL); + if (!table->entries) + return -ENOMEM; + + table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT; + + avb = table->entries; + + avb->destmeta = SJA1105_META_DMAC; + avb->srcmeta = SJA1105_META_SMAC; + + return 0; +} + +/* Must be called only with priv->tagger_data.state bit + * SJA1105_HWTS_RX_EN cleared + */ +static int sja1105_change_rxtstamping(struct sja1105_private *priv, + bool on) +{ + struct sja1105_general_params_entry *general_params; + struct sja1105_table *table; + int rc; + + table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; + general_params = table->entries; + general_params->send_meta1 = on; + general_params->send_meta0 = on; + + rc = sja1105_init_avb_params(priv, on); + if (rc < 0) + return rc; + + /* Initialize the meta state machine to a known state */ + if (priv->tagger_data.stampable_skb) { + 
kfree_skb(priv->tagger_data.stampable_skb); + priv->tagger_data.stampable_skb = NULL; + } + + return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING); +} + +int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) +{ + struct sja1105_private *priv = ds->priv; + struct hwtstamp_config config; + bool rx_on; + int rc; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->ports[port].hwts_tx_en = false; + break; + case HWTSTAMP_TX_ON: + priv->ports[port].hwts_tx_en = true; + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + rx_on = false; + break; + default: + rx_on = true; + break; + } + + if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) { + clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); + + rc = sja1105_change_rxtstamping(priv, rx_on); + if (rc < 0) { + dev_err(ds->dev, + "Failed to change RX timestamping: %d\n", rc); + return rc; + } + if (rx_on) + set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state); + } + + if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) + return -EFAULT; + return 0; +} + +int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr) +{ + struct sja1105_private *priv = ds->priv; + struct hwtstamp_config config; + + config.flags = 0; + if (priv->ports[port].hwts_tx_en) + config.tx_type = HWTSTAMP_TX_ON; + else + config.tx_type = HWTSTAMP_TX_OFF; + if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + else + config.rx_filter = HWTSTAMP_FILTER_NONE; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + int sja1105_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *info) { struct sja1105_private *priv = ds->priv; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; /* Called during cleanup */ - if (!priv->clock) + if (!ptp_data->clock) return -ENODEV; info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | @@ -74,42 +189,58 @@ int sja1105_get_ts_info(struct dsa_switch *ds, int port, (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT); - info->phc_index = ptp_clock_index(priv->clock); + info->phc_index = ptp_clock_index(ptp_data->clock); return 0; } -int sja1105et_ptp_cmd(const void *ctx, const void *data) +void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd, + enum packing_op op) { - const struct sja1105_ptp_cmd *cmd = data; - const struct sja1105_private *priv = ctx; - const struct sja1105_regs *regs = priv->info->regs; const int size = SJA1105_SIZE_PTP_CMD; - u8 buf[SJA1105_SIZE_PTP_CMD] = {0}; /* No need to keep this as part of the structure */ u64 valid = 1; - sja1105_pack(buf, &valid, 31, 31, size); - sja1105_pack(buf, &cmd->resptp, 2, 2, size); - - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control, - buf, SJA1105_SIZE_PTP_CMD); + sja1105_packing(buf, &valid, 31, 31, size, op); + sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op); + sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op); + sja1105_packing(buf, &cmd->resptp, 2, 2, size, op); + sja1105_packing(buf, &cmd->corrclk4ts, 1, 1, size, op); + sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op); } -int sja1105pqrs_ptp_cmd(const void *ctx, const void *data) +void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd, + enum packing_op op) { - const struct 
sja1105_ptp_cmd *cmd = data; - const struct sja1105_private *priv = ctx; - const struct sja1105_regs *regs = priv->info->regs; const int size = SJA1105_SIZE_PTP_CMD; - u8 buf[SJA1105_SIZE_PTP_CMD] = {0}; /* No need to keep this as part of the structure */ u64 valid = 1; - sja1105_pack(buf, &valid, 31, 31, size); - sja1105_pack(buf, &cmd->resptp, 3, 3, size); + sja1105_packing(buf, &valid, 31, 31, size, op); + sja1105_packing(buf, &cmd->ptpstrtsch, 30, 30, size, op); + sja1105_packing(buf, &cmd->ptpstopsch, 29, 29, size, op); + sja1105_packing(buf, &cmd->resptp, 3, 3, size, op); + sja1105_packing(buf, &cmd->corrclk4ts, 2, 2, size, op); + sja1105_packing(buf, &cmd->ptpclkadd, 0, 0, size, op); +} + +int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd, + sja1105_spi_rw_mode_t rw) +{ + const struct sja1105_private *priv = ds->priv; + const struct sja1105_regs *regs = priv->info->regs; + u8 buf[SJA1105_SIZE_PTP_CMD] = {0}; + int rc; + + if (rw == SPI_WRITE) + priv->info->ptp_cmd_packing(buf, cmd, PACK); + + rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf, + SJA1105_SIZE_PTP_CMD); - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->ptp_control, - buf, SJA1105_SIZE_PTP_CMD); + if (rw == SPI_READ) + priv->info->ptp_cmd_packing(buf, cmd, UNPACK); + + return rc; } /* The switch returns partial timestamps (24 bits for SJA1105 E/T, which wrap @@ -126,9 +257,10 @@ int sja1105pqrs_ptp_cmd(const void *ctx, const void *data) * Must be called within one wraparound period of the partial timestamp since * it was generated by the MAC. */ -u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now, - u64 ts_partial) +static u64 sja1105_tstamp_reconstruct(struct dsa_switch *ds, u64 now, + u64 ts_partial) { + struct sja1105_private *priv = ds->priv; u64 partial_tstamp_mask = CYCLECOUNTER_MASK(priv->info->ptp_ts_bits); u64 ts_reconstructed; @@ -170,8 +302,9 @@ u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now, * To have common code for E/T and P/Q/R/S for reading the timestamp, * we need to juggle with the offset and the bit indices. 
*/ -int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts) +static int sja1105_ptpegr_ts_poll(struct dsa_switch *ds, int port, u64 *ts) { + struct sja1105_private *priv = ds->priv; const struct sja1105_regs *regs = priv->info->regs; int tstamp_bit_start, tstamp_bit_end; int timeout = 10; @@ -180,10 +313,8 @@ int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts) int rc; do { - rc = sja1105_spi_send_packed_buf(priv, SPI_READ, - regs->ptpegr_ts[port], - packed_buf, - priv->info->ptpegr_ts_bytes); + rc = sja1105_xfer_buf(priv, SPI_READ, regs->ptpegr_ts[port], + packed_buf, priv->info->ptpegr_ts_bytes); if (rc < 0) return rc; @@ -216,177 +347,350 @@ int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts) return 0; } -int sja1105_ptp_reset(struct sja1105_private *priv) +/* Caller must hold ptp_data->lock */ +static int sja1105_ptpclkval_read(struct sja1105_private *priv, u64 *ticks, + struct ptp_system_timestamp *ptp_sts) +{ + const struct sja1105_regs *regs = priv->info->regs; + + return sja1105_xfer_u64(priv, SPI_READ, regs->ptpclkval, ticks, + ptp_sts); +} + +/* Caller must hold ptp_data->lock */ +static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks, + struct ptp_system_timestamp *ptp_sts) { + const struct sja1105_regs *regs = priv->info->regs; + + return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpclkval, &ticks, + ptp_sts); +} + +#define rxtstamp_to_tagger(d) \ + container_of((d), struct sja1105_tagger_data, rxtstamp_work) +#define tagger_to_sja1105(d) \ + container_of((d), struct sja1105_private, tagger_data) + +static void sja1105_rxtstamp_work(struct work_struct *work) +{ + struct sja1105_tagger_data *tagger_data = rxtstamp_to_tagger(work); + struct sja1105_private *priv = tagger_to_sja1105(tagger_data); + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; struct dsa_switch *ds = priv->ds; - struct sja1105_ptp_cmd cmd = {0}; + struct sk_buff *skb; + + mutex_lock(&ptp_data->lock); + + while ((skb = skb_dequeue(&tagger_data->skb_rxtstamp_queue)) != NULL) { + struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb); + u64 ticks, ts; + int rc; + + rc = sja1105_ptpclkval_read(priv, &ticks, NULL); + if (rc < 0) { + dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc); + kfree_skb(skb); + continue; + } + + *shwt = (struct skb_shared_hwtstamps) {0}; + + ts = SJA1105_SKB_CB(skb)->meta_tstamp; + ts = sja1105_tstamp_reconstruct(ds, ticks, ts); + + shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts)); + netif_rx_ni(skb); + } + + mutex_unlock(&ptp_data->lock); +} + +/* Called from dsa_skb_defer_rx_timestamp */ +bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_tagger_data *tagger_data = &priv->tagger_data; + + if (!test_bit(SJA1105_HWTS_RX_EN, &tagger_data->state)) + return false; + + /* We need to read the full PTP clock to reconstruct the Rx + * timestamp. For that we need a sleepable context. + */ + skb_queue_tail(&tagger_data->skb_rxtstamp_queue, skb); + schedule_work(&tagger_data->rxtstamp_work); + return true; +} + +/* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone + * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit + * callback, where we will timestamp it synchronously. 
+ */ +bool sja1105_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_port *sp = &priv->ports[port]; + + if (!sp->hwts_tx_en) + return false; + + return true; +} + +static int sja1105_ptp_reset(struct dsa_switch *ds) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + struct sja1105_ptp_cmd cmd = ptp_data->cmd; int rc; - mutex_lock(&priv->ptp_lock); + mutex_lock(&ptp_data->lock); cmd.resptp = 1; + dev_dbg(ds->dev, "Resetting PTP clock\n"); - rc = priv->info->ptp_cmd(priv, &cmd); + rc = sja1105_ptp_commit(ds, &cmd, SPI_WRITE); - timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc, - ktime_to_ns(ktime_get_real())); + sja1105_tas_clockstep(priv->ds); - mutex_unlock(&priv->ptp_lock); + mutex_unlock(&ptp_data->lock); return rc; } -static int sja1105_ptp_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) +/* Caller must hold ptp_data->lock */ +int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns, + struct ptp_system_timestamp *ptp_sts) { - struct sja1105_private *priv = ptp_to_sja1105(ptp); - u64 ns; + struct sja1105_private *priv = ds->priv; + u64 ticks; + int rc; - mutex_lock(&priv->ptp_lock); - ns = timecounter_read(&priv->tstamp_tc); - mutex_unlock(&priv->ptp_lock); + rc = sja1105_ptpclkval_read(priv, &ticks, ptp_sts); + if (rc < 0) { + dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc); + return rc; + } - *ts = ns_to_timespec64(ns); + *ns = sja1105_ticks_to_ns(ticks); return 0; } +static int sja1105_ptp_gettimex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *ptp_sts) +{ + struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data); + u64 now = 0; + int rc; + + mutex_lock(&ptp_data->lock); + + rc = __sja1105_ptp_gettimex(priv->ds, &now, ptp_sts); + *ts = ns_to_timespec64(now); + + mutex_unlock(&ptp_data->lock); + + return rc; +} + +/* Caller must hold ptp_data->lock */ +static int sja1105_ptp_mode_set(struct sja1105_private *priv, + enum sja1105_ptp_clk_mode mode) +{ + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + + if (ptp_data->cmd.ptpclkadd == mode) + return 0; + + ptp_data->cmd.ptpclkadd = mode; + + return sja1105_ptp_commit(priv->ds, &ptp_data->cmd, SPI_WRITE); +} + +/* Write to PTPCLKVAL while PTPCLKADD is 0 */ +int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns, + struct ptp_system_timestamp *ptp_sts) +{ + struct sja1105_private *priv = ds->priv; + u64 ticks = ns_to_sja1105_ticks(ns); + int rc; + + rc = sja1105_ptp_mode_set(priv, PTP_SET_MODE); + if (rc < 0) { + dev_err(priv->ds->dev, "Failed to put PTPCLK in set mode\n"); + return rc; + } + + rc = sja1105_ptpclkval_write(priv, ticks, ptp_sts); + + sja1105_tas_clockstep(priv->ds); + + return rc; +} + static int sja1105_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { - struct sja1105_private *priv = ptp_to_sja1105(ptp); + struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data); u64 ns = timespec64_to_ns(ts); + int rc; - mutex_lock(&priv->ptp_lock); - timecounter_init(&priv->tstamp_tc, &priv->tstamp_cc, ns); - mutex_unlock(&priv->ptp_lock); + mutex_lock(&ptp_data->lock); - return 0; + rc = __sja1105_ptp_settime(priv->ds, ns, NULL); + + mutex_unlock(&ptp_data->lock); + + return rc; } static int sja1105_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { - struct 
sja1105_private *priv = ptp_to_sja1105(ptp); + struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data); + const struct sja1105_regs *regs = priv->info->regs; + u32 clkrate32; s64 clkrate; + int rc; clkrate = (s64)scaled_ppm * SJA1105_CC_MULT_NUM; clkrate = div_s64(clkrate, SJA1105_CC_MULT_DEM); - mutex_lock(&priv->ptp_lock); - - /* Force a readout to update the timer *before* changing its frequency. - * - * This way, its corrected time curve can at all times be modeled - * as a linear "A * x + B" function, where: - * - * - B are past frequency adjustments and offset shifts, all - * accumulated into the cycle_last variable. - * - * - A is the new frequency adjustments we're just about to set. - * - * Reading now makes B accumulate the correct amount of time, - * corrected at the old rate, before changing it. - * - * Hardware timestamps then become simple points on the curve and - * are approximated using the above function. This is still better - * than letting the switch take the timestamps using the hardware - * rate-corrected clock (PTPCLKVAL) - the comparison in this case would - * be that we're shifting the ruler at the same time as we're taking - * measurements with it. - * - * The disadvantage is that it's possible to receive timestamps when - * a frequency adjustment took place in the near past. - * In this case they will be approximated using the new ppb value - * instead of a compound function made of two segments (one at the old - * and the other at the new rate) - introducing some inaccuracy. - */ - timecounter_read(&priv->tstamp_tc); + /* Take a +/- value and re-center it around 2^31. */ + clkrate = SJA1105_CC_MULT + clkrate; + WARN_ON(abs(clkrate) >= GENMASK_ULL(31, 0)); + clkrate32 = clkrate; - priv->tstamp_cc.mult = SJA1105_CC_MULT + clkrate; + mutex_lock(&ptp_data->lock); - mutex_unlock(&priv->ptp_lock); + rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkrate, &clkrate32, + NULL); - return 0; + sja1105_tas_adjfreq(priv->ds); + + mutex_unlock(&ptp_data->lock); + + return rc; } -static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +/* Write to PTPCLKVAL while PTPCLKADD is 1 */ +int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta) { - struct sja1105_private *priv = ptp_to_sja1105(ptp); + struct sja1105_private *priv = ds->priv; + s64 ticks = ns_to_sja1105_ticks(delta); + int rc; - mutex_lock(&priv->ptp_lock); - timecounter_adjtime(&priv->tstamp_tc, delta); - mutex_unlock(&priv->ptp_lock); + rc = sja1105_ptp_mode_set(priv, PTP_ADD_MODE); + if (rc < 0) { + dev_err(priv->ds->dev, "Failed to put PTPCLK in add mode\n"); + return rc; + } - return 0; + rc = sja1105_ptpclkval_write(priv, ticks, NULL); + + sja1105_tas_clockstep(priv->ds); + + return rc; } -static u64 sja1105_ptptsclk_read(const struct cyclecounter *cc) +static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { - struct sja1105_private *priv = cc_to_sja1105(cc); - const struct sja1105_regs *regs = priv->info->regs; - u64 ptptsclk = 0; + struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp); + struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data); int rc; - rc = sja1105_spi_send_int(priv, SPI_READ, regs->ptptsclk, - &ptptsclk, 8); - if (rc < 0) - dev_err_ratelimited(priv->ds->dev, - "failed to read ptp cycle counter: %d\n", - rc); - return ptptsclk; -} + mutex_lock(&ptp_data->lock); -static void sja1105_ptp_overflow_check(struct work_struct *work) -{ - struct delayed_work *dw = to_delayed_work(work); - 
struct sja1105_private *priv = dw_to_sja1105(dw); - struct timespec64 ts; + rc = __sja1105_ptp_adjtime(priv->ds, delta); - sja1105_ptp_gettime(&priv->ptp_caps, &ts); + mutex_unlock(&ptp_data->lock); - schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL); + return rc; } -static const struct ptp_clock_info sja1105_ptp_caps = { - .owner = THIS_MODULE, - .name = "SJA1105 PHC", - .adjfine = sja1105_ptp_adjfine, - .adjtime = sja1105_ptp_adjtime, - .gettime64 = sja1105_ptp_gettime, - .settime64 = sja1105_ptp_settime, - .max_adj = SJA1105_MAX_ADJ_PPB, -}; - -int sja1105_ptp_clock_register(struct sja1105_private *priv) +int sja1105_ptp_clock_register(struct dsa_switch *ds) { - struct dsa_switch *ds = priv->ds; - - /* Set up the cycle counter */ - priv->tstamp_cc = (struct cyclecounter) { - .read = sja1105_ptptsclk_read, - .mask = CYCLECOUNTER_MASK(64), - .shift = SJA1105_CC_SHIFT, - .mult = SJA1105_CC_MULT, + struct sja1105_private *priv = ds->priv; + struct sja1105_tagger_data *tagger_data = &priv->tagger_data; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + + ptp_data->caps = (struct ptp_clock_info) { + .owner = THIS_MODULE, + .name = "SJA1105 PHC", + .adjfine = sja1105_ptp_adjfine, + .adjtime = sja1105_ptp_adjtime, + .gettimex64 = sja1105_ptp_gettimex, + .settime64 = sja1105_ptp_settime, + .max_adj = SJA1105_MAX_ADJ_PPB, }; - mutex_init(&priv->ptp_lock); - priv->ptp_caps = sja1105_ptp_caps; - priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev); - if (IS_ERR_OR_NULL(priv->clock)) - return PTR_ERR(priv->clock); + skb_queue_head_init(&tagger_data->skb_rxtstamp_queue); + INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work); + spin_lock_init(&tagger_data->meta_lock); - INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check); - schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL); + ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev); + if (IS_ERR_OR_NULL(ptp_data->clock)) + return PTR_ERR(ptp_data->clock); - return sja1105_ptp_reset(priv); + ptp_data->cmd.corrclk4ts = true; + ptp_data->cmd.ptpclkadd = PTP_SET_MODE; + + return sja1105_ptp_reset(ds); } -void sja1105_ptp_clock_unregister(struct sja1105_private *priv) +void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { - if (IS_ERR_OR_NULL(priv->clock)) + struct sja1105_private *priv = ds->priv; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + + if (IS_ERR_OR_NULL(ptp_data->clock)) return; - cancel_delayed_work_sync(&priv->refresh_work); - ptp_clock_unregister(priv->clock); - priv->clock = NULL; + cancel_work_sync(&priv->tagger_data.rxtstamp_work); + skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue); + ptp_clock_unregister(ptp_data->clock); + ptp_data->clock = NULL; +} + +void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, + struct sk_buff *skb) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + struct skb_shared_hwtstamps shwt = {0}; + u64 ticks, ts; + int rc; + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + mutex_lock(&ptp_data->lock); + + rc = sja1105_ptpclkval_read(priv, &ticks, NULL); + if (rc < 0) { + dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc); + kfree_skb(skb); + goto out; + } + + rc = sja1105_ptpegr_ts_poll(ds, slot, &ts); + if (rc < 0) { + dev_err(ds->dev, "timed out polling for tstamp\n"); + kfree_skb(skb); + goto out; + } + + ts = sja1105_tstamp_reconstruct(ds, ticks, ts); + + shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts)); + skb_complete_tx_timestamp(skb, 
&shwt); + +out: + mutex_unlock(&ptp_data->lock); } diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h index 394e12a6ad59..470f44b76318 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.h +++ b/drivers/net/dsa/sja1105/sja1105_ptp.h @@ -6,59 +6,136 @@ #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) -int sja1105_ptp_clock_register(struct sja1105_private *priv); +/* Timestamps are in units of 8 ns clock ticks (equivalent to + * a fixed 125 MHz clock). + */ +#define SJA1105_TICK_NS 8 + +static inline s64 ns_to_sja1105_ticks(s64 ns) +{ + return ns / SJA1105_TICK_NS; +} + +static inline s64 sja1105_ticks_to_ns(s64 ticks) +{ + return ticks * SJA1105_TICK_NS; +} -void sja1105_ptp_clock_unregister(struct sja1105_private *priv); +struct sja1105_ptp_cmd { + u64 ptpstrtsch; /* start schedule */ + u64 ptpstopsch; /* stop schedule */ + u64 resptp; /* reset */ + u64 corrclk4ts; /* use the corrected clock for timestamps */ + u64 ptpclkadd; /* enum sja1105_ptp_clk_mode */ +}; -int sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts); +struct sja1105_ptp_data { + struct ptp_clock_info caps; + struct ptp_clock *clock; + struct sja1105_ptp_cmd cmd; + /* Serializes all operations on the PTP hardware clock */ + struct mutex lock; +}; -int sja1105et_ptp_cmd(const void *ctx, const void *data); +int sja1105_ptp_clock_register(struct dsa_switch *ds); -int sja1105pqrs_ptp_cmd(const void *ctx, const void *data); +void sja1105_ptp_clock_unregister(struct dsa_switch *ds); + +void sja1105et_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd, + enum packing_op op); + +void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd, + enum packing_op op); int sja1105_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts); -u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, u64 now, - u64 ts_partial); +void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, + struct sk_buff *clone); + +bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type); + +bool sja1105_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type); + +int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr); + +int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr); + +int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns, + struct ptp_system_timestamp *sts); -int sja1105_ptp_reset(struct sja1105_private *priv); +int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns, + struct ptp_system_timestamp *ptp_sts); + +int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta); + +int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd, + sja1105_spi_rw_mode_t rw); #else -static inline int sja1105_ptp_clock_register(struct sja1105_private *priv) +struct sja1105_ptp_cmd; + +/* Structures cannot be empty in C. Bah! + * Keep the mutex as the only element, which is a bit more difficult to + * refactor out of sja1105_main.c anyway. 
+ */ +struct sja1105_ptp_data { + struct mutex lock; +}; + +static inline int sja1105_ptp_clock_register(struct dsa_switch *ds) { return 0; } -static inline void sja1105_ptp_clock_unregister(struct sja1105_private *priv) +static inline void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { } + +static inline void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, + struct sk_buff *clone) +{ +} + +static inline int __sja1105_ptp_gettimex(struct dsa_switch *ds, u64 *ns, + struct ptp_system_timestamp *sts) { - return; + return 0; } -static inline int -sja1105_ptpegr_ts_poll(struct sja1105_private *priv, int port, u64 *ts) +static inline int __sja1105_ptp_settime(struct dsa_switch *ds, u64 ns, + struct ptp_system_timestamp *ptp_sts) { return 0; } -static inline u64 sja1105_tstamp_reconstruct(struct sja1105_private *priv, - u64 now, u64 ts_partial) +static inline int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta) { return 0; } -static inline int sja1105_ptp_reset(struct sja1105_private *priv) +static inline int sja1105_ptp_commit(struct dsa_switch *ds, + struct sja1105_ptp_cmd *cmd, + sja1105_spi_rw_mode_t rw) { return 0; } -#define sja1105et_ptp_cmd NULL +#define sja1105et_ptp_cmd_packing NULL -#define sja1105pqrs_ptp_cmd NULL +#define sja1105pqrs_ptp_cmd_packing NULL #define sja1105_get_ts_info NULL +#define sja1105_port_rxtstamp NULL + +#define sja1105_port_txtstamp NULL + +#define sja1105_hwtstamp_get NULL + +#define sja1105_hwtstamp_set NULL + #endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */ #endif /* _SJA1105_PTP_H */ diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c index 58dd37ecde17..29b127f3bf9c 100644 --- a/drivers/net/dsa/sja1105/sja1105_spi.c +++ b/drivers/net/dsa/sja1105/sja1105_spi.c @@ -7,42 +7,15 @@ #include <linux/packing.h> #include "sja1105.h" -#define SJA1105_SIZE_PORT_CTRL 4 #define SJA1105_SIZE_RESET_CMD 4 #define SJA1105_SIZE_SPI_MSG_HEADER 4 #define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4) -#define SJA1105_SIZE_SPI_TRANSFER_MAX \ - (SJA1105_SIZE_SPI_MSG_HEADER + SJA1105_SIZE_SPI_MSG_MAXLEN) -static int sja1105_spi_transfer(const struct sja1105_private *priv, - const void *tx, void *rx, int size) -{ - struct spi_device *spi = priv->spidev; - struct spi_transfer transfer = { - .tx_buf = tx, - .rx_buf = rx, - .len = size, - }; - struct spi_message msg; - int rc; - - if (size > SJA1105_SIZE_SPI_TRANSFER_MAX) { - dev_err(&spi->dev, "SPI message (%d) longer than max of %d\n", - size, SJA1105_SIZE_SPI_TRANSFER_MAX); - return -EMSGSIZE; - } - - spi_message_init(&msg); - spi_message_add_tail(&transfer, &msg); - - rc = spi_sync(spi, &msg); - if (rc < 0) { - dev_err(&spi->dev, "SPI transfer failed: %d\n", rc); - return rc; - } - - return rc; -} +struct sja1105_chunk { + u8 *buf; + size_t len; + u64 reg_addr; +}; static void sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg) @@ -56,242 +29,219 @@ sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg) sja1105_pack(buf, &msg->address, 24, 4, size); } +#define sja1105_hdr_xfer(xfers, chunk) \ + ((xfers) + 2 * (chunk)) +#define sja1105_chunk_xfer(xfers, chunk) \ + ((xfers) + 2 * (chunk) + 1) +#define sja1105_hdr_buf(hdr_bufs, chunk) \ + ((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER) + /* If @rw is: * - SPI_WRITE: creates and sends an SPI write message at absolute - * address reg_addr, taking size_bytes from *packed_buf + * address reg_addr, taking @len bytes from *buf * - SPI_READ: creates and sends an SPI read message from 
absolute - * address reg_addr, writing size_bytes into *packed_buf - * - * This function should only be called if it is priorly known that - * @size_bytes is smaller than SIZE_SPI_MSG_MAXLEN. Larger packed buffers - * are chunked in smaller pieces by sja1105_spi_send_long_packed_buf below. + * address reg_addr, writing @len bytes into *buf */ -int sja1105_spi_send_packed_buf(const struct sja1105_private *priv, - sja1105_spi_rw_mode_t rw, u64 reg_addr, - void *packed_buf, size_t size_bytes) +static int sja1105_xfer(const struct sja1105_private *priv, + sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf, + size_t len, struct ptp_system_timestamp *ptp_sts) { - u8 tx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0}; - u8 rx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0}; - const int msg_len = size_bytes + SJA1105_SIZE_SPI_MSG_HEADER; - struct sja1105_spi_message msg = {0}; - int rc; + struct sja1105_chunk chunk = { + .len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN), + .reg_addr = reg_addr, + .buf = buf, + }; + struct spi_device *spi = priv->spidev; + struct spi_transfer *xfers; + int num_chunks; + int rc, i = 0; + u8 *hdr_bufs; - if (msg_len > SJA1105_SIZE_SPI_TRANSFER_MAX) - return -ERANGE; + num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN); - msg.access = rw; - msg.address = reg_addr; - if (rw == SPI_READ) - msg.read_count = size_bytes / 4; + /* One transfer for each message header, one for each message + * payload (chunk). + */ + xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer), + GFP_KERNEL); + if (!xfers) + return -ENOMEM; - sja1105_spi_message_pack(tx_buf, &msg); + /* Packed buffers for the num_chunks SPI message headers, + * stored as a contiguous array + */ + hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER, + GFP_KERNEL); + if (!hdr_bufs) { + kfree(xfers); + return -ENOMEM; + } - if (rw == SPI_WRITE) - memcpy(tx_buf + SJA1105_SIZE_SPI_MSG_HEADER, - packed_buf, size_bytes); + for (i = 0; i < num_chunks; i++) { + struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i); + struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i); + u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i); + struct spi_transfer *ptp_sts_xfer; + struct sja1105_spi_message msg; + + /* Populate the transfer's header buffer */ + msg.address = chunk.reg_addr; + msg.access = rw; + if (rw == SPI_READ) + msg.read_count = chunk.len / 4; + else + /* Ignored */ + msg.read_count = 0; + sja1105_spi_message_pack(hdr_buf, &msg); + hdr_xfer->tx_buf = hdr_buf; + hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER; + + /* Populate the transfer's data buffer */ + if (rw == SPI_READ) + chunk_xfer->rx_buf = chunk.buf; + else + chunk_xfer->tx_buf = chunk.buf; + chunk_xfer->len = chunk.len; + + /* Request timestamping for the transfer. Instead of letting + * callers specify which byte they want to timestamp, we can + * make certain assumptions: + * - A read operation will request a software timestamp when + * what's being read is the PTP time. That is snapshotted by + * the switch hardware at the end of the command portion + * (hdr_xfer). + * - A write operation will request a software timestamp on + * actions that modify the PTP time. Taking clock stepping as + * an example, the switch writes the PTP time at the end of + * the data portion (chunk_xfer). 
+ */ + if (rw == SPI_READ) + ptp_sts_xfer = hdr_xfer; + else + ptp_sts_xfer = chunk_xfer; + ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1; + ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1; + ptp_sts_xfer->ptp_sts = ptp_sts; + + /* Calculate next chunk */ + chunk.buf += chunk.len; + chunk.reg_addr += chunk.len / 4; + chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf), + SJA1105_SIZE_SPI_MSG_MAXLEN); + + /* De-assert the chip select after each chunk. */ + if (chunk.len) + chunk_xfer->cs_change = 1; + } - rc = sja1105_spi_transfer(priv, tx_buf, rx_buf, msg_len); + rc = spi_sync_transfer(spi, xfers, 2 * num_chunks); if (rc < 0) - return rc; + dev_err(&spi->dev, "SPI transfer failed: %d\n", rc); - if (rw == SPI_READ) - memcpy(packed_buf, rx_buf + SJA1105_SIZE_SPI_MSG_HEADER, - size_bytes); + kfree(hdr_bufs); + kfree(xfers); - return 0; + return rc; +} + +int sja1105_xfer_buf(const struct sja1105_private *priv, + sja1105_spi_rw_mode_t rw, u64 reg_addr, + u8 *buf, size_t len) +{ + return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL); } /* If @rw is: * - SPI_WRITE: creates and sends an SPI write message at absolute - * address reg_addr, taking size_bytes from *packed_buf + * address reg_addr * - SPI_READ: creates and sends an SPI read message from absolute - * address reg_addr, writing size_bytes into *packed_buf + * address reg_addr * * The u64 *value is unpacked, meaning that it's stored in the native * CPU endianness and directly usable by software running on the core. - * - * This is a wrapper around sja1105_spi_send_packed_buf(). */ -int sja1105_spi_send_int(const struct sja1105_private *priv, - sja1105_spi_rw_mode_t rw, u64 reg_addr, - u64 *value, u64 size_bytes) +int sja1105_xfer_u64(const struct sja1105_private *priv, + sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value, + struct ptp_system_timestamp *ptp_sts) { - u8 packed_buf[SJA1105_SIZE_SPI_MSG_MAXLEN]; + u8 packed_buf[8]; int rc; - if (size_bytes > SJA1105_SIZE_SPI_MSG_MAXLEN) - return -ERANGE; - if (rw == SPI_WRITE) - sja1105_pack(packed_buf, value, 8 * size_bytes - 1, 0, - size_bytes); + sja1105_pack(packed_buf, value, 63, 0, 8); - rc = sja1105_spi_send_packed_buf(priv, rw, reg_addr, packed_buf, - size_bytes); + rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts); if (rw == SPI_READ) - sja1105_unpack(packed_buf, value, 8 * size_bytes - 1, 0, - size_bytes); + sja1105_unpack(packed_buf, value, 63, 0, 8); return rc; } -/* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN - * must be sent/received. Splitting the buffer into chunks and assembling - * those into SPI messages is done automatically by this function. 
- */ -int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv, - sja1105_spi_rw_mode_t rw, u64 base_addr, - void *packed_buf, u64 buf_len) +/* Same as above, but transfers only a 4 byte word */ +int sja1105_xfer_u32(const struct sja1105_private *priv, + sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value, + struct ptp_system_timestamp *ptp_sts) { - struct chunk { - void *buf_ptr; - int len; - u64 spi_address; - } chunk; - int distance_to_end; + u8 packed_buf[4]; + u64 tmp; int rc; - /* Initialize chunk */ - chunk.buf_ptr = packed_buf; - chunk.spi_address = base_addr; - chunk.len = min_t(int, buf_len, SJA1105_SIZE_SPI_MSG_MAXLEN); - - while (chunk.len) { - rc = sja1105_spi_send_packed_buf(priv, rw, chunk.spi_address, - chunk.buf_ptr, chunk.len); - if (rc < 0) - return rc; - - chunk.buf_ptr += chunk.len; - chunk.spi_address += chunk.len / 4; - distance_to_end = (uintptr_t)(packed_buf + buf_len - - chunk.buf_ptr); - chunk.len = min(distance_to_end, SJA1105_SIZE_SPI_MSG_MAXLEN); + if (rw == SPI_WRITE) { + /* The packing API only supports u64 as CPU word size, + * so we need to convert. + */ + tmp = *value; + sja1105_pack(packed_buf, &tmp, 31, 0, 4); } - return 0; -} + rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts); -/* Back-ported structure from UM11040 Table 112. - * Reset control register (addr. 100440h) - * In the SJA1105 E/T, only warm_rst and cold_rst are - * supported (exposed in UM10944 as rst_ctrl), but the bit - * offsets of warm_rst and cold_rst are actually reversed. - */ -struct sja1105_reset_cmd { - u64 switch_rst; - u64 cfg_rst; - u64 car_rst; - u64 otp_rst; - u64 warm_rst; - u64 cold_rst; - u64 por_rst; -}; - -static void -sja1105et_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset) -{ - const int size = SJA1105_SIZE_RESET_CMD; - - memset(buf, 0, size); - - sja1105_pack(buf, &reset->cold_rst, 3, 3, size); - sja1105_pack(buf, &reset->warm_rst, 2, 2, size); -} - -static void -sja1105pqrs_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset) -{ - const int size = SJA1105_SIZE_RESET_CMD; - - memset(buf, 0, size); + if (rw == SPI_READ) { + sja1105_unpack(packed_buf, &tmp, 31, 0, 4); + *value = tmp; + } - sja1105_pack(buf, &reset->switch_rst, 8, 8, size); - sja1105_pack(buf, &reset->cfg_rst, 7, 7, size); - sja1105_pack(buf, &reset->car_rst, 5, 5, size); - sja1105_pack(buf, &reset->otp_rst, 4, 4, size); - sja1105_pack(buf, &reset->warm_rst, 3, 3, size); - sja1105_pack(buf, &reset->cold_rst, 2, 2, size); - sja1105_pack(buf, &reset->por_rst, 1, 1, size); + return rc; } -static int sja1105et_reset_cmd(const void *ctx, const void *data) +static int sja1105et_reset_cmd(struct dsa_switch *ds) { - const struct sja1105_private *priv = ctx; - const struct sja1105_reset_cmd *reset = data; + struct sja1105_private *priv = ds->priv; const struct sja1105_regs *regs = priv->info->regs; - struct device *dev = priv->ds->dev; - u8 packed_buf[SJA1105_SIZE_RESET_CMD]; - - if (reset->switch_rst || - reset->cfg_rst || - reset->car_rst || - reset->otp_rst || - reset->por_rst) { - dev_err(dev, "Only warm and cold reset is supported " - "for SJA1105 E/T!\n"); - return -EINVAL; - } - - if (reset->warm_rst) - dev_dbg(dev, "Warm reset requested\n"); - if (reset->cold_rst) - dev_dbg(dev, "Cold reset requested\n"); + u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0}; + const int size = SJA1105_SIZE_RESET_CMD; + u64 cold_rst = 1; - sja1105et_reset_cmd_pack(packed_buf, reset); + sja1105_pack(packed_buf, &cold_rst, 3, 3, size); - return 
sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu, - packed_buf, SJA1105_SIZE_RESET_CMD); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf, + SJA1105_SIZE_RESET_CMD); } -static int sja1105pqrs_reset_cmd(const void *ctx, const void *data) +static int sja1105pqrs_reset_cmd(struct dsa_switch *ds) { - const struct sja1105_private *priv = ctx; - const struct sja1105_reset_cmd *reset = data; + struct sja1105_private *priv = ds->priv; const struct sja1105_regs *regs = priv->info->regs; - struct device *dev = priv->ds->dev; - u8 packed_buf[SJA1105_SIZE_RESET_CMD]; - - if (reset->switch_rst) - dev_dbg(dev, "Main reset for all functional modules requested\n"); - if (reset->cfg_rst) - dev_dbg(dev, "Chip configuration reset requested\n"); - if (reset->car_rst) - dev_dbg(dev, "Clock and reset control logic reset requested\n"); - if (reset->otp_rst) - dev_dbg(dev, "OTP read cycle for reading product " - "config settings requested\n"); - if (reset->warm_rst) - dev_dbg(dev, "Warm reset requested\n"); - if (reset->cold_rst) - dev_dbg(dev, "Cold reset requested\n"); - if (reset->por_rst) - dev_dbg(dev, "Power-on reset requested\n"); - - sja1105pqrs_reset_cmd_pack(packed_buf, reset); - - return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu, - packed_buf, SJA1105_SIZE_RESET_CMD); -} + u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0}; + const int size = SJA1105_SIZE_RESET_CMD; + u64 cold_rst = 1; -static int sja1105_cold_reset(const struct sja1105_private *priv) -{ - struct sja1105_reset_cmd reset = {0}; + sja1105_pack(packed_buf, &cold_rst, 2, 2, size); - reset.cold_rst = 1; - return priv->info->reset_cmd(priv, &reset); + return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf, + SJA1105_SIZE_RESET_CMD); } int sja1105_inhibit_tx(const struct sja1105_private *priv, unsigned long port_bitmap, bool tx_inhibited) { const struct sja1105_regs *regs = priv->info->regs; - u64 inhibit_cmd; + u32 inhibit_cmd; int rc; - rc = sja1105_spi_send_int(priv, SPI_READ, regs->port_control, - &inhibit_cmd, SJA1105_SIZE_PORT_CTRL); + rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control, + &inhibit_cmd, NULL); if (rc < 0) return rc; @@ -300,8 +250,8 @@ int sja1105_inhibit_tx(const struct sja1105_private *priv, else inhibit_cmd &= ~port_bitmap; - return sja1105_spi_send_int(priv, SPI_WRITE, regs->port_control, - &inhibit_cmd, SJA1105_SIZE_PORT_CTRL); + return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control, + &inhibit_cmd, NULL); } struct sja1105_status { @@ -339,9 +289,7 @@ static int sja1105_status_get(struct sja1105_private *priv, u8 packed_buf[4]; int rc; - rc = sja1105_spi_send_packed_buf(priv, SPI_READ, - regs->status, - packed_buf, 4); + rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4); if (rc < 0) return rc; @@ -429,7 +377,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv) usleep_range(500, 1000); do { /* Put the SJA1105 in programming mode */ - rc = sja1105_cold_reset(priv); + rc = priv->info->reset_cmd(priv->ds); if (rc < 0) { dev_err(dev, "Failed to reset switch, retrying...\n"); continue; @@ -437,9 +385,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv) /* Wait for the switch to come out of reset */ usleep_range(1000, 5000); /* Upload the static config to the device */ - rc = sja1105_spi_send_long_packed_buf(priv, SPI_WRITE, - regs->config, - config_buf, buf_len); + rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config, + config_buf, buf_len); if (rc < 0) { dev_err(dev, "Failed to upload config, retrying...\n"); continue; @@ 
-482,12 +429,6 @@ int sja1105_static_config_upload(struct sja1105_private *priv) dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries); } - rc = sja1105_ptp_reset(priv); - if (rc < 0) - dev_err(dev, "Failed to reset PTP clock: %d\n", rc); - - dev_info(dev, "Reset switch and programmed static config\n"); - out: kfree(config_buf); return rc; @@ -516,10 +457,11 @@ static struct sja1105_regs sja1105et_regs = { .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031}, .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034}, .ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8}, + .ptpschtm = 0x12, /* Spans 0x12 to 0x13 */ .ptp_control = 0x17, - .ptpclk = 0x18, /* Spans 0x18 to 0x19 */ + .ptpclkval = 0x18, /* Spans 0x18 to 0x19 */ .ptpclkrate = 0x1A, - .ptptsclk = 0x1B, /* Spans 0x1B to 0x1C */ + .ptpclkcorp = 0x1D, }; static struct sja1105_regs sja1105pqrs_regs = { @@ -547,10 +489,11 @@ static struct sja1105_regs sja1105pqrs_regs = { .rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F}, .qlevel = {0x604, 0x614, 0x624, 0x634, 0x644}, .ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0}, + .ptpschtm = 0x13, /* Spans 0x13 to 0x14 */ .ptp_control = 0x18, - .ptpclk = 0x19, + .ptpclkval = 0x19, .ptpclkrate = 0x1B, - .ptptsclk = 0x1C, + .ptpclkcorp = 0x1E, }; struct sja1105_info sja1105e_info = { @@ -563,7 +506,7 @@ struct sja1105_info sja1105e_info = { .reset_cmd = sja1105et_reset_cmd, .fdb_add_cmd = sja1105et_fdb_add, .fdb_del_cmd = sja1105et_fdb_del, - .ptp_cmd = sja1105et_ptp_cmd, + .ptp_cmd_packing = sja1105et_ptp_cmd_packing, .regs = &sja1105et_regs, .name = "SJA1105E", }; @@ -577,7 +520,7 @@ struct sja1105_info sja1105t_info = { .reset_cmd = sja1105et_reset_cmd, .fdb_add_cmd = sja1105et_fdb_add, .fdb_del_cmd = sja1105et_fdb_del, - .ptp_cmd = sja1105et_ptp_cmd, + .ptp_cmd_packing = sja1105et_ptp_cmd_packing, .regs = &sja1105et_regs, .name = "SJA1105T", }; @@ -592,7 +535,7 @@ struct sja1105_info sja1105p_info = { .reset_cmd = sja1105pqrs_reset_cmd, .fdb_add_cmd = sja1105pqrs_fdb_add, .fdb_del_cmd = sja1105pqrs_fdb_del, - .ptp_cmd = sja1105pqrs_ptp_cmd, + .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing, .regs = &sja1105pqrs_regs, .name = "SJA1105P", }; @@ -607,7 +550,7 @@ struct sja1105_info sja1105q_info = { .reset_cmd = sja1105pqrs_reset_cmd, .fdb_add_cmd = sja1105pqrs_fdb_add, .fdb_del_cmd = sja1105pqrs_fdb_del, - .ptp_cmd = sja1105pqrs_ptp_cmd, + .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing, .regs = &sja1105pqrs_regs, .name = "SJA1105Q", }; @@ -622,7 +565,7 @@ struct sja1105_info sja1105r_info = { .reset_cmd = sja1105pqrs_reset_cmd, .fdb_add_cmd = sja1105pqrs_fdb_add, .fdb_del_cmd = sja1105pqrs_fdb_del, - .ptp_cmd = sja1105pqrs_ptp_cmd, + .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing, .regs = &sja1105pqrs_regs, .name = "SJA1105R", }; @@ -638,6 +581,6 @@ struct sja1105_info sja1105s_info = { .reset_cmd = sja1105pqrs_reset_cmd, .fdb_add_cmd = sja1105pqrs_fdb_add, .fdb_del_cmd = sja1105pqrs_fdb_del, - .ptp_cmd = sja1105pqrs_ptp_cmd, + .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing, .name = "SJA1105S", }; diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c index 33eca6a82ec5..26b925b5dace 100644 --- a/drivers/net/dsa/sja1105/sja1105_tas.c +++ b/drivers/net/dsa/sja1105/sja1105_tas.c @@ -10,6 +10,11 @@ #define SJA1105_TAS_MAX_DELTA BIT(19) #define SJA1105_GATE_MASK GENMASK_ULL(SJA1105_NUM_TC - 1, 0) +#define work_to_sja1105_tas(d) \ + container_of((d), struct sja1105_tas_data, tas_work) +#define tas_to_sja1105(d) \ + 
container_of((d), struct sja1105_private, tas_data) + /* This is not a preprocessor macro because the "ns" argument may or may not be * s64 at caller side. This ensures it is properly type-cast before div_s64. */ @@ -18,6 +23,100 @@ static s64 ns_to_sja1105_delta(s64 ns) return div_s64(ns, 200); } +static s64 sja1105_delta_to_ns(s64 delta) +{ + return delta * 200; +} + +/* Calculate the first base_time in the future that satisfies this + * relationship: + * + * future_base_time = base_time + N x cycle_time >= now, or + * + * now - base_time + * N >= --------------- + * cycle_time + * + * Because N is an integer, the ceiling value of the above "a / b" ratio + * is in fact precisely the floor value of "(a + b - 1) / b", which is + * easier to calculate only having integer division tools. + */ +static s64 future_base_time(s64 base_time, s64 cycle_time, s64 now) +{ + s64 a, b, n; + + if (base_time >= now) + return base_time; + + a = now - base_time; + b = cycle_time; + n = div_s64(a + b - 1, b); + + return base_time + n * cycle_time; +} + +static int sja1105_tas_set_runtime_params(struct sja1105_private *priv) +{ + struct sja1105_tas_data *tas_data = &priv->tas_data; + struct dsa_switch *ds = priv->ds; + s64 earliest_base_time = S64_MAX; + s64 latest_base_time = 0; + s64 its_cycle_time = 0; + s64 max_cycle_time = 0; + int port; + + tas_data->enabled = false; + + for (port = 0; port < SJA1105_NUM_PORTS; port++) { + const struct tc_taprio_qopt_offload *offload; + + offload = tas_data->offload[port]; + if (!offload) + continue; + + tas_data->enabled = true; + + if (max_cycle_time < offload->cycle_time) + max_cycle_time = offload->cycle_time; + if (latest_base_time < offload->base_time) + latest_base_time = offload->base_time; + if (earliest_base_time > offload->base_time) { + earliest_base_time = offload->base_time; + its_cycle_time = offload->cycle_time; + } + } + + if (!tas_data->enabled) + return 0; + + /* Roll the earliest base time over until it is in a comparable + * time base with the latest, then compare their deltas. + * We want to enforce that all ports' base times are within + * SJA1105_TAS_MAX_DELTA 200ns cycles of one another. + */ + earliest_base_time = future_base_time(earliest_base_time, + its_cycle_time, + latest_base_time); + while (earliest_base_time > latest_base_time) + earliest_base_time -= its_cycle_time; + if (latest_base_time - earliest_base_time > + sja1105_delta_to_ns(SJA1105_TAS_MAX_DELTA)) { + dev_err(ds->dev, + "Base times too far apart: min %llu max %llu\n", + earliest_base_time, latest_base_time); + return -ERANGE; + } + + tas_data->earliest_base_time = earliest_base_time; + tas_data->max_cycle_time = max_cycle_time; + + dev_dbg(ds->dev, "earliest base time %lld ns\n", earliest_base_time); + dev_dbg(ds->dev, "latest base time %lld ns\n", latest_base_time); + dev_dbg(ds->dev, "longest cycle time %lld ns\n", max_cycle_time); + + return 0; +} + /* Lo and behold: the egress scheduler from hell. 
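For reference, the ceiling-division trick described in the future_base_time() comment above can be checked with a few lines of ordinary user-space C. This is a standalone sketch under the same assumptions as the patch; the helper name and the sample numbers are illustrative only, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Standalone restatement of the arithmetic in future_base_time(): the
 * ceiling of (now - base_time) / cycle_time is computed as (a + b - 1) / b
 * using only integer division.
 */
static int64_t next_base_time(int64_t base_time, int64_t cycle_time,
			      int64_t now)
{
	int64_t n;

	if (base_time >= now)
		return base_time;

	n = (now - base_time + cycle_time - 1) / cycle_time;

	return base_time + n * cycle_time;
}

int main(void)
{
	/* base_time 1000 ns, cycle_time 300 ns, now 1700 ns:
	 * n = ceil(700 / 300) = 3, so the next valid base time is 1900 ns.
	 */
	printf("%lld\n", (long long)next_base_time(1000, 300, 1700));
	return 0;
}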
* * At the hardware level, the Time-Aware Shaper holds a global linear arrray of @@ -99,7 +198,11 @@ static int sja1105_init_scheduling(struct sja1105_private *priv) int num_cycles = 0; int cycle = 0; int i, k = 0; - int port; + int port, rc; + + rc = sja1105_tas_set_runtime_params(priv); + if (rc < 0) + return rc; /* Discard previous Schedule Table */ table = &priv->static_config.tables[BLK_IDX_SCHEDULE]; @@ -184,11 +287,13 @@ static int sja1105_init_scheduling(struct sja1105_private *priv) schedule_entry_points = table->entries; /* Finally start populating the static config tables */ - schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_STANDALONE; + schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP; schedule_entry_points_params->actsubsch = num_cycles - 1; for (port = 0; port < SJA1105_NUM_PORTS; port++) { const struct tc_taprio_qopt_offload *offload; + /* Relative base time */ + s64 rbt; offload = tas_data->offload[port]; if (!offload) @@ -196,15 +301,21 @@ static int sja1105_init_scheduling(struct sja1105_private *priv) schedule_start_idx = k; schedule_end_idx = k + offload->num_entries - 1; - /* TODO this is the base time for the port's subschedule, - * relative to PTPSCHTM. But as we're using the standalone - * clock source and not PTP clock as time reference, there's - * little point in even trying to put more logic into this, - * like preserving the phases between the subschedules of - * different ports. We'll get all of that when switching to the - * PTP clock source. + /* This is the base time expressed as a number of TAS ticks + * relative to PTPSCHTM, which we'll (perhaps improperly) call + * the operational base time. + */ + rbt = future_base_time(offload->base_time, + offload->cycle_time, + tas_data->earliest_base_time); + rbt -= tas_data->earliest_base_time; + /* UM10944.pdf 4.2.2. Schedule Entry Points table says that + * delta cannot be zero, which is shitty. Advance all relative + * base times by 1 TAS delta, so that even the earliest base + * time becomes 1 in relative terms. Then start the operational + * base time (PTPSCHTM) one TAS delta earlier than planned. 
*/ - entry_point_delta = 1; + entry_point_delta = ns_to_sja1105_delta(rbt) + 1; schedule_entry_points[cycle].subschindx = cycle; schedule_entry_points[cycle].delta = entry_point_delta; @@ -352,7 +463,7 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, if (rc < 0) return rc; - return sja1105_static_config_reload(priv); + return sja1105_static_config_reload(priv, SJA1105_SCHEDULING); } /* The cycle time extension is the amount of time the last cycle from @@ -400,11 +511,306 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, if (rc < 0) return rc; - return sja1105_static_config_reload(priv); + return sja1105_static_config_reload(priv, SJA1105_SCHEDULING); +} + +static int sja1105_tas_check_running(struct sja1105_private *priv) +{ + struct sja1105_tas_data *tas_data = &priv->tas_data; + struct dsa_switch *ds = priv->ds; + struct sja1105_ptp_cmd cmd = {0}; + int rc; + + rc = sja1105_ptp_commit(ds, &cmd, SPI_READ); + if (rc < 0) + return rc; + + if (cmd.ptpstrtsch == 1) + /* Schedule successfully started */ + tas_data->state = SJA1105_TAS_STATE_RUNNING; + else if (cmd.ptpstopsch == 1) + /* Schedule is stopped */ + tas_data->state = SJA1105_TAS_STATE_DISABLED; + else + /* Schedule is probably not configured with PTP clock source */ + rc = -EINVAL; + + return rc; +} + +/* Write to PTPCLKCORP */ +static int sja1105_tas_adjust_drift(struct sja1105_private *priv, + u64 correction) +{ + const struct sja1105_regs *regs = priv->info->regs; + u32 ptpclkcorp = ns_to_sja1105_ticks(correction); + + return sja1105_xfer_u32(priv, SPI_WRITE, regs->ptpclkcorp, + &ptpclkcorp, NULL); +} + +/* Write to PTPSCHTM */ +static int sja1105_tas_set_base_time(struct sja1105_private *priv, + u64 base_time) +{ + const struct sja1105_regs *regs = priv->info->regs; + u64 ptpschtm = ns_to_sja1105_ticks(base_time); + + return sja1105_xfer_u64(priv, SPI_WRITE, regs->ptpschtm, + &ptpschtm, NULL); +} + +static int sja1105_tas_start(struct sja1105_private *priv) +{ + struct sja1105_tas_data *tas_data = &priv->tas_data; + struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd; + struct dsa_switch *ds = priv->ds; + int rc; + + dev_dbg(ds->dev, "Starting the TAS\n"); + + if (tas_data->state == SJA1105_TAS_STATE_ENABLED_NOT_RUNNING || + tas_data->state == SJA1105_TAS_STATE_RUNNING) { + dev_err(ds->dev, "TAS already started\n"); + return -EINVAL; + } + + cmd->ptpstrtsch = 1; + cmd->ptpstopsch = 0; + + rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE); + if (rc < 0) + return rc; + + tas_data->state = SJA1105_TAS_STATE_ENABLED_NOT_RUNNING; + + return 0; +} + +static int sja1105_tas_stop(struct sja1105_private *priv) +{ + struct sja1105_tas_data *tas_data = &priv->tas_data; + struct sja1105_ptp_cmd *cmd = &priv->ptp_data.cmd; + struct dsa_switch *ds = priv->ds; + int rc; + + dev_dbg(ds->dev, "Stopping the TAS\n"); + + if (tas_data->state == SJA1105_TAS_STATE_DISABLED) { + dev_err(ds->dev, "TAS already disabled\n"); + return -EINVAL; + } + + cmd->ptpstopsch = 1; + cmd->ptpstrtsch = 0; + + rc = sja1105_ptp_commit(ds, cmd, SPI_WRITE); + if (rc < 0) + return rc; + + tas_data->state = SJA1105_TAS_STATE_DISABLED; + + return 0; +} + +/* The schedule engine and the PTP clock are driven by the same oscillator, and + * they run in parallel. But whilst the PTP clock can keep an absolute + * time-of-day, the schedule engine is only running in 'ticks' (25 ticks make + * up a delta, which is 200ns), and wrapping around at the end of each cycle. 
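The 200 ns delta from ns_to_sja1105_delta() and the "25 ticks make up a delta" statement above imply a tick of 8 ns. A tiny sketch of the two conversions follows; the driver has its own ns_to_sja1105_delta()/ns_to_sja1105_ticks() helpers, so the names and the 8 ns figure here are assumptions drawn from the comment, not copied from the driver.

#include <assert.h>
#include <stdint.h>

/* Illustrative conversions only: one scheduler delta is 200 ns, and 25 ticks
 * per delta gives an assumed 8 ns tick.
 */
static int64_t ns_to_delta(int64_t ns) { return ns / 200; }
static int64_t ns_to_tick(int64_t ns)  { return ns / 8; }

int main(void)
{
	/* A 1 ms cycle time is 5000 deltas, or 125000 ticks. */
	assert(ns_to_delta(1000000) == 5000);
	assert(ns_to_tick(1000000) == 125000);
	return 0;
}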
+ * The schedule engine is started when the PTP clock reaches the PTPSCHTM time + * (in PTP domain). + * Because the PTP clock can be rate-corrected (accelerated or slowed down) by + * a software servo, and the schedule engine clock runs in parallel to the PTP + * clock, there is logic internal to the switch that periodically keeps the + * schedule engine from drifting away. The frequency with which this internal + * syntonization happens is the PTP clock correction period (PTPCLKCORP). It is + * a value also in the PTP clock domain, and is also rate-corrected. + * To be precise, during a correction period, there is logic to determine by + * how many scheduler clock ticks has the PTP clock drifted. At the end of each + * correction period/beginning of new one, the length of a delta is shrunk or + * expanded with an integer number of ticks, compared with the typical 25. + * So a delta lasts for 200ns (or 25 ticks) only on average. + * Sometimes it is longer, sometimes it is shorter. The internal syntonization + * logic can adjust for at most 5 ticks each 20 ticks. + * + * The first implication is that you should choose your schedule correction + * period to be an integer multiple of the schedule length. Preferably one. + * In case there are schedules of multiple ports active, then the correction + * period needs to be a multiple of them all. Given the restriction that the + * cycle times have to be multiples of one another anyway, this means the + * correction period can simply be the largest cycle time, hence the current + * choice. This way, the updates are always synchronous to the transmission + * cycle, and therefore predictable. + * + * The second implication is that at the beginning of a correction period, the + * first few deltas will be modulated in time, until the schedule engine is + * properly phase-aligned with the PTP clock. For this reason, you should place + * your best-effort traffic at the beginning of a cycle, and your + * time-triggered traffic afterwards. + * + * The third implication is that once the schedule engine is started, it can + * only adjust for so much drift within a correction period. In the servo you + * can only change the PTPCLKRATE, but not step the clock (PTPCLKADD). If you + * want to do the latter, you need to stop and restart the schedule engine, + * which is what the state machine handles. + */ +static void sja1105_tas_state_machine(struct work_struct *work) +{ + struct sja1105_tas_data *tas_data = work_to_sja1105_tas(work); + struct sja1105_private *priv = tas_to_sja1105(tas_data); + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + struct timespec64 base_time_ts, now_ts; + struct dsa_switch *ds = priv->ds; + struct timespec64 diff; + s64 base_time, now; + int rc = 0; + + mutex_lock(&ptp_data->lock); + + switch (tas_data->state) { + case SJA1105_TAS_STATE_DISABLED: + /* Can't do anything at all if clock is still being stepped */ + if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) + break; + + rc = sja1105_tas_adjust_drift(priv, tas_data->max_cycle_time); + if (rc < 0) + break; + + rc = __sja1105_ptp_gettimex(ds, &now, NULL); + if (rc < 0) + break; + + /* Plan to start the earliest schedule first. The others + * will be started in hardware, by way of their respective + * entry points delta. + * Try our best to avoid fringe cases (race condition between + * ptpschtm and ptpstrtsch) by pushing the oper_base_time at + * least one second in the future from now. 
This is not ideal, + * but this only needs to buy us time until the + * sja1105_tas_start command below gets executed. + */ + base_time = future_base_time(tas_data->earliest_base_time, + tas_data->max_cycle_time, + now + 1ull * NSEC_PER_SEC); + base_time -= sja1105_delta_to_ns(1); + + rc = sja1105_tas_set_base_time(priv, base_time); + if (rc < 0) + break; + + tas_data->oper_base_time = base_time; + + rc = sja1105_tas_start(priv); + if (rc < 0) + break; + + base_time_ts = ns_to_timespec64(base_time); + now_ts = ns_to_timespec64(now); + + dev_dbg(ds->dev, "OPER base time %lld.%09ld (now %lld.%09ld)\n", + base_time_ts.tv_sec, base_time_ts.tv_nsec, + now_ts.tv_sec, now_ts.tv_nsec); + + break; + + case SJA1105_TAS_STATE_ENABLED_NOT_RUNNING: + if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) { + /* Clock was stepped.. bad news for TAS */ + sja1105_tas_stop(priv); + break; + } + + /* Check if TAS has actually started, by comparing the + * scheduled start time with the SJA1105 PTP clock + */ + rc = __sja1105_ptp_gettimex(ds, &now, NULL); + if (rc < 0) + break; + + if (now < tas_data->oper_base_time) { + /* TAS has not started yet */ + diff = ns_to_timespec64(tas_data->oper_base_time - now); + dev_dbg(ds->dev, "time to start: [%lld.%09ld]", + diff.tv_sec, diff.tv_nsec); + break; + } + + /* Time elapsed, what happened? */ + rc = sja1105_tas_check_running(priv); + if (rc < 0) + break; + + if (tas_data->state != SJA1105_TAS_STATE_RUNNING) + /* TAS has started */ + dev_err(ds->dev, + "TAS not started despite time elapsed\n"); + + break; + + case SJA1105_TAS_STATE_RUNNING: + /* Clock was stepped.. bad news for TAS */ + if (tas_data->last_op != SJA1105_PTP_ADJUSTFREQ) { + sja1105_tas_stop(priv); + break; + } + + rc = sja1105_tas_check_running(priv); + if (rc < 0) + break; + + if (tas_data->state != SJA1105_TAS_STATE_RUNNING) + dev_err(ds->dev, "TAS surprisingly stopped\n"); + + break; + + default: + if (net_ratelimit()) + dev_err(ds->dev, "TAS in an invalid state (incorrect use of API)!\n"); + } + + if (rc && net_ratelimit()) + dev_err(ds->dev, "An operation returned %d\n", rc); + + mutex_unlock(&ptp_data->lock); +} + +void sja1105_tas_clockstep(struct dsa_switch *ds) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_tas_data *tas_data = &priv->tas_data; + + if (!tas_data->enabled) + return; + + tas_data->last_op = SJA1105_PTP_CLOCKSTEP; + schedule_work(&tas_data->tas_work); +} + +void sja1105_tas_adjfreq(struct dsa_switch *ds) +{ + struct sja1105_private *priv = ds->priv; + struct sja1105_tas_data *tas_data = &priv->tas_data; + + if (!tas_data->enabled) + return; + + /* No reason to schedule the workqueue, nothing changed */ + if (tas_data->state == SJA1105_TAS_STATE_RUNNING) + return; + + tas_data->last_op = SJA1105_PTP_ADJUSTFREQ; + schedule_work(&tas_data->tas_work); } void sja1105_tas_setup(struct dsa_switch *ds) { + struct sja1105_private *priv = ds->priv; + struct sja1105_tas_data *tas_data = &priv->tas_data; + + INIT_WORK(&tas_data->tas_work, sja1105_tas_state_machine); + tas_data->state = SJA1105_TAS_STATE_DISABLED; + tas_data->last_op = SJA1105_PTP_NONE; } void sja1105_tas_teardown(struct dsa_switch *ds) @@ -413,6 +819,8 @@ void sja1105_tas_teardown(struct dsa_switch *ds) struct tc_taprio_qopt_offload *offload; int port; + cancel_work_sync(&priv->tas_data.tas_work); + for (port = 0; port < SJA1105_NUM_PORTS; port++) { offload = priv->tas_data.offload[port]; if (!offload) diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h index 
0aad212d88b2..b226c3dfd5b1 100644 --- a/drivers/net/dsa/sja1105/sja1105_tas.h +++ b/drivers/net/dsa/sja1105/sja1105_tas.h @@ -8,8 +8,27 @@ #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) +enum sja1105_tas_state { + SJA1105_TAS_STATE_DISABLED, + SJA1105_TAS_STATE_ENABLED_NOT_RUNNING, + SJA1105_TAS_STATE_RUNNING, +}; + +enum sja1105_ptp_op { + SJA1105_PTP_NONE, + SJA1105_PTP_CLOCKSTEP, + SJA1105_PTP_ADJUSTFREQ, +}; + struct sja1105_tas_data { struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS]; + enum sja1105_tas_state state; + enum sja1105_ptp_op last_op; + struct work_struct tas_work; + s64 earliest_base_time; + s64 oper_base_time; + u64 max_cycle_time; + bool enabled; }; int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, @@ -19,6 +38,10 @@ void sja1105_tas_setup(struct dsa_switch *ds); void sja1105_tas_teardown(struct dsa_switch *ds); +void sja1105_tas_clockstep(struct dsa_switch *ds); + +void sja1105_tas_adjfreq(struct dsa_switch *ds); + #else /* C doesn't allow empty structures, bah! */ @@ -36,6 +59,10 @@ static inline void sja1105_tas_setup(struct dsa_switch *ds) { } static inline void sja1105_tas_teardown(struct dsa_switch *ds) { } +static inline void sja1105_tas_clockstep(struct dsa_switch *ds) { } + +static inline void sja1105_tas_adjfreq(struct dsa_switch *ds) { } + #endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_TAS) */ #endif /* _SJA1105_TAS_H */ diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index 614377ef7956..42c1574d45f2 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -1178,9 +1178,12 @@ int vsc73xx_probe(struct vsc73xx *vsc) * We allocate 8 ports and avoid access to the nonexistant * ports. */ - vsc->ds = dsa_switch_alloc(dev, 8); + vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL); if (!vsc->ds) return -ENOMEM; + + vsc->ds->dev = dev; + vsc->ds->num_ports = 8; vsc->ds->priv = vsc; vsc->ds->ops = &vsc73xx_ds_ops; |
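The vsc73xx hunk above shows the conversion away from dsa_switch_alloc(): the driver now allocates struct dsa_switch itself and fills in dev and num_ports before registration. Below is a condensed sketch of that pattern for a hypothetical driver; the function and variable names are made up, while devm_kzalloc(), the dsa_switch fields and dsa_register_switch() are the existing kernel API.

#include <linux/device.h>
#include <linux/slab.h>
#include <net/dsa.h>

/* Hypothetical probe helper showing the post-conversion allocation pattern:
 * allocate the dsa_switch with devm_kzalloc(), set dev and num_ports by
 * hand, then register as usual.
 */
static int foo_alloc_and_register(struct device *dev,
				  const struct dsa_switch_ops *ops,
				  void *priv, unsigned int nports)
{
	struct dsa_switch *ds;

	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	ds->dev = dev;
	ds->num_ports = nports;
	ds->priv = priv;
	ds->ops = ops;

	return dsa_register_switch(ds);
}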