author		Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2022-02-03 10:53:49 +0300
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2022-02-03 10:53:49 +0300
commit		876f7a438e4247a948268ad77b67c494f709cc30 (patch)
tree		c3fa2548657920df9d80822d08ff24666e62d37d /drivers/net/ethernet/microchip/lan966x
parent		86df4141869350edaa53fb994b3db2c2cca5065d (diff)
parent		53dbee4926d3706ca9e03f3928fa85b5ec3bc0cc (diff)
download	linux-876f7a438e4247a948268ad77b67c494f709cc30.tar.xz
Merge drm/drm-next into drm-intel-gt-next

Backmerge to bring in 5.17-rc2 to introduce a common baseline to merge
i915_regs changes from drm-intel-next.

Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Diffstat (limited to 'drivers/net/ethernet/microchip/lan966x')
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/Kconfig             |    9
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/Makefile            |   10
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c   |  682
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c       |  244
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h       |  173
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_mac.c       |  470
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_main.c      | 1002
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_main.h      |  278
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c       |  506
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c   |  127
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_port.c      |  406
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_regs.h      |  871
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c |  544
-rw-r--r--	drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c      |  317
14 files changed, 5639 insertions, 0 deletions
diff --git a/drivers/net/ethernet/microchip/lan966x/Kconfig b/drivers/net/ethernet/microchip/lan966x/Kconfig
new file mode 100644
index 000000000000..ac273f84b69e
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Kconfig
@@ -0,0 +1,9 @@
+config LAN966X_SWITCH
+ tristate "Lan966x switch driver"
+ depends on HAS_IOMEM
+ depends on OF
+ depends on NET_SWITCHDEV
+ select PHYLINK
+ select PACKING
+ help
+ This driver supports the Lan966x network switch device.
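
Since LAN966X_SWITCH is tristate, the driver can be built either into the
kernel or as a module (CONFIG_LAN966X_SWITCH=m); HAS_IOMEM, OF and
NET_SWITCHDEV must already be enabled for the option to appear, while
PHYLINK and PACKING are switched on automatically by the select statements.
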
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
new file mode 100644
index 000000000000..040cfff9f577
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Microchip Lan966x network device drivers.
+#
+
+obj-$(CONFIG_LAN966X_SWITCH) += lan966x-switch.o
+
+lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
+ lan966x_mac.o lan966x_ethtool.o lan966x_switchdev.o \
+ lan966x_vlan.o lan966x_fdb.o lan966x_mdb.o
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
new file mode 100644
index 000000000000..614f12c2fe6a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+
+#include "lan966x_main.h"
+
+/* Number of traffic classes */
+#define LAN966X_NUM_TC 8
+#define LAN966X_STATS_CHECK_DELAY (2 * HZ)
+
+static const struct lan966x_stat_layout lan966x_stats_layout[] = {
+ { .name = "rx_octets", .offset = 0x00, },
+ { .name = "rx_unicast", .offset = 0x01, },
+ { .name = "rx_multicast", .offset = 0x02 },
+ { .name = "rx_broadcast", .offset = 0x03 },
+ { .name = "rx_short", .offset = 0x04 },
+ { .name = "rx_frag", .offset = 0x05 },
+ { .name = "rx_jabber", .offset = 0x06 },
+ { .name = "rx_crc", .offset = 0x07 },
+ { .name = "rx_symbol_err", .offset = 0x08 },
+ { .name = "rx_sz_64", .offset = 0x09 },
+ { .name = "rx_sz_65_127", .offset = 0x0a},
+ { .name = "rx_sz_128_255", .offset = 0x0b},
+ { .name = "rx_sz_256_511", .offset = 0x0c },
+ { .name = "rx_sz_512_1023", .offset = 0x0d },
+ { .name = "rx_sz_1024_1526", .offset = 0x0e },
+ { .name = "rx_sz_jumbo", .offset = 0x0f },
+ { .name = "rx_pause", .offset = 0x10 },
+ { .name = "rx_control", .offset = 0x11 },
+ { .name = "rx_long", .offset = 0x12 },
+ { .name = "rx_cat_drop", .offset = 0x13 },
+ { .name = "rx_red_prio_0", .offset = 0x14 },
+ { .name = "rx_red_prio_1", .offset = 0x15 },
+ { .name = "rx_red_prio_2", .offset = 0x16 },
+ { .name = "rx_red_prio_3", .offset = 0x17 },
+ { .name = "rx_red_prio_4", .offset = 0x18 },
+ { .name = "rx_red_prio_5", .offset = 0x19 },
+ { .name = "rx_red_prio_6", .offset = 0x1a },
+ { .name = "rx_red_prio_7", .offset = 0x1b },
+ { .name = "rx_yellow_prio_0", .offset = 0x1c },
+ { .name = "rx_yellow_prio_1", .offset = 0x1d },
+ { .name = "rx_yellow_prio_2", .offset = 0x1e },
+ { .name = "rx_yellow_prio_3", .offset = 0x1f },
+ { .name = "rx_yellow_prio_4", .offset = 0x20 },
+ { .name = "rx_yellow_prio_5", .offset = 0x21 },
+ { .name = "rx_yellow_prio_6", .offset = 0x22 },
+ { .name = "rx_yellow_prio_7", .offset = 0x23 },
+ { .name = "rx_green_prio_0", .offset = 0x24 },
+ { .name = "rx_green_prio_1", .offset = 0x25 },
+ { .name = "rx_green_prio_2", .offset = 0x26 },
+ { .name = "rx_green_prio_3", .offset = 0x27 },
+ { .name = "rx_green_prio_4", .offset = 0x28 },
+ { .name = "rx_green_prio_5", .offset = 0x29 },
+ { .name = "rx_green_prio_6", .offset = 0x2a },
+ { .name = "rx_green_prio_7", .offset = 0x2b },
+ { .name = "rx_assembly_err", .offset = 0x2c },
+ { .name = "rx_smd_err", .offset = 0x2d },
+ { .name = "rx_assembly_ok", .offset = 0x2e },
+ { .name = "rx_merge_frag", .offset = 0x2f },
+ { .name = "rx_pmac_octets", .offset = 0x30, },
+ { .name = "rx_pmac_unicast", .offset = 0x31, },
+ { .name = "rx_pmac_multicast", .offset = 0x32 },
+ { .name = "rx_pmac_broadcast", .offset = 0x33 },
+ { .name = "rx_pmac_short", .offset = 0x34 },
+ { .name = "rx_pmac_frag", .offset = 0x35 },
+ { .name = "rx_pmac_jabber", .offset = 0x36 },
+ { .name = "rx_pmac_crc", .offset = 0x37 },
+ { .name = "rx_pmac_symbol_err", .offset = 0x38 },
+ { .name = "rx_pmac_sz_64", .offset = 0x39 },
+ { .name = "rx_pmac_sz_65_127", .offset = 0x3a },
+ { .name = "rx_pmac_sz_128_255", .offset = 0x3b },
+ { .name = "rx_pmac_sz_256_511", .offset = 0x3c },
+ { .name = "rx_pmac_sz_512_1023", .offset = 0x3d },
+ { .name = "rx_pmac_sz_1024_1526", .offset = 0x3e },
+ { .name = "rx_pmac_sz_jumbo", .offset = 0x3f },
+ { .name = "rx_pmac_pause", .offset = 0x40 },
+ { .name = "rx_pmac_control", .offset = 0x41 },
+ { .name = "rx_pmac_long", .offset = 0x42 },
+
+ { .name = "tx_octets", .offset = 0x80, },
+ { .name = "tx_unicast", .offset = 0x81, },
+ { .name = "tx_multicast", .offset = 0x82 },
+ { .name = "tx_broadcast", .offset = 0x83 },
+ { .name = "tx_col", .offset = 0x84 },
+ { .name = "tx_drop", .offset = 0x85 },
+ { .name = "tx_pause", .offset = 0x86 },
+ { .name = "tx_sz_64", .offset = 0x87 },
+ { .name = "tx_sz_65_127", .offset = 0x88 },
+ { .name = "tx_sz_128_255", .offset = 0x89 },
+ { .name = "tx_sz_256_511", .offset = 0x8a },
+ { .name = "tx_sz_512_1023", .offset = 0x8b },
+ { .name = "tx_sz_1024_1526", .offset = 0x8c },
+ { .name = "tx_sz_jumbo", .offset = 0x8d },
+ { .name = "tx_yellow_prio_0", .offset = 0x8e },
+ { .name = "tx_yellow_prio_1", .offset = 0x8f },
+ { .name = "tx_yellow_prio_2", .offset = 0x90 },
+ { .name = "tx_yellow_prio_3", .offset = 0x91 },
+ { .name = "tx_yellow_prio_4", .offset = 0x92 },
+ { .name = "tx_yellow_prio_5", .offset = 0x93 },
+ { .name = "tx_yellow_prio_6", .offset = 0x94 },
+ { .name = "tx_yellow_prio_7", .offset = 0x95 },
+ { .name = "tx_green_prio_0", .offset = 0x96 },
+ { .name = "tx_green_prio_1", .offset = 0x97 },
+ { .name = "tx_green_prio_2", .offset = 0x98 },
+ { .name = "tx_green_prio_3", .offset = 0x99 },
+ { .name = "tx_green_prio_4", .offset = 0x9a },
+ { .name = "tx_green_prio_5", .offset = 0x9b },
+ { .name = "tx_green_prio_6", .offset = 0x9c },
+ { .name = "tx_green_prio_7", .offset = 0x9d },
+ { .name = "tx_aged", .offset = 0x9e },
+ { .name = "tx_llct", .offset = 0x9f },
+ { .name = "tx_ct", .offset = 0xa0 },
+ { .name = "tx_mm_hold", .offset = 0xa1 },
+ { .name = "tx_merge_frag", .offset = 0xa2 },
+ { .name = "tx_pmac_octets", .offset = 0xa3, },
+ { .name = "tx_pmac_unicast", .offset = 0xa4, },
+ { .name = "tx_pmac_multicast", .offset = 0xa5 },
+ { .name = "tx_pmac_broadcast", .offset = 0xa6 },
+ { .name = "tx_pmac_pause", .offset = 0xa7 },
+ { .name = "tx_pmac_sz_64", .offset = 0xa8 },
+ { .name = "tx_pmac_sz_65_127", .offset = 0xa9 },
+ { .name = "tx_pmac_sz_128_255", .offset = 0xaa },
+ { .name = "tx_pmac_sz_256_511", .offset = 0xab },
+ { .name = "tx_pmac_sz_512_1023", .offset = 0xac },
+ { .name = "tx_pmac_sz_1024_1526", .offset = 0xad },
+ { .name = "tx_pmac_sz_jumbo", .offset = 0xae },
+
+ { .name = "dr_local", .offset = 0x100 },
+ { .name = "dr_tail", .offset = 0x101 },
+ { .name = "dr_yellow_prio_0", .offset = 0x102 },
+ { .name = "dr_yellow_prio_1", .offset = 0x103 },
+ { .name = "dr_yellow_prio_2", .offset = 0x104 },
+ { .name = "dr_yellow_prio_3", .offset = 0x105 },
+ { .name = "dr_yellow_prio_4", .offset = 0x106 },
+ { .name = "dr_yellow_prio_5", .offset = 0x107 },
+ { .name = "dr_yellow_prio_6", .offset = 0x108 },
+ { .name = "dr_yellow_prio_7", .offset = 0x109 },
+ { .name = "dr_green_prio_0", .offset = 0x10a },
+ { .name = "dr_green_prio_1", .offset = 0x10b },
+ { .name = "dr_green_prio_2", .offset = 0x10c },
+ { .name = "dr_green_prio_3", .offset = 0x10d },
+ { .name = "dr_green_prio_4", .offset = 0x10e },
+ { .name = "dr_green_prio_5", .offset = 0x10f },
+ { .name = "dr_green_prio_6", .offset = 0x110 },
+ { .name = "dr_green_prio_7", .offset = 0x111 },
+};
+
+/* The following numbers are indexes into lan966x_stats_layout[] */
+#define SYS_COUNT_RX_OCT 0
+#define SYS_COUNT_RX_UC 1
+#define SYS_COUNT_RX_MC 2
+#define SYS_COUNT_RX_BC 3
+#define SYS_COUNT_RX_SHORT 4
+#define SYS_COUNT_RX_FRAG 5
+#define SYS_COUNT_RX_JABBER 6
+#define SYS_COUNT_RX_CRC 7
+#define SYS_COUNT_RX_SYMBOL_ERR 8
+#define SYS_COUNT_RX_SZ_64 9
+#define SYS_COUNT_RX_SZ_65_127 10
+#define SYS_COUNT_RX_SZ_128_255 11
+#define SYS_COUNT_RX_SZ_256_511 12
+#define SYS_COUNT_RX_SZ_512_1023 13
+#define SYS_COUNT_RX_SZ_1024_1526 14
+#define SYS_COUNT_RX_SZ_JUMBO 15
+#define SYS_COUNT_RX_PAUSE 16
+#define SYS_COUNT_RX_CONTROL 17
+#define SYS_COUNT_RX_LONG 18
+#define SYS_COUNT_RX_CAT_DROP 19
+#define SYS_COUNT_RX_RED_PRIO_0 20
+#define SYS_COUNT_RX_RED_PRIO_1 21
+#define SYS_COUNT_RX_RED_PRIO_2 22
+#define SYS_COUNT_RX_RED_PRIO_3 23
+#define SYS_COUNT_RX_RED_PRIO_4 24
+#define SYS_COUNT_RX_RED_PRIO_5 25
+#define SYS_COUNT_RX_RED_PRIO_6 26
+#define SYS_COUNT_RX_RED_PRIO_7 27
+#define SYS_COUNT_RX_YELLOW_PRIO_0 28
+#define SYS_COUNT_RX_YELLOW_PRIO_1 29
+#define SYS_COUNT_RX_YELLOW_PRIO_2 30
+#define SYS_COUNT_RX_YELLOW_PRIO_3 31
+#define SYS_COUNT_RX_YELLOW_PRIO_4 32
+#define SYS_COUNT_RX_YELLOW_PRIO_5 33
+#define SYS_COUNT_RX_YELLOW_PRIO_6 34
+#define SYS_COUNT_RX_YELLOW_PRIO_7 35
+#define SYS_COUNT_RX_GREEN_PRIO_0 36
+#define SYS_COUNT_RX_GREEN_PRIO_1 37
+#define SYS_COUNT_RX_GREEN_PRIO_2 38
+#define SYS_COUNT_RX_GREEN_PRIO_3 39
+#define SYS_COUNT_RX_GREEN_PRIO_4 40
+#define SYS_COUNT_RX_GREEN_PRIO_5 41
+#define SYS_COUNT_RX_GREEN_PRIO_6 42
+#define SYS_COUNT_RX_GREEN_PRIO_7 43
+#define SYS_COUNT_RX_ASSEMBLY_ERR 44
+#define SYS_COUNT_RX_SMD_ERR 45
+#define SYS_COUNT_RX_ASSEMBLY_OK 46
+#define SYS_COUNT_RX_MERGE_FRAG 47
+#define SYS_COUNT_RX_PMAC_OCT 48
+#define SYS_COUNT_RX_PMAC_UC 49
+#define SYS_COUNT_RX_PMAC_MC 50
+#define SYS_COUNT_RX_PMAC_BC 51
+#define SYS_COUNT_RX_PMAC_SHORT 52
+#define SYS_COUNT_RX_PMAC_FRAG 53
+#define SYS_COUNT_RX_PMAC_JABBER 54
+#define SYS_COUNT_RX_PMAC_CRC 55
+#define SYS_COUNT_RX_PMAC_SYMBOL_ERR 56
+#define SYS_COUNT_RX_PMAC_SZ_64 57
+#define SYS_COUNT_RX_PMAC_SZ_65_127 58
+#define SYS_COUNT_RX_PMAC_SZ_128_255 59
+#define SYS_COUNT_RX_PMAC_SZ_256_511 60
+#define SYS_COUNT_RX_PMAC_SZ_512_1023 61
+#define SYS_COUNT_RX_PMAC_SZ_1024_1526 62
+#define SYS_COUNT_RX_PMAC_SZ_JUMBO 63
+#define SYS_COUNT_RX_PMAC_PAUSE 64
+#define SYS_COUNT_RX_PMAC_CONTROL 65
+#define SYS_COUNT_RX_PMAC_LONG 66
+
+#define SYS_COUNT_TX_OCT 67
+#define SYS_COUNT_TX_UC 68
+#define SYS_COUNT_TX_MC 69
+#define SYS_COUNT_TX_BC 70
+#define SYS_COUNT_TX_COL 71
+#define SYS_COUNT_TX_DROP 72
+#define SYS_COUNT_TX_PAUSE 73
+#define SYS_COUNT_TX_SZ_64 74
+#define SYS_COUNT_TX_SZ_65_127 75
+#define SYS_COUNT_TX_SZ_128_255 76
+#define SYS_COUNT_TX_SZ_256_511 77
+#define SYS_COUNT_TX_SZ_512_1023 78
+#define SYS_COUNT_TX_SZ_1024_1526 79
+#define SYS_COUNT_TX_SZ_JUMBO 80
+#define SYS_COUNT_TX_YELLOW_PRIO_0 81
+#define SYS_COUNT_TX_YELLOW_PRIO_1 82
+#define SYS_COUNT_TX_YELLOW_PRIO_2 83
+#define SYS_COUNT_TX_YELLOW_PRIO_3 84
+#define SYS_COUNT_TX_YELLOW_PRIO_4 85
+#define SYS_COUNT_TX_YELLOW_PRIO_5 86
+#define SYS_COUNT_TX_YELLOW_PRIO_6 87
+#define SYS_COUNT_TX_YELLOW_PRIO_7 88
+#define SYS_COUNT_TX_GREEN_PRIO_0 89
+#define SYS_COUNT_TX_GREEN_PRIO_1 90
+#define SYS_COUNT_TX_GREEN_PRIO_2 91
+#define SYS_COUNT_TX_GREEN_PRIO_3 92
+#define SYS_COUNT_TX_GREEN_PRIO_4 93
+#define SYS_COUNT_TX_GREEN_PRIO_5 94
+#define SYS_COUNT_TX_GREEN_PRIO_6 95
+#define SYS_COUNT_TX_GREEN_PRIO_7 96
+#define SYS_COUNT_TX_AGED 97
+#define SYS_COUNT_TX_LLCT 98
+#define SYS_COUNT_TX_CT 99
+#define SYS_COUNT_TX_MM_HOLD 100
+#define SYS_COUNT_TX_MERGE_FRAG 101
+#define SYS_COUNT_TX_PMAC_OCT 102
+#define SYS_COUNT_TX_PMAC_UC 103
+#define SYS_COUNT_TX_PMAC_MC 104
+#define SYS_COUNT_TX_PMAC_BC 105
+#define SYS_COUNT_TX_PMAC_PAUSE 106
+#define SYS_COUNT_TX_PMAC_SZ_64 107
+#define SYS_COUNT_TX_PMAC_SZ_65_127 108
+#define SYS_COUNT_TX_PMAC_SZ_128_255 109
+#define SYS_COUNT_TX_PMAC_SZ_256_511 110
+#define SYS_COUNT_TX_PMAC_SZ_512_1023 111
+#define SYS_COUNT_TX_PMAC_SZ_1024_1526 112
+#define SYS_COUNT_TX_PMAC_SZ_JUMBO 113
+
+#define SYS_COUNT_DR_LOCAL 114
+#define SYS_COUNT_DR_TAIL 115
+#define SYS_COUNT_DR_YELLOW_PRIO_0 116
+#define SYS_COUNT_DR_YELLOW_PRIO_1 117
+#define SYS_COUNT_DR_YELLOW_PRIO_2 118
+#define SYS_COUNT_DR_YELLOW_PRIO_3 119
+#define SYS_COUNT_DR_YELLOW_PRIO_4 120
+#define SYS_COUNT_DR_YELLOW_PRIO_5 121
+#define SYS_COUNT_DR_YELLOW_PRIO_6 122
+#define SYS_COUNT_DR_YELLOW_PRIO_7 123
+#define SYS_COUNT_DR_GREEN_PRIO_0 124
+#define SYS_COUNT_DR_GREEN_PRIO_1 125
+#define SYS_COUNT_DR_GREEN_PRIO_2 126
+#define SYS_COUNT_DR_GREEN_PRIO_3 127
+#define SYS_COUNT_DR_GREEN_PRIO_4 128
+#define SYS_COUNT_DR_GREEN_PRIO_5 129
+#define SYS_COUNT_DR_GREEN_PRIO_6 130
+#define SYS_COUNT_DR_GREEN_PRIO_7 131
+
+/* Add a possibly wrapping 32 bit value to a 64 bit counter */
+static void lan966x_add_cnt(u64 *cnt, u32 val)
+{
+ if (val < (*cnt & U32_MAX))
+ *cnt += (u64)1 << 32; /* value has wrapped */
+
+ *cnt = (*cnt & ~(u64)U32_MAX) + val;
+}
+
+static void lan966x_stats_update(struct lan966x *lan966x)
+{
+ int i, j;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ uint idx = i * lan966x->num_stats;
+
+ lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(i),
+ lan966x, SYS_STAT_CFG);
+
+ for (j = 0; j < lan966x->num_stats; j++) {
+ u32 offset = lan966x->stats_layout[j].offset;
+
+ lan966x_add_cnt(&lan966x->stats[idx++],
+ lan_rd(lan966x, SYS_CNT(offset)));
+ }
+ }
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static int lan966x_get_sset_count(struct net_device *dev, int sset)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return lan966x->num_stats;
+}
+
+static void lan966x_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct lan966x_port *port = netdev_priv(netdev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < lan966x->num_stats; i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ lan966x->stats_layout[i].name, ETH_GSTRING_LEN);
+}
+
+static void lan966x_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int i;
+
+ /* check and update now */
+ lan966x_stats_update(lan966x);
+
+ /* Copy all counters */
+ for (i = 0; i < lan966x->num_stats; i++)
+ *data++ = lan966x->stats[port->chip_port *
+ lan966x->num_stats + i];
+}
+
+static void lan966x_get_eth_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ mac_stats->FramesTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_UC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->SingleCollisionFrames =
+ lan966x->stats[idx + SYS_COUNT_TX_COL];
+ mac_stats->MultipleCollisionFrames = 0;
+ mac_stats->FramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_UC] +
+ lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->FrameCheckSequenceErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->AlignmentErrors = 0;
+ mac_stats->OctetsTransmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+ mac_stats->FramesWithDeferredXmissions =
+ lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
+ mac_stats->LateCollisions = 0;
+ mac_stats->FramesAbortedDueToXSColls = 0;
+ mac_stats->FramesLostDueToIntMACXmitError = 0;
+ mac_stats->CarrierSenseErrors = 0;
+ mac_stats->OctetsReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_OCT];
+ mac_stats->FramesLostDueToIntMACRcvError = 0;
+ mac_stats->MulticastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_MC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
+ mac_stats->BroadcastFramesXmittedOK =
+ lan966x->stats[idx + SYS_COUNT_TX_BC] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
+ mac_stats->FramesWithExcessiveDeferral = 0;
+ mac_stats->MulticastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_MC];
+ mac_stats->BroadcastFramesReceivedOK =
+ lan966x->stats[idx + SYS_COUNT_RX_BC];
+ mac_stats->InRangeLengthErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_CRC];
+ mac_stats->OutOfRangeLengthField =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ mac_stats->FrameTooLongErrors =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+static const struct ethtool_rmon_hist_range lan966x_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {}
+};
+
+static void lan966x_get_eth_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+
+ lan966x_stats_update(lan966x);
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ rmon_stats->undersize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT];
+ rmon_stats->oversize_pkts =
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_LONG];
+ rmon_stats->fragments =
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG];
+ rmon_stats->jabbers =
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER];
+ rmon_stats->hist[0] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64];
+ rmon_stats->hist[1] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127];
+ rmon_stats->hist[2] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255];
+ rmon_stats->hist[3] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511];
+ rmon_stats->hist[4] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023];
+ rmon_stats->hist[5] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526];
+ rmon_stats->hist[6] =
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ rmon_stats->hist_tx[0] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64];
+ rmon_stats->hist_tx[1] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127];
+ rmon_stats->hist_tx[2] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255];
+ rmon_stats->hist_tx[3] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511];
+ rmon_stats->hist_tx[4] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023];
+ rmon_stats->hist_tx[5] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526];
+ rmon_stats->hist_tx[6] =
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ mutex_unlock(&lan966x->stats_lock);
+
+ *ranges = lan966x_rmon_ranges;
+}
+
+static int lan966x_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(port->phylink, cmd);
+}
+
+static int lan966x_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct lan966x_port *port = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(port->phylink, cmd);
+}
+
+static void lan966x_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(port->phylink, pause);
+}
+
+static int lan966x_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(port->phylink, pause);
+}
+
+const struct ethtool_ops lan966x_ethtool_ops = {
+ .get_link_ksettings = lan966x_get_link_ksettings,
+ .set_link_ksettings = lan966x_set_link_ksettings,
+ .get_pauseparam = lan966x_get_pauseparam,
+ .set_pauseparam = lan966x_set_pauseparam,
+ .get_sset_count = lan966x_get_sset_count,
+ .get_strings = lan966x_get_strings,
+ .get_ethtool_stats = lan966x_get_ethtool_stats,
+ .get_eth_mac_stats = lan966x_get_eth_mac_stats,
+ .get_rmon_stats = lan966x_get_eth_rmon_stats,
+ .get_link = ethtool_op_get_link,
+};
+
+static void lan966x_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct lan966x *lan966x = container_of(del_work, struct lan966x,
+ stats_work);
+
+ lan966x_stats_update(lan966x);
+
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+}
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 idx;
+ int i;
+
+ idx = port->chip_port * lan966x->num_stats;
+
+ mutex_lock(&lan966x->stats_lock);
+
+ stats->rx_bytes = lan966x->stats[idx + SYS_COUNT_RX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_OCT];
+
+ stats->rx_packets = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_SZ_JUMBO];
+
+ stats->multicast = lan966x->stats[idx + SYS_COUNT_RX_MC] +
+ lan966x->stats[idx + SYS_COUNT_RX_PMAC_MC];
+
+ stats->rx_errors = lan966x->stats[idx + SYS_COUNT_RX_SHORT] +
+ lan966x->stats[idx + SYS_COUNT_RX_FRAG] +
+ lan966x->stats[idx + SYS_COUNT_RX_JABBER] +
+ lan966x->stats[idx + SYS_COUNT_RX_CRC] +
+ lan966x->stats[idx + SYS_COUNT_RX_SYMBOL_ERR] +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG];
+
+ stats->rx_dropped = dev->stats.rx_dropped +
+ lan966x->stats[idx + SYS_COUNT_RX_LONG] +
+ lan966x->stats[idx + SYS_COUNT_DR_LOCAL] +
+ lan966x->stats[idx + SYS_COUNT_DR_TAIL];
+
+ for (i = 0; i < LAN966X_NUM_TC; i++) {
+ stats->rx_dropped +=
+ (lan966x->stats[idx + SYS_COUNT_DR_YELLOW_PRIO_0 + i] +
+ lan966x->stats[idx + SYS_COUNT_DR_GREEN_PRIO_0 + i]);
+ }
+
+ /* Get Tx stats */
+ stats->tx_bytes = lan966x->stats[idx + SYS_COUNT_TX_OCT] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
+
+ stats->tx_packets = lan966x->stats[idx + SYS_COUNT_TX_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_SZ_JUMBO] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_64] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_65_127] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_128_255] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_256_511] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_512_1023] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_1024_1526] +
+ lan966x->stats[idx + SYS_COUNT_TX_PMAC_SZ_JUMBO];
+
+ stats->tx_dropped = lan966x->stats[idx + SYS_COUNT_TX_DROP] +
+ lan966x->stats[idx + SYS_COUNT_TX_AGED];
+
+ stats->collisions = lan966x->stats[idx + SYS_COUNT_TX_COL];
+
+ mutex_unlock(&lan966x->stats_lock);
+}
+
+int lan966x_stats_init(struct lan966x *lan966x)
+{
+ char queue_name[32];
+
+ lan966x->stats_layout = lan966x_stats_layout;
+ lan966x->num_stats = ARRAY_SIZE(lan966x_stats_layout);
+ lan966x->stats = devm_kcalloc(lan966x->dev, lan966x->num_phys_ports *
+ lan966x->num_stats,
+ sizeof(u64), GFP_KERNEL);
+ if (!lan966x->stats)
+ return -ENOMEM;
+
+ /* Init stats worker */
+ mutex_init(&lan966x->stats_lock);
+ snprintf(queue_name, sizeof(queue_name), "%s-stats",
+ dev_name(lan966x->dev));
+ lan966x->stats_queue = create_singlethread_workqueue(queue_name);
+ if (!lan966x->stats_queue)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work);
+ queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work,
+ LAN966X_STATS_CHECK_DELAY);
+
+ return 0;
+}
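
The central trick in the statistics path above is lan966x_add_cnt(), which
folds a 32-bit hardware counter snapshot into a monotonically growing
64-bit software counter; the scheme only requires that the background
worker samples each counter at least once per wrap period, hence the
two-second LAN966X_STATS_CHECK_DELAY. A minimal standalone sketch of the
same logic, with an illustrative harness that is not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define U32_MAX 0xffffffffu

/* Same wrap handling as lan966x_add_cnt(): if the new 32-bit reading is
 * smaller than the low 32 bits of the accumulator, the hardware counter
 * must have wrapped, so carry one into the upper 32 bits before storing
 * the new low half.
 */
static void add_cnt(uint64_t *cnt, uint32_t val)
{
	if (val < (*cnt & U32_MAX))
		*cnt += (uint64_t)1 << 32; /* value has wrapped */

	*cnt = (*cnt & ~(uint64_t)U32_MAX) + val;
}

int main(void)
{
	uint64_t cnt = 0;

	add_cnt(&cnt, 0xfffffff0u); /* counter close to the 32-bit limit */
	add_cnt(&cnt, 0x00000010u); /* hardware counter wrapped around */
	printf("0x%llx\n", (unsigned long long)cnt); /* prints 0x100000010 */
	return 0;
}

The accumulated values then surface through the standard ethtool
interfaces wired up in lan966x_ethtool_ops, e.g. "ethtool -S <port>" for
the raw named counters and "ethtool -a <port>" for the pause parameters.
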
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
new file mode 100644
index 000000000000..da5ca7188679
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ struct lan966x *lan966x;
+ unsigned long event;
+};
+
+struct lan966x_fdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u32 references;
+};
+
+static struct lan966x_fdb_entry *
+lan966x_fdb_find_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr))
+ return fdb_entry;
+ }
+
+ return NULL;
+}
+
+static void lan966x_fdb_add_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ fdb_entry = lan966x_fdb_find_entry(lan966x, fdb_info);
+ if (fdb_entry) {
+ fdb_entry->references++;
+ return;
+ }
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry)
+ return;
+
+ ether_addr_copy(fdb_entry->mac, fdb_info->addr);
+ fdb_entry->vid = fdb_info->vid;
+ fdb_entry->references = 1;
+ list_add_tail(&fdb_entry->list, &lan966x->fdb_entries);
+}
+
+static bool lan966x_fdb_del_entry(struct lan966x *lan966x,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries,
+ list) {
+ if (fdb_entry->vid == fdb_info->vid &&
+ ether_addr_equal(fdb_entry->mac, fdb_info->addr)) {
+ fdb_entry->references--;
+ if (!fdb_entry->references) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ return true;
+ }
+ break;
+ }
+ }
+
+ return false;
+}
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_fdb_entry *fdb_entry;
+
+ list_for_each_entry(fdb_entry, &lan966x->fdb_entries, list) {
+ if (fdb_entry->vid != vid)
+ continue;
+
+ lan966x_mac_cpu_forget(lan966x, fdb_entry->mac, fdb_entry->vid);
+ }
+}
+
+static void lan966x_fdb_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_fdb_entry *fdb_entry, *tmp;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &lan966x->fdb_entries, list) {
+ list_del(&fdb_entry->list);
+ kfree(fdb_entry);
+ }
+}
+
+int lan966x_fdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->fdb_entries);
+ lan966x->fdb_work = alloc_ordered_workqueue("lan966x_order", 0);
+ if (!lan966x->fdb_work)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void lan966x_fdb_deinit(struct lan966x *lan966x)
+{
+ destroy_workqueue(lan966x->fdb_work);
+ lan966x_fdb_purge_entries(lan966x);
+}
+
+static void lan966x_fdb_event_work(struct work_struct *work)
+{
+ struct lan966x_fdb_event_work *fdb_work =
+ container_of(work, struct lan966x_fdb_event_work, work);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct net_device *dev = fdb_work->dev;
+ struct lan966x_port *port;
+ struct lan966x *lan966x;
+ int ret;
+
+ fdb_info = &fdb_work->fdb_info;
+ lan966x = fdb_work->lan966x;
+
+ if (lan966x_netdevice_check(dev)) {
+ port = netdev_priv(dev);
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_add_entry(lan966x, port, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (!fdb_info->added_by_user)
+ break;
+ lan966x_mac_del_entry(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+ } else {
+ if (!netif_is_bridge_master(dev))
+ goto out;
+
+ /* The event was generated on the bridge device itself */
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ /* If there is no front port in this vlan, there is no
+ * point in copying the frame to the CPU because it would
+ * just be dropped at a later point. Still, the fdb entry
+ * must be stored now, for the moment when a port actually
+ * joins the vlan.
+ */
+ lan966x_fdb_add_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ lan966x_mac_cpu_learn(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ret = lan966x_fdb_del_entry(lan966x, fdb_info);
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x,
+ fdb_info->vid))
+ break;
+
+ if (ret)
+ lan966x_mac_cpu_forget(lan966x, fdb_info->addr,
+ fdb_info->vid);
+ break;
+ }
+ }
+
+out:
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+ dev_put(dev);
+}
+
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_fdb_event_work *fdb_work;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ if (lan966x_netdevice_check(orig_dev) &&
+ !fdb_info->added_by_user)
+ break;
+
+ fdb_work = kzalloc(sizeof(*fdb_work), GFP_ATOMIC);
+ if (!fdb_work)
+ return -ENOMEM;
+
+ fdb_work->dev = orig_dev;
+ fdb_work->lan966x = lan966x;
+ fdb_work->event = event;
+ INIT_WORK(&fdb_work->work, lan966x_fdb_event_work);
+ memcpy(&fdb_work->fdb_info, fdb_info, sizeof(fdb_work->fdb_info));
+ fdb_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!fdb_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)fdb_work->fdb_info.addr, fdb_info->addr);
+ dev_hold(orig_dev);
+
+ queue_work(lan966x->fdb_work, &fdb_work->work);
+ break;
+ }
+
+ return 0;
+err_addr_alloc:
+ kfree(fdb_work);
+ return -ENOMEM;
+}
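
One detail worth spelling out in the file above: lan966x_fdb_del_entry()
returns true only when the last reference to an entry is dropped and the
entry is freed. lan966x_fdb_event_work() relies on that return value so
that lan966x_mac_cpu_forget() runs only once the bridge no longer needs
the address at all; earlier deletions merely decrement the reference
count that lan966x_fdb_add_entry() bumps for duplicate adds.
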
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
new file mode 100644
index 000000000000..ca3314789d18
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_IFH_H__
+#define __LAN966X_IFH_H__
+
+/* Fields marked with (*) should just be cleared upon injection.
+ * The IFH is transmitted MSByte first (highest bit position sent as the
+ * MSB of the first byte).
+ */
+
+#define IFH_LEN 7
+
+/* Timestamp for frame */
+#define IFH_POS_TIMESTAMP 192
+
+/* Bypass analyzer with a prefilled IFH */
+#define IFH_POS_BYPASS 191
+
+/* Masqueraded injection with masq_port defining logical source port */
+#define IFH_POS_MASQ 190
+
+/* Masqueraded port number for injection */
+#define IFH_POS_MASQ_PORT 186
+
+/* Frame length (*) */
+#define IFH_POS_LEN 178
+
+/* Cell filling mode. Full (0), Etype (1), LlctOpt (2), Llct (3) */
+#define IFH_POS_WRDMODE 176
+
+/* Frame has 16 bits rtag removed compared to line data */
+#define IFH_POS_RTAG48 175
+
+/* Frame has a redundancy tag */
+#define IFH_POS_HAS_RED_TAG 174
+
+/* Frame has been cut through forwarded (*) */
+#define IFH_POS_CUTTHRU 173
+
+/* Rewriter command */
+#define IFH_POS_REW_CMD 163
+
+/* Enable OAM-related rewriting. PDU_TYPE encodes OAM type. */
+#define IFH_POS_REW_OAM 162
+
+/* PDU type. Encoding: (0-NONE, 1-Y1731_CCM, 2-MRP_TST, 3-MRP_ITST, 4-DLR_BCN,
+ * 5-DLR_ADV, 6-RTE_NULL_INJ, 7-IPV4, 8-IPV6, 9-Y1731_NON_CCM).
+ */
+#define IFH_POS_PDU_TYPE 158
+
+/* Update FCS before transmission */
+#define IFH_POS_FCS_UPD 157
+
+/* Classified DSCP value of frame */
+#define IFH_POS_DSCP 151
+
+/* Yellow indication */
+#define IFH_POS_DP 150
+
+/* Process in RTE/inbound */
+#define IFH_POS_RTE_INB_UPDATE 149
+
+/* Number of tags to pop from frame */
+#define IFH_POS_POP_CNT 147
+
+/* Number of tags in front of the ethertype */
+#define IFH_POS_ETYPE_OFS 145
+
+/* Logical source port of frame (*) */
+#define IFH_POS_SRCPORT 141
+
+/* Sequence number in redundancy tag */
+#define IFH_POS_SEQ_NUM 120
+
+/* Stagd flag and classified TCI of frame (PCP/DEI/VID) */
+#define IFH_POS_TCI 103
+
+/* Classified internal priority for queuing */
+#define IFH_POS_QOS_CLASS 100
+
+/* Bit mask with eight cpu copy classes */
+#define IFH_POS_CPUQ 92
+
+/* Relearn + learn flags (*) */
+#define IFH_POS_LEARN_FLAGS 90
+
+/* SFLOW identifier for frame (0-8: Tx port, 9: Rx sampling, 15: No sampling) */
+#define IFH_POS_SFLOW_ID 86
+
+/* Set if an ACL/S2 rule was hit (*).
+ * Super priority: acl_hit=0 and acl_hit(4)=1.
+ */
+#define IFH_POS_ACL_HIT 85
+
+/* S2 rule index hit (*) */
+#define IFH_POS_ACL_IDX 79
+
+/* ISDX as classified by S1 */
+#define IFH_POS_ISDX 71
+
+/* Destination ports for frame */
+#define IFH_POS_DSTS 62
+
+/* Storm policer to be applied: None/Uni/Multi/Broad (*) */
+#define IFH_POS_FLOOD 60
+
+/* Redundancy tag operation */
+#define IFH_POS_SEQ_OP 58
+
+/* Classified internal priority for resource management, tagging, etc. */
+#define IFH_POS_IPV 55
+
+/* Frame is for AFI use */
+#define IFH_POS_AFI 54
+
+/* Internal aging value (*) */
+#define IFH_POS_AGED 52
+
+/* RTP Identifier */
+#define IFH_POS_RTP_ID 42
+
+/* RTP MRPD flow */
+#define IFH_POS_RTP_SUBID 41
+
+/* Profinet DataStatus or opcua GroupVersion MSB */
+#define IFH_POS_PN_DATA_STATUS 33
+
+/* Profinet transfer status (1 iff the status is 0) */
+#define IFH_POS_PN_TRANSF_STATUS_ZERO 32
+
+/* Profinet cycle counter or opcua NetworkMessageNumber */
+#define IFH_POS_PN_CC 16
+
+#define IFH_WID_TIMESTAMP 32
+#define IFH_WID_BYPASS 1
+#define IFH_WID_MASQ 1
+#define IFH_WID_MASQ_PORT 4
+#define IFH_WID_LEN 14
+#define IFH_WID_WRDMODE 2
+#define IFH_WID_RTAG48 1
+#define IFH_WID_HAS_RED_TAG 1
+#define IFH_WID_CUTTHRU 1
+#define IFH_WID_REW_CMD 10
+#define IFH_WID_REW_OAM 1
+#define IFH_WID_PDU_TYPE 4
+#define IFH_WID_FCS_UPD 1
+#define IFH_WID_DSCP 6
+#define IFH_WID_DP 1
+#define IFH_WID_RTE_INB_UPDATE 1
+#define IFH_WID_POP_CNT 2
+#define IFH_WID_ETYPE_OFS 2
+#define IFH_WID_SRCPORT 4
+#define IFH_WID_SEQ_NUM 16
+#define IFH_WID_TCI 17
+#define IFH_WID_QOS_CLASS 3
+#define IFH_WID_CPUQ 8
+#define IFH_WID_LEARN_FLAGS 2
+#define IFH_WID_SFLOW_ID 4
+#define IFH_WID_ACL_HIT 1
+#define IFH_WID_ACL_IDX 6
+#define IFH_WID_ISDX 8
+#define IFH_WID_DSTS 9
+#define IFH_WID_FLOOD 2
+#define IFH_WID_SEQ_OP 2
+#define IFH_WID_IPV 3
+#define IFH_WID_AFI 1
+#define IFH_WID_AGED 2
+#define IFH_WID_RTP_ID 10
+#define IFH_WID_RTP_SUBID 1
+#define IFH_WID_PN_DATA_STATUS 8
+#define IFH_WID_PN_TRANSF_STATUS_ZERO 1
+#define IFH_WID_PN_CC 16
+
+#endif /* __LAN966X_IFH_H__ */
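
The IFH_POS_*/IFH_WID_* pairs above are meant to be consumed by the
packing() helper from <linux/packing.h> (pulled in through the PACKING
Kconfig select), which reads or writes an arbitrary bit field of the
28-byte (IFH_LEN * 4) header. A sketch of both directions, modelled on
the lan966x_ifh_set_*() helpers in lan966x_main.c; the getter name here
is illustrative, not a function added by this patch:

#include <linux/packing.h>

#include "lan966x_ifh.h"

/* Write the destination port mask field, as lan966x_ifh_set_port()
 * does: packing() takes the most significant bit first, so the start
 * bit is the field position plus the field width minus one.
 */
static void ifh_set_dsts(void *ifh, u64 port_mask)
{
	packing(ifh, &port_mask, IFH_POS_DSTS + IFH_WID_DSTS - 1,
		IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
}

/* The mirror operation: recover the logical source port of a received
 * frame by running the same field description with UNPACK.
 */
static u64 ifh_get_src_port(void *ifh)
{
	u64 src_port = 0;

	packing(ifh, &src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
		IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);

	return src_port;
}
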
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
new file mode 100644
index 000000000000..ce5970bdcc6a
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+#include "lan966x_main.h"
+
+#define LAN966X_MAC_COLUMNS 4
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+#define MACACCESS_CMD_SYNC_GET_NEXT 8
+
+#define LAN966X_MAC_INVALID_ROW -1
+
+struct lan966x_mac_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u16 vid;
+ u16 port_index;
+ int row;
+};
+
+struct lan966x_mac_raw_entry {
+ u32 mach;
+ u32 macl;
+ u32 maca;
+ bool processed;
+};
+
+static int lan966x_mac_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_MACACCESS);
+}
+
+static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(lan966x_mac_get_status,
+ lan966x, val,
+ (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US,
+ TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_mac_select(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid)
+{
+ u32 macl = 0, mach = 0;
+
+ /* Set the MAC address to handle and the associated vlan, in a
+ * format understood by the hardware.
+ */
+ mach |= vid << 16;
+ mach |= mac[0] << 8;
+ mach |= mac[1] << 0;
+ macl |= mac[2] << 24;
+ macl |= mac[3] << 16;
+ macl |= mac[4] << 8;
+ macl |= mac[5] << 0;
+
+ lan_wr(macl, lan966x, ANA_MACLDATA);
+ lan_wr(mach, lan966x, ANA_MACHDATA);
+}
+
+static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a write command */
+ lan_wr(ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_CHANGE2SW_SET(0) |
+ ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) |
+ ANA_MACACCESS_DEST_IDX_SET(pgid) |
+ ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+/* The mask of the front ports is encoded inside the mac parameter via a call
+ * to lan966x_mdb_encode_mac().
+ */
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6);
+
+ return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type);
+}
+
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);
+
+ return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
+}
+
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type)
+{
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a forget command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET),
+ lan966x, ANA_MACACCESS);
+
+ return lan966x_mac_wait_for_completion(lan966x);
+}
+
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
+{
+ return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
+}
+
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing)
+{
+ lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2),
+ ANA_AUTOAGE_AGE_PERIOD,
+ lan966x, ANA_AUTOAGE);
+}
+
+void lan966x_mac_init(struct lan966x *lan966x)
+{
+ /* Clear the MAC table */
+ lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ spin_lock_init(&lan966x->mac_lock);
+ INIT_LIST_HEAD(&lan966x->mac_entries);
+}
+
+static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL);
+ if (!mac_entry)
+ return NULL;
+
+ memcpy(mac_entry->mac, mac, ETH_ALEN);
+ mac_entry->vid = vid;
+ mac_entry->port_index = port_index;
+ mac_entry->row = LAN966X_MAC_INVALID_ROW;
+ return mac_entry;
+}
+
+static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid, u16 port_index)
+{
+ struct lan966x_mac_entry *res = NULL;
+ struct lan966x_mac_entry *mac_entry;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac, mac_entry->mac) &&
+ mac_entry->port_index == port_index) {
+ res = mac_entry;
+ break;
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return res;
+}
+
+static int lan966x_mac_lookup(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid, enum macaccess_entry_type type)
+{
+ int ret;
+
+ lan966x_mac_select(lan966x, mac, vid);
+
+ /* Issue a read command */
+ lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
+ ANA_MACACCESS_VALID_SET(1) |
+ ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ),
+ lan966x, ANA_MACACCESS);
+
+ ret = lan966x_mac_wait_for_completion(lan966x);
+ if (ret)
+ return ret;
+
+ return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS));
+}
+
+static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type,
+ const char *mac, u16 vid,
+ struct net_device *dev)
+{
+ struct switchdev_notifier_fdb_info info = { 0 };
+
+ info.addr = mac;
+ info.vid = vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(type, dev, &info.info, NULL);
+}
+
+int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
+ const unsigned char *addr, u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry;
+
+ if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL))
+ return 0;
+
+ /* In case the entry already exists in SW, don't add it again,
+ * just update HW. The actual HW must be checked as well, because
+ * an entry can be learned by HW and, before the interrupt is
+ * serviced, the frame can reach the CPU, which then adds the
+ * entry but without the extern_learn flag.
+ */
+ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
+ if (mac_entry)
+ return lan966x_mac_learn(lan966x, port->chip_port,
+ addr, vid, ENTRYTYPE_LOCKED);
+
+ mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port);
+ if (!mac_entry)
+ return -ENOMEM;
+
+ spin_lock(&lan966x->mac_lock);
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);
+ lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+
+ return 0;
+}
+
+int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
+ u16 vid)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(addr, mac_entry->mac)) {
+ lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ return 0;
+}
+
+void lan966x_mac_purge_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
+ list) {
+ lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid,
+ ENTRYTYPE_LOCKED);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ spin_unlock(&lan966x->mac_lock);
+}
+
+static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
+ unsigned char *mac, u32 vid,
+ struct net_device *dev)
+{
+ rtnl_lock();
+ lan966x_fdb_call_notifiers(type, mac, vid, dev);
+ rtnl_unlock();
+}
+
+static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
+ u8 *mac, u16 *vid, u32 *dest_idx)
+{
+ mac[0] = (raw_entry->mach >> 8) & 0xff;
+ mac[1] = (raw_entry->mach >> 0) & 0xff;
+ mac[2] = (raw_entry->macl >> 24) & 0xff;
+ mac[3] = (raw_entry->macl >> 16) & 0xff;
+ mac[4] = (raw_entry->macl >> 8) & 0xff;
+ mac[5] = (raw_entry->macl >> 0) & 0xff;
+
+ *vid = (raw_entry->mach >> 16) & 0xfff;
+ *dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
+}
+
+static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
+ struct lan966x_mac_raw_entry *raw_entries)
+{
+ struct lan966x_mac_entry *mac_entry, *tmp;
+ unsigned char mac[ETH_ALEN] __aligned(2);
+ u32 dest_idx;
+ u32 column;
+ u16 vid;
+
+ spin_lock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
+ bool found = false;
+
+ if (mac_entry->row != row)
+ continue;
+
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+ /* All the valid entries are at the start of the row,
+ * so once an invalid entry is hit, the rest of the
+ * columns can be skipped.
+ */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ WARN_ON(dest_idx >= lan966x->num_phys_ports);
+
+ /* If the entry in SW is found, then there is nothing
+ * to do
+ */
+ if (mac_entry->vid == vid &&
+ ether_addr_equal(mac_entry->mac, mac) &&
+ mac_entry->port_index == dest_idx) {
+ raw_entries[column].processed = true;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* Notify the bridge that the entry doesn't exist
+ * anymore in the HW and remove the entry from the SW
+ * list
+ */
+ lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ mac_entry->mac, mac_entry->vid,
+ lan966x->ports[mac_entry->port_index]->dev);
+
+ list_del(&mac_entry->list);
+ kfree(mac_entry);
+ }
+ }
+ spin_unlock(&lan966x->mac_lock);
+
+ /* Now go through the list of columns and see if any entry was not
+ * in the SW list; such an entry is new, so the bridge needs to be
+ * notified about it.
+ */
+ for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
+ /* All the valid entries are at the start of the row, so once
+ * an invalid entry is hit, the rest of the columns can be
+ * skipped.
+ */
+ if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
+ break;
+
+ /* If the entry already exists then don't do anything */
+ if (raw_entries[column].processed)
+ continue;
+
+ lan966x_mac_process_raw_entry(&raw_entries[column],
+ mac, &vid, &dest_idx);
+ WARN_ON(dest_idx >= lan966x->num_phys_ports);
+
+ mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
+ if (!mac_entry)
+ return;
+
+ mac_entry->row = row;
+
+ spin_lock(&lan966x->mac_lock);
+ list_add_tail(&mac_entry->list, &lan966x->mac_entries);
+ spin_unlock(&lan966x->mac_lock);
+
+ lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ mac, vid, lan966x->ports[dest_idx]->dev);
+ }
+}
+
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
+{
+ struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
+ u32 index, column;
+ bool stop = true;
+ u32 val;
+
+ /* Start the scan from 0, 0 */
+ lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
+ ANA_MACTINDX_BUCKET_SET(0),
+ lan966x, ANA_MACTINDX);
+
+ while (1) {
+ lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
+ ANA_MACACCESS_MAC_TABLE_CMD,
+ lan966x, ANA_MACACCESS);
+ lan966x_mac_wait_for_completion(lan966x);
+
+ val = lan_rd(lan966x, ANA_MACTINDX);
+ index = ANA_MACTINDX_M_INDEX_GET(val);
+ column = ANA_MACTINDX_BUCKET_GET(val);
+
+ /* The SYNC-GET-NEXT returns all the entries(4) in a row in
+ * which is suffered a change. By change it means that new entry
+ * was added or an entry was removed because of ageing.
+ * It would return all the columns for that row. And after that
+ * it would return the next row The stop conditions of the
+ * SYNC-GET-NEXT is when it reaches 'directly' to row 0
+ * column 3. So if SYNC-GET-NEXT returns row 0 and column 0
+ * then it is required to continue to read more even if it
+ * reaches row 0 and column 3.
+ */
+ if (index == 0 && column == 0)
+ stop = false;
+
+ if (column == LAN966X_MAC_COLUMNS - 1 &&
+ index == 0 && stop)
+ break;
+
+ entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
+ entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
+ entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
+
+ /* Once all the columns are read process them */
+ if (column == LAN966X_MAC_COLUMNS - 1) {
+ lan966x_mac_irq_process(lan966x, index, entry);
+ /* A row was processed so it is safe to assume that the
+ * next row/column can be the stop condition
+ */
+ stop = true;
+ }
+ }
+
+ lan_rmw(ANA_ANAINTR_INTR_SET(0),
+ ANA_ANAINTR_INTR,
+ lan966x, ANA_ANAINTR);
+
+ return IRQ_HANDLED;
+}
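
To make the bit layout used by lan966x_mac_select() concrete, here is
the encoding worked through for an illustrative address, together with
its inverse in lan966x_mac_process_raw_entry():

/* For MAC 00:11:22:33:44:55 on VID 1, lan966x_mac_select() builds:
 *
 *   mach = (vid << 16) | (mac[0] << 8) | mac[1] = 0x00010011
 *   macl = (mac[2] << 24) | (mac[3] << 16) |
 *          (mac[4] << 8) | mac[5]              = 0x22334455
 *
 * lan966x_mac_process_raw_entry() undoes this exactly: the VID comes
 * from mach bits 27:16 (masked with 0xfff) and the six MAC bytes from
 * the remaining byte lanes of mach and macl.
 */
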
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
new file mode 100644
index 000000000000..1f60fd125a1d
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -0,0 +1,1002 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/packing.h>
+#include <linux/phy/phy.h>
+#include <linux/reset.h>
+
+#include "lan966x_main.h"
+
+#define XTR_EOF_0 0x00000080U
+#define XTR_EOF_1 0x01000080U
+#define XTR_EOF_2 0x02000080U
+#define XTR_EOF_3 0x03000080U
+#define XTR_PRUNED 0x04000080U
+#define XTR_ABORT 0x05000080U
+#define XTR_ESCAPE 0x06000080U
+#define XTR_NOT_READY 0x07000080U
+#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3))
+
+#define READL_SLEEP_US 10
+#define READL_TIMEOUT_US 100000000
+
+#define IO_RANGES 2
+
+static const struct of_device_id lan966x_match[] = {
+ { .compatible = "microchip,lan966x-switch" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lan966x_match);
+
+struct lan966x_main_io_resource {
+ enum lan966x_target id;
+ phys_addr_t offset;
+ int range;
+};
+
+static const struct lan966x_main_io_resource lan966x_main_iomap[] = {
+ { TARGET_CPU, 0xc0000, 0 }, /* 0xe00c0000 */
+ { TARGET_ORG, 0, 1 }, /* 0xe2000000 */
+ { TARGET_GCB, 0x4000, 1 }, /* 0xe2004000 */
+ { TARGET_QS, 0x8000, 1 }, /* 0xe2008000 */
+ { TARGET_CHIP_TOP, 0x10000, 1 }, /* 0xe2010000 */
+ { TARGET_REW, 0x14000, 1 }, /* 0xe2014000 */
+ { TARGET_SYS, 0x28000, 1 }, /* 0xe2028000 */
+ { TARGET_DEV, 0x34000, 1 }, /* 0xe2034000 */
+ { TARGET_DEV + 1, 0x38000, 1 }, /* 0xe2038000 */
+ { TARGET_DEV + 2, 0x3c000, 1 }, /* 0xe203c000 */
+ { TARGET_DEV + 3, 0x40000, 1 }, /* 0xe2040000 */
+ { TARGET_DEV + 4, 0x44000, 1 }, /* 0xe2044000 */
+ { TARGET_DEV + 5, 0x48000, 1 }, /* 0xe2048000 */
+ { TARGET_DEV + 6, 0x4c000, 1 }, /* 0xe204c000 */
+ { TARGET_DEV + 7, 0x50000, 1 }, /* 0xe2050000 */
+ { TARGET_QSYS, 0x100000, 1 }, /* 0xe2100000 */
+ { TARGET_AFI, 0x120000, 1 }, /* 0xe2120000 */
+ { TARGET_ANA, 0x140000, 1 }, /* 0xe2140000 */
+};
+
+static int lan966x_create_targets(struct platform_device *pdev,
+ struct lan966x *lan966x)
+{
+ struct resource *iores[IO_RANGES];
+ void __iomem *begin[IO_RANGES];
+ int idx;
+
+ /* Initially map the entire range and after that update each target to
+ * point inside the region at the correct offset. It is possible that
+ * other devices access the same region so don't add any checks about
+ * this.
+ */
+ for (idx = 0; idx < IO_RANGES; idx++) {
+ iores[idx] = platform_get_resource(pdev, IORESOURCE_MEM,
+ idx);
+ if (!iores[idx]) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
+
+ begin[idx] = devm_ioremap(&pdev->dev,
+ iores[idx]->start,
+ resource_size(iores[idx]));
+ if (!begin[idx]) {
+ dev_err(&pdev->dev, "Unable to get registers: %s\n",
+ iores[idx]->name);
+ return -ENOMEM;
+ }
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(lan966x_main_iomap); idx++) {
+ const struct lan966x_main_io_resource *iomap =
+ &lan966x_main_iomap[idx];
+
+ lan966x->regs[iomap->id] = begin[iomap->range] + iomap->offset;
+ }
+
+ return 0;
+}
+
+static int lan966x_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct sockaddr *addr = p;
+ int ret;
+
+ /* Learn the new net device MAC address in the mac table. */
+ ret = lan966x_mac_cpu_learn(lan966x, addr->sa_data, HOST_PVID);
+ if (ret)
+ return ret;
+
+ /* Then forget the previous one. */
+ ret = lan966x_mac_cpu_forget(lan966x, dev->dev_addr, HOST_PVID);
+ if (ret)
+ return ret;
+
+ eth_hw_addr_set(dev, addr->sa_data);
+ return ret;
+}
+
+static int lan966x_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = snprintf(buf, len, "p%d", port->chip_port);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int lan966x_port_open(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ int err;
+
+ /* Enable receiving frames on the port, and activate auto-learning of
+ * MAC addresses.
+ */
+ lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) |
+ ANA_PORT_CFG_RECV_ENA_SET(1) |
+ ANA_PORT_CFG_PORTID_VAL_SET(port->chip_port),
+ ANA_PORT_CFG_LEARNAUTO |
+ ANA_PORT_CFG_RECV_ENA |
+ ANA_PORT_CFG_PORTID_VAL,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
+ if (err) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return err;
+ }
+
+ phylink_start(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_stop(struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ lan966x_port_config_down(port);
+ phylink_stop(port->phylink);
+ phylink_disconnect_phy(port->phylink);
+
+ return 0;
+}
+
+static int lan966x_port_inj_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, QS_INJ_STATUS);
+}
+
+static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
+ QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
+ READL_SLEEP_US, READL_TIMEOUT_US);
+}
+
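+/* Manual frame injection: the IFH and the frame data are pushed word by
+ * word into the QS injection FIFO, delimited by the SOF and EOF flags in
+ * QS_INJ_CTRL (manual injection mode is set up in lan966x_init()).
+ */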
+static int lan966x_port_ifh_xmit(struct sk_buff *skb,
+ __be32 *ifh,
+ struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ u32 i, count, last;
+ u8 grp = 0;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_INJ_STATUS);
+ if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp)) ||
+ (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)))
+ return NETDEV_TX_BUSY;
+
+ /* Write start of frame */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_SOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Write IFH header */
+ for (i = 0; i < IFH_LEN; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr((__force u32)ifh[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Write frame */
+ count = DIV_ROUND_UP(skb->len, 4);
+ last = skb->len % 4;
+ for (i = 0; i < count; ++i) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr(((u32 *)skb->data)[i], lan966x, QS_INJ_WR(grp));
+ }
+
+ /* Add padding */
+ while (i < (LAN966X_BUFFER_MIN_SZ / 4)) {
+ /* Wait until the fifo is ready */
+ err = lan966x_port_inj_ready(lan966x, grp);
+ if (err)
+ return NETDEV_TX_BUSY;
+
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ ++i;
+ }
+
+	/* Indicate EOF and valid bytes in the last word */
+ lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
+ QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
+ 0 : last) |
+ QS_INJ_CTRL_EOF_SET(1),
+ lan966x, QS_INJ_CTRL(grp));
+
+ /* Add dummy CRC */
+ lan_wr(0, lan966x, QS_INJ_WR(grp));
+ skb_tx_timestamp(skb);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
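+/* Each IFH field is placed at its bit position within the IFH_LEN 32-bit
+ * words by the generic packing() helper from lib/packing.
+ */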
+static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
+{
+ packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
+ IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_port(void *ifh, u64 dest)
+{
+	packing(ifh, &dest, IFH_POS_DSTS + IFH_WID_DSTS - 1,
+		IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_qos_class(void *ifh, u64 qos_class)
+{
+	packing(ifh, &qos_class, IFH_POS_QOS_CLASS + IFH_WID_QOS_CLASS - 1,
+		IFH_POS_QOS_CLASS, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_ipv(void *ifh, u64 ipv)
+{
+	packing(ifh, &ipv, IFH_POS_IPV + IFH_WID_IPV - 1,
+		IFH_POS_IPV, IFH_LEN * 4, PACK, 0);
+}
+
+static void lan966x_ifh_set_vid(void *ifh, u64 vid)
+{
+ packing(ifh, &vid, IFH_POS_TCI + IFH_WID_TCI - 1,
+ IFH_POS_TCI, IFH_LEN * 4, PACK, 0);
+}
+
+static int lan966x_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ __be32 ifh[IFH_LEN];
+
+ memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+
+ lan966x_ifh_set_bypass(ifh, 1);
+ lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+ lan966x_ifh_set_qos_class(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_ipv(ifh, skb->priority >= 7 ? 0x7 : skb->priority);
+ lan966x_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+
+ return lan966x_port_ifh_xmit(skb, ifh, dev);
+}
+
+static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu),
+ lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int lan966x_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_forget(lan966x, addr, HOST_PVID, ENTRYTYPE_LOCKED);
+}
+
+static int lan966x_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ return lan966x_mac_cpu_learn(lan966x, addr, HOST_PVID);
+}
+
+static void lan966x_port_set_rx_mode(struct net_device *dev)
+{
+ __dev_mc_sync(dev, lan966x_mc_sync, lan966x_mc_unsync);
+}
+
+static int lan966x_port_get_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ ppid->id_len = sizeof(lan966x->base_mac);
+ memcpy(&ppid->id, &lan966x->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops lan966x_port_netdev_ops = {
+ .ndo_open = lan966x_port_open,
+ .ndo_stop = lan966x_port_stop,
+ .ndo_start_xmit = lan966x_port_xmit,
+ .ndo_change_mtu = lan966x_port_change_mtu,
+ .ndo_set_rx_mode = lan966x_port_set_rx_mode,
+ .ndo_get_phys_port_name = lan966x_port_get_phys_port_name,
+ .ndo_get_stats64 = lan966x_stats_get,
+ .ndo_set_mac_address = lan966x_port_set_mac_address,
+ .ndo_get_port_parent_id = lan966x_port_get_parent_id,
+};
+
+bool lan966x_netdevice_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &lan966x_port_netdev_ops;
+}
+
+static int lan966x_port_xtr_status(struct lan966x *lan966x, u8 grp)
+{
+ return lan_rd(lan966x, QS_XTR_RD(grp));
+}
+
+static int lan966x_port_xtr_ready(struct lan966x *lan966x, u8 grp)
+{
+ u32 val;
+
+ return read_poll_timeout(lan966x_port_xtr_status, val,
+ val != XTR_NOT_READY,
+ READL_SLEEP_US, READL_TIMEOUT_US, false,
+ lan966x, grp);
+}
+
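+/* Read one word from the extraction FIFO. Control words (EOF, ABORT,
+ * ESCAPE, PRUNED) are inlined in the data stream; the return value is the
+ * number of valid frame bytes stored in *rval, or a negative error.
+ */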
+static int lan966x_rx_frame_word(struct lan966x *lan966x, u8 grp, u32 *rval)
+{
+ u32 bytes_valid;
+ u32 val;
+ int err;
+
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_NOT_READY) {
+ err = lan966x_port_xtr_ready(lan966x, grp);
+ if (err)
+ return -EIO;
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = lan_rd(lan966x, QS_XTR_RD(grp));
+ if (val == XTR_ESCAPE)
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = lan_rd(lan966x, QS_XTR_RD(grp));
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static void lan966x_ifh_get_src_port(void *ifh, u64 *src_port)
+{
+ packing(ifh, src_port, IFH_POS_SRCPORT + IFH_WID_SRCPORT - 1,
+ IFH_POS_SRCPORT, IFH_LEN * 4, UNPACK, 0);
+}
+
+static void lan966x_ifh_get_len(void *ifh, u64 *len)
+{
+ packing(ifh, len, IFH_POS_LEN + IFH_WID_LEN - 1,
+ IFH_POS_LEN, IFH_LEN * 4, UNPACK, 0);
+}
+
+static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+ int i, grp = 0, err = 0;
+
+ if (!(lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp)))
+ return IRQ_NONE;
+
+ do {
+ struct net_device *dev;
+ struct sk_buff *skb;
+ int sz = 0, buf_len;
+ u64 src_port, len;
+ u32 ifh[IFH_LEN];
+ u32 *buf;
+ u32 val;
+
+ for (i = 0; i < IFH_LEN; i++) {
+ err = lan966x_rx_frame_word(lan966x, grp, &ifh[i]);
+ if (err != 4)
+ goto recover;
+ }
+
+ err = 0;
+
+ lan966x_ifh_get_src_port(ifh, &src_port);
+ lan966x_ifh_get_len(ifh, &len);
+
+		if (WARN_ON(src_port >= lan966x->num_phys_ports))
+			break;
+
+		dev = lan966x->ports[src_port]->dev;
+ skb = netdev_alloc_skb(dev, len);
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf_len = len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
+
+ len = 0;
+ do {
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ *buf++ = val;
+ len += sz;
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = lan966x_rx_frame_word(lan966x, grp, &val);
+ if (sz < 0) {
+ kfree_skb(skb);
+ goto recover;
+ }
+
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (lan966x->bridge_mask & BIT(src_port))
+ skb->offload_fwd_mark = 1;
+
+ netif_rx_ni(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+
+recover:
+ if (sz < 0 || err)
+ lan_rd(lan966x, QS_XTR_RD(grp));
+
+ } while (lan_rd(lan966x, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lan966x_ana_irq_handler(int irq, void *args)
+{
+ struct lan966x *lan966x = args;
+
+ return lan966x_mac_irq_handler(lan966x);
+}
+
+static void lan966x_cleanup_ports(struct lan966x *lan966x)
+{
+ struct lan966x_port *port;
+ int p;
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ port = lan966x->ports[p];
+ if (!port)
+ continue;
+
+ if (port->dev)
+ unregister_netdev(port->dev);
+
+ if (port->phylink) {
+ rtnl_lock();
+ lan966x_port_stop(port->dev);
+ rtnl_unlock();
+ phylink_destroy(port->phylink);
+ port->phylink = NULL;
+ }
+
+ if (port->fwnode)
+ fwnode_handle_put(port->fwnode);
+ }
+
+ disable_irq(lan966x->xtr_irq);
+ lan966x->xtr_irq = -ENXIO;
+
+	if (lan966x->ana_irq > 0) {
+ disable_irq(lan966x->ana_irq);
+ lan966x->ana_irq = -ENXIO;
+ }
+}
+
+static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
+ phy_interface_t phy_mode,
+ struct fwnode_handle *portnp)
+{
+ struct lan966x_port *port;
+ struct phylink *phylink;
+ struct net_device *dev;
+ int err;
+
+ if (p >= lan966x->num_phys_ports)
+ return -EINVAL;
+
+ dev = devm_alloc_etherdev_mqs(lan966x->dev,
+ sizeof(struct lan966x_port), 8, 1);
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, lan966x->dev);
+ port = netdev_priv(dev);
+ port->dev = dev;
+ port->lan966x = lan966x;
+ port->chip_port = p;
+ lan966x->ports[p] = port;
+
+ dev->max_mtu = ETH_MAX_MTU;
+
+ dev->netdev_ops = &lan966x_port_netdev_ops;
+ dev->ethtool_ops = &lan966x_ethtool_ops;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ dev->needed_headroom = IFH_LEN * sizeof(u32);
+
+ eth_hw_addr_gen(dev, lan966x->base_mac, p + 1);
+
+ lan966x_mac_learn(lan966x, PGID_CPU, dev->dev_addr, HOST_PVID,
+ ENTRYTYPE_LOCKED);
+
+ port->phylink_config.dev = &port->dev->dev;
+ port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_pcs.poll = true;
+ port->phylink_pcs.ops = &lan966x_phylink_pcs_ops;
+
+ port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&port->phylink_config,
+ portnp,
+ phy_mode,
+ &lan966x_phylink_mac_ops);
+ if (IS_ERR(phylink)) {
+ port->dev = NULL;
+ return PTR_ERR(phylink);
+ }
+
+ port->phylink = phylink;
+ phylink_set_pcs(phylink, &port->phylink_pcs);
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(lan966x->dev, "register_netdev failed\n");
+ return err;
+ }
+
+ lan966x_vlan_port_set_vlan_aware(port, 0);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+
+ return 0;
+}
+
+static void lan966x_init(struct lan966x *lan966x)
+{
+ u32 p, i;
+
+ /* MAC table initialization */
+ lan966x_mac_init(lan966x);
+
+ lan966x_vlan_init(lan966x);
+
+ /* Flush queues */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) |
+ GENMASK(1, 0),
+ lan966x, QS_XTR_FLUSH);
+
+	/* Allow the queues to drain */
+ mdelay(1);
+
+ /* All Queues normal */
+ lan_wr(lan_rd(lan966x, QS_XTR_FLUSH) &
+ ~(GENMASK(1, 0)),
+ lan966x, QS_XTR_FLUSH);
+
+	/* Set MAC age time to its default value; an entry is aged after
+ * 2 * AGE_PERIOD
+ */
+ lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+ lan966x, ANA_AUTOAGE);
+
+ /* Disable learning for frames discarded by VLAN ingress filtering */
+ lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1),
+ ANA_ADVLEARN_VLAN_CHK,
+ lan966x, ANA_ADVLEARN);
+
+	/* Set up frame ageing - "2 sec" - the unit is 6.5 us on lan966x */
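+	/* i.e. (2 * 10^6 us) / 6.5 us == 20000000 / 65, keeping integer math */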
+ lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) |
+ (20000000 / 65),
+ lan966x, SYS_FRM_AGING);
+
+ /* Map the 8 CPU extraction queues to CPU port */
+ lan_wr(0, lan966x, QSYS_CPU_GROUP_MAP);
+
+ /* Do byte-swap and expect status after last data word
+	 * Extraction: Mode: manual extraction | Byte_swap
+ */
+ lan_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
+ QS_XTR_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_XTR_GRP_CFG(0));
+
+ /* Injection: Mode: manual injection | Byte_swap */
+ lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
+ QS_INJ_GRP_CFG_BYTE_SWAP_SET(1),
+ lan966x, QS_INJ_GRP_CFG(0));
+
+ lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0),
+ QS_INJ_CTRL_GAP_SIZE,
+ lan966x, QS_INJ_CTRL(0));
+
+ /* Enable IFH insertion/parsing on CPU ports */
+ lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1) |
+ SYS_PORT_MODE_INCL_XTR_HDR_SET(1),
+ lan966x, SYS_PORT_MODE(CPU_PORT));
+
+ /* Setup flooding PGIDs */
+ lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) |
+ ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MC) |
+ ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC),
+ lan966x, ANA_FLOODING_IPMC);
+
+ /* There are 8 priorities */
+ for (i = 0; i < 8; ++i)
+ lan_rmw(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) |
+ ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) |
+ ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC),
+ ANA_FLOODING_FLD_MULTICAST |
+ ANA_FLOODING_FLD_UNICAST |
+ ANA_FLOODING_FLD_BROADCAST,
+ lan966x, ANA_FLOODING(i));
+
+ for (i = 0; i < PGID_ENTRIES; ++i)
+ /* Set all the entries to obey VLAN_VLAN */
+ lan_rmw(ANA_PGID_CFG_OBEY_VLAN_SET(1),
+ ANA_PGID_CFG_OBEY_VLAN,
+ lan966x, ANA_PGID_CFG(i));
+
+ for (p = 0; p < lan966x->num_phys_ports; p++) {
+ /* Disable bridging by default */
+ lan_rmw(ANA_PGID_PGID_SET(0x0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(p + PGID_SRC));
+
+		/* Do not forward BPDU frames to the front ports, but copy
+		 * them to the CPU
+		 */
+ lan_wr(0xffff, lan966x, ANA_CPU_FWD_BPDU_CFG(p));
+ }
+
+ /* Set source buffer size for each priority and each port to 1500 bytes */
+ for (i = 0; i <= QSYS_Q_RSRV; ++i) {
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(i));
+ lan_wr(1500 / 64, lan966x, QSYS_RES_CFG(512 + i));
+ }
+
+ /* Enable switching to/from cpu port */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(CPU_PORT));
+
+ /* Configure and enable the CPU port */
+ lan_rmw(ANA_PGID_PGID_SET(0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(CPU_PORT));
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_CPU));
+
+ /* Multicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MC));
+
+ /* This will be controlled by mrouter ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_MCIPV4));
+
+ /* Unicast to all other ports */
+ lan_rmw(GENMASK(lan966x->num_phys_ports - 1, 0),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_UC));
+
+ /* Broadcast to the CPU port and to other ports */
+ lan_rmw(ANA_PGID_PGID_SET(BIT(CPU_PORT) | GENMASK(lan966x->num_phys_ports - 1, 0)),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(PGID_BC));
+
+ lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1),
+ lan966x, REW_PORT_CFG(CPU_PORT));
+
+ lan_rmw(ANA_ANAINTR_INTR_ENA_SET(1),
+ ANA_ANAINTR_INTR_ENA,
+ lan966x, ANA_ANAINTR);
+}
+
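+/* Read back SYS_RAM_INIT; used with readx_poll_timeout() below to wait
+ * for the switch core RAM initialization to complete.
+ */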
+static int lan966x_ram_init(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, SYS_RAM_INIT);
+}
+
+static int lan966x_reset_switch(struct lan966x *lan966x)
+{
+ struct reset_control *switch_reset, *phy_reset;
+ int val = 0;
+ int ret;
+
+ switch_reset = devm_reset_control_get_shared(lan966x->dev, "switch");
+	if (IS_ERR(switch_reset))
+		return dev_err_probe(lan966x->dev, PTR_ERR(switch_reset),
+				     "Could not obtain switch reset\n");
+
+ phy_reset = devm_reset_control_get_shared(lan966x->dev, "phy");
+ if (IS_ERR(phy_reset))
+ return dev_err_probe(lan966x->dev, PTR_ERR(phy_reset),
+ "Could not obtain phy reset\n");
+
+ reset_control_reset(switch_reset);
+ reset_control_reset(phy_reset);
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
+ lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
+ ret = readx_poll_timeout(lan966x_ram_init, lan966x,
+ val, (val & BIT(1)) == 0, READL_SLEEP_US,
+ READL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan966x, SYS_RESET_CFG);
+
+ return 0;
+}
+
+static int lan966x_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *ports, *portnp;
+ struct lan966x *lan966x;
+ u8 mac_addr[ETH_ALEN];
+ int err, i;
+
+ lan966x = devm_kzalloc(&pdev->dev, sizeof(*lan966x), GFP_KERNEL);
+ if (!lan966x)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, lan966x);
+ lan966x->dev = &pdev->dev;
+
+ if (!device_get_mac_address(&pdev->dev, mac_addr)) {
+ ether_addr_copy(lan966x->base_mac, mac_addr);
+ } else {
+		pr_info("MAC addr was not set, using a random MAC\n");
+ eth_random_addr(lan966x->base_mac);
+ lan966x->base_mac[5] &= 0xf0;
+ }
+
+ ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+ if (!ports)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "no ethernet-ports child found\n");
+
+ err = lan966x_create_targets(pdev, lan966x);
+ if (err)
+		return dev_err_probe(&pdev->dev, err,
+				     "Failed to create targets\n");
+
+ err = lan966x_reset_switch(lan966x);
+ if (err)
+		return dev_err_probe(&pdev->dev, err, "Reset failed\n");
+
+ i = 0;
+ fwnode_for_each_available_child_node(ports, portnp)
+ ++i;
+
+ lan966x->num_phys_ports = i;
+ lan966x->ports = devm_kcalloc(&pdev->dev, lan966x->num_phys_ports,
+ sizeof(struct lan966x_port *),
+ GFP_KERNEL);
+ if (!lan966x->ports)
+ return -ENOMEM;
+
+	/* The shared queue system has 160KB of packet memory */
+ lan966x->shared_queue_sz = LAN966X_BUFFER_MEMORY;
+
+	/* Set up IRQs */
+ lan966x->xtr_irq = platform_get_irq_byname(pdev, "xtr");
+ if (lan966x->xtr_irq <= 0)
+ return -EINVAL;
+
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->xtr_irq, NULL,
+ lan966x_xtr_irq_handler, IRQF_ONESHOT,
+ "frame extraction", lan966x);
+ if (err) {
+		pr_err("Unable to use xtr irq\n");
+ return -ENODEV;
+ }
+
+ lan966x->ana_irq = platform_get_irq_byname(pdev, "ana");
+	if (lan966x->ana_irq > 0) {
+ err = devm_request_threaded_irq(&pdev->dev, lan966x->ana_irq, NULL,
+ lan966x_ana_irq_handler, IRQF_ONESHOT,
+ "ana irq", lan966x);
+ if (err)
+			return dev_err_probe(&pdev->dev, err, "Unable to use ana irq\n");
+ }
+
+ /* init switch */
+ lan966x_init(lan966x);
+ lan966x_stats_init(lan966x);
+
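+	/* Each child of "ethernet-ports" is one front port, described by a
+	 * "reg" property holding the chip port number, a phy-mode and an
+	 * optional serdes phy.
+	 */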
+ /* go over the child nodes */
+ fwnode_for_each_available_child_node(ports, portnp) {
+ phy_interface_t phy_mode;
+ struct phy *serdes;
+ u32 p;
+
+ if (fwnode_property_read_u32(portnp, "reg", &p))
+ continue;
+
+ phy_mode = fwnode_get_phy_mode(portnp);
+ err = lan966x_probe_port(lan966x, p, phy_mode, portnp);
+ if (err)
+ goto cleanup_ports;
+
+ /* Read needed configuration */
+ lan966x->ports[p]->config.portmode = phy_mode;
+ lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
+
+ serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
+ if (!IS_ERR(serdes))
+ lan966x->ports[p]->serdes = serdes;
+
+ lan966x_port_init(lan966x->ports[p]);
+ }
+
+ lan966x_mdb_init(lan966x);
+ err = lan966x_fdb_init(lan966x);
+ if (err)
+ goto cleanup_ports;
+
+ return 0;
+
+cleanup_ports:
+ fwnode_handle_put(portnp);
+
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ return err;
+}
+
+static int lan966x_remove(struct platform_device *pdev)
+{
+ struct lan966x *lan966x = platform_get_drvdata(pdev);
+
+ lan966x_cleanup_ports(lan966x);
+
+ cancel_delayed_work_sync(&lan966x->stats_work);
+ destroy_workqueue(lan966x->stats_queue);
+ mutex_destroy(&lan966x->stats_lock);
+
+ lan966x_mac_purge_entries(lan966x);
+ lan966x_mdb_deinit(lan966x);
+ lan966x_fdb_deinit(lan966x);
+
+ return 0;
+}
+
+static struct platform_driver lan966x_driver = {
+ .probe = lan966x_probe,
+ .remove = lan966x_remove,
+ .driver = {
+ .name = "lan966x-switch",
+ .of_match_table = lan966x_match,
+ },
+};
+
+static int __init lan966x_switch_driver_init(void)
+{
+ int ret;
+
+ lan966x_register_notifier_blocks();
+
+ ret = platform_driver_register(&lan966x_driver);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ lan966x_unregister_notifier_blocks();
+ return ret;
+}
+
+static void __exit lan966x_switch_driver_exit(void)
+{
+ platform_driver_unregister(&lan966x_driver);
+ lan966x_unregister_notifier_blocks();
+}
+
+module_init(lan966x_switch_driver_init);
+module_exit(lan966x_switch_driver_exit);
+
+MODULE_DESCRIPTION("Microchip LAN966X switch driver");
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
new file mode 100644
index 000000000000..99c6d0a9f946
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __LAN966X_MAIN_H__
+#define __LAN966X_MAIN_H__
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <net/switchdev.h>
+
+#include "lan966x_regs.h"
+#include "lan966x_ifh.h"
+
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
+#define LAN966X_BUFFER_CELL_SZ 64
+#define LAN966X_BUFFER_MEMORY (160 * 1024)
+#define LAN966X_BUFFER_MIN_SZ 60
+
+#define PGID_AGGR 64
+#define PGID_SRC 80
+#define PGID_ENTRIES 89
+
+#define UNAWARE_PVID 0
+#define HOST_PVID 4095
+
+/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO */
+#define QSYS_Q_RSRV 95
+
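+/* Chip port used by the internal CPU port module, after the front ports */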
+#define CPU_PORT 8
+
+/* Reserved PGIDs */
+#define PGID_CPU (PGID_AGGR - 6)
+#define PGID_UC (PGID_AGGR - 5)
+#define PGID_BC (PGID_AGGR - 4)
+#define PGID_MC (PGID_AGGR - 3)
+#define PGID_MCIPV4 (PGID_AGGR - 2)
+#define PGID_MCIPV6 (PGID_AGGR - 1)
+
+/* Non-reserved PGIDs, used for general purpose */
+#define PGID_GP_START (CPU_PORT + 1)
+#define PGID_GP_END PGID_CPU
+
+#define LAN966X_SPEED_NONE 0
+#define LAN966X_SPEED_2500 1
+#define LAN966X_SPEED_1000 1
+#define LAN966X_SPEED_100 2
+#define LAN966X_SPEED_10 3
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+ ENTRYTYPE_NORMAL = 0,
+ ENTRYTYPE_LOCKED,
+ ENTRYTYPE_MACV4,
+ ENTRYTYPE_MACV6,
+};
+
+struct lan966x_port;
+
+struct lan966x_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+struct lan966x {
+ struct device *dev;
+
+ u8 num_phys_ports;
+ struct lan966x_port **ports;
+
+ void __iomem *regs[NUM_TARGETS];
+
+ int shared_queue_sz;
+
+ u8 base_mac[ETH_ALEN];
+
+ struct net_device *bridge;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct list_head mac_entries;
+ spinlock_t mac_lock; /* lock for mac_entries list */
+
+ u16 vlan_mask[VLAN_N_VID];
+ DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID);
+
+ /* stats */
+ const struct lan966x_stat_layout *stats_layout;
+ u32 num_stats;
+
+ /* workqueue for reading stats */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+
+ /* interrupts */
+ int xtr_irq;
+ int ana_irq;
+
+	/* workqueue for fdb */
+ struct workqueue_struct *fdb_work;
+ struct list_head fdb_entries;
+
+ /* mdb */
+ struct list_head mdb_entries;
+ struct list_head pgid_entries;
+};
+
+struct lan966x_port_config {
+ phy_interface_t portmode;
+ const unsigned long *advertising;
+ int speed;
+ int duplex;
+ u32 pause;
+ bool inband;
+ bool autoneg;
+};
+
+struct lan966x_port {
+ struct net_device *dev;
+ struct lan966x *lan966x;
+
+ u8 chip_port;
+ u16 pvid;
+ u16 vid;
+ bool vlan_aware;
+
+ bool learn_ena;
+
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+ struct lan966x_port_config config;
+ struct phylink *phylink;
+ struct phy *serdes;
+ struct fwnode_handle *fwnode;
+};
+
+extern const struct phylink_mac_ops lan966x_phylink_mac_ops;
+extern const struct phylink_pcs_ops lan966x_phylink_pcs_ops;
+extern const struct ethtool_ops lan966x_ethtool_ops;
+
+bool lan966x_netdevice_check(const struct net_device *dev);
+
+void lan966x_register_notifier_blocks(void);
+void lan966x_unregister_notifier_blocks(void);
+
+void lan966x_stats_get(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+int lan966x_stats_init(struct lan966x *lan966x);
+
+void lan966x_port_config_down(struct lan966x_port *port);
+void lan966x_port_config_up(struct lan966x_port *port);
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state);
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config);
+void lan966x_port_init(struct lan966x_port *port);
+
+int lan966x_mac_ip_learn(struct lan966x *lan966x,
+ bool cpu_copy,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_learn(struct lan966x *lan966x, int port,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_forget(struct lan966x *lan966x,
+ const unsigned char mac[ETH_ALEN],
+ unsigned int vid,
+ enum macaccess_entry_type type);
+int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid);
+int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid);
+void lan966x_mac_init(struct lan966x *lan966x);
+void lan966x_mac_set_ageing(struct lan966x *lan966x,
+ u32 ageing);
+int lan966x_mac_del_entry(struct lan966x *lan966x,
+ const unsigned char *addr,
+ u16 vid);
+int lan966x_mac_add_entry(struct lan966x *lan966x,
+ struct lan966x_port *port,
+ const unsigned char *addr,
+ u16 vid);
+void lan966x_mac_purge_entries(struct lan966x *lan966x);
+irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x);
+
+void lan966x_vlan_init(struct lan966x *lan966x);
+void lan966x_vlan_port_apply(struct lan966x_port *port);
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware);
+int lan966x_vlan_port_set_vid(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged);
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid);
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid);
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid);
+
+void lan966x_fdb_write_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_fdb_erase_entries(struct lan966x *lan966x, u16 vid);
+int lan966x_fdb_init(struct lan966x *lan966x);
+void lan966x_fdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_fdb(struct net_device *dev,
+ struct net_device *orig_dev,
+ unsigned long event, const void *ctx,
+ const struct switchdev_notifier_fdb_info *fdb_info);
+
+void lan966x_mdb_init(struct lan966x *lan966x);
+void lan966x_mdb_deinit(struct lan966x *lan966x);
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj);
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid);
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid);
+
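+/* Each register macro in lan966x_regs.h expands (via __REG) to the full
+ * (target, instance, group, register) tuple consumed by the accessors
+ * below; lan_addr() folds that tuple into the final MMIO address inside
+ * the mapped target.
+ */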
+static inline void __iomem *lan_addr(void __iomem *base[],
+ int id, int tinst, int tcnt,
+ int gbase, int ginst,
+ int gcnt, int gwidth,
+ int raddr, int rinst,
+ int rcnt, int rwidth)
+{
+ WARN_ON((tinst) >= tcnt);
+ WARN_ON((ginst) >= gcnt);
+ WARN_ON((rinst) >= rcnt);
+ return base[id + (tinst)] +
+ gbase + ((ginst) * gwidth) +
+ raddr + ((rinst) * rwidth);
+}
+
+static inline u32 lan_rd(struct lan966x *lan966x, int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ return readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_wr(u32 val, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ writel(val, lan_addr(lan966x->regs, id, tinst, tcnt,
+ gbase, ginst, gcnt, gwidth,
+ raddr, rinst, rcnt, rwidth));
+}
+
+static inline void lan_rmw(u32 val, u32 mask, struct lan966x *lan966x,
+ int id, int tinst, int tcnt,
+ int gbase, int ginst, int gcnt, int gwidth,
+ int raddr, int rinst, int rcnt, int rwidth)
+{
+ u32 nval;
+
+ nval = readl(lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+ nval = (nval & ~mask) | (val & mask);
+ writel(nval, lan_addr(lan966x->regs, id, tinst, tcnt, gbase, ginst,
+ gcnt, gwidth, raddr, rinst, rcnt, rwidth));
+}
+
+#endif /* __LAN966X_MAIN_H__ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
new file mode 100644
index 000000000000..c68d0a99d292
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+struct lan966x_pgid_entry {
+ struct list_head list;
+ int index;
+ refcount_t refcount;
+ u16 ports;
+};
+
+struct lan966x_mdb_entry {
+ struct list_head list;
+ unsigned char mac[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+ struct lan966x_pgid_entry *pgid;
+ u8 cpu_copy;
+};
+
+void lan966x_mdb_init(struct lan966x *lan966x)
+{
+ INIT_LIST_HEAD(&lan966x->mdb_entries);
+ INIT_LIST_HEAD(&lan966x->pgid_entries);
+}
+
+static void lan966x_mdb_purge_mdb_entries(struct lan966x *lan966x)
+{
+ struct lan966x_mdb_entry *mdb_entry, *tmp;
+
+ list_for_each_entry_safe(mdb_entry, tmp, &lan966x->mdb_entries, list) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ }
+}
+
+static void lan966x_mdb_purge_pgid_entries(struct lan966x *lan966x)
+{
+ struct lan966x_pgid_entry *pgid_entry, *tmp;
+
+ list_for_each_entry_safe(pgid_entry, tmp, &lan966x->pgid_entries, list) {
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+ }
+}
+
+void lan966x_mdb_deinit(struct lan966x *lan966x)
+{
+ lan966x_mdb_purge_mdb_entries(lan966x);
+ lan966x_mdb_purge_pgid_entries(lan966x);
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_get(struct lan966x *lan966x,
+ const unsigned char *mac,
+ u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (ether_addr_equal(mdb_entry->mac, mac) &&
+ mdb_entry->vid == vid)
+ return mdb_entry;
+ }
+
+ return NULL;
+}
+
+static struct lan966x_mdb_entry *
+lan966x_mdb_entry_add(struct lan966x *lan966x,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(mdb_entry->mac, mdb->addr);
+ mdb_entry->vid = mdb->vid;
+
+ list_add_tail(&mdb_entry->list, &lan966x->mdb_entries);
+
+ return mdb_entry;
+}
+
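+/* For IPv4/IPv6 multicast entries the destination port mask is encoded
+ * into the leading bytes of the MAC address that is written to the MAC
+ * table, which is why these entry types need no pgid entry.
+ */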
+static void lan966x_mdb_encode_mac(unsigned char *mac,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ ether_addr_copy(mac, mdb_entry->mac);
+
+ if (type == ENTRYTYPE_MACV4) {
+ mac[0] = 0;
+ mac[1] = mdb_entry->ports >> 8;
+ mac[2] = mdb_entry->ports & 0xff;
+ } else if (type == ENTRYTYPE_MACV6) {
+ mac[0] = mdb_entry->ports >> 8;
+ mac[1] = mdb_entry->ports & 0xff;
+ }
+}
+
+static int lan966x_mdb_ip_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ bool cpu_copy = false;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port)
+ mdb_entry->cpu_copy++;
+ else
+ mdb_entry->ports |= BIT(port->chip_port);
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+ cpu_copy = true;
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_ip_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+ /* If there are still other references to the CPU port then
+		 * there is no point in deleting and re-adding the same entry
+ */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports && !mdb_entry->cpu_copy) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ return lan966x_mac_ip_learn(lan966x, mdb_entry->cpu_copy,
+ mac, mdb_entry->vid, type);
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_add(struct lan966x *lan966x, int index, u16 ports)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+
+ pgid_entry = kzalloc(sizeof(*pgid_entry), GFP_KERNEL);
+ if (!pgid_entry)
+ return ERR_PTR(-ENOMEM);
+
+ pgid_entry->ports = ports;
+ pgid_entry->index = index;
+ refcount_set(&pgid_entry->refcount, 1);
+
+ list_add_tail(&pgid_entry->list, &lan966x->pgid_entries);
+
+ return pgid_entry;
+}
+
+static struct lan966x_pgid_entry *
+lan966x_pgid_entry_get(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ int index;
+
+ /* Try to find an existing pgid that uses the same ports as the
+ * mdb_entry
+ */
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->ports == mdb_entry->ports) {
+ refcount_inc(&pgid_entry->refcount);
+ return pgid_entry;
+ }
+ }
+
+	/* Try to find an unused pgid index and allocate an entry for it;
+	 * if none is found, there are no more resources available
+	 */
+ for (index = PGID_GP_START; index < PGID_GP_END; index++) {
+ bool used = false;
+
+ list_for_each_entry(pgid_entry, &lan966x->pgid_entries, list) {
+ if (pgid_entry->index == index) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used)
+ return lan966x_pgid_entry_add(lan966x, index,
+ mdb_entry->ports);
+ }
+
+ return ERR_PTR(-ENOSPC);
+}
+
+static void lan966x_pgid_entry_del(struct lan966x *lan966x,
+ struct lan966x_pgid_entry *pgid_entry)
+{
+ if (!refcount_dec_and_test(&pgid_entry->refcount))
+ return;
+
+ list_del(&pgid_entry->list);
+ kfree(pgid_entry);
+}
+
+static int lan966x_mdb_l2_add(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry) {
+ mdb_entry = lan966x_mdb_entry_add(lan966x, mdb);
+ if (IS_ERR(mdb_entry))
+ return PTR_ERR(mdb_entry);
+ } else {
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ }
+
+ if (cpu_port) {
+ mdb_entry->ports |= BIT(CPU_PORT);
+ mdb_entry->cpu_copy++;
+ } else {
+ mdb_entry->ports |= BIT(port->chip_port);
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ /* Copy the frame to CPU only if the CPU is in the VLAN */
+ if (!lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, mdb_entry->vid) &&
+ mdb_entry->cpu_copy)
+		mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+static int lan966x_mdb_l2_del(struct lan966x_port *port,
+ const struct switchdev_obj_port_mdb *mdb,
+ enum macaccess_entry_type type)
+{
+ bool cpu_port = netif_is_bridge_master(mdb->obj.orig_dev);
+ struct lan966x *lan966x = port->lan966x;
+ struct lan966x_pgid_entry *pgid_entry;
+ struct lan966x_mdb_entry *mdb_entry;
+ unsigned char mac[ETH_ALEN];
+ u16 ports;
+
+ mdb_entry = lan966x_mdb_entry_get(lan966x, mdb->addr, mdb->vid);
+ if (!mdb_entry)
+ return -ENOENT;
+
+ ports = mdb_entry->ports;
+ if (cpu_port) {
+ /* If there are still other references to the CPU port then
+		 * there is no point in deleting and re-adding the same entry
+ */
+ mdb_entry->cpu_copy--;
+ if (mdb_entry->cpu_copy)
+ return 0;
+
+ ports &= ~BIT(CPU_PORT);
+ } else {
+ ports &= ~BIT(port->chip_port);
+ }
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+
+ mdb_entry->ports = ports;
+
+ if (!mdb_entry->ports) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return 0;
+ }
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry)) {
+ list_del(&mdb_entry->list);
+ kfree(mdb_entry);
+ return PTR_ERR(pgid_entry);
+ }
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ return lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
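+/* 01:00:5e:xx:xx:xx is the IPv4 multicast MAC range and 33:33:xx:xx:xx:xx
+ * the IPv6 multicast MAC range; everything else is handled as a plain L2
+ * entry.
+ */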
+static enum macaccess_entry_type
+lan966x_mdb_classify(const unsigned char *mac)
+{
+ if (mac[0] == 0x01 && mac[1] == 0x00 && mac[2] == 0x5e)
+ return ENTRYTYPE_MACV4;
+ if (mac[0] == 0x33 && mac[1] == 0x33)
+ return ENTRYTYPE_MACV6;
+ return ENTRYTYPE_LOCKED;
+}
+
+int lan966x_handle_port_mdb_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+	/* Split the way the entries are added for ipv4/ipv6 and for l2. The
+	 * reason is that ipv4/ipv6 entries don't require any pgid entry,
+	 * while l2 entries do
+	 */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_add(port, mdb, type);
+
+ return lan966x_mdb_l2_add(port, mdb, type);
+}
+
+int lan966x_handle_port_mdb_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ enum macaccess_entry_type type;
+
+	/* Split the way the entries are removed for ipv4/ipv6 and for l2. The
+	 * reason is that ipv4/ipv6 entries don't require any pgid entry,
+	 * while l2 entries do
+	 */
+ type = lan966x_mdb_classify(mdb->addr);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ return lan966x_mdb_ip_del(port, mdb, type);
+
+ return lan966x_mdb_l2_del(port, mdb, type);
+}
+
+static void lan966x_mdb_ip_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, true, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_copy(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports |= BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+void lan966x_mdb_write_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_copy(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_copy(lan966x, mdb_entry, type);
+ }
+}
+
+static void lan966x_mdb_ip_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+ lan966x_mac_ip_learn(lan966x, false, mac, mdb_entry->vid, type);
+}
+
+static void lan966x_mdb_l2_cpu_remove(struct lan966x *lan966x,
+ struct lan966x_mdb_entry *mdb_entry,
+ enum macaccess_entry_type type)
+{
+ struct lan966x_pgid_entry *pgid_entry;
+ unsigned char mac[ETH_ALEN];
+
+ lan966x_pgid_entry_del(lan966x, mdb_entry->pgid);
+ lan966x_mdb_encode_mac(mac, mdb_entry, type);
+ lan966x_mac_forget(lan966x, mac, mdb_entry->vid, type);
+
+ mdb_entry->ports &= ~BIT(CPU_PORT);
+
+ pgid_entry = lan966x_pgid_entry_get(lan966x, mdb_entry);
+ if (IS_ERR(pgid_entry))
+ return;
+
+ mdb_entry->pgid = pgid_entry;
+
+ lan_rmw(ANA_PGID_PGID_SET(mdb_entry->ports),
+ ANA_PGID_PGID,
+ lan966x, ANA_PGID(pgid_entry->index));
+
+ lan966x_mac_learn(lan966x, pgid_entry->index, mdb_entry->mac,
+ mdb_entry->vid, type);
+}
+
+void lan966x_mdb_erase_entries(struct lan966x *lan966x, u16 vid)
+{
+ struct lan966x_mdb_entry *mdb_entry;
+ enum macaccess_entry_type type;
+
+ list_for_each_entry(mdb_entry, &lan966x->mdb_entries, list) {
+ if (mdb_entry->vid != vid || !mdb_entry->cpu_copy)
+ continue;
+
+ type = lan966x_mdb_classify(mdb_entry->mac);
+ if (type == ENTRYTYPE_MACV4 || type == ENTRYTYPE_MACV6)
+ lan966x_mdb_ip_cpu_remove(lan966x, mdb_entry, type);
+ else
+ lan966x_mdb_l2_cpu_remove(lan966x, mdb_entry, type);
+ }
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
new file mode 100644
index 000000000000..b66a9aa00ea4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/phylink.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+#include <linux/sfp.h>
+
+#include "lan966x_main.h"
+
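+/* The MAC is configured from the PCS ops and from mac_link_up(), so there
+ * is nothing to do in mac_config().
+ */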
+static void lan966x_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static int lan966x_phylink_mac_prepare(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t iface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ int err;
+
+ if (port->serdes) {
+ err = phy_set_mode_ext(port->serdes, PHY_MODE_ETHERNET,
+ iface);
+ if (err) {
+ netdev_err(to_net_dev(config->dev),
+ "Could not set mode of SerDes\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void lan966x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x_port_config *port_config = &port->config;
+
+ port_config->duplex = duplex;
+ port_config->speed = speed;
+ port_config->pause = 0;
+ port_config->pause |= tx_pause ? MLO_PAUSE_TX : 0;
+ port_config->pause |= rx_pause ? MLO_PAUSE_RX : 0;
+
+ lan966x_port_config_up(port);
+}
+
+static void lan966x_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct lan966x_port *port = netdev_priv(to_net_dev(config->dev));
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_port_config_down(port);
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
+
+static struct lan966x_port *lan966x_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct lan966x_port, phylink_pcs);
+}
+
+static void lan966x_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+
+ lan966x_port_status_get(port, state);
+}
+
+static int lan966x_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct lan966x_port *port = lan966x_pcs_to_port(pcs);
+ struct lan966x_port_config config;
+ int ret;
+
+ config = port->config;
+ config.portmode = interface;
+ config.inband = phylink_autoneg_inband(mode);
+ config.autoneg = phylink_test(advertising, Autoneg);
+ config.advertising = advertising;
+
+ ret = lan966x_port_pcs_set(port, &config);
+ if (ret)
+ netdev_err(port->dev, "port PCS config failed: %d\n", ret);
+
+ return ret;
+}
+
+static void lan966x_pcs_aneg_restart(struct phylink_pcs *pcs)
+{
+ /* Currently not used */
+}
+
+const struct phylink_mac_ops lan966x_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = lan966x_phylink_mac_config,
+ .mac_prepare = lan966x_phylink_mac_prepare,
+ .mac_link_down = lan966x_phylink_mac_link_down,
+ .mac_link_up = lan966x_phylink_mac_link_up,
+};
+
+const struct phylink_pcs_ops lan966x_phylink_pcs_ops = {
+ .pcs_get_state = lan966x_pcs_get_state,
+ .pcs_config = lan966x_pcs_config,
+ .pcs_an_restart = lan966x_pcs_aneg_restart,
+};
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
new file mode 100644
index 000000000000..237555845a52
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/netdevice.h>
+#include <linux/phy/phy.h>
+
+#include "lan966x_main.h"
+
+/* Watermark encode */
+#define MULTIPLIER_BIT BIT(8)
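+/* Watermarks are given in bytes and converted to LAN966X_BUFFER_CELL_SZ
+ * (64 byte) cells; values of 256 cells or more are stored with a x16
+ * multiplier, flagged by MULTIPLIER_BIT.
+ */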
+static u32 lan966x_wm_enc(u32 value)
+{
+ value /= LAN966X_BUFFER_CELL_SZ;
+
+ if (value >= MULTIPLIER_BIT) {
+ value /= 16;
+ if (value >= MULTIPLIER_BIT)
+ value = (MULTIPLIER_BIT - 1);
+
+ value |= MULTIPLIER_BIT;
+ }
+
+ return value;
+}
+
+static void lan966x_port_link_down(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val, delay = 0;
+
+ /* 0.5: Disable any AFI */
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(0),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+
+ /* wait for reg afi_port_frm_out to become 0 for the port */
+ while (true) {
+ val = lan_rd(lan966x, AFI_PORT_FRM_OUT(port->chip_port));
+ if (!AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+ pr_err("AFI timeout chip port %u", port->chip_port);
+ break;
+ }
+ }
+
+ delay = 0;
+
+ /* 1: Reset the PCS Rx clock domain */
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1),
+ DEV_CLOCK_CFG_PCS_RX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 2: Disable MAC frame reception */
+ lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_RX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* 3: Disable traffic being sent to or from switch port */
+ lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
+ QSYS_SW_PORT_MODE_PORT_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 4: Disable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 5: Disable Flowcontrol */
+ lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0),
+ SYS_PAUSE_CFG_PAUSE_ENA,
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* 5.1: Disable PFC */
+ lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0),
+ QSYS_SW_PORT_MODE_TX_PFC_ENA,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 6: Wait a worst case time 8ms (jumbo/10Mbit) */
+ usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC);
+
+ /* 7: Disable HDX backpressure */
+ lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0),
+ SYS_FRONT_PORT_MODE_HDX_MODE,
+ lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
+
+	/* 8: Flush the queues associated with the port */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* 9: Enable dequeuing from the egress queues */
+ lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0),
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ lan966x, QSYS_PORT_MODE(port->chip_port));
+
+ /* 10: Wait until flushing is complete */
+ while (true) {
+ val = lan_rd(lan966x, QSYS_SW_STATUS(port->chip_port));
+ if (!QSYS_SW_STATUS_EQ_AVAIL_GET(val))
+ break;
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ delay++;
+ if (delay == 2000) {
+ pr_err("Flush timeout chip port %u", port->chip_port);
+ break;
+ }
+ }
+
+ /* 11: Reset the Port and MAC clock domains */
+ lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0),
+ DEV_MAC_ENA_CFG_TX_ENA,
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) |
+ DEV_CLOCK_CFG_MAC_RX_RST_SET(1) |
+ DEV_CLOCK_CFG_PORT_RST_SET(1),
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST |
+ DEV_CLOCK_CFG_PORT_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* 12: Clear flushing */
+ lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(2),
+ QSYS_SW_PORT_MODE_AGING_MODE,
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ /* The port is disabled and flushed, now set up the port in the
+ * new operating mode
+ */
+}
+
+static void lan966x_port_link_up(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+ int speed = 0, mode = 0;
+ int atop_wm = 0;
+
+ switch (config->speed) {
+ case SPEED_10:
+ speed = LAN966X_SPEED_10;
+ break;
+ case SPEED_100:
+ speed = LAN966X_SPEED_100;
+ break;
+ case SPEED_1000:
+ speed = LAN966X_SPEED_1000;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ case SPEED_2500:
+ speed = LAN966X_SPEED_2500;
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+ break;
+ }
+
+	/* GIGA_MODE_ENA(1) also needs to be set regardless of the port
+	 * speed for QSGMII ports.
+	 */
+ if (config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1);
+
+ lan_wr(config->duplex | mode,
+ lan966x, DEV_MAC_MODE_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_IFG_CFG_TX_IFG_SET(config->duplex ? 6 : 5) |
+ DEV_MAC_IFG_CFG_RX_IFG1_SET(config->speed == SPEED_10 ? 2 : 1) |
+ DEV_MAC_IFG_CFG_RX_IFG2_SET(2),
+ DEV_MAC_IFG_CFG_TX_IFG |
+ DEV_MAC_IFG_CFG_RX_IFG1 |
+ DEV_MAC_IFG_CFG_RX_IFG2,
+ lan966x, DEV_MAC_IFG_CFG(port->chip_port));
+
+ lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(4) |
+ DEV_MAC_HDX_CFG_SEED_LOAD_SET(1),
+ DEV_MAC_HDX_CFG_SEED |
+ DEV_MAC_HDX_CFG_SEED_LOAD,
+ lan966x, DEV_MAC_HDX_CFG(port->chip_port));
+
+ if (config->portmode == PHY_INTERFACE_MODE_GMII) {
+ if (config->speed == SPEED_1000)
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(1),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ else
+ lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(0),
+ CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA,
+ lan966x,
+ CHIP_TOP_CUPHY_PORT_CFG(port->chip_port));
+ }
+
+ /* No PFC */
+ lan_wr(ANA_PFC_CFG_FC_LINK_SPEED_SET(speed),
+ lan966x, ANA_PFC_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ DEV_PCS1G_CFG_PCS_ENA,
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0),
+ DEV_PCS1G_SD_CFG_SD_ENA,
+ lan966x, DEV_PCS1G_SD_CFG(port->chip_port));
+
+ /* Set Pause WM hysteresis, start/stop are in 1518 byte units */
+ lan_wr(SYS_PAUSE_CFG_PAUSE_ENA_SET(1) |
+ SYS_PAUSE_CFG_PAUSE_STOP_SET(lan966x_wm_enc(4 * 1518)) |
+ SYS_PAUSE_CFG_PAUSE_START_SET(lan966x_wm_enc(6 * 1518)),
+ lan966x, SYS_PAUSE_CFG(port->chip_port));
+
+ /* Set SMAC of Pause frame (00:00:00:00:00:00) */
+ lan_wr(0, lan966x, DEV_FC_MAC_LOW_CFG(port->chip_port));
+ lan_wr(0, lan966x, DEV_FC_MAC_HIGH_CFG(port->chip_port));
+
+ /* Flow control */
+ lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(speed) |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(7) |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) |
+ SYS_MAC_FC_CFG_RX_FC_ENA_SET(config->pause & MLO_PAUSE_RX ? 1 : 0) |
+ SYS_MAC_FC_CFG_TX_FC_ENA_SET(config->pause & MLO_PAUSE_TX ? 1 : 0),
+ SYS_MAC_FC_CFG_FC_LINK_SPEED |
+ SYS_MAC_FC_CFG_FC_LATENCY_CFG |
+ SYS_MAC_FC_CFG_ZERO_PAUSE_ENA |
+ SYS_MAC_FC_CFG_PAUSE_VAL_CFG |
+ SYS_MAC_FC_CFG_RX_FC_ENA |
+ SYS_MAC_FC_CFG_TX_FC_ENA,
+ lan966x, SYS_MAC_FC_CFG(port->chip_port));
+
+ /* Tail dropping watermark */
+ atop_wm = lan966x->shared_queue_sz;
+
+	/* The total memory size is divided by the number of front ports plus
+	 * the CPU port
+	 */
+	lan_wr(lan966x_wm_enc(atop_wm / (lan966x->num_phys_ports + 1)), lan966x,
+	       SYS_ATOP(port->chip_port));
+ lan_wr(lan966x_wm_enc(atop_wm), lan966x, SYS_ATOP_TOT_CFG);
+
+ /* This needs to be at the end */
+ /* Enable MAC module */
+ lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) |
+ DEV_MAC_ENA_CFG_TX_ENA_SET(1),
+ lan966x, DEV_MAC_ENA_CFG(port->chip_port));
+
+ /* Take out the clock from reset */
+ lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(speed),
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ /* Core: Enable port for frame transfer */
+ lan_wr(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) |
+ QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) |
+ QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1),
+ lan966x, QSYS_SW_PORT_MODE(port->chip_port));
+
+ lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) |
+ AFI_PORT_CFG_FRM_OUT_MAX_SET(16),
+ AFI_PORT_CFG_FC_SKIP_TTI_INJ |
+ AFI_PORT_CFG_FRM_OUT_MAX,
+ lan966x, AFI_PORT_CFG(port->chip_port));
+}
+
+void lan966x_port_config_down(struct lan966x_port *port)
+{
+ lan966x_port_link_down(port);
+}
+
+void lan966x_port_config_up(struct lan966x_port *port)
+{
+ lan966x_port_link_up(port);
+}
+
+void lan966x_port_status_get(struct lan966x_port *port,
+ struct phylink_link_state *state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool link_down;
+ u16 bmsr = 0;
+ u16 lp_adv;
+ u32 val;
+
+ val = lan_rd(lan966x, DEV_PCS1G_STICKY(port->chip_port));
+ link_down = DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(val);
+ if (link_down)
+ lan_wr(val, lan966x, DEV_PCS1G_STICKY(port->chip_port));
+
+ /* Get both current Link and Sync status */
+ val = lan_rd(lan966x, DEV_PCS1G_LINK_STATUS(port->chip_port));
+ state->link = DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(val) &&
+ DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(val);
+ state->link &= !link_down;
+
+ /* Get PCS ANEG status register */
+ val = lan_rd(lan966x, DEV_PCS1G_ANEG_STATUS(port->chip_port));
+ /* Aneg complete provides more information */
+ if (DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(val)) {
+ state->an_complete = true;
+
+ bmsr |= state->link ? BMSR_LSTATUS : 0;
+ bmsr |= BMSR_ANEGCOMPLETE;
+
+ lp_adv = DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(val);
+ phylink_mii_c22_pcs_decode_state(state, bmsr, lp_adv);
+ } else {
+ if (!state->link)
+ return;
+
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ state->speed = SPEED_1000;
+ else if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+
+ state->duplex = DUPLEX_FULL;
+ }
+}
+
+int lan966x_port_pcs_set(struct lan966x_port *port,
+ struct lan966x_port_config *config)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool inband_aneg = false;
+ bool outband;
+
+ if (config->inband) {
+ if (config->portmode == PHY_INTERFACE_MODE_SGMII ||
+ config->portmode == PHY_INTERFACE_MODE_QSGMII)
+ inband_aneg = true; /* Cisco-SGMII in-band-aneg */
+ else if (config->portmode == PHY_INTERFACE_MODE_1000BASEX &&
+ config->autoneg)
+ inband_aneg = true; /* Clause-37 in-band-aneg */
+
+ outband = false;
+ } else {
+ outband = true;
+ }
+
+ /* Disable or enable inband */
+ lan_rmw(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(outband),
+ DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA,
+ lan966x, DEV_PCS1G_MODE_CFG(port->chip_port));
+
+ /* Enable PCS */
+ lan_wr(DEV_PCS1G_CFG_PCS_ENA_SET(1),
+ lan966x, DEV_PCS1G_CFG(port->chip_port));
+
+ if (inband_aneg) {
+ int adv = phylink_mii_c22_pcs_encode_advertisement(config->portmode,
+ config->advertising);
+ if (adv >= 0)
+ /* Enable in-band aneg */
+ lan_wr(DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(adv) |
+ DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_ENA_SET(1) |
+ DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(1),
+ lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ } else {
+ lan_wr(0, lan966x, DEV_PCS1G_ANEG_CFG(port->chip_port));
+ }
+
+ /* Take PCS out of reset */
+ lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) |
+ DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
+ DEV_CLOCK_CFG_LINK_SPEED |
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+
+ port->config = *config;
+
+ return 0;
+}
+
+void lan966x_port_init(struct lan966x_port *port)
+{
+ struct lan966x_port_config *config = &port->config;
+ struct lan966x *lan966x = port->lan966x;
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_port_config_down(port);
+
+ if (config->portmode != PHY_INTERFACE_MODE_QSGMII)
+ return;
+
+ lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
+ DEV_CLOCK_CFG_PCS_TX_RST_SET(0) |
+ DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000),
+ DEV_CLOCK_CFG_PCS_RX_RST |
+ DEV_CLOCK_CFG_PCS_TX_RST |
+ DEV_CLOCK_CFG_LINK_SPEED,
+ lan966x, DEV_CLOCK_CFG(port->chip_port));
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
new file mode 100644
index 000000000000..797560172aca
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
@@ -0,0 +1,871 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+/* This file is autogenerated by cml-utils 2021-10-10 13:25:08 +0200.
+ * Commit ID: 26db2002924973d36a30b369c94f025a678fe9ea (dirty)
+ */
+
+#ifndef _LAN966X_REGS_H_
+#define _LAN966X_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum lan966x_target {
+ TARGET_AFI = 2,
+ TARGET_ANA = 3,
+ TARGET_CHIP_TOP = 5,
+ TARGET_CPU = 6,
+ TARGET_DEV = 13,
+ TARGET_GCB = 27,
+ TARGET_ORG = 36,
+ TARGET_QS = 42,
+ TARGET_QSYS = 46,
+ TARGET_REW = 47,
+ TARGET_SYS = 52,
+ NUM_TARGETS = 66
+};
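+
+/* The target values index the per-target I/O base table that the driver
+ * fills in at probe time; NUM_TARGETS sizes that table.
+ */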
+
+#define __REG(...) __VA_ARGS__
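+
+/* Each register macro below expands to the argument tuple consumed by the
+ * lan_rd()/lan_wr()/lan_rmw() accessors in lan966x_main.h: (target, tinst,
+ * tcnt, gbase, ginst, gcnt, gwidth, raddr, rinst, rcnt, rwidth), i.e. the
+ * target instance, the register group base/instance count/stride and the
+ * register address/instance count/stride used to compute the final offset.
+ */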
+
+/* AFI:PORT_TBL:PORT_FRM_OUT */
+#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 0, 0, 1, 4)
+
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\
+ FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\
+ FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x)
+
+/* AFI:PORT_TBL:PORT_CFG */
+#define AFI_PORT_CFG(g) __REG(TARGET_AFI, 0, 1, 98816, g, 10, 8, 4, 0, 1, 4)
+
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x)
+
+#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0)
+#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\
+ FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x)
+#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\
+ FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x)
+
+/* ANA:ANA:ADVLEARN */
+#define ANA_ADVLEARN __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 0, 0, 1, 4)
+
+#define ANA_ADVLEARN_VLAN_CHK BIT(0)
+#define ANA_ADVLEARN_VLAN_CHK_SET(x)\
+ FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x)
+#define ANA_ADVLEARN_VLAN_CHK_GET(x)\
+ FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x)
+
+/* ANA:ANA:VLANMASK */
+#define ANA_VLANMASK __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 8, 0, 1, 4)
+
+/* ANA:ANA:ANAINTR */
+#define ANA_ANAINTR __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 16, 0, 1, 4)
+
+#define ANA_ANAINTR_INTR BIT(1)
+#define ANA_ANAINTR_INTR_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR, x)
+#define ANA_ANAINTR_INTR_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR, x)
+
+#define ANA_ANAINTR_INTR_ENA BIT(0)
+#define ANA_ANAINTR_INTR_ENA_SET(x)\
+ FIELD_PREP(ANA_ANAINTR_INTR_ENA, x)
+#define ANA_ANAINTR_INTR_ENA_GET(x)\
+ FIELD_GET(ANA_ANAINTR_INTR_ENA, x)
+
+/* ANA:ANA:AUTOAGE */
+#define ANA_AUTOAGE __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 44, 0, 1, 4)
+
+#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\
+ FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x)
+#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\
+ FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x)
+
+/* ANA:ANA:FLOODING */
+#define ANA_FLOODING(r) __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 68, r, 8, 4)
+
+#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x)
+#define ANA_FLOODING_FLD_UNICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_UNICAST, x)
+
+#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x)
+#define ANA_FLOODING_FLD_BROADCAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x)
+
+#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0)
+#define ANA_FLOODING_FLD_MULTICAST_SET(x)\
+ FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x)
+#define ANA_FLOODING_FLD_MULTICAST_GET(x)\
+ FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x)
+
+/* ANA:ANA:FLOODING_IPMC */
+#define ANA_FLOODING_IPMC __REG(TARGET_ANA, 0, 1, 29824, 0, 1, 244, 100, 0, 1, 4)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x)
+
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\
+ FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\
+ FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x)
+
+/* ANA:PGID:PGID */
+#define ANA_PGID(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 0, 0, 1, 4)
+
+#define ANA_PGID_PGID GENMASK(8, 0)
+#define ANA_PGID_PGID_SET(x)\
+ FIELD_PREP(ANA_PGID_PGID, x)
+#define ANA_PGID_PGID_GET(x)\
+ FIELD_GET(ANA_PGID_PGID, x)
+
+/* ANA:PGID:PGID_CFG */
+#define ANA_PGID_CFG(g) __REG(TARGET_ANA, 0, 1, 27648, g, 89, 8, 4, 0, 1, 4)
+
+#define ANA_PGID_CFG_OBEY_VLAN BIT(0)
+#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\
+ FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x)
+#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\
+ FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x)
+
+/* ANA:ANA_TABLES:MACHDATA */
+#define ANA_MACHDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 40, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACLDATA */
+#define ANA_MACLDATA __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 44, 0, 1, 4)
+
+/* ANA:ANA_TABLES:MACACCESS */
+#define ANA_MACACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 48, 0, 1, 4)
+
+#define ANA_MACACCESS_CHANGE2SW BIT(17)
+#define ANA_MACACCESS_CHANGE2SW_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x)
+#define ANA_MACACCESS_CHANGE2SW_GET(x)\
+ FIELD_GET(ANA_MACACCESS_CHANGE2SW, x)
+
+#define ANA_MACACCESS_MAC_CPU_COPY BIT(16)
+#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x)
+#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x)
+
+#define ANA_MACACCESS_VALID BIT(12)
+#define ANA_MACACCESS_VALID_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_VALID, x)
+#define ANA_MACACCESS_VALID_GET(x)\
+ FIELD_GET(ANA_MACACCESS_VALID, x)
+
+#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10)
+#define ANA_MACACCESS_ENTRYTYPE_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x)
+#define ANA_MACACCESS_ENTRYTYPE_GET(x)\
+ FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x)
+
+#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4)
+#define ANA_MACACCESS_DEST_IDX_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_DEST_IDX, x)
+#define ANA_MACACCESS_DEST_IDX_GET(x)\
+ FIELD_GET(ANA_MACACCESS_DEST_IDX, x)
+
+#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0)
+#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\
+ FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x)
+#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\
+ FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x)
+
+/* ANA:ANA_TABLES:MACTINDX */
+#define ANA_MACTINDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 52, 0, 1, 4)
+
+#define ANA_MACTINDX_BUCKET GENMASK(12, 11)
+#define ANA_MACTINDX_BUCKET_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_BUCKET, x)
+#define ANA_MACTINDX_BUCKET_GET(x)\
+ FIELD_GET(ANA_MACTINDX_BUCKET, x)
+
+#define ANA_MACTINDX_M_INDEX GENMASK(10, 0)
+#define ANA_MACTINDX_M_INDEX_SET(x)\
+ FIELD_PREP(ANA_MACTINDX_M_INDEX, x)
+#define ANA_MACTINDX_M_INDEX_GET(x)\
+ FIELD_GET(ANA_MACTINDX_M_INDEX, x)
+
+/* ANA:ANA_TABLES:VLAN_PORT_MASK */
+#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(8, 0)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\
+ FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\
+ FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x)
+
+/* ANA:ANA_TABLES:VLANACCESS */
+#define ANA_VLANACCESS __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 60, 0, 1, 4)
+
+#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\
+ FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\
+ FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x)
+
+/* ANA:ANA_TABLES:VLANTIDX */
+#define ANA_VLANTIDX __REG(TARGET_ANA, 0, 1, 27520, 0, 1, 128, 64, 0, 1, 4)
+
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x)
+
+#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0)
+#define ANA_VLANTIDX_V_INDEX_SET(x)\
+ FIELD_PREP(ANA_VLANTIDX_V_INDEX, x)
+#define ANA_VLANTIDX_V_INDEX_GET(x)\
+ FIELD_GET(ANA_VLANTIDX_V_INDEX, x)
+
+/* ANA:PORT:VLAN_CFG */
+#define ANA_VLAN_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 0, 0, 1, 4)
+
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x)
+
+#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x)
+
+#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0)
+#define ANA_VLAN_CFG_VLAN_VID_SET(x)\
+ FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x)
+#define ANA_VLAN_CFG_VLAN_VID_GET(x)\
+ FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x)
+
+/* ANA:PORT:DROP_CFG */
+#define ANA_DROP_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 4, 0, 1, 4)
+
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x)
+
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\
+ FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\
+ FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x)
+
+/* ANA:PORT:CPU_FWD_CFG */
+#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 96, 0, 1, 4)
+
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x)
+
+/* ANA:PORT:CPU_FWD_BPDU_CFG */
+#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 100, 0, 1, 4)
+
+/* ANA:PORT:PORT_CFG */
+#define ANA_PORT_CFG(g) __REG(TARGET_ANA, 0, 1, 28672, g, 9, 128, 112, 0, 1, 4)
+
+#define ANA_PORT_CFG_LEARNAUTO BIT(6)
+#define ANA_PORT_CFG_LEARNAUTO_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x)
+#define ANA_PORT_CFG_LEARNAUTO_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x)
+
+#define ANA_PORT_CFG_LEARN_ENA BIT(5)
+#define ANA_PORT_CFG_LEARN_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x)
+#define ANA_PORT_CFG_LEARN_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x)
+
+#define ANA_PORT_CFG_RECV_ENA BIT(4)
+#define ANA_PORT_CFG_RECV_ENA_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x)
+#define ANA_PORT_CFG_RECV_ENA_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_RECV_ENA, x)
+
+#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0)
+#define ANA_PORT_CFG_PORTID_VAL_SET(x)\
+ FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x)
+#define ANA_PORT_CFG_PORTID_VAL_GET(x)\
+ FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x)
+
+/* ANA:PFC:PFC_CFG */
+#define ANA_PFC_CFG(g) __REG(TARGET_ANA, 0, 1, 30720, g, 8, 64, 0, 0, 1, 4)
+
+#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0)
+#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x)
+#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x)
+
+/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */
+#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP, 0, 1, 16, 0, 1, 20, 8, r, 2, 4)
+
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\
+ FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\
+ FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x)
+
+/* DEV:PORT_MODE:CLOCK_CFG */
+#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV, t, 8, 0, 0, 1, 28, 0, 0, 1, 4)
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x)
+#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x)
+
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x)
+#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x)
+#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x)
+
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x)
+#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x)
+
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PORT_RST_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x)
+#define DEV_CLOCK_CFG_PORT_RST_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x)
+
+#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0)
+#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\
+ FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x)
+#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\
+ FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */
+#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 0, 0, 1, 4)
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x)
+#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x)
+
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x)
+#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */
+#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 4, 0, 1, 4)
+
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
+#define DEV_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 8, 0, 1, 4)
+
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
+ FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\
+ FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */
+#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4)
+
+#define DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x)
+#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x)
+#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x)
+
+#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0)
+#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\
+ FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x)
+#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\
+ FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */
+#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 24, 0, 1, 4)
+
+#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x)
+#define DEV_MAC_HDX_CFG_SEED_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED, x)
+
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\
+ FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\
+ FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */
+#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 32, 0, 1, 4)
+
+/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */
+#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 36, 0, 1, 4)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */
+#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 0, 0, 1, 4)
+
+#define DEV_PCS1G_CFG_PCS_ENA BIT(0)
+#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x)
+#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
+#define DEV_PCS1G_MODE_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 4, 0, 1, 4)
+
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+#define DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
+#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 8, 0, 1, 4)
+
+#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0)
+#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x)
+#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
+#define DEV_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 12, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+#define DEV_PCS1G_ANEG_CFG_ADV_ABILITY_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ADV_ABILITY, x)
+
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_SW_RESOLVE_ENA, x)
+
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT BIT(1)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+#define DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_RESTART_ONE_SHOT, x)
+
+#define DEV_PCS1G_ANEG_CFG_ENA BIT(0)
+#define DEV_PCS1G_ANEG_CFG_ENA_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_CFG_ENA, x)
+#define DEV_PCS1G_ANEG_CFG_ENA_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_CFG_ENA, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
+#define DEV_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 32, 0, 1, 4)
+
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV GENMASK(31, 16)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+#define DEV_PCS1G_ANEG_STATUS_LP_ADV_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_LP_ADV, x)
+
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_SET(x)\
+ FIELD_PREP(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+#define DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE_GET(x)\
+ FIELD_GET(DEV_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
+#define DEV_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 40, 0, 1, 4)
+
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_LINK_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_LINK_STATUS, x)
+
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_SET(x)\
+ FIELD_PREP(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+#define DEV_PCS1G_LINK_STATUS_SYNC_STATUS_GET(x)\
+ FIELD_GET(DEV_PCS1G_LINK_STATUS_SYNC_STATUS, x)
+
+/* DEV:PCS1G_CFG_STATUS:PCS1G_STICKY */
+#define DEV_PCS1G_STICKY(t) __REG(TARGET_DEV, t, 8, 72, 0, 1, 68, 48, 0, 1, 4)
+
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
+ FIELD_PREP(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+#define DEV_PCS1G_STICKY_LINK_DOWN_STICKY_GET(x)\
+ FIELD_GET(DEV_PCS1G_STICKY_LINK_DOWN_STICKY, x)
+
+/* DEVCPU_QS:XTR:XTR_GRP_CFG */
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+
+#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_XTR_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_MODE, x)
+#define QS_XTR_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_MODE, x)
+
+#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+#define QS_XTR_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:XTR:XTR_RD */
+#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+
+/* DEVCPU_QS:XTR:XTR_FLUSH */
+#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+
+/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+
+/* DEVCPU_QS:INJ:INJ_GRP_CFG */
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+
+#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
+#define QS_INJ_GRP_CFG_MODE_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_MODE, x)
+#define QS_INJ_GRP_CFG_MODE_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_MODE, x)
+
+#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\
+ FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\
+ FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
+
+/* DEVCPU_QS:INJ:INJ_WR */
+#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+
+/* DEVCPU_QS:INJ:INJ_CTRL */
+#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+
+#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
+#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x)
+#define QS_INJ_CTRL_GAP_SIZE_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x)
+
+#define QS_INJ_CTRL_EOF BIT(19)
+#define QS_INJ_CTRL_EOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_EOF, x)
+#define QS_INJ_CTRL_EOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_EOF, x)
+
+#define QS_INJ_CTRL_SOF BIT(18)
+#define QS_INJ_CTRL_SOF_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_SOF, x)
+#define QS_INJ_CTRL_SOF_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_SOF, x)
+
+#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16)
+#define QS_INJ_CTRL_VLD_BYTES_SET(x)\
+ FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x)
+#define QS_INJ_CTRL_VLD_BYTES_GET(x)\
+ FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
+
+/* DEVCPU_QS:INJ:INJ_STATUS */
+#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+
+#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
+#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_WMARK_REACHED, x)
+#define QS_INJ_STATUS_WMARK_REACHED_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_WMARK_REACHED, x)
+
+#define QS_INJ_STATUS_FIFO_RDY GENMASK(3, 2)
+#define QS_INJ_STATUS_FIFO_RDY_SET(x)\
+ FIELD_PREP(QS_INJ_STATUS_FIFO_RDY, x)
+#define QS_INJ_STATUS_FIFO_RDY_GET(x)\
+ FIELD_GET(QS_INJ_STATUS_FIFO_RDY, x)
+
+/* QSYS:SYSTEM:PORT_MODE */
+#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 0, r, 10, 4)
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\
+ FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\
+ FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x)
+
+/* QSYS:SYSTEM:SWITCH_PORT_MODE */
+#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 80, r, 9, 4)
+
+#define QSYS_SW_PORT_MODE_PORT_ENA BIT(18)
+#define QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x)
+#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x)
+
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(16, 14)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x)
+
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(12)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x)
+
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(11, 4)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x)
+
+#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(1, 0)
+#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\
+ FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x)
+#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\
+ FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x)
+
+/* QSYS:SYSTEM:SW_STATUS */
+#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 164, r, 9, 4)
+
+#define QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0)
+#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\
+ FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x)
+#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\
+ FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x)
+
+/* QSYS:SYSTEM:CPU_GROUP_MAP */
+#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS, 0, 1, 28008, 0, 1, 216, 204, 0, 1, 4)
+
+/* QSYS:RES_CTRL:RES_CFG */
+#define QSYS_RES_CFG(g) __REG(TARGET_QSYS, 0, 1, 32768, g, 1024, 8, 0, 0, 1, 4)
+
+/* REW:PORT:PORT_VLAN_CFG */
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 0, 0, 1, 4)
+
+#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16)
+#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x)
+#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x)
+
+#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0)
+#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\
+ FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x)
+#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
+ FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+
+/* REW:PORT:TAG_CFG */
+#define REW_TAG_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 4, 0, 1, 4)
+
+#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7)
+#define REW_TAG_CFG_TAG_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_CFG, x)
+#define REW_TAG_CFG_TAG_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_CFG, x)
+
+#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5)
+#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\
+ FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x)
+#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\
+ FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x)
+
+/* REW:PORT:PORT_CFG */
+#define REW_PORT_CFG(g) __REG(TARGET_REW, 0, 1, 0, g, 10, 128, 8, 0, 1, 4)
+
+#define REW_PORT_CFG_NO_REWRITE BIT(0)
+#define REW_PORT_CFG_NO_REWRITE_SET(x)\
+ FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x)
+#define REW_PORT_CFG_NO_REWRITE_GET(x)\
+ FIELD_GET(REW_PORT_CFG_NO_REWRITE, x)
+
+/* SYS:SYSTEM:RESET_CFG */
+#define SYS_RESET_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 0, 0, 1, 4)
+
+#define SYS_RESET_CFG_CORE_ENA BIT(0)
+#define SYS_RESET_CFG_CORE_ENA_SET(x)\
+ FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x)
+#define SYS_RESET_CFG_CORE_ENA_GET(x)\
+ FIELD_GET(SYS_RESET_CFG_CORE_ENA, x)
+
+/* SYS:SYSTEM:PORT_MODE */
+#define SYS_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 44, r, 10, 4)
+
+#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(5, 4)
+#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x)
+#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x)
+
+#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(3, 2)
+#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\
+ FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x)
+#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\
+ FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x)
+
+/* SYS:SYSTEM:FRONT_PORT_MODE */
+#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 84, r, 8, 4)
+
+#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\
+ FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\
+ FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x)
+
+/* SYS:SYSTEM:FRM_AGING */
+#define SYS_FRM_AGING __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 116, 0, 1, 4)
+
+#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
+#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\
+ FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x)
+#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\
+ FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x)
+
+/* SYS:SYSTEM:STAT_CFG */
+#define SYS_STAT_CFG __REG(TARGET_SYS, 0, 1, 4128, 0, 1, 168, 120, 0, 1, 4)
+
+#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0)
+#define SYS_STAT_CFG_STAT_VIEW_SET(x)\
+ FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x)
+#define SYS_STAT_CFG_STAT_VIEW_GET(x)\
+ FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x)
+
+/* SYS:PAUSE_CFG:PAUSE_CFG */
+#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 0, r, 9, 4)
+
+#define SYS_PAUSE_CFG_PAUSE_START GENMASK(18, 10)
+#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x)
+#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x)
+
+#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1)
+#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x)
+#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x)
+
+#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
+#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x)
+#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x)
+
+/* SYS:PAUSE_CFG:ATOP */
+#define SYS_ATOP(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 40, r, 9, 4)
+
+/* SYS:PAUSE_CFG:ATOP_TOT_CFG */
+#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 76, 0, 1, 4)
+
+/* SYS:PAUSE_CFG:MAC_FC_CFG */
+#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS, 0, 1, 4296, 0, 1, 112, 80, r, 8, 4)
+
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x)
+
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x)
+
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x)
+
+#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x)
+
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\
+ FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\
+ FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x)
+
+/* SYS:STAT:CNT */
+#define SYS_CNT(g) __REG(TARGET_SYS, 0, 1, 0, g, 896, 4, 0, 0, 1, 4)
+
+/* SYS:RAM_CTRL:RAM_INIT */
+#define SYS_RAM_INIT __REG(TARGET_SYS, 0, 1, 4432, 0, 1, 4, 0, 0, 1, 4)
+
+#define SYS_RAM_INIT_RAM_INIT BIT(1)
+#define SYS_RAM_INIT_RAM_INIT_SET(x)\
+ FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x)
+#define SYS_RAM_INIT_RAM_INIT_GET(x)\
+ FIELD_GET(SYS_RAM_INIT_RAM_INIT, x)
+
+#endif /* _LAN966X_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
new file mode 100644
index 000000000000..7de55f6a4da8
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
+#include "lan966x_main.h"
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly;
+static struct notifier_block lan966x_switchdev_nb __read_mostly;
+static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly;
+
+static void lan966x_port_set_mcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_MC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_MC));
+}
+
+static void lan966x_port_set_ucast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_UC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_UC));
+}
+
+static void lan966x_port_set_bcast_flood(struct lan966x_port *port,
+ bool enabled)
+{
+ u32 val = lan_rd(port->lan966x, ANA_PGID(PGID_BC));
+
+ val = ANA_PGID_PGID_GET(val);
+ if (enabled)
+ val |= BIT(port->chip_port);
+ else
+ val &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PGID_PGID_SET(val),
+ ANA_PGID_PGID,
+ port->lan966x, ANA_PGID(PGID_BC));
+}
+
+static void lan966x_port_set_learning(struct lan966x_port *port, bool enabled)
+{
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled),
+ ANA_PORT_CFG_LEARN_ENA,
+ port->lan966x, ANA_PORT_CFG(port->chip_port));
+
+ port->learn_ena = enabled;
+}
+
+static void lan966x_port_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_MCAST_FLOOD)
+ lan966x_port_set_mcast_flood(port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_FLOOD)
+ lan966x_port_set_ucast_flood(port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ lan966x_port_set_bcast_flood(port,
+ !!(flags.val & BR_BCAST_FLOOD));
+
+ if (flags.mask & BR_LEARNING)
+ lan966x_port_set_learning(port,
+ !!(flags.val & BR_LEARNING));
+}
+
+static int lan966x_port_pre_bridge_flags(struct lan966x_port *port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_MCAST_FLOOD | BR_FLOOD | BR_BCAST_FLOOD |
+ BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void lan966x_update_fwd_mask(struct lan966x *lan966x)
+{
+ int i;
+
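+ /* PGID_SRC + i holds the set of destinations reachable from port i:
+ * the other ports in the bridge forwarding set when port i is itself
+ * forwarding, and always the CPU port.
+ */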
+ for (i = 0; i < lan966x->num_phys_ports; i++) {
+ struct lan966x_port *port = lan966x->ports[i];
+ unsigned long mask = 0;
+
+ if (port && lan966x->bridge_fwd_mask & BIT(i))
+ mask = lan966x->bridge_fwd_mask & ~BIT(i);
+
+ mask |= BIT(CPU_PORT);
+
+ lan_wr(ANA_PGID_PGID_SET(mask),
+ lan966x, ANA_PGID(PGID_SRC + i));
+ }
+}
+
+static void lan966x_port_stp_state_set(struct lan966x_port *port, u8 state)
+{
+ struct lan966x *lan966x = port->lan966x;
+ bool learn_ena = false;
+
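+ /* Learning stays enabled only in the LEARNING and FORWARDING states,
+ * and only if the bridge has not cleared BR_LEARNING for this port.
+ */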
+ if ((state == BR_STATE_FORWARDING || state == BR_STATE_LEARNING) &&
+ port->learn_ena)
+ learn_ena = true;
+
+ if (state == BR_STATE_FORWARDING)
+ lan966x->bridge_fwd_mask |= BIT(port->chip_port);
+ else
+ lan966x->bridge_fwd_mask &= ~BIT(port->chip_port);
+
+ lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena),
+ ANA_PORT_CFG_LEARN_ENA,
+ lan966x, ANA_PORT_CFG(port->chip_port));
+
+ lan966x_update_fwd_mask(lan966x);
+}
+
+static void lan966x_port_ageing_set(struct lan966x_port *port,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+ lan966x_mac_set_ageing(port->lan966x, ageing_time);
+}
+
+static int lan966x_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err = 0;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ lan966x_port_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = lan966x_port_pre_bridge_flags(port, attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ lan966x_port_stp_state_set(port, attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ lan966x_port_ageing_set(port, attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ lan966x_vlan_port_set_vlan_aware(port, attr->u.vlan_filtering);
+ lan966x_vlan_port_apply(port);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_port_bridge_join(struct lan966x_port *port,
+ struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+ struct net_device *dev = port->dev;
+ int err;
+
+ if (!lan966x->bridge_mask) {
+ lan966x->bridge = bridge;
+ } else {
+ if (lan966x->bridge != bridge) {
+ NL_SET_ERR_MSG_MOD(extack, "Not allowed to add port to a different bridge");
+ return -ENODEV;
+ }
+ }
+
+ err = switchdev_bridge_port_offload(dev, dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb,
+ false, extack);
+ if (err)
+ return err;
+
+ lan966x->bridge_mask |= BIT(port->chip_port);
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+ lan966x_port_bridge_flags(port, flags);
+
+ return 0;
+}
+
+static void lan966x_port_bridge_leave(struct lan966x_port *port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags = {0};
+ struct lan966x *lan966x = port->lan966x;
+
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+ lan966x_port_bridge_flags(port, flags);
+
+ lan966x->bridge_mask &= ~BIT(port->chip_port);
+
+ if (!lan966x->bridge_mask)
+ lan966x->bridge = NULL;
+
+ /* Set the port back to host mode */
+ lan966x_vlan_port_set_vlan_aware(port, false);
+ lan966x_vlan_port_set_vid(port, HOST_PVID, false, false);
+ lan966x_vlan_port_apply(port);
+}
+
+static int lan966x_port_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ int err = 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ err = lan966x_port_bridge_join(port, info->upper_dev,
+ extack);
+ else
+ lan966x_port_bridge_leave(port, info->upper_dev);
+ }
+
+ return err;
+}
+
+static int lan966x_port_prechangeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (netif_is_bridge_master(info->upper_dev) && !info->linking)
+ switchdev_bridge_port_unoffload(port->dev, port,
+ &lan966x_switchdev_nb,
+ &lan966x_switchdev_blocking_nb);
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_foreign_bridging_check(struct net_device *bridge,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x *lan966x = NULL;
+ bool has_foreign = false;
+ struct net_device *dev;
+ struct list_head *iter;
+
+ if (!netif_is_bridge_master(bridge))
+ return 0;
+
+ netdev_for_each_lower_dev(bridge, dev, iter) {
+ if (lan966x_netdevice_check(dev)) {
+ struct lan966x_port *port = netdev_priv(dev);
+
+ if (lan966x) {
+ /* Bridge already has at least one port of a
+ * lan966x switch inside it, check that it's
+ * the same instance of the driver.
+ */
+ if (port->lan966x != lan966x) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging between multiple lan966x switches disallowed");
+ return -EINVAL;
+ }
+ } else {
+ /* This is the first lan966x port inside this
+ * bridge
+ */
+ lan966x = port->lan966x;
+ }
+ } else {
+ has_foreign = true;
+ }
+
+ if (lan966x && has_foreign) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bridging lan966x ports with foreign interfaces disallowed");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int lan966x_bridge_check(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ return lan966x_foreign_bridging_check(info->upper_dev,
+ info->info.extack);
+}
+
+static int lan966x_netdevice_port_event(struct net_device *dev,
+ struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ if (!lan966x_netdevice_check(dev)) {
+ if (event == NETDEV_CHANGEUPPER)
+ return lan966x_bridge_check(dev, ptr);
+ return 0;
+ }
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ err = lan966x_port_prechangeupper(dev, ptr);
+ break;
+ case NETDEV_CHANGEUPPER:
+ err = lan966x_bridge_check(dev, ptr);
+ if (err)
+ return err;
+
+ err = lan966x_port_changeupper(dev, ptr);
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ ret = lan966x_netdevice_port_event(dev, nb, event, ptr);
+
+ return notifier_from_errno(ret);
+}
+
+static bool lan966x_foreign_dev_check(const struct net_device *dev,
+ const struct net_device *foreign_dev)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (netif_is_bridge_master(foreign_dev))
+ if (lan966x->bridge != foreign_dev)
+ return true;
+
+ return false;
+}
+
+static int lan966x_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
+ lan966x_netdevice_check,
+ lan966x_foreign_dev_check,
+ lan966x_handle_fdb,
+ NULL);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int lan966x_handle_port_vlan_add(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ /* When adding a port to a vlan, we get a callback for the port but
+ * also for the bridge. When we get the callback for the bridge, just
+ * bail out. Then, when the bridge itself is added to the vlan, we get
+ * another callback here, but this time with BRIDGE_VLAN_INFO_BRENTRY
+ * set in the flags. That means the CPU port is added to the vlan, so
+ * broadcast frames and unicast frames with the dmac of the bridge
+ * must be forwarded to the CPU.
+ */
+ if (netif_is_bridge_master(obj->orig_dev) &&
+ !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_add_vlan(port, v->vid,
+ v->flags & BRIDGE_VLAN_INFO_PVID,
+ v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ else
+ lan966x_vlan_cpu_add_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_add(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_add(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_add(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_handle_port_vlan_del(struct lan966x_port *port,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct lan966x *lan966x = port->lan966x;
+
+ if (!netif_is_bridge_master(obj->orig_dev))
+ lan966x_vlan_port_del_vlan(port, v->vid);
+ else
+ lan966x_vlan_cpu_del_vlan(lan966x, v->vid);
+
+ return 0;
+}
+
+static int lan966x_handle_port_obj_del(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ int err;
+
+ if (ctx && ctx != port)
+ return 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = lan966x_handle_port_vlan_del(port, obj);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = lan966x_handle_port_mdb_del(port, obj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int lan966x_switchdev_blocking_event(struct notifier_block *nb,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_handle_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ lan966x_netdevice_check,
+ lan966x_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lan966x_netdevice_nb __read_mostly = {
+ .notifier_call = lan966x_netdevice_event,
+};
+
+static struct notifier_block lan966x_switchdev_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_event,
+};
+
+static struct notifier_block lan966x_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = lan966x_switchdev_blocking_event,
+};
+
+void lan966x_register_notifier_blocks(void)
+{
+ register_netdevice_notifier(&lan966x_netdevice_nb);
+ register_switchdev_notifier(&lan966x_switchdev_nb);
+ register_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+}
+
+void lan966x_unregister_notifier_blocks(void)
+{
+ unregister_switchdev_blocking_notifier(&lan966x_switchdev_blocking_nb);
+ unregister_switchdev_notifier(&lan966x_switchdev_nb);
+ unregister_netdevice_notifier(&lan966x_netdevice_nb);
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
new file mode 100644
index 000000000000..8d7260cd7da9
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+
+#define VLANACCESS_CMD_IDLE 0
+#define VLANACCESS_CMD_READ 1
+#define VLANACCESS_CMD_WRITE 2
+#define VLANACCESS_CMD_INIT 3
+
+static int lan966x_vlan_get_status(struct lan966x *lan966x)
+{
+ return lan_rd(lan966x, ANA_VLANACCESS);
+}
+
+static int lan966x_vlan_wait_for_completion(struct lan966x *lan966x)
+{
+ u32 val;
+
+ return readx_poll_timeout(lan966x_vlan_get_status,
+ lan966x, val,
+ (val & ANA_VLANACCESS_VLAN_TBL_CMD) ==
+ VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+}
+
+static void lan966x_vlan_set_mask(struct lan966x *lan966x, u16 vid)
+{
+ u16 mask = lan966x->vlan_mask[vid];
+ bool cpu_dis;
+
+ cpu_dis = !(mask & BIT(CPU_PORT));
+
+ /* Set flags and the VID to configure */
+ lan_rmw(ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) |
+ ANA_VLANTIDX_V_INDEX_SET(vid),
+ ANA_VLANTIDX_VLAN_PGID_CPU_DIS |
+ ANA_VLANTIDX_V_INDEX,
+ lan966x, ANA_VLANTIDX);
+
+ /* Set the vlan port members mask */
+ lan_rmw(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(mask),
+ ANA_VLAN_PORT_MASK_VLAN_PORT_MASK,
+ lan966x, ANA_VLAN_PORT_MASK);
+
+ /* Issue a write command */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_WRITE),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+
+ if (lan966x_vlan_wait_for_completion(lan966x))
+ dev_err(lan966x->dev, "Vlan set mask failed\n");
+}
+
+static void lan966x_vlan_port_add_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] |= BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_port_del_vlan_mask(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u8 p = port->chip_port;
+
+ lan966x->vlan_mask[vid] &= ~BIT(p);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static bool lan966x_vlan_port_any_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return !!(lan966x->vlan_mask[vid] & ~BIT(CPU_PORT));
+}
+
+static void lan966x_vlan_cpu_add_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] |= BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_del_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ lan966x->vlan_mask[vid] &= ~BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, vid);
+}
+
+static void lan966x_vlan_cpu_add_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __set_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static void lan966x_vlan_cpu_del_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ __clear_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+bool lan966x_vlan_cpu_member_cpu_vlan_mask(struct lan966x *lan966x, u16 vid)
+{
+ return test_bit(vid, lan966x->cpu_vlan_mask);
+}
+
+static u16 lan966x_vlan_port_get_pvid(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+
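+ /* Ports outside the bridge always classify to HOST_PVID; bridged
+ * ports use the configured pvid when vlan-aware and UNAWARE_PVID
+ * otherwise.
+ */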
+ if (!(lan966x->bridge_mask & BIT(port->chip_port)))
+ return HOST_PVID;
+
+ return port->vlan_aware ? port->pvid : UNAWARE_PVID;
+}
+
+int lan966x_vlan_port_set_vid(struct lan966x_port *port, u16 vid,
+ bool pvid, bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* Egress vlan classification */
+ if (untagged && port->vid != vid) {
+ if (port->vid) {
+ dev_err(lan966x->dev,
+ "Port already has a native VLAN: %d\n",
+ port->vid);
+ return -EBUSY;
+ }
+ port->vid = vid;
+ }
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
+ return 0;
+}
+
+static void lan966x_vlan_port_remove_vid(struct lan966x_port *port, u16 vid)
+{
+ if (port->pvid == vid)
+ port->pvid = 0;
+
+ if (port->vid == vid)
+ port->vid = 0;
+}
+
+void lan966x_vlan_port_set_vlan_aware(struct lan966x_port *port,
+ bool vlan_aware)
+{
+ port->vlan_aware = vlan_aware;
+}
+
+void lan966x_vlan_port_apply(struct lan966x_port *port)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u16 pvid;
+ u32 val;
+
+ pvid = lan966x_vlan_port_get_pvid(port);
+
+ /* Ingress classification (ANA_VLAN_CFG) */
+ /* Default vlan to classify for untagged frames (may be zero) */
+ val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
+ if (port->vlan_aware)
+ val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1);
+
+ lan_rmw(val,
+ ANA_VLAN_CFG_VLAN_VID | ANA_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_VLAN_CFG_VLAN_POP_CNT,
+ lan966x, ANA_VLAN_CFG(port->chip_port));
+
+ /* Drop frames with multicast source address */
+ val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1);
+ if (port->vlan_aware && !pvid)
+ /* If the port is vlan-aware and has no pvid, drop untagged and
+ * priority-tagged frames, as they cannot be classified to a vlan.
+ */
+ val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) |
+ ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1);
+
+ lan_wr(val, lan966x, ANA_DROP_CFG(port->chip_port));
+
+ /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
+ val = REW_TAG_CFG_TAG_TPID_CFG_SET(0);
+ if (port->vlan_aware) {
+ if (port->vid)
+ /* Tag all frames except when VID == DEFAULT_VLAN */
+ val |= REW_TAG_CFG_TAG_CFG_SET(1);
+ else
+ val |= REW_TAG_CFG_TAG_CFG_SET(3);
+ }
+
+ /* Update only some bits in the register */
+ lan_rmw(val,
+ REW_TAG_CFG_TAG_TPID_CFG | REW_TAG_CFG_TAG_CFG,
+ lan966x, REW_TAG_CFG(port->chip_port));
+
+ /* Set default VLAN and tag type to 8021Q */
+ lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021Q) |
+ REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
+ REW_PORT_VLAN_CFG_PORT_TPID |
+ REW_PORT_VLAN_CFG_PORT_VID,
+ lan966x, REW_PORT_VLAN_CFG(port->chip_port));
+}
+
+void lan966x_vlan_port_add_vlan(struct lan966x_port *port,
+ u16 vid,
+ bool pvid,
+ bool untagged)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ /* If the CPU(br) is already part of the vlan, add the fdb entries in
+ * the MAC table so frames get copied to the CPU(br). If the CPU(br)
+ * is not part of the vlan, the frames are simply dropped.
+ */
+ if (lan966x_vlan_cpu_member_cpu_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_port_set_vid(port, vid, pvid, untagged);
+ lan966x_vlan_port_add_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+}
+
+void lan966x_vlan_port_del_vlan(struct lan966x_port *port, u16 vid)
+{
+ struct lan966x *lan966x = port->lan966x;
+
+ lan966x_vlan_port_remove_vid(port, vid);
+ lan966x_vlan_port_del_vlan_mask(port, vid);
+ lan966x_vlan_port_apply(port);
+
+ /* If there are no other ports left in the vlan, remove the CPU from
+ * that vlan, but keep it in cpu_vlan_mask because it may be needed
+ * again when another port gets added to that vlan
+ */
+ if (!lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+ }
+}
+
+void lan966x_vlan_cpu_add_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Add an entry in the MAC table for the CPU.
+ * Add the CPU to the vlan port mask only if there is another port in
+ * that vlan; otherwise all broadcast frames in that vlan would go to
+ * the CPU even though no front port is a member, and the CPU would
+ * just have to discard them. The membership must still be recorded so
+ * that when a front port is added later, the CPU port is added as
+ * well.
+ */
+ if (lan966x_vlan_port_any_vlan_mask(lan966x, vid)) {
+ lan966x_vlan_cpu_add_vlan_mask(lan966x, vid);
+ lan966x_mdb_write_entries(lan966x, vid);
+ }
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, vid);
+ lan966x_fdb_write_entries(lan966x, vid);
+}
+
+void lan966x_vlan_cpu_del_vlan(struct lan966x *lan966x, u16 vid)
+{
+ /* Remove the CPU part of the vlan */
+ lan966x_vlan_cpu_del_cpu_vlan_mask(lan966x, vid);
+ lan966x_vlan_cpu_del_vlan_mask(lan966x, vid);
+ lan966x_fdb_erase_entries(lan966x, vid);
+ lan966x_mdb_erase_entries(lan966x, vid);
+}
+
+void lan966x_vlan_init(struct lan966x *lan966x)
+{
+ u16 port, vid;
+
+ /* Clear the VLAN table; by default all ports are members of all VLANs */
+ lan_rmw(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT),
+ ANA_VLANACCESS_VLAN_TBL_CMD,
+ lan966x, ANA_VLANACCESS);
+ lan966x_vlan_wait_for_completion(lan966x);
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ lan966x->vlan_mask[vid] = 0;
+ lan966x_vlan_set_mask(lan966x, vid);
+ }
+
+ /* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */
+ lan966x->vlan_mask[HOST_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, HOST_PVID);
+
+ lan966x->vlan_mask[UNAWARE_PVID] =
+ GENMASK(lan966x->num_phys_ports - 1, 0) | BIT(CPU_PORT);
+ lan966x_vlan_set_mask(lan966x, UNAWARE_PVID);
+
+ lan966x_vlan_cpu_add_cpu_vlan_mask(lan966x, UNAWARE_PVID);
+
+ /* Configure the CPU port to be vlan aware */
+ lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(0) |
+ ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) |
+ ANA_VLAN_CFG_VLAN_POP_CNT_SET(1),
+ lan966x, ANA_VLAN_CFG(CPU_PORT));
+
+ /* Set vlan ingress filter mask to all ports */
+ lan_wr(GENMASK(lan966x->num_phys_ports, 0),
+ lan966x, ANA_VLANMASK);
+
+ for (port = 0; port < lan966x->num_phys_ports; port++) {
+ lan_wr(0, lan966x, REW_PORT_VLAN_CFG(port));
+ lan_wr(0, lan966x, REW_TAG_CFG(port));
+ }
+}