Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw')
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig              |   10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile             |    1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c               |   85
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h               |   34
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c         |  372
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c                |   19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.h                |    4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h                |  790
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c           | 1210
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h           |  154
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c |  765
11 files changed, 3164 insertions, 280 deletions
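
The core.c changes below introduce a flat lag_id/port_index to local_port table (mlxsw_core_lag_mapping_set/get/clear), which the receive path uses to resolve a packet that arrived on a LAG back to a member port. A minimal standalone sketch of that row-major indexing scheme; MAX_LAG and MAX_PORT_PER_LAG are illustrative stand-ins for the profile fields (profile->max_lag, profile->max_port_per_lag), not values taken from the patch:

#include <assert.h>
#include <stdint.h>

#define MAX_LAG          64	/* stand-in for profile->max_lag */
#define MAX_PORT_PER_LAG 16	/* stand-in for profile->max_port_per_lag */

static uint8_t lag_mapping[MAX_LAG * MAX_PORT_PER_LAG];

/* Same indexing as mlxsw_core_lag_mapping_index() in core.c below. */
static int lag_mapping_index(uint16_t lag_id, uint8_t port_index)
{
	return MAX_PORT_PER_LAG * lag_id + port_index;
}

int main(void)
{
	/* "set": member index 2 of LAG 5 is local port 17 */
	lag_mapping[lag_mapping_index(5, 2)] = 17;
	/* "get": the RX path maps (lag_id, lag_port_index) from the CQE
	 * back to a local port before pushing the skb up the stack.
	 */
	assert(lag_mapping[lag_mapping_index(5, 2)] == 17);
	return 0;
}
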
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index e36e12219c9b..ce26adcb4988 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -10,6 +10,14 @@ config MLXSW_CORE To compile this driver as a module, choose M here: the module will be called mlxsw_core. +config MLXSW_CORE_HWMON + bool "HWMON support for Mellanox Technologies Switch ASICs" + depends on MLXSW_CORE && HWMON + depends on !(MLXSW_CORE=y && HWMON=m) + default y + ---help--- + Say Y here if you want to expose HWMON interface on mlxsw devices. + config MLXSW_PCI tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE @@ -33,7 +41,7 @@ config MLXSW_SWITCHX2 config MLXSW_SPECTRUM tristate "Mellanox Technologies Spectrum support" - depends on MLXSW_CORE && NET_SWITCHDEV + depends on MLXSW_CORE && NET_SWITCHDEV && VLAN_8021Q default m ---help--- This driver supports Mellanox Technologies Spectrum Ethernet diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index af015818fd19..584cac444852 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o mlxsw_core-objs := core.o +mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o mlxsw_pci-objs := pci.o obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 97f0d93caf99..22379eb8e924 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -105,6 +105,10 @@ struct mlxsw_core { struct debugfs_blob_wrapper vsd_blob; struct debugfs_blob_wrapper psid_blob; } dbg; + struct { + u8 *mapping; /* lag_id+port_index to local_port mapping */ + } lag; + struct mlxsw_hwmon *hwmon; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ }; @@ -814,6 +818,17 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_alloc_stats; } + if (mlxsw_driver->profile->used_max_lag && + mlxsw_driver->profile->used_max_port_per_lag) { + alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag * + mlxsw_driver->profile->max_port_per_lag; + mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_core->lag.mapping) { + err = -ENOMEM; + goto err_alloc_lag_mapping; + } + } + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); if (err) goto err_bus_init; @@ -822,6 +837,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_emad_init; + err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); + if (err) + goto err_hwmon_init; + err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, mlxsw_bus_info); if (err) @@ -836,10 +855,13 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_debugfs_init: mlxsw_core->driver->fini(mlxsw_core->driver_priv); err_driver_init: +err_hwmon_init: mlxsw_emad_fini(mlxsw_core); err_emad_init: mlxsw_bus->fini(bus_priv); err_bus_init: + kfree(mlxsw_core->lag.mapping); +err_alloc_lag_mapping: free_percpu(mlxsw_core->pcpu_stats); err_alloc_stats: kfree(mlxsw_core); @@ -857,6 +879,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) 
 	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
 	mlxsw_emad_fini(mlxsw_core);
 	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+	kfree(mlxsw_core->lag.mapping);
 	free_percpu(mlxsw_core->pcpu_stats);
 	kfree(mlxsw_core);
 	mlxsw_core_driver_put(device_kind);
@@ -1188,11 +1211,25 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
 	struct mlxsw_rx_listener_item *rxl_item;
 	const struct mlxsw_rx_listener *rxl;
 	struct mlxsw_core_pcpu_stats *pcpu_stats;
-	u8 local_port = rx_info->sys_port;
+	u8 local_port;
 	bool found = false;
 
-	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
-			    __func__, rx_info->sys_port, rx_info->trap_id);
+	if (rx_info->is_lag) {
+		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
+				    __func__, rx_info->u.lag_id,
+				    rx_info->lag_port_index);
+		/* Upper layer does not care if the skb came from LAG or not,
+		 * so just get the local_port for the lag port and push it up.
+		 */
+		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
+							rx_info->u.lag_id,
+							rx_info->lag_port_index);
+	} else {
+		local_port = rx_info->u.sys_port;
+	}
+
+	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
+			    __func__, local_port, rx_info->trap_id);
 
 	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
 	    (local_port >= MLXSW_PORT_MAX_PORTS))
@@ -1236,6 +1273,48 @@ drop:
 }
 EXPORT_SYMBOL(mlxsw_core_skb_receive);
 
+static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
+					u16 lag_id, u8 port_index)
+{
+	return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
+	       port_index;
+}
+
+void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
+				u16 lag_id, u8 port_index, u8 local_port)
+{
+	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
+						 lag_id, port_index);
+
+	mlxsw_core->lag.mapping[index] = local_port;
+}
+EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
+
+u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+			      u16 lag_id, u8 port_index)
+{
+	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
+						 lag_id, port_index);
+
+	return mlxsw_core->lag.mapping[index];
+}
+EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
+
+void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
+				  u16 lag_id, u8 local_port)
+{
+	int i;
+
+	for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
+		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
+							 lag_id, i);
+
+		if (mlxsw_core->lag.mapping[index] == local_port)
+			mlxsw_core->lag.mapping[index] = 0;
+	}
+}
+EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
+
 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
 		   u32 in_mod, bool out_mbox_direct,
 		   char *in_mbox, size_t in_mbox_size,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 807827350a89..a01723600f0a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -112,13 +112,25 @@ int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
 		    const struct mlxsw_reg_info *reg, char *payload);
 
 struct mlxsw_rx_info {
-	u16 sys_port;
+	bool is_lag;
+	union {
+		u16 sys_port;
+		u16 lag_id;
+	} u;
+	u8 lag_port_index;
 	int trap_id;
 };
 
 void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
 			    struct mlxsw_rx_info *rx_info);
 
+void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
+				u16 lag_id, u8 port_index, u8 local_port);
+u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
+			      u16 lag_id, u8 port_index);
+void
mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, + u16 lag_id, u8 local_port); + #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 struct mlxsw_swid_config { @@ -209,4 +221,24 @@ struct mlxsw_bus_info { u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN]; }; +struct mlxsw_hwmon; + +#ifdef CONFIG_MLXSW_CORE_HWMON + +int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_hwmon **p_hwmon); +void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon); + +#else + +static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info, + struct mlxsw_hwmon **p_hwmon) +{ + return 0; +} + +#endif + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c new file mode 100644 index 000000000000..1ac8bf187168 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -0,0 +1,372 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/hwmon.h> +#include <linux/err.h> + +#include "core.h" + +#define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127 +#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \ + MLXSW_MFCR_TACHOS_MAX + MLXSW_MFCR_PWMS_MAX) + +struct mlxsw_hwmon_attr { + struct device_attribute dev_attr; + struct mlxsw_hwmon *hwmon; + unsigned int type_index; + char name[32]; +}; + +struct mlxsw_hwmon { + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + struct device *hwmon_dev; + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[MLXSW_HWMON_ATTR_COUNT + 1]; + struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT]; + unsigned int attrs_count; +}; + +static ssize_t mlxsw_hwmon_temp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned int temp; + int err; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, + false, false); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); + return err; + } + mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL); + return sprintf(buf, "%u\n", temp); +} + +static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned int temp_max; + int err; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, + false, false); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n"); + return err; + } + mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL); + return sprintf(buf, "%u\n", temp_max); +} + +static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtmp_pl[MLXSW_REG_MTMP_LEN]; + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + if (val != 1) + return -EINVAL; + + mlxsw_reg_mtmp_pack(mtmp_pl, mlwsw_hwmon_attr->type_index, true, true); + err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n"); + return err; + } + return len; +} + +static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsm_pl[MLXSW_REG_MFSM_LEN]; + int err; + + mlxsw_reg_mfsm_pack(mfsm_pl, mlwsw_hwmon_attr->type_index); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsm), mfsm_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n"); + return err; + } + return sprintf(buf, "%u\n", 
mlxsw_reg_mfsm_rpm_get(mfsm_pl)); +} + +static ssize_t mlxsw_hwmon_pwm_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsc_pl[MLXSW_REG_MFSC_LEN]; + int err; + + mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, 0); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query PWM\n"); + return err; + } + return sprintf(buf, "%u\n", + mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl)); +} + +static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mfsc_pl[MLXSW_REG_MFSC_LEN]; + unsigned long val; + int err; + + err = kstrtoul(buf, 10, &val); + if (err) + return err; + if (val > 255) + return -EINVAL; + + mlxsw_reg_mfsc_pack(mfsc_pl, mlwsw_hwmon_attr->type_index, val); + err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n"); + return err; + } + return len; +} + +enum mlxsw_hwmon_attr_type { + MLXSW_HWMON_ATTR_TYPE_TEMP, + MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, + MLXSW_HWMON_ATTR_TYPE_TEMP_RST, + MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + MLXSW_HWMON_ATTR_TYPE_PWM, +}; + +static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, + enum mlxsw_hwmon_attr_type attr_type, + unsigned int type_index, unsigned int num) { + struct mlxsw_hwmon_attr *mlxsw_hwmon_attr; + unsigned int attr_index; + + attr_index = mlxsw_hwmon->attrs_count; + mlxsw_hwmon_attr = &mlxsw_hwmon->hwmon_attrs[attr_index]; + + switch (attr_type) { + case MLXSW_HWMON_ATTR_TYPE_TEMP: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_input", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MAX: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_max_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_highest", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_RST: + mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_reset_history", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_FAN_RPM: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "fan%u_input", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_PWM: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show; + mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store; + mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR | S_IRUGO; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "pwm%u", num + 1); + break; + default: + WARN_ON(1); + } + + mlxsw_hwmon_attr->type_index = type_index; + mlxsw_hwmon_attr->hwmon = mlxsw_hwmon; + mlxsw_hwmon_attr->dev_attr.attr.name = mlxsw_hwmon_attr->name; + sysfs_attr_init(&mlxsw_hwmon_attr->dev_attr.attr); + + mlxsw_hwmon->attrs[attr_index] = 
&mlxsw_hwmon_attr->dev_attr.attr;
+	mlxsw_hwmon->attrs_count++;
+}
+
+static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+	char mtcap_pl[MLXSW_REG_MTCAP_LEN];
+	char mtmp_pl[MLXSW_REG_MTMP_LEN];
+	u8 sensor_count;
+	int i;
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtcap), mtcap_pl);
+	if (err) {
+		dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n");
+		return err;
+	}
+	sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
+	for (i = 0; i < sensor_count; i++) {
+		mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true);
+		err = mlxsw_reg_write(mlxsw_hwmon->core,
+				      MLXSW_REG(mtmp), mtmp_pl);
+		if (err) {
+			dev_err(mlxsw_hwmon->bus_info->dev, "Failed to setup temp sensor number %d\n",
+				i);
+			return err;
+		}
+		mlxsw_hwmon_attr_add(mlxsw_hwmon,
+				     MLXSW_HWMON_ATTR_TYPE_TEMP, i, i);
+		mlxsw_hwmon_attr_add(mlxsw_hwmon,
+				     MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, i, i);
+		mlxsw_hwmon_attr_add(mlxsw_hwmon,
+				     MLXSW_HWMON_ATTR_TYPE_TEMP_RST, i, i);
+	}
+	return 0;
+}
+
+static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+	char mfcr_pl[MLXSW_REG_MFCR_LEN];
+	enum mlxsw_reg_mfcr_pwm_frequency freq;
+	unsigned int type_index;
+	unsigned int num;
+	u16 tacho_active;
+	u8 pwm_active;
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mfcr), mfcr_pl);
+	if (err) {
+		dev_err(mlxsw_hwmon->bus_info->dev, "Failed to probe PWMs and tachometers\n");
+		return err;
+	}
+	mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active);
+	num = 0;
+	for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) {
+		if (tacho_active & BIT(type_index))
+			mlxsw_hwmon_attr_add(mlxsw_hwmon,
+					     MLXSW_HWMON_ATTR_TYPE_FAN_RPM,
+					     type_index, num++);
+	}
+	num = 0;
+	for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) {
+		if (pwm_active & BIT(type_index))
+			mlxsw_hwmon_attr_add(mlxsw_hwmon,
+					     MLXSW_HWMON_ATTR_TYPE_PWM,
+					     type_index, num++);
+	}
+	return 0;
+}
+
+int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
+		     const struct mlxsw_bus_info *mlxsw_bus_info,
+		     struct mlxsw_hwmon **p_hwmon)
+{
+	struct mlxsw_hwmon *mlxsw_hwmon;
+	struct device *hwmon_dev;
+	int err;
+
+	mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
+				   GFP_KERNEL);
+	if (!mlxsw_hwmon)
+		return -ENOMEM;
+	mlxsw_hwmon->core = mlxsw_core;
+	mlxsw_hwmon->bus_info = mlxsw_bus_info;
+
+	err = mlxsw_hwmon_temp_init(mlxsw_hwmon);
+	if (err)
+		goto err_temp_init;
+
+	err = mlxsw_hwmon_fans_init(mlxsw_hwmon);
+	if (err)
+		goto err_fans_init;
+
+	mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
+	mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+							   "mlxsw",
+							   mlxsw_hwmon,
+							   mlxsw_hwmon->groups);
+	if (IS_ERR(hwmon_dev)) {
+		err = PTR_ERR(hwmon_dev);
+		goto err_hwmon_register;
+	}
+
+	mlxsw_hwmon->hwmon_dev = hwmon_dev;
+	*p_hwmon = mlxsw_hwmon;
+	return 0;
+
+err_hwmon_register:
+err_fans_init:
+err_temp_init:
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index de69e719dc9d..c071077aafbd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -384,7 +384,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	/* Set CQ of same number of this SDQ.
*/ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); - mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7); + mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); @@ -686,11 +686,15 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - /* We do not support lag now */ - if (mlxsw_pci_cqe_lag_get(cqe)) - goto drop; + if (mlxsw_pci_cqe_lag_get(cqe)) { + rx_info.is_lag = true; + rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe); + rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe); + } else { + rx_info.is_lag = false; + rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe); + } - rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe); rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); byte_count = mlxsw_pci_cqe_byte_count_get(cqe); @@ -699,7 +703,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, skb_put(skb, byte_count); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); -put_new_skb: memset(wqe, 0, q->elem_size); err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); if (err) @@ -708,10 +711,6 @@ put_new_skb: q->producer_counter++; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); return; - -drop: - dev_kfree_skb_any(skb); - goto put_new_skb; } static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 142f33d978c5..912106054ff2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -129,13 +129,15 @@ MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); */ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); -/* pci_cqe_system_port +/* pci_cqe_system_port/lag_id * When lag=0: System port on which the packet was received * When lag=1: * bits [15:4] LAG ID on which the packet was received * bits [3:0] sub_port on which the packet was received */ MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); +MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12); +MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4); /* pci_cqe_wqe_counter * WQE count of the WQEs completed on the associated dqn diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 236fb5d2ad69..0c5237264e3e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -99,6 +99,55 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = { */ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); +/* SMID - Switch Multicast ID + * -------------------------- + * The MID record maps from a MID (Multicast ID), which is a unique identifier + * of the multicast group within the stacking domain, into a list of local + * ports into which the packet is replicated. + */ +#define MLXSW_REG_SMID_ID 0x2007 +#define MLXSW_REG_SMID_LEN 0x240 + +static const struct mlxsw_reg_info mlxsw_reg_smid = { + .id = MLXSW_REG_SMID_ID, + .len = MLXSW_REG_SMID_LEN, +}; + +/* reg_smid_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); + +/* reg_smid_mid + * Multicast identifier - global identifier that represents the multicast group + * across all devices. 
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid,
+				       u8 port, bool set)
+{
+	MLXSW_REG_ZERO(smid, payload);
+	mlxsw_reg_smid_swid_set(payload, 0);
+	mlxsw_reg_smid_mid_set(payload, mid);
+	mlxsw_reg_smid_port_set(payload, port, set);
+	mlxsw_reg_smid_port_mask_set(payload, port, 1);
+}
+
 /* SSPR - Switch System Port Record Register
  * -----------------------------------------
  * Configures the system port to local port mapping.
@@ -286,6 +335,8 @@ MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
 
 enum mlxsw_reg_sfd_rec_type {
 	MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
+	MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG = 0x1,
+	MLXSW_REG_SFD_REC_TYPE_MULTICAST = 0x2,
 };
 
 /* reg_sfd_rec_type
@@ -376,36 +427,144 @@ MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
 MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
 		     MLXSW_REG_SFD_REC_LEN, 0x0C, false);
 
-static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
-					 enum mlxsw_reg_sfd_rec_policy policy,
-					 const char *mac, u16 vid,
-					 enum mlxsw_reg_sfd_rec_action action,
-					 u8 local_port)
+static inline void mlxsw_reg_sfd_rec_pack(char *payload, int rec_index,
+					  enum mlxsw_reg_sfd_rec_type rec_type,
+					  const char *mac,
+					  enum mlxsw_reg_sfd_rec_action action)
 {
 	u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
 
 	if (rec_index >= num_rec)
 		mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
 	mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
-	mlxsw_reg_sfd_rec_type_set(payload, rec_index,
-				   MLXSW_REG_SFD_REC_TYPE_UNICAST);
-	mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+	mlxsw_reg_sfd_rec_type_set(payload, rec_index, rec_type);
 	mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
-	mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
-	mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid);
 	mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
+}
+
+static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
+					 enum mlxsw_reg_sfd_rec_policy policy,
+					 const char *mac, u16 fid_vid,
+					 enum mlxsw_reg_sfd_rec_action action,
+					 u8 local_port)
+{
+	mlxsw_reg_sfd_rec_pack(payload, rec_index,
+			       MLXSW_REG_SFD_REC_TYPE_UNICAST, mac, action);
+	mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+	mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
+	mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, fid_vid);
 	mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
 }
 
 static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
-					   char *mac, u16 *p_vid,
+					   char *mac, u16 *p_fid_vid,
 					   u8 *p_local_port)
 {
 	mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
-	*p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
+	*p_fid_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
 	*p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
 }
 
+/* reg_sfd_uc_lag_sub_port
+ * LAG sub port.
+ * Must be 0 if multichannel VEPA is not enabled.
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_lag_fid_vid + * Filtering ID or VLAN ID + * For SwitchX and SwitchX-2: + * - Dynamic entries (policy 2,3) use FID + * - Static entries (policy 0) use VID + * - When independent learning is configured, VID=FID + * For Spectrum: use FID for both Dynamic and Static entries. + * VID should not be used. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_lag_lag_vid + * Indicates VID in case of vFIDs. Reserved for FIDs. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_vid, MLXSW_REG_SFD_BASE_LEN, 16, 12, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +/* reg_sfd_uc_lag_lag_id + * LAG Identifier - pointer into the LAG descriptor table. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_lag_lag_id, MLXSW_REG_SFD_BASE_LEN, 0, 10, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_uc_lag_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, u16 lag_vid, + u16 lag_id) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG, + mac, action); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_uc_lag_sub_port_set(payload, rec_index, 0); + mlxsw_reg_sfd_uc_lag_fid_vid_set(payload, rec_index, fid_vid); + mlxsw_reg_sfd_uc_lag_lag_vid_set(payload, rec_index, lag_vid); + mlxsw_reg_sfd_uc_lag_lag_id_set(payload, rec_index, lag_id); +} + +static inline void mlxsw_reg_sfd_uc_lag_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u16 *p_lag_id) +{ + mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfd_uc_lag_fid_vid_get(payload, rec_index); + *p_lag_id = mlxsw_reg_sfd_uc_lag_lag_id_get(payload, rec_index); +} + +/* reg_sfd_mc_pgi + * + * Multicast port group index - index into the port group table. + * Value 0x1FFF indicates the pgi should point to the MID entry. + * For Spectrum this value must be set to 0x1FFF + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_pgi, MLXSW_REG_SFD_BASE_LEN, 16, 13, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_mc_fid_vid + * + * Filtering ID or VLAN ID + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_mc_mid + * + * Multicast identifier - global identifier that represents the multicast + * group across all devices. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, mc_mid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void +mlxsw_reg_sfd_mc_pack(char *payload, int rec_index, + const char *mac, u16 fid_vid, + enum mlxsw_reg_sfd_rec_action action, u16 mid) +{ + mlxsw_reg_sfd_rec_pack(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_MULTICAST, mac, action); + mlxsw_reg_sfd_mc_pgi_set(payload, rec_index, 0x1FFF); + mlxsw_reg_sfd_mc_fid_vid_set(payload, rec_index, fid_vid); + mlxsw_reg_sfd_mc_mid_set(payload, rec_index, mid); +} + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -456,8 +615,12 @@ MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8, enum mlxsw_reg_sfn_rec_type { /* MAC addresses learned on a regular port. 
 */
 	MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
-	/* Aged-out MAC address on a regular port */
+	/* MAC addresses learned on a LAG port. */
+	MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG = 0x6,
+	/* Aged-out MAC address on a regular port. */
 	MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
+	/* Aged-out MAC address on a LAG port. */
+	MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8,
 };
 
 /* reg_sfn_rec_type
@@ -505,6 +668,22 @@ static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
 	*p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
 }
 
+/* reg_sfn_mac_lag_lag_id
+ * LAG ID (pointer into the LAG descriptor table).
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_lag_lag_id, MLXSW_REG_SFN_BASE_LEN, 0, 10,
+		     MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
+						char *mac, u16 *p_vid,
+						u16 *p_lag_id)
+{
+	mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+	*p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+	*p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index);
+}
+
 /* SPMS - Switch Port MSTP/RSTP State Register
  * -------------------------------------------
  * Configures the spanning tree state of a physical port.
@@ -865,6 +1044,293 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
 	mlxsw_reg_sftr_port_mask_set(payload, port, 1);
 }
 
+/* SLDR - Switch LAG Descriptor Register
+ * -----------------------------------------
+ * The switch LAG descriptor register is populated by LAG descriptors.
+ * Each LAG descriptor is indexed by lag_id. The LAG ID runs from 0 to
+ * max_lag-1.
+ */
+#define MLXSW_REG_SLDR_ID 0x2014
+#define MLXSW_REG_SLDR_LEN 0x0C /* accounts for only one port in the list */
+
+static const struct mlxsw_reg_info mlxsw_reg_sldr = {
+	.id = MLXSW_REG_SLDR_ID,
+	.len = MLXSW_REG_SLDR_LEN,
+};
+
+enum mlxsw_reg_sldr_op {
+	/* Indicates a creation of a new LAG-ID, lag_id must be valid */
+	MLXSW_REG_SLDR_OP_LAG_CREATE,
+	MLXSW_REG_SLDR_OP_LAG_DESTROY,
+	/* Ports that appear in the list have the Distributor enabled */
+	MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST,
+	/* Removes ports from the distributor list */
+	MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST,
+};
+
+/* reg_sldr_op
+ * Operation.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sldr, op, 0x00, 29, 3);
+
+/* reg_sldr_lag_id
+ * LAG identifier. The lag_id is the index into the LAG descriptor table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sldr, lag_id, 0x00, 0, 10);
+
+static inline void mlxsw_reg_sldr_lag_create_pack(char *payload, u8 lag_id)
+{
+	MLXSW_REG_ZERO(sldr, payload);
+	mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_CREATE);
+	mlxsw_reg_sldr_lag_id_set(payload, lag_id);
+}
+
+static inline void mlxsw_reg_sldr_lag_destroy_pack(char *payload, u8 lag_id)
+{
+	MLXSW_REG_ZERO(sldr, payload);
+	mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_DESTROY);
+	mlxsw_reg_sldr_lag_id_set(payload, lag_id);
+}
+
+/* reg_sldr_num_ports
+ * The number of member ports of the LAG.
+ * Reserved for Create / Destroy operations
+ * For Add / Remove operations - indicates the number of ports in the list.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sldr, num_ports, 0x04, 24, 8);
+
+/* reg_sldr_system_port
+ * System port.
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sldr, system_port, 0x08, 0, 16, 4, 0, false); + +static inline void mlxsw_reg_sldr_lag_add_port_pack(char *payload, u8 lag_id, + u8 local_port) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_ADD_PORT_LIST); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); + mlxsw_reg_sldr_num_ports_set(payload, 1); + mlxsw_reg_sldr_system_port_set(payload, 0, local_port); +} + +static inline void mlxsw_reg_sldr_lag_remove_port_pack(char *payload, u8 lag_id, + u8 local_port) +{ + MLXSW_REG_ZERO(sldr, payload); + mlxsw_reg_sldr_op_set(payload, MLXSW_REG_SLDR_OP_LAG_REMOVE_PORT_LIST); + mlxsw_reg_sldr_lag_id_set(payload, lag_id); + mlxsw_reg_sldr_num_ports_set(payload, 1); + mlxsw_reg_sldr_system_port_set(payload, 0, local_port); +} + +/* SLCR - Switch LAG Configuration 2 Register + * ------------------------------------------- + * The Switch LAG Configuration register is used for configuring the + * LAG properties of the switch. + */ +#define MLXSW_REG_SLCR_ID 0x2015 +#define MLXSW_REG_SLCR_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_slcr = { + .id = MLXSW_REG_SLCR_ID, + .len = MLXSW_REG_SLCR_LEN, +}; + +enum mlxsw_reg_slcr_pp { + /* Global Configuration (for all ports) */ + MLXSW_REG_SLCR_PP_GLOBAL, + /* Per port configuration, based on local_port field */ + MLXSW_REG_SLCR_PP_PER_PORT, +}; + +/* reg_slcr_pp + * Per Port Configuration + * Note: Reading at Global mode results in reading port 1 configuration. + * Access: Index + */ +MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1); + +/* reg_slcr_local_port + * Local port number + * Supported from CPU port + * Not supported from router port + * Reserved when pp = Global Configuration + * Access: Index + */ +MLXSW_ITEM32(reg, slcr, local_port, 0x00, 16, 8); + +enum mlxsw_reg_slcr_type { + MLXSW_REG_SLCR_TYPE_CRC, /* default */ + MLXSW_REG_SLCR_TYPE_XOR, + MLXSW_REG_SLCR_TYPE_RANDOM, +}; + +/* reg_slcr_type + * Hash type + * Access: RW + */ +MLXSW_ITEM32(reg, slcr, type, 0x00, 0, 4); + +/* Ingress port */ +#define MLXSW_REG_SLCR_LAG_HASH_IN_PORT BIT(0) +/* SMAC - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_SMAC_IP BIT(1) +/* SMAC - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP BIT(2) +#define MLXSW_REG_SLCR_LAG_HASH_SMAC \ + (MLXSW_REG_SLCR_LAG_HASH_SMAC_IP | \ + MLXSW_REG_SLCR_LAG_HASH_SMAC_NONIP) +/* DMAC - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_DMAC_IP BIT(3) +/* DMAC - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP BIT(4) +#define MLXSW_REG_SLCR_LAG_HASH_DMAC \ + (MLXSW_REG_SLCR_LAG_HASH_DMAC_IP | \ + MLXSW_REG_SLCR_LAG_HASH_DMAC_NONIP) +/* Ethertype - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP BIT(5) +/* Ethertype - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP BIT(6) +#define MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE \ + (MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_IP | \ + MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE_NONIP) +/* VLAN ID - for IPv4 and IPv6 packets */ +#define MLXSW_REG_SLCR_LAG_HASH_VLANID_IP BIT(7) +/* VLAN ID - for non-IP packets */ +#define MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP BIT(8) +#define MLXSW_REG_SLCR_LAG_HASH_VLANID \ + (MLXSW_REG_SLCR_LAG_HASH_VLANID_IP | \ + MLXSW_REG_SLCR_LAG_HASH_VLANID_NONIP) +/* Source IP address (can be IPv4 or IPv6) */ +#define MLXSW_REG_SLCR_LAG_HASH_SIP BIT(9) +/* Destination IP address (can be IPv4 or IPv6) */ +#define MLXSW_REG_SLCR_LAG_HASH_DIP BIT(10) +/* TCP/UDP source port */ +#define 
MLXSW_REG_SLCR_LAG_HASH_SPORT BIT(11) +/* TCP/UDP destination port*/ +#define MLXSW_REG_SLCR_LAG_HASH_DPORT BIT(12) +/* IPv4 Protocol/IPv6 Next Header */ +#define MLXSW_REG_SLCR_LAG_HASH_IPPROTO BIT(13) +/* IPv6 Flow label */ +#define MLXSW_REG_SLCR_LAG_HASH_FLOWLABEL BIT(14) +/* SID - FCoE source ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_SID BIT(15) +/* DID - FCoE destination ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_DID BIT(16) +/* OXID - FCoE originator exchange ID */ +#define MLXSW_REG_SLCR_LAG_HASH_FCOE_OXID BIT(17) +/* Destination QP number - for RoCE packets */ +#define MLXSW_REG_SLCR_LAG_HASH_ROCE_DQP BIT(19) + +/* reg_slcr_lag_hash + * LAG hashing configuration. This is a bitmask, in which each set + * bit includes the corresponding item in the LAG hash calculation. + * The default lag_hash contains SMAC, DMAC, VLANID and + * Ethertype (for all packet types). + * Access: RW + */ +MLXSW_ITEM32(reg, slcr, lag_hash, 0x04, 0, 20); + +static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash) +{ + MLXSW_REG_ZERO(slcr, payload); + mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL); + mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR); + mlxsw_reg_slcr_lag_hash_set(payload, lag_hash); +} + +/* SLCOR - Switch LAG Collector Register + * ------------------------------------- + * The Switch LAG Collector register controls the Local Port membership + * in a LAG and enablement of the collector. + */ +#define MLXSW_REG_SLCOR_ID 0x2016 +#define MLXSW_REG_SLCOR_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_slcor = { + .id = MLXSW_REG_SLCOR_ID, + .len = MLXSW_REG_SLCOR_LEN, +}; + +enum mlxsw_reg_slcor_col { + /* Port is added with collector disabled */ + MLXSW_REG_SLCOR_COL_LAG_ADD_PORT, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED, + MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED, + MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT, +}; + +/* reg_slcor_col + * Collector configuration + * Access: RW + */ +MLXSW_ITEM32(reg, slcor, col, 0x00, 30, 2); + +/* reg_slcor_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, slcor, local_port, 0x00, 16, 8); + +/* reg_slcor_lag_id + * LAG Identifier. Index into the LAG descriptor table. + * Access: Index + */ +MLXSW_ITEM32(reg, slcor, lag_id, 0x00, 0, 10); + +/* reg_slcor_port_index + * Port index in the LAG list. Only valid on Add Port to LAG col. 
+ * Valid range is from 0 to cap_max_lag_members-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, slcor, port_index, 0x04, 0, 10);
+
+static inline void mlxsw_reg_slcor_pack(char *payload,
+					u8 local_port, u16 lag_id,
+					enum mlxsw_reg_slcor_col col)
+{
+	MLXSW_REG_ZERO(slcor, payload);
+	mlxsw_reg_slcor_col_set(payload, col);
+	mlxsw_reg_slcor_local_port_set(payload, local_port);
+	mlxsw_reg_slcor_lag_id_set(payload, lag_id);
+}
+
+static inline void mlxsw_reg_slcor_port_add_pack(char *payload,
+						 u8 local_port, u16 lag_id,
+						 u8 port_index)
+{
+	mlxsw_reg_slcor_pack(payload, local_port, lag_id,
+			     MLXSW_REG_SLCOR_COL_LAG_ADD_PORT);
+	mlxsw_reg_slcor_port_index_set(payload, port_index);
+}
+
+static inline void mlxsw_reg_slcor_port_remove_pack(char *payload,
+						    u8 local_port, u16 lag_id)
+{
+	mlxsw_reg_slcor_pack(payload, local_port, lag_id,
+			     MLXSW_REG_SLCOR_COL_LAG_REMOVE_PORT);
+}
+
+static inline void mlxsw_reg_slcor_col_enable_pack(char *payload,
+						   u8 local_port, u16 lag_id)
+{
+	mlxsw_reg_slcor_pack(payload, local_port, lag_id,
+			     MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_ENABLED);
+}
+
+static inline void mlxsw_reg_slcor_col_disable_pack(char *payload,
+						    u8 local_port, u16 lag_id)
+{
+	mlxsw_reg_slcor_pack(payload, local_port, lag_id,
+			     MLXSW_REG_SLCOR_COL_LAG_COLLECTOR_DISABLED);
+}
+
 /* SPMLR - Switch Port MAC Learning Register
  * -----------------------------------------
  * Controls the Switch MAC learning policy per port.
@@ -2087,6 +2553,284 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
 	mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
 }
 
+/* MFCR - Management Fan Control Register
+ * --------------------------------------
+ * This register controls the settings of the Fan Speed PWM mechanism.
+ */
+#define MLXSW_REG_MFCR_ID 0x9001
+#define MLXSW_REG_MFCR_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_mfcr = {
+	.id = MLXSW_REG_MFCR_ID,
+	.len = MLXSW_REG_MFCR_LEN,
+};
+
+enum mlxsw_reg_mfcr_pwm_frequency {
+	MLXSW_REG_MFCR_PWM_FEQ_11HZ = 0x00,
+	MLXSW_REG_MFCR_PWM_FEQ_14_7HZ = 0x01,
+	MLXSW_REG_MFCR_PWM_FEQ_22_1HZ = 0x02,
+	MLXSW_REG_MFCR_PWM_FEQ_1_4KHZ = 0x40,
+	MLXSW_REG_MFCR_PWM_FEQ_5KHZ = 0x41,
+	MLXSW_REG_MFCR_PWM_FEQ_20KHZ = 0x42,
+	MLXSW_REG_MFCR_PWM_FEQ_22_5KHZ = 0x43,
+	MLXSW_REG_MFCR_PWM_FEQ_25KHZ = 0x44,
+};
+
+/* reg_mfcr_pwm_frequency
+ * Controls the frequency of the PWM signal.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfcr, pwm_frequency, 0x00, 0, 6);
+
+#define MLXSW_MFCR_TACHOS_MAX 10
+
+/* reg_mfcr_tacho_active
+ * Indicates which of the tachometers is active (bit per tachometer).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfcr, tacho_active, 0x04, 16, MLXSW_MFCR_TACHOS_MAX);
+
+#define MLXSW_MFCR_PWMS_MAX 5
+
+/* reg_mfcr_pwm_active
+ * Indicates which of the PWM controls is active (bit per PWM).
+ * Access: RO + */ +MLXSW_ITEM32(reg, mfcr, pwm_active, 0x04, 0, MLXSW_MFCR_PWMS_MAX); + +static inline void +mlxsw_reg_mfcr_pack(char *payload, + enum mlxsw_reg_mfcr_pwm_frequency pwm_frequency) +{ + MLXSW_REG_ZERO(mfcr, payload); + mlxsw_reg_mfcr_pwm_frequency_set(payload, pwm_frequency); +} + +static inline void +mlxsw_reg_mfcr_unpack(char *payload, + enum mlxsw_reg_mfcr_pwm_frequency *p_pwm_frequency, + u16 *p_tacho_active, u8 *p_pwm_active) +{ + *p_pwm_frequency = mlxsw_reg_mfcr_pwm_frequency_get(payload); + *p_tacho_active = mlxsw_reg_mfcr_tacho_active_get(payload); + *p_pwm_active = mlxsw_reg_mfcr_pwm_active_get(payload); +} + +/* MFSC - Management Fan Speed Control Register + * -------------------------------------------- + * This register controls the settings of the Fan Speed PWM mechanism. + */ +#define MLXSW_REG_MFSC_ID 0x9002 +#define MLXSW_REG_MFSC_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mfsc = { + .id = MLXSW_REG_MFSC_ID, + .len = MLXSW_REG_MFSC_LEN, +}; + +/* reg_mfsc_pwm + * Fan pwm to control / monitor. + * Access: Index + */ +MLXSW_ITEM32(reg, mfsc, pwm, 0x00, 24, 3); + +/* reg_mfsc_pwm_duty_cycle + * Controls the duty cycle of the PWM. Value range from 0..255 to + * represent duty cycle of 0%...100%. + * Access: RW + */ +MLXSW_ITEM32(reg, mfsc, pwm_duty_cycle, 0x04, 0, 8); + +static inline void mlxsw_reg_mfsc_pack(char *payload, u8 pwm, + u8 pwm_duty_cycle) +{ + MLXSW_REG_ZERO(mfsc, payload); + mlxsw_reg_mfsc_pwm_set(payload, pwm); + mlxsw_reg_mfsc_pwm_duty_cycle_set(payload, pwm_duty_cycle); +} + +/* MFSM - Management Fan Speed Measurement + * --------------------------------------- + * This register controls the settings of the Tacho measurements and + * enables reading the Tachometer measurements. + */ +#define MLXSW_REG_MFSM_ID 0x9003 +#define MLXSW_REG_MFSM_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mfsm = { + .id = MLXSW_REG_MFSM_ID, + .len = MLXSW_REG_MFSM_LEN, +}; + +/* reg_mfsm_tacho + * Fan tachometer index. + * Access: Index + */ +MLXSW_ITEM32(reg, mfsm, tacho, 0x00, 24, 4); + +/* reg_mfsm_rpm + * Fan speed (round per minute). + * Access: RO + */ +MLXSW_ITEM32(reg, mfsm, rpm, 0x04, 0, 16); + +static inline void mlxsw_reg_mfsm_pack(char *payload, u8 tacho) +{ + MLXSW_REG_ZERO(mfsm, payload); + mlxsw_reg_mfsm_tacho_set(payload, tacho); +} + +/* MTCAP - Management Temperature Capabilities + * ------------------------------------------- + * This register exposes the capabilities of the device and + * system temperature sensing. + */ +#define MLXSW_REG_MTCAP_ID 0x9009 +#define MLXSW_REG_MTCAP_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mtcap = { + .id = MLXSW_REG_MTCAP_ID, + .len = MLXSW_REG_MTCAP_LEN, +}; + +/* reg_mtcap_sensor_count + * Number of sensors supported by the device. + * This includes the QSFP module sensors (if exists in the QSFP module). + * Access: RO + */ +MLXSW_ITEM32(reg, mtcap, sensor_count, 0x00, 0, 7); + +/* MTMP - Management Temperature + * ----------------------------- + * This register controls the settings of the temperature measurements + * and enables reading the temperature measurements. Note that temperature + * is in 0.125 degrees Celsius. + */ +#define MLXSW_REG_MTMP_ID 0x900A +#define MLXSW_REG_MTMP_LEN 0x20 + +static const struct mlxsw_reg_info mlxsw_reg_mtmp = { + .id = MLXSW_REG_MTMP_ID, + .len = MLXSW_REG_MTMP_LEN, +}; + +/* reg_mtmp_sensor_index + * Sensors index to access. 
+ * 64-127 of sensor_index are mapped to the SFP+/QSFP modules sequentially + * (module 0 is mapped to sensor_index 64). + * Access: Index + */ +MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 7); + +/* Convert to milli degrees Celsius */ +#define MLXSW_REG_MTMP_TEMP_TO_MC(val) (val * 125) + +/* reg_mtmp_temperature + * Temperature reading from the sensor. Reading is in 0.125 Celsius + * degrees units. + * Access: RO + */ +MLXSW_ITEM32(reg, mtmp, temperature, 0x04, 0, 16); + +/* reg_mtmp_mte + * Max Temperature Enable - enables measuring the max temperature on a sensor. + * Access: RW + */ +MLXSW_ITEM32(reg, mtmp, mte, 0x08, 31, 1); + +/* reg_mtmp_mtr + * Max Temperature Reset - clears the value of the max temperature register. + * Access: WO + */ +MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1); + +/* reg_mtmp_max_temperature + * The highest measured temperature from the sensor. + * When the bit mte is cleared, the field max_temperature is reserved. + * Access: RO + */ +MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16); + +#define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8 + +/* reg_mtmp_sensor_name + * Sensor Name + * Access: RO + */ +MLXSW_ITEM_BUF(reg, mtmp, sensor_name, 0x18, MLXSW_REG_MTMP_SENSOR_NAME_SIZE); + +static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index, + bool max_temp_enable, + bool max_temp_reset) +{ + MLXSW_REG_ZERO(mtmp, payload); + mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index); + mlxsw_reg_mtmp_mte_set(payload, max_temp_enable); + mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset); +} + +static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, + unsigned int *p_max_temp, + char *sensor_name) +{ + u16 temp; + + if (p_temp) { + temp = mlxsw_reg_mtmp_temperature_get(payload); + *p_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + } + if (p_max_temp) { + temp = mlxsw_reg_mtmp_max_temperature_get(payload); + *p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + } + if (sensor_name) + mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name); +} + +/* MLCR - Management LED Control Register + * -------------------------------------- + * Controls the system LEDs. + */ +#define MLXSW_REG_MLCR_ID 0x902B +#define MLXSW_REG_MLCR_LEN 0x0C + +static const struct mlxsw_reg_info mlxsw_reg_mlcr = { + .id = MLXSW_REG_MLCR_ID, + .len = MLXSW_REG_MLCR_LEN, +}; + +/* reg_mlcr_local_port + * Local port number. + * Access: RW + */ +MLXSW_ITEM32(reg, mlcr, local_port, 0x00, 16, 8); + +#define MLXSW_REG_MLCR_DURATION_MAX 0xFFFF + +/* reg_mlcr_beacon_duration + * Duration of the beacon to be active, in seconds. + * 0x0 - Will turn off the beacon. + * 0xFFFF - Will turn on the beacon until explicitly turned off. + * Access: RW + */ +MLXSW_ITEM32(reg, mlcr, beacon_duration, 0x04, 0, 16); + +/* reg_mlcr_beacon_remain + * Remaining duration of the beacon, in seconds. + * 0xFFFF indicates an infinite amount of time. + * Access: RO + */ +MLXSW_ITEM32(reg, mlcr, beacon_remain, 0x08, 0, 16); + +static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port, + bool active) +{ + MLXSW_REG_ZERO(mlcr, payload); + mlxsw_reg_mlcr_local_port_set(payload, local_port); + mlxsw_reg_mlcr_beacon_duration_set(payload, active ? + MLXSW_REG_MLCR_DURATION_MAX : 0); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. 
@@ -2357,6 +3101,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SGCR"; case MLXSW_REG_SPAD_ID: return "SPAD"; + case MLXSW_REG_SMID_ID: + return "SMID"; case MLXSW_REG_SSPR_ID: return "SSPR"; case MLXSW_REG_SFDAT_ID: @@ -2375,6 +3121,12 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SFGC"; case MLXSW_REG_SFTR_ID: return "SFTR"; + case MLXSW_REG_SLDR_ID: + return "SLDR"; + case MLXSW_REG_SLCR_ID: + return "SLCR"; + case MLXSW_REG_SLCOR_ID: + return "SLCOR"; case MLXSW_REG_SPMLR_ID: return "SPMLR"; case MLXSW_REG_SVFA_ID: @@ -2405,6 +3157,18 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "HTGT"; case MLXSW_REG_HPKT_ID: return "HPKT"; + case MLXSW_REG_MFCR_ID: + return "MFCR"; + case MLXSW_REG_MFSC_ID: + return "MFSC"; + case MLXSW_REG_MFSM_ID: + return "MFSM"; + case MLXSW_REG_MTCAP_ID: + return "MTCAP"; + case MLXSW_REG_MTMP_ID: + return "MTMP"; + case MLXSW_REG_MLCR_ID: + return "MLCR"; case MLXSW_REG_SBPR_ID: return "SBPR"; case MLXSW_REG_SBCM_ID: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3be4a2355ead..ce6845d534a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -48,6 +48,7 @@ #include <linux/workqueue.h> #include <linux/jiffies.h> #include <linux/bitops.h> +#include <linux/list.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@ -186,33 +187,6 @@ static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } -static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - int err; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, - MLXSW_SP_VFID_BASE + vfid, 0); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); - - if (err) - return err; - - set_bit(vfid, mlxsw_sp->active_vfids); - return 0; -} - -static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - clear_bit(vfid, mlxsw_sp->active_vfids); - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, - MLXSW_SP_VFID_BASE + vfid, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, unsigned char *addr) { @@ -417,6 +391,10 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static void mlxsw_sp_set_rx_mode(struct net_device *dev) +{ +} + static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -545,12 +523,132 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } +static struct mlxsw_sp_vfid * +mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) +{ + struct mlxsw_sp_vfid *vfid; + + list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { + if (vfid->vid == vid) + return vfid; + } + + return NULL; +} + +static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) +{ + return find_first_zero_bit(mlxsw_sp->port_vfids.mapped, + MLXSW_SP_VFID_PORT_MAX); +} + +static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) +{ + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, 
u16 vfid) +{ + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + u16 vid) +{ + struct device *dev = mlxsw_sp->bus_info->dev; + struct mlxsw_sp_vfid *vfid; + u16 n_vfid; + int err; + + n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); + if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { + dev_err(dev, "No available vFIDs\n"); + return ERR_PTR(-ERANGE); + } + + err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + if (err) { + dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + return ERR_PTR(err); + } + + vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); + if (!vfid) + goto err_allocate_vfid; + + vfid->vfid = n_vfid; + vfid->vid = vid; + + list_add(&vfid->list, &mlxsw_sp->port_vfids.list); + set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); + + return vfid; + +err_allocate_vfid: + __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + return ERR_PTR(-ENOMEM); +} + +static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vfid *vfid) +{ + clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); + list_del(&vfid->list); + + __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + + kfree(vfid); +} + +static struct mlxsw_sp_port * +mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_vfid *vfid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL); + if (!mlxsw_sp_vport) + return NULL; + + /* dev will be set correctly after the VLAN device is linked + * with the real device. In case of bridge SELF invocation, dev + * will remain as is. + */ + mlxsw_sp_vport->dev = mlxsw_sp_port->dev; + mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port; + mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; + mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; + mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; + mlxsw_sp_vport->vport.vfid = vfid; + mlxsw_sp_vport->vport.vid = vfid->vid; + + list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list); + + return mlxsw_sp_vport; +} + +static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + list_del(&mlxsw_sp_vport->vport.list); + kfree(mlxsw_sp_vport); +} + int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *sftr_pl; + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_vfid *vfid; int err; /* VLAN 0 is added to HW filter when device goes up, but it is @@ -559,100 +657,105 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, if (!vid) return 0; - if (test_bit(vid, mlxsw_sp_port->active_vfids)) { + if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { netdev_warn(dev, "VID=%d already configured\n", vid); return 0; } - if (!test_bit(vid, mlxsw_sp->active_vfids)) { - err = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (err) { - netdev_err(dev, "Failed to create vFID=%d\n", - MLXSW_SP_VFID_BASE + vid); - return err; + vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); + if (!vfid) { + vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); + if (IS_ERR(vfid)) { + netdev_err(dev, "Failed to create vFID for VID=%d\n", + vid); + return PTR_ERR(vfid); } + } - sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); - if (!sftr_pl) { - err = 
-ENOMEM; - goto err_flood_table_alloc; - } - mlxsw_reg_sftr_pack(sftr_pl, 0, vid, - MLXSW_REG_SFGC_TABLE_TYPE_FID, 0, - MLXSW_PORT_CPU_PORT, true); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); - kfree(sftr_pl); + mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); + if (!mlxsw_sp_vport) { + netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); + err = -ENOMEM; + goto err_port_vport_create; + } + + if (!vfid->nr_vports) { + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, + true, false); if (err) { - netdev_err(dev, "Failed to configure flood table\n"); - goto err_flood_table_config; + netdev_err(dev, "Failed to setup flooding for vFID=%d\n", + vfid->vfid); + goto err_vport_flood_set; } } - /* In case we fail in the following steps, we intentionally do not - * destroy the associated vFID. - */ - /* When adding the first VLAN interface on a bridged port we need to * transition all the active 802.1Q bridge VLANs to use explicit * {Port, VID} to FID mappings and set the port's mode to Virtual mode. */ - if (!mlxsw_sp_port->nr_vfids) { + if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); if (err) { netdev_err(dev, "Failed to set to Virtual mode\n"); - return err; + goto err_port_vp_mode_trans; } } - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, MLXSW_SP_VFID_BASE + vid, vid); + true, + mlxsw_sp_vfid_to_fid(vfid->vfid), + vid); if (err) { netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", - vid, MLXSW_SP_VFID_BASE + vid); + vid, vfid->vfid); goto err_port_vid_to_fid_set; } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); if (err) { netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); goto err_port_vid_learning_set; } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); if (err) { netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", vid); goto err_port_add_vid; } - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, MLXSW_REG_SPMS_STATE_FORWARDING); if (err) { netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); goto err_port_stp_state_set; } - mlxsw_sp_port->nr_vfids++; - set_bit(vid, mlxsw_sp_port->active_vfids); + vfid->nr_vports++; return 0; -err_flood_table_config: -err_flood_table_alloc: - mlxsw_sp_vfid_destroy(mlxsw_sp, vid); - return err; - err_port_stp_state_set: - mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); + mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); err_port_add_vid: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); err_port_vid_learning_set: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - MLXSW_SP_VFID_BASE + vid, vid); + mlxsw_sp_vfid_to_fid(vfid->vfid), vid); err_port_vid_to_fid_set: - mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); + if (list_is_singular(&mlxsw_sp_port->vports_list)) + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); +err_port_vp_mode_trans: + if (!vfid->nr_vports) + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, + false); +err_vport_flood_set: + mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); 
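+ /* vfid->nr_vports is only incremented once setup fully succeeds, so
+ * a vFID created just for this VID is still unused at this point and
+ * is dropped below.
+ */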
+err_port_vport_create: + if (!vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); return err; } @@ -660,6 +763,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_vfid *vfid; int err; /* VLAN 0 is removed from HW filter when device goes down, but @@ -668,38 +773,42 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, if (!vid) return 0; - if (!test_bit(vid, mlxsw_sp_port->active_vfids)) { + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) { netdev_warn(dev, "VID=%d does not exist\n", vid); return 0; } - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + vfid = mlxsw_sp_vport->vport.vfid; + + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, MLXSW_REG_SPMS_STATE_DISCARDING); if (err) { netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); if (err) { netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); if (err) { netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); return err; } - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, MLXSW_SP_VFID_BASE + vid, + false, + mlxsw_sp_vfid_to_fid(vfid->vfid), vid); if (err) { netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", - vid, MLXSW_SP_VFID_BASE + vid); + vid, vfid->vfid); return err; } @@ -707,7 +816,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, * transition all active 802.1Q bridge VLANs to use VID to FID * mappings and set port's mode to VLAN mode. */ - if (mlxsw_sp_port->nr_vfids == 1) { + if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); if (err) { netdev_err(dev, "Failed to set to VLAN mode\n"); @@ -715,8 +824,12 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, } } - mlxsw_sp_port->nr_vfids--; - clear_bit(vid, mlxsw_sp_port->active_vfids); + vfid->nr_vports--; + mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); + + /* Destroy the vFID if no vPorts are assigned to it anymore. 
*/ + if (!vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); return 0; } @@ -725,6 +838,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, .ndo_start_xmit = mlxsw_sp_port_xmit, + .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, .ndo_change_mtu = mlxsw_sp_port_change_mtu, .ndo_get_stats64 = mlxsw_sp_port_get_stats64, @@ -859,6 +973,29 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, } } +static int mlxsw_sp_port_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char mlcr_pl[MLXSW_REG_MLCR_LEN]; + bool active; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + active = true; + break; + case ETHTOOL_ID_INACTIVE: + active = false; + break; + default: + return -EOPNOTSUPP; + } + + mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); +} + static void mlxsw_sp_port_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -1205,6 +1342,7 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_drvinfo = mlxsw_sp_port_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = mlxsw_sp_port_get_strings, + .set_phys_id = mlxsw_sp_port_set_phys_id, .get_ethtool_stats = mlxsw_sp_port_get_stats, .get_sset_count = mlxsw_sp_port_get_sset_count, .get_settings = mlxsw_sp_port_get_settings, @@ -1216,6 +1354,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *dev; bool usable; + size_t bytes; int err; dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); @@ -1225,10 +1364,18 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port->dev = dev; mlxsw_sp_port->mlxsw_sp = mlxsw_sp; mlxsw_sp_port->local_port = local_port; - mlxsw_sp_port->learning = 1; - mlxsw_sp_port->learning_sync = 1; - mlxsw_sp_port->uc_flood = 1; - mlxsw_sp_port->pvid = 1; + bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); + mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); + if (!mlxsw_sp_port->active_vlans) { + err = -ENOMEM; + goto err_port_active_vlans_alloc; + } + mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL); + if (!mlxsw_sp_port->untagged_vlans) { + err = -ENOMEM; + goto err_port_untagged_vlans_alloc; + } + INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); mlxsw_sp_port->pcpu_stats = netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); @@ -1330,16 +1477,29 @@ err_port_module_check: err_dev_addr_init: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: + kfree(mlxsw_sp_port->untagged_vlans); +err_port_untagged_vlans_alloc: + kfree(mlxsw_sp_port->active_vlans); +err_port_active_vlans_alloc: free_netdev(dev); return err; } -static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp) +static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) { - u16 vfid; + struct net_device *dev = mlxsw_sp_port->dev; + struct mlxsw_sp_port *mlxsw_sp_vport, *tmp; - for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID) - mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); + list_for_each_entry_safe(mlxsw_sp_vport, tmp, + &mlxsw_sp_port->vports_list, vport.list) { + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + /* vPorts created for VLAN devices should already be gone + * by now, since we 
unregistered the port netdev. + */ + WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev)); + mlxsw_sp_port_kill_vid(dev, 0, vid); + } } static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) @@ -1348,10 +1508,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) if (!mlxsw_sp_port) return; - mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); free_percpu(mlxsw_sp_port->pcpu_stats); + kfree(mlxsw_sp_port->untagged_vlans); + kfree(mlxsw_sp_port->active_vlans); free_netdev(mlxsw_sp_port->dev); } @@ -1633,16 +1795,15 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, enum mlxsw_sp_flood_table flood_table; char sfgc_pl[MLXSW_REG_SFGC_LEN]; - if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) { + if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; - flood_table = 0; - } else { + else table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; - if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) - flood_table = MLXSW_SP_FLOOD_TABLE_UC; - else - flood_table = MLXSW_SP_FLOOD_TABLE_BM; - } + + if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) + flood_table = MLXSW_SP_FLOOD_TABLE_UC; + else + flood_table = MLXSW_SP_FLOOD_TABLE_BM; mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, flood_table); @@ -1653,9 +1814,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) { int type, err; - /* For non-offloaded netdevs, flood all traffic types to CPU - * port. - */ for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { if (type == MLXSW_REG_SFGC_TYPE_RESERVED) continue; @@ -1664,15 +1822,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); if (err) return err; - } - - /* For bridged ports, use one flooding table for unknown unicast - * traffic and a second table for unregistered multicast and - * broadcast. 
- */ - for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { - if (type == MLXSW_REG_SFGC_TYPE_RESERVED) - continue; err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); @@ -1683,6 +1832,22 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) +{ + char slcr_pl[MLXSW_REG_SLCR_LEN]; + + mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC | + MLXSW_REG_SLCR_LAG_HASH_DMAC | + MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE | + MLXSW_REG_SLCR_LAG_HASH_VLANID | + MLXSW_REG_SLCR_LAG_HASH_SIP | + MLXSW_REG_SLCR_LAG_HASH_DIP | + MLXSW_REG_SLCR_LAG_HASH_SPORT | + MLXSW_REG_SLCR_LAG_HASH_DPORT | + MLXSW_REG_SLCR_LAG_HASH_IPPROTO); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); +} + static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { @@ -1691,6 +1856,9 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; + INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); err = mlxsw_sp_base_mac_get(mlxsw_sp); if (err) { @@ -1701,7 +1869,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); - goto err_ports_create; + return err; } err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); @@ -1728,6 +1896,12 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, goto err_buffers_init; } + err = mlxsw_sp_lag_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n"); + goto err_lag_init; + } + err = mlxsw_sp_switchdev_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); @@ -1737,6 +1911,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, return 0; err_switchdev_init: +err_lag_init: err_buffers_init: err_flood_init: mlxsw_sp_traps_fini(mlxsw_sp); @@ -1744,8 +1919,6 @@ err_rx_listener_register: mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); err_event_register: mlxsw_sp_ports_remove(mlxsw_sp); -err_ports_create: - mlxsw_sp_vfids_fini(mlxsw_sp); return err; } @@ -1757,18 +1930,17 @@ static void mlxsw_sp_fini(void *priv) mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); mlxsw_sp_ports_remove(mlxsw_sp); - mlxsw_sp_vfids_fini(mlxsw_sp); } static struct mlxsw_config_profile mlxsw_sp_config_profile = { .used_max_vepa_channels = 1, .max_vepa_channels = 0, .used_max_lag = 1, - .max_lag = 64, + .max_lag = MLXSW_SP_LAG_MAX, .used_max_port_per_lag = 1, - .max_port_per_lag = 16, + .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX, .used_max_mid = 1, - .max_mid = 7000, + .max_mid = MLXSW_SP_MID_MAX, .used_max_pgt = 1, .max_pgt = 0, .used_max_system_port = 1, @@ -1782,8 +1954,8 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .flood_mode = 3, .max_fid_offset_flood_tables = 2, .fid_offset_flood_table_size = VLAN_N_VID - 1, - .max_fid_flood_tables = 1, - .fid_flood_table_size = VLAN_N_VID, + .max_fid_flood_tables = 2, + .fid_flood_table_size = MLXSW_SP_VFID_MAX, .used_max_ib_mc = 1, .max_ib_mc = 0, .used_max_pkey = 1, @@ -1824,24 +1996,29 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) */ err = mlxsw_sp_port_kill_vid(dev, 0, 1); if (err) - netdev_err(dev, 
"Failed to remove VID 1\n"); + return err; - return err; + mlxsw_sp_port->learning = 1; + mlxsw_sp_port->learning_sync = 1; + mlxsw_sp_port->uc_flood = 1; + mlxsw_sp_port->bridged = 1; + + return 0; } static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev; - int err; + + mlxsw_sp_port->learning = 0; + mlxsw_sp_port->learning_sync = 0; + mlxsw_sp_port->uc_flood = 0; + mlxsw_sp_port->bridged = 0; /* Add implicit VLAN interface in the device, so that untagged * packets will be classified to the default vFID. */ - err = mlxsw_sp_port_add_vid(dev, 0, 1); - if (err) - netdev_err(dev, "Failed to add VID 1\n"); - - return err; + return mlxsw_sp_port_add_vid(dev, 0, 1); } static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, @@ -1865,19 +2042,293 @@ static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->master_bridge.dev = NULL; } -static int mlxsw_sp_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr) +static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + char sldr_pl[MLXSW_REG_SLDR_LEN]; + + mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); +} + +static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id, u8 port_index) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id, port_index); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char slcor_pl[MLXSW_REG_SLCOR_LEN]; + + mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port, + lag_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); +} + +static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *lag_dev, + u16 *p_lag_id) +{ + struct mlxsw_sp_upper *lag; + int free_lag_id = -1; + int i; + + for (i = 0; i < MLXSW_SP_LAG_MAX; i++) { + lag = mlxsw_sp_lag_get(mlxsw_sp, i); + if (lag->ref_count) { + if (lag->dev == lag_dev) { + *p_lag_id = i; + return 0; + } + } else if (free_lag_id < 0) { + free_lag_id = i; + } + } + if (free_lag_id < 0) + return -EBUSY; + *p_lag_id = free_lag_id; + return 0; +} + +static bool +mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *lag_dev, + struct netdev_lag_upper_info 
*lag_upper_info)
+{
+ u16 lag_id;
+
+ if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
+ return false;
+ if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+ return true;
+}
+
+static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
+ u16 lag_id, u8 *p_port_index)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
+ *p_port_index = i;
+ return 0;
+ }
+ }
+ return -EBUSY;
+}
+
+static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_upper *lag;
+ u16 lag_id;
+ u8 port_index;
+ int err;
+
+ err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
+ if (err)
+ return err;
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+ if (!lag->ref_count) {
+ err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
+ if (err)
+ return err;
+ lag->dev = lag_dev;
+ }
+
+ err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
+ if (err)
+ return err;
+ err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
+ if (err)
+ goto err_col_port_add;
+ err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
+ if (err)
+ goto err_col_port_enable;
+
+ mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
+ mlxsw_sp_port->local_port);
+ mlxsw_sp_port->lag_id = lag_id;
+ mlxsw_sp_port->lagged = 1;
+ lag->ref_count++;
+ return 0;
+
+err_col_port_enable:
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+err_col_port_add:
+ if (!lag->ref_count)
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ return err;
+}
+
+static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_upper *lag;
+ u16 lag_id = mlxsw_sp_port->lag_id;
+ int err;
+
+ if (!mlxsw_sp_port->lagged)
+ return 0;
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+ WARN_ON(lag->ref_count == 0);
+
+ err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
+ if (err)
+ return err;
+ err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+ if (err)
+ return err;
+
+ if (lag->ref_count == 1) {
+ err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ if (err)
+ return err;
+ }
+
+ mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
+ mlxsw_sp_port->local_port);
+ mlxsw_sp_port->lagged = 0;
+ lag->ref_count--;
+ return 0;
+}
+
+static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+ mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
+ mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+ mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
+ mlxsw_sp_port->local_port);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool lag_tx_enabled)
+{
+ if (lag_tx_enabled)
+ return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
+ mlxsw_sp_port->lag_id);
+ else
+ return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
+ mlxsw_sp_port->lag_id);
+}
+
+static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netdev_lag_lower_state_info *info)
+{
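+ /* tx_enabled from the LAG lower state maps to membership in the
+ * LAG's hardware distributor; only enabled ports have egress
+ * traffic hashed to them.
+ */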
+ return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
+}
+
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev);
+
+static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *vlan_dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport) {
+ WARN_ON(!mlxsw_sp_vport);
+ return -EINVAL;
+ }
+
+ mlxsw_sp_vport->dev = vlan_dev;
+
+ return 0;
+}
+
+static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *vlan_dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport) {
+ WARN_ON(!mlxsw_sp_vport);
+ return -EINVAL;
+ }
+
+ /* When removing a VLAN device while still bridged we should first
+ * remove it from the bridge, as we receive the bridge's notification
+ * when the vPort is already gone.
+ */
+ if (mlxsw_sp_vport->bridged) {
+ struct net_device *br_dev;
+
+ br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
+ }
+
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
+
+ return 0;
+}
+
+static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
+ unsigned long event, void *ptr)
 {
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 struct netdev_notifier_changeupper_info *info;
 struct mlxsw_sp_port *mlxsw_sp_port;
 struct net_device *upper_dev;
 struct mlxsw_sp *mlxsw_sp;
 int err;
- if (!mlxsw_sp_port_dev_check(dev))
- return NOTIFY_DONE;
- mlxsw_sp_port = netdev_priv(dev);
 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 info = ptr;
@@ -1885,28 +2336,66 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
 switch (event) {
 case NETDEV_PRECHANGEUPPER:
 upper_dev = info->upper_dev;
+ if (!info->master || !info->linking)
+ break;
 /* HW limitation forbids putting ports in multiple bridges. 
*/ - if (info->master && info->linking && - netif_is_bridge_master(upper_dev) && + if (netif_is_bridge_master(upper_dev) && !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) return NOTIFY_BAD; + if (netif_is_lag_master(upper_dev) && + !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, + info->upper_info)) + return NOTIFY_BAD; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (info->master && - netif_is_bridge_master(upper_dev)) { + if (is_vlan_dev(upper_dev)) { + if (info->linking) { + err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to link VLAN device\n"); + return NOTIFY_BAD; + } + } else { + err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to unlink VLAN device\n"); + return NOTIFY_BAD; + } + } + } else if (netif_is_bridge_master(upper_dev)) { if (info->linking) { err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); - if (err) + if (err) { netdev_err(dev, "Failed to join bridge\n"); + return NOTIFY_BAD; + } mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); - mlxsw_sp_port->bridged = 1; } else { err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); - if (err) - netdev_err(dev, "Failed to leave bridge\n"); - mlxsw_sp_port->bridged = 0; mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); + if (err) { + netdev_err(dev, "Failed to leave bridge\n"); + return NOTIFY_BAD; + } + } + } else if (netif_is_lag_master(upper_dev)) { + if (info->linking) { + err = mlxsw_sp_port_lag_join(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to join link aggregation\n"); + return NOTIFY_BAD; + } + } else { + err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); + if (err) { + netdev_err(dev, "Failed to leave link aggregation\n"); + return NOTIFY_BAD; + } } } break; @@ -1915,6 +2404,443 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, return NOTIFY_DONE; } +static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, + unsigned long event, void *ptr) +{ + struct netdev_notifier_changelowerstate_info *info; + struct mlxsw_sp_port *mlxsw_sp_port; + int err; + + mlxsw_sp_port = netdev_priv(dev); + info = ptr; + + switch (event) { + case NETDEV_CHANGELOWERSTATE: + if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) { + err = mlxsw_sp_port_lag_changed(mlxsw_sp_port, + info->lower_state_info); + if (err) + netdev_err(dev, "Failed to reflect link aggregation lower state change\n"); + } + break; + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_port_event(struct net_device *dev, + unsigned long event, void *ptr) +{ + switch (event) { + case NETDEV_PRECHANGEUPPER: + case NETDEV_CHANGEUPPER: + return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr); + case NETDEV_CHANGELOWERSTATE: + return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, + unsigned long event, void *ptr) +{ + struct net_device *dev; + struct list_head *iter; + int ret; + + netdev_for_each_lower_dev(lag_dev, dev, iter) { + if (mlxsw_sp_port_dev_check(dev)) { + ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); + if (ret == NOTIFY_BAD) + return ret; + } + } + + return NOTIFY_DONE; +} + +static struct mlxsw_sp_vfid * +mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev) +{ + struct mlxsw_sp_vfid *vfid; + + list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { + if (vfid->br_dev == br_dev) + return vfid; + } + + return NULL; +} + 
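+/* The vFID space is split in two: vFIDs [0, MLXSW_SP_VFID_PORT_MAX) back
+ * VLAN devices on non-bridged ports, while the remaining vFIDs back VLAN
+ * devices that were enslaved to a VLAN-unaware bridge. The helpers below
+ * translate between a global vFID and an index into the bridge
+ * partition's bitmap.
+ */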
+static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
+{
+ return vfid - MLXSW_SP_VFID_PORT_MAX;
+}
+
+static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
+{
+ return MLXSW_SP_VFID_PORT_MAX + br_vfid;
+}
+
+static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+{
+ return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
+ MLXSW_SP_VFID_BR_MAX);
+}
+
+static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
+{
+ struct device *dev = mlxsw_sp->bus_info->dev;
+ struct mlxsw_sp_vfid *vfid;
+ u16 n_vfid;
+ int err;
+
+ n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
+ if (n_vfid == MLXSW_SP_VFID_MAX) {
+ dev_err(dev, "No available vFIDs\n");
+ return ERR_PTR(-ERANGE);
+ }
+
+ err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+ if (err) {
+ dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+ return ERR_PTR(err);
+ }
+
+ vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
+ if (!vfid)
+ goto err_allocate_vfid;
+
+ vfid->vfid = n_vfid;
+ vfid->br_dev = br_dev;
+
+ list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
+ set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
+
+ return vfid;
+
+err_allocate_vfid:
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vfid *vfid)
+{
+ u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
+
+ clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
+ list_del(&vfid->list);
+
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+
+ kfree(vfid);
+}
+
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+ struct net_device *dev = mlxsw_sp_vport->dev;
+ struct mlxsw_sp_vfid *vfid, *new_vfid;
+ int err;
+
+ vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
+ if (!vfid) {
+ WARN_ON(!vfid);
+ return -EINVAL;
+ }
+
+ /* We need a vFID to go back to after leaving the bridge's vFID. */
+ new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
+ if (!new_vfid) {
+ new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+ if (IS_ERR(new_vfid)) {
+ netdev_err(dev, "Failed to create vFID for VID=%d\n",
+ vid);
+ return PTR_ERR(new_vfid);
+ }
+ }
+
+ /* Invalidate existing {Port, VID} to vFID mapping and create a new
+ * one for the new vFID.
+ */
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ false,
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
+ vfid->vfid);
+ goto err_port_vid_to_fid_invalidate;
+ }
+
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+ true,
+ mlxsw_sp_vfid_to_fid(new_vfid->vfid),
+ vid);
+ if (err) {
+ netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
+ new_vfid->vfid);
+ goto err_port_vid_to_fid_validate;
+ }
+
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+ if (err) {
+ netdev_err(dev, "Failed to disable learning\n");
+ goto err_port_vid_learning_set;
+ }
+
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
+ false);
+ if (err) {
+ netdev_err(dev, "Failed to clear flooding\n");
+ goto err_vport_flood_set;
+ }
+
+ /* Switch between the vFIDs and destroy the old one if needed. 
*/ + new_vfid->nr_vports++; + mlxsw_sp_vport->vport.vfid = new_vfid; + vfid->nr_vports--; + if (!vfid->nr_vports) + mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + + mlxsw_sp_vport->learning = 0; + mlxsw_sp_vport->learning_sync = 0; + mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->bridged = 0; + + return 0; + +err_vport_flood_set: +err_port_vid_learning_set: +err_port_vid_to_fid_validate: +err_port_vid_to_fid_invalidate: + /* Rollback vFID only if new. */ + if (!new_vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); + return err; +} + +static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) +{ + struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + struct net_device *dev = mlxsw_sp_vport->dev; + struct mlxsw_sp_vfid *vfid; + int err; + + vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); + if (!vfid) { + vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); + if (IS_ERR(vfid)) { + netdev_err(dev, "Failed to create bridge vFID\n"); + return PTR_ERR(vfid); + } + } + + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); + if (err) { + netdev_err(dev, "Failed to setup flooding for vFID=%d\n", + vfid->vfid); + goto err_port_flood_set; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); + if (err) { + netdev_err(dev, "Failed to enable learning\n"); + goto err_port_vid_learning_set; + } + + /* We need to invalidate existing {Port, VID} to vFID mapping and + * create a new one for the bridge's vFID. + */ + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + false, + mlxsw_sp_vfid_to_fid(old_vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", + old_vfid->vfid); + goto err_port_vid_to_fid_invalidate; + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + true, + mlxsw_sp_vfid_to_fid(vfid->vfid), + vid); + if (err) { + netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", + vfid->vfid); + goto err_port_vid_to_fid_validate; + } + + /* Switch between the vFIDs and destroy the old one if needed. 
*/ + vfid->nr_vports++; + mlxsw_sp_vport->vport.vfid = vfid; + old_vfid->nr_vports--; + if (!old_vfid->nr_vports) + mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); + + mlxsw_sp_vport->learning = 1; + mlxsw_sp_vport->learning_sync = 1; + mlxsw_sp_vport->uc_flood = 1; + mlxsw_sp_vport->bridged = 1; + + return 0; + +err_port_vid_to_fid_validate: + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, + mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); +err_port_vid_to_fid_invalidate: + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); +err_port_vid_learning_set: + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); +err_port_flood_set: + if (!vfid->nr_vports) + mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + return err; +} + +static bool +mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, + const struct net_device *br_dev) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) + return false; + } + + return true; +} + +static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, + unsigned long event, void *ptr, + u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct mlxsw_sp_port *mlxsw_sp_vport; + struct net_device *upper_dev; + int err; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if (!info->master || !info->linking) + break; + if (!netif_is_bridge_master(upper_dev)) + return NOTIFY_BAD; + /* We can't have multiple VLAN interfaces configured on + * the same port and being members in the same bridge. + */ + if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, + upper_dev)) + return NOTIFY_BAD; + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (!info->master) + break; + if (info->linking) { + if (!mlxsw_sp_vport) { + WARN_ON(!mlxsw_sp_vport); + return NOTIFY_BAD; + } + err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, + upper_dev); + if (err) { + netdev_err(dev, "Failed to join bridge\n"); + return NOTIFY_BAD; + } + } else { + /* We ignore bridge's unlinking notifications if vPort + * is gone, since we already left the bridge when the + * VLAN device was unlinked from the real device. 
+ */ + if (!mlxsw_sp_vport) + return NOTIFY_DONE; + err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, + upper_dev); + if (err) { + netdev_err(dev, "Failed to leave bridge\n"); + return NOTIFY_BAD; + } + } + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, + unsigned long event, void *ptr, + u16 vid) +{ + struct net_device *dev; + struct list_head *iter; + int ret; + + netdev_for_each_lower_dev(lag_dev, dev, iter) { + if (mlxsw_sp_port_dev_check(dev)) { + ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, + vid); + if (ret == NOTIFY_BAD) + return ret; + } + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, + unsigned long event, void *ptr) +{ + struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); + u16 vid = vlan_dev_vlan_id(vlan_dev); + + if (mlxsw_sp_port_dev_check(real_dev)) + return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr, + vid); + else if (netif_is_lag_master(real_dev)) + return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, + vid); + + return NOTIFY_DONE; +} + +static int mlxsw_sp_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (mlxsw_sp_port_dev_check(dev)) + return mlxsw_sp_netdevice_port_event(dev, event, ptr); + + if (netif_is_lag_master(dev)) + return mlxsw_sp_netdevice_lag_event(dev, event, ptr); + + if (is_vlan_dev(dev)) + return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + + return NOTIFY_DONE; +} + static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { .notifier_call = mlxsw_sp_netdevice_event, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4365c8bccc6d..a23dc610d259 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -41,16 +41,73 @@ #include <linux/netdevice.h> #include <linux/bitops.h> #include <linux/if_vlan.h> +#include <linux/list.h> #include <net/switchdev.h> +#include "port.h" #include "core.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID +#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ +#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */ +#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) + +#define MLXSW_SP_LAG_MAX 64 +#define MLXSW_SP_PORT_PER_LAG_MAX 16 + +#define MLXSW_SP_MID_MAX 7000 struct mlxsw_sp_port; +struct mlxsw_sp_upper { + struct net_device *dev; + unsigned int ref_count; +}; + +struct mlxsw_sp_vfid { + struct list_head list; + u16 nr_vports; + u16 vfid; /* Starting at 0 */ + struct net_device *br_dev; + u16 vid; +}; + +struct mlxsw_sp_mid { + struct list_head list; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 mid; + unsigned int ref_count; +}; + +static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid) +{ + return MLXSW_SP_VFID_BASE + vfid; +} + +static inline u16 mlxsw_sp_fid_to_vfid(u16 fid) +{ + return fid - MLXSW_SP_VFID_BASE; +} + +static inline bool mlxsw_sp_fid_is_vfid(u16 fid) +{ + return fid >= MLXSW_SP_VFID_BASE; +} + struct mlxsw_sp { - unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)]; + } port_vfids; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)]; + } br_vfids; + struct { + struct list_head list; + unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)]; + 
} br_mids; unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)]; struct mlxsw_sp_port **ports; struct mlxsw_core *core; @@ -63,12 +120,17 @@ struct mlxsw_sp { } fdb_notify; #define MLXSW_SP_DEFAULT_AGEING_TIME 300 u32 ageing_time; - struct { - struct net_device *dev; - unsigned int ref_count; - } master_bridge; + struct mutex fdb_lock; /* Make sure FDB sessions are atomic. */ + struct mlxsw_sp_upper master_bridge; + struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; }; +static inline struct mlxsw_sp_upper * +mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +{ + return &mlxsw_sp->lags[lag_id]; +} + struct mlxsw_sp_port_pcpu_stats { u64 rx_packets; u64 rx_bytes; @@ -87,15 +149,87 @@ struct mlxsw_sp_port { u8 learning:1, learning_sync:1, uc_flood:1, - bridged:1; + bridged:1, + lagged:1; u16 pvid; + u16 lag_id; + struct { + struct list_head list; + struct mlxsw_sp_vfid *vfid; + u16 vid; + } vport; /* 802.1Q bridge VLANs */ - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long *active_vlans; + unsigned long *untagged_vlans; /* VLAN interfaces */ - unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)]; - u16 nr_vfids; + struct list_head vports_list; }; +static inline struct mlxsw_sp_port * +mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + u8 local_port; + + local_port = mlxsw_core_lag_mapping_get(mlxsw_sp->core, + lag_id, port_index); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL; +} + +static inline bool +mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_port->vport.vfid; +} + +static inline struct net_device * +mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vfid->br_dev; +} + +static inline u16 +mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vid; +} + +static inline u16 +mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vfid->vfid; +} + +static inline struct mlxsw_sp_port * +mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid) + return mlxsw_sp_vport; + } + + return NULL; +} + +static inline struct mlxsw_sp_port * +mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 vfid) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid) + return mlxsw_sp_vport; + } + + return NULL; +} + enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, MLXSW_SP_FLOOD_TABLE_BM, @@ -118,5 +252,7 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, + bool set, bool only_uc); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 617fb22b5d81..ffe894e6d287 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -51,12 +51,50 @@ #include 
"core.h" #include "reg.h" +static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid) +{ + u16 fid = vid; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + + fid = mlxsw_sp_vfid_to_fid(vfid); + } + + if (!fid) + fid = mlxsw_sp_port->pvid; + + return fid; +} + +static struct mlxsw_sp_port * +mlxsw_sp_port_orig_get(struct net_device *dev, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + u16 vid; + + if (!is_vlan_dev(dev)) + return mlxsw_sp_port; + + vid = vlan_dev_vlan_id(dev); + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + WARN_ON(!mlxsw_sp_vport); + + return mlxsw_sp_vport; +} + static int mlxsw_sp_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); @@ -105,8 +143,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!spms_pl) return -ENOMEM; mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); - for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + } else { + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + } err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); kfree(spms_pl); @@ -124,22 +168,38 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); } +static bool mlxsw_sp_vfid_is_vport_br(u16 vfid) +{ + return vfid >= MLXSW_SP_VFID_PORT_MAX; +} + static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid_begin, u16 fid_end, bool set, + u16 idx_begin, u16 idx_end, bool set, bool only_uc) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u16 range = fid_end - fid_begin + 1; + u16 local_port = mlxsw_sp_port->local_port; + enum mlxsw_flood_table_type table_type; + u16 range = idx_end - idx_begin + 1; char *sftr_pl; int err; + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; + if (mlxsw_sp_vfid_is_vport_br(idx_begin)) + local_port = mlxsw_sp_port->local_port; + else + local_port = MLXSW_PORT_CPU_PORT; + } else { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; + } + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); if (!sftr_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin, - MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range, - mlxsw_sp_port->local_port, set); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, + table_type, range, local_port, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); if (err) goto buffer_out; @@ -150,9 +210,8 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, if (only_uc) goto buffer_out; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin, - MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range, - mlxsw_sp_port->local_port, set); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, + table_type, range, local_port, set); err = 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); buffer_out: @@ -167,6 +226,13 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, last_visited_vid; int err; + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + + return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid, + set, true); + } + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set, true); @@ -185,6 +251,16 @@ err_port_flood_set: return err; } +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, + bool set, bool only_uc) +{ + /* In case of vFIDs, index into the flooding table is relative to + * the start of the vFIDs range. + */ + return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, + only_uc); +} + static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, unsigned long brport_flags) @@ -193,6 +269,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, bool set; int err; + if (!mlxsw_sp_port->bridged) + return -EINVAL; + if (switchdev_trans_ph_prepare(trans)) return 0; @@ -237,6 +316,22 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); } +static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + bool vlan_enabled) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + /* SWITCHDEV_TRANS_PREPARE phase */ + if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) { + netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n"); + return -EINVAL; + } + + return 0; +} + static int mlxsw_sp_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -244,6 +339,10 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); int err = 0; + mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port); + if (!mlxsw_sp_port) + return -EINVAL; + switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, @@ -257,6 +356,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, attr->u.ageing_time); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans, + attr->orig_dev, + attr->u.vlan_filtering); + break; default: err = -EOPNOTSUPP; break; @@ -304,7 +408,7 @@ static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { enum mlxsw_reg_svfa_mt mt; - if (mlxsw_sp_port->nr_vfids) + if (!list_empty(&mlxsw_sp_port->vports_list)) mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; else mt = MLXSW_REG_SVFA_MT_VID_TO_FID; @@ -316,7 +420,7 @@ static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { enum mlxsw_reg_svfa_mt mt; - if (!mlxsw_sp_port->nr_vfids) + if (list_empty(&mlxsw_sp_port->vports_list)) return 0; mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; @@ -342,14 +446,35 @@ err_port_add_vid: return err; } +static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, bool is_member, + bool untagged) +{ + u16 vid, vid_e; + int err; + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { 
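+ /* A single SPVM register write carries at most
+ * MLXSW_REG_SPVM_REC_MAX_COUNT VID records, so the range is
+ * programmed in chunks of that size.
+ */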
+ vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+ vid_end);
+
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
+ is_member, untagged);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
 static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 u16 vid_begin, u16 vid_end,
 bool flag_untagged, bool flag_pvid)
 {
 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 struct net_device *dev = mlxsw_sp_port->dev;
+ u16 vid, last_visited_vid, old_pvid;
 enum mlxsw_reg_svfa_mt mt;
- u16 vid, vid_e;
 int err;
 /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
@@ -377,15 +502,18 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 if (err) {
 netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
 vid);
- return err;
+ goto err_port_vid_to_fid_set;
 }
 }
+ }
- /* Set FID mapping according to port's mode */
+ /* Set FID mapping according to port's mode */
+ for (vid = vid_begin; vid <= vid_end; vid++) {
 err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
 if (err) {
 netdev_err(dev, "Failed to map FID=%d", vid);
- return err;
+ last_visited_vid = --vid;
+ goto err_port_fid_map;
 }
 }
@@ -393,83 +521,133 @@
 true, false);
 if (err) {
 netdev_err(dev, "Failed to configure flooding\n");
- return err;
+ goto err_port_flood_set;
 }
- for (vid = vid_begin; vid <= vid_end;
- vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
- vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
- vid_end);
-
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
- flag_untagged);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
- vid, vid_e);
- return err;
- }
+ err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
+ true, flag_untagged);
+ if (err) {
+ netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
+ vid_end);
+ goto err_port_vlans_set;
 }
- vid = vid_begin;
- if (flag_pvid && mlxsw_sp_port->pvid != vid) {
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ old_pvid = mlxsw_sp_port->pvid;
+ if (flag_pvid && old_pvid != vid_begin) {
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
 if (err) {
- netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
- vid);
- return err;
+ netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
+ goto err_port_pvid_set;
 }
- mlxsw_sp_port->pvid = vid;
+ mlxsw_sp_port->pvid = vid_begin;
 }
 /* Changing activity bits only if HW operation succeeded */
- for (vid = vid_begin; vid <= vid_end; vid++)
+ for (vid = vid_begin; vid <= vid_end; vid++) {
 set_bit(vid, mlxsw_sp_port->active_vlans);
+ if (flag_untagged)
+ set_bit(vid, mlxsw_sp_port->untagged_vlans);
+ else
+ clear_bit(vid, mlxsw_sp_port->untagged_vlans);
+ }
- return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
- mlxsw_sp_port->stp_state);
+ /* STP state change must be done after we set active VLANs */
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
+ mlxsw_sp_port->stp_state);
+ if (err) {
+ netdev_err(dev, "Failed to set STP state\n");
+ goto err_port_stp_state_set;
+ }
+
+ return 0;
+
+err_port_vid_to_fid_set:
+ mlxsw_sp_fid_destroy(mlxsw_sp, vid);
+ return err;
+
+err_port_stp_state_set:
+ for (vid = vid_begin; vid <= vid_end; vid++)
+ clear_bit(vid, mlxsw_sp_port->active_vlans);
+ if (old_pvid != mlxsw_sp_port->pvid)
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
+err_port_pvid_set:
+ __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
+ false);
+err_port_vlans_set:
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, 
false, + false); +err_port_flood_set: + last_visited_vid = vid_end; +err_port_fid_map: + for (vid = last_visited_vid; vid >= vid_begin; vid--) + mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); + return err; } static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { - bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID; + bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; if (switchdev_trans_ph_prepare(trans)) return 0; return __mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan->vid_begin, vlan->vid_end, - untagged_flag, pvid_flag); + flag_untagged, flag_pvid); +} + +static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) +{ + return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; +} + +static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) +{ + return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT : + MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port, - const char *mac, u16 vid, bool adding, - bool dynamic) +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const char *mac, u16 fid, bool adding, + bool dynamic) { - enum mlxsw_reg_sfd_rec_policy policy; - enum mlxsw_reg_sfd_op op; char *sfd_pl; int err; - if (!vid) - vid = mlxsw_sp_port->pvid; + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), + mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, + local_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + kfree(sfd_pl); + + return err; +} + +static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, + const char *mac, u16 fid, u16 lag_vid, + bool adding, bool dynamic) +{ + char *sfd_pl; + int err; sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); if (!sfd_pl) return -ENOMEM; - policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : - MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; - op = adding ? 
MLXSW_REG_SFD_OP_WRITE_EDIT : - MLXSW_REG_SFD_OP_WRITE_REMOVE; - mlxsw_reg_sfd_pack(sfd_pl, op, 0); - mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, - mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP, - mlxsw_sp_port->local_port); - err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd), - sfd_pl); + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), + mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, + lag_vid, lag_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); kfree(sfd_pl); return err; @@ -480,11 +658,162 @@ mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { + u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid); + u16 lag_vid = 0; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + } + + if (!mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + fdb->addr, fid, true, false); + else + return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->lag_id, + fdb->addr, fid, lag_vid, + true, false); +} + +static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, + u16 fid, u16 mid, bool adding) +{ + char *sfd_pl; + int err; + + sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); + if (!sfd_pl) + return -ENOMEM; + + mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); + mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, + MLXSW_REG_SFD_REC_ACTION_NOP, mid); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); + kfree(sfd_pl); + return err; +} + +static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, + bool add, bool clear_all_ports) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *smid_pl; + int err, i; + + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + + mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add); + if (clear_all_ports) { + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + if (mlxsw_sp->ports[i]) + mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); + } + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + return err; +} + +static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, + const unsigned char *addr, + u16 vid) +{ + struct mlxsw_sp_mid *mid; + + list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { + if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) + return mid; + } + return NULL; +} + +static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, + const unsigned char *addr, + u16 vid) +{ + struct mlxsw_sp_mid *mid; + u16 mid_idx; + + mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped, + MLXSW_SP_MID_MAX); + if (mid_idx == MLXSW_SP_MID_MAX) + return NULL; + + mid = kzalloc(sizeof(*mid), GFP_KERNEL); + if (!mid) + return NULL; + + set_bit(mid_idx, mlxsw_sp->br_mids.mapped); + ether_addr_copy(mid->addr, addr); + mid->vid = vid; + mid->mid = mid_idx; + mid->ref_count = 0; + list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); + + return mid; +} + +static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mid *mid) +{ + if (--mid->ref_count == 0) { + list_del(&mid->list); + clear_bit(mid->mid, mlxsw_sp->br_mids.mapped); + kfree(mid); + return 1; + } + return 0; +} + +static int mlxsw_sp_port_mdb_add(struct 
+static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
+				 const struct switchdev_obj_port_mdb *mdb,
+				 struct switchdev_trans *trans)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct net_device *dev = mlxsw_sp_port->dev;
+	struct mlxsw_sp_mid *mid;
+	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
+	int err = 0;
 
 	if (switchdev_trans_ph_prepare(trans))
 		return 0;
 
-	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
-				    true, false);
+	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+	if (!mid) {
+		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
+		if (!mid) {
+			netdev_err(dev, "Unable to allocate MC group\n");
+			return -ENOMEM;
+		}
+	}
+	mid->ref_count++;
+
+	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
+				     mid->ref_count == 1);
+	if (err) {
+		netdev_err(dev, "Unable to set SMID\n");
+		goto err_out;
+	}
+
+	if (mid->ref_count == 1) {
+		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
+					   true);
+		if (err) {
+			netdev_err(dev, "Unable to set MC SFD\n");
+			goto err_out;
+		}
+	}
+
+	return 0;
+
+err_out:
+	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
+	return err;
 }
 
 static int mlxsw_sp_port_obj_add(struct net_device *dev,
@@ -494,8 +823,15 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	int err = 0;
 
+	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
+	if (!mlxsw_sp_port)
+		return -EINVAL;
+
 	switch (obj->id) {
 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
+			return 0;
+
 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
 					      SWITCHDEV_OBJ_PORT_VLAN(obj),
 					      trans);
@@ -505,6 +841,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
 					      SWITCHDEV_OBJ_PORT_FDB(obj),
 					      trans);
 		break;
+	case SWITCHDEV_OBJ_ID_PORT_MDB:
+		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
+					    SWITCHDEV_OBJ_PORT_MDB(obj),
+					    trans);
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
@@ -532,7 +873,7 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 				     u16 vid_begin, u16 vid_end, bool init)
 {
 	struct net_device *dev = mlxsw_sp_port->dev;
-	u16 vid, vid_e;
+	u16 vid, pvid;
 	int err;
 
 	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
@@ -542,30 +883,23 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (!init && !mlxsw_sp_port->bridged)
 		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
 
-	for (vid = vid_begin; vid <= vid_end;
-	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
-		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
-			    vid_end);
-		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
-					     false);
-		if (err) {
-			netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
-				   vid, vid_e);
-			return err;
-		}
+	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
+					false, false);
+	if (err) {
+		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
+			   vid_end);
+		return err;
 	}
 
-	if ((mlxsw_sp_port->pvid >= vid_begin) &&
-	    (mlxsw_sp_port->pvid <= vid_end)) {
+	pvid = mlxsw_sp_port->pvid;
+	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
 		/* Default VLAN is always 1 */
-		mlxsw_sp_port->pvid = 1;
-		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
-					     mlxsw_sp_port->pvid);
+		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
 		if (err) {
-			netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
-				   vid);
+			netdev_err(dev, "Unable to del PVID %d\n", pvid);
 			return err;
 		}
+		mlxsw_sp_port->pvid = 1;
 	}
 
 	if (init)
@@ -606,8 +940,54 @@ static int mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
 					const struct switchdev_obj_port_fdb *fdb)
 {
-	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
-				    false, false);
+	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
+	u16 lag_vid = 0;
+
+	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
+		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+
+	if (!mlxsw_sp_port->lagged)
+		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
+					       mlxsw_sp_port->local_port,
+					       fdb->addr, fid,
+					       false, false);
+	else
+		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
+						   mlxsw_sp_port->lag_id,
+						   fdb->addr, fid, lag_vid,
+						   false, false);
+}
+
+static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
+				 const struct switchdev_obj_port_mdb *mdb)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct net_device *dev = mlxsw_sp_port->dev;
+	struct mlxsw_sp_mid *mid;
+	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
+	u16 mid_idx;
+	int err = 0;
+
+	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
+	if (!mid) {
+		netdev_err(dev, "Unable to remove port from MC DB\n");
+		return -EINVAL;
+	}
+
+	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
+	if (err)
+		netdev_err(dev, "Unable to remove port from SMID\n");
+
+	mid_idx = mid->mid;
+	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
+		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
+					   false);
+		if (err)
+			netdev_err(dev, "Unable to remove MC SFD\n");
+	}
+
+	return err;
 }
 
 static int mlxsw_sp_port_obj_del(struct net_device *dev,
@@ -616,8 +996,15 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	int err = 0;
 
+	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
+	if (!mlxsw_sp_port)
+		return -EINVAL;
+
 	switch (obj->id) {
 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
+			return 0;
+
 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
 		break;
@@ -625,6 +1012,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
 		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
 						   SWITCHDEV_OBJ_PORT_FDB(obj));
 		break;
+	case SWITCHDEV_OBJ_ID_PORT_MDB:
+		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
+					    SWITCHDEV_OBJ_PORT_MDB(obj));
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
@@ -633,14 +1023,31 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
 	return err;
 }
 
+static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
+						   u16 lag_id)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
+		if (mlxsw_sp_port)
+			return mlxsw_sp_port;
+	}
+	return NULL;
+}
+
 static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
 				  struct switchdev_obj_port_fdb *fdb,
 				  switchdev_obj_dump_cb_t *cb)
 {
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u16 vport_vid = 0, vport_fid = 0;
 	char *sfd_pl;
 	char mac[ETH_ALEN];
-	u16 vid;
+	u16 fid;
 	u8 local_port;
+	u16 lag_id;
 	u8 num_rec;
 	int stored_err = 0;
 	int i;
@@ -650,11 +1057,19 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (!sfd_pl)
 		return -ENOMEM;
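 
+	/* Take fdb_lock to serialize the dump against the FDB notification
+	 * work, which updates the hardware FDB while processing learn and
+	 * age-out records.
+	 */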
+	mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
+	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+		u16 tmp;
+
+		tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+		vport_fid = mlxsw_sp_vfid_to_fid(tmp);
+		vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+	}
+
 	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
 	do {
 		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
-		err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
-				      MLXSW_REG(sfd), sfd_pl);
+		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
 		if (err)
 			goto out;
@@ -669,21 +1084,46 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
 		for (i = 0; i < num_rec; i++) {
 			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
 			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
-				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
+				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
 							&local_port);
 				if (local_port == mlxsw_sp_port->local_port) {
+					if (vport_fid && vport_fid != fid)
+						continue;
+					else if (vport_fid)
+						fdb->vid = vport_vid;
+					else
+						fdb->vid = fid;
+					ether_addr_copy(fdb->addr, mac);
+					fdb->ndm_state = NUD_REACHABLE;
+					err = cb(&fdb->obj);
+					if (err)
+						stored_err = err;
+				}
+				break;
+			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
+				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
+							    mac, &fid, &lag_id);
+				if (mlxsw_sp_port ==
+				    mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
+					if (vport_fid && vport_fid != fid)
+						continue;
+					else if (vport_fid)
+						fdb->vid = vport_vid;
+					else
+						fdb->vid = fid;
 					ether_addr_copy(fdb->addr, mac);
 					fdb->ndm_state = NUD_REACHABLE;
-					fdb->vid = vid;
 					err = cb(&fdb->obj);
 					if (err)
 						stored_err = err;
 				}
+				break;
 			}
 		}
 	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
 
 out:
+	mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
 	kfree(sfd_pl);
 	return stored_err ? stored_err : err;
 }
@@ -695,10 +1135,19 @@ static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
 	u16 vid;
 	int err = 0;
 
+	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+		vlan->flags = 0;
+		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+		return cb(&vlan->obj);
+	}
+
 	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
 		vlan->flags = 0;
 		if (vid == mlxsw_sp_port->pvid)
 			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
+			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 		vlan->vid_begin = vid;
 		vlan->vid_end = vid;
 		err = cb(&vlan->obj);
@@ -715,6 +1164,10 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	int err = 0;
 
+	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
+	if (!mlxsw_sp_port)
+		return -EINVAL;
+
 	switch (obj->id) {
 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
 		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
@@ -740,6 +1193,21 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
 	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
 };
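 
+/* Tell the bridge driver about a learned or aged-out FDB entry so the
+ * software FDB stays in sync with the hardware one. Only done when both
+ * learning and learning_sync are enabled on the port.
+ */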
+static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
+					bool adding, char *mac, u16 vid,
+					struct net_device *dev)
+{
+	struct switchdev_notifier_fdb_info info;
+	unsigned long notifier_type;
+
+	if (learning && learning_sync) {
+		info.addr = mac;
+		info.vid = vid;
+		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+		call_switchdev_notifiers(notifier_type, dev, &info.info);
+	}
+}
+
 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
 					    char *sfn_pl, int rec_index,
 					    bool adding)
@@ -747,34 +1215,119 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	char mac[ETH_ALEN];
 	u8 local_port;
-	u16 vid;
+	u16 vid, fid;
+	bool do_notification = true;
 	int err;
 
-	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
+	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
 	if (!mlxsw_sp_port) {
 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
-		return;
+		goto just_remove;
+	}
+
+	if (mlxsw_sp_fid_is_vfid(fid)) {
+		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
+		struct mlxsw_sp_port *mlxsw_sp_vport;
+
+		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
+								  vfid);
+		if (!mlxsw_sp_vport) {
+			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
+			goto just_remove;
+		}
+		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+		/* Override the physical port with the vPort. */
+		mlxsw_sp_port = mlxsw_sp_vport;
+	} else {
+		vid = fid;
 	}
 
-	err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
-				   adding && mlxsw_sp_port->learning, true);
+	adding = adding && mlxsw_sp_port->learning;
+
+do_fdb_op:
+	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
+				      adding, true);
 	if (err) {
 		if (net_ratelimit())
 			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
 		return;
 	}
 
-	if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
-		struct switchdev_notifier_fdb_info info;
-		unsigned long notifier_type;
+	if (!do_notification)
+		return;
+	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
+				    mlxsw_sp_port->learning_sync,
+				    adding, mac, vid, mlxsw_sp_port->dev);
+	return;
+
+just_remove:
+	adding = false;
+	do_notification = false;
+	goto do_fdb_op;
+}
 
-		info.addr = mac;
-		info.vid = vid;
-		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
-		call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
-					 &info.info);
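+/* Same as mlxsw_sp_fdb_notify_mac_process(), but the notification record
+ * carries a LAG ID instead of a local port, so the entry is programmed
+ * via mlxsw_sp_port_fdb_uc_lag_op() and reported against the LAG device.
+ */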
+static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
+						char *sfn_pl, int rec_index,
+						bool adding)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	char mac[ETH_ALEN];
+	u16 lag_vid = 0;
+	u16 lag_id;
+	u16 vid, fid;
+	bool do_notification = true;
+	int err;
+
+	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
+	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
+	if (!mlxsw_sp_port) {
+		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
+		goto just_remove;
 	}
+
+	if (mlxsw_sp_fid_is_vfid(fid)) {
+		u16 vfid = mlxsw_sp_fid_to_vfid(fid);
+		struct mlxsw_sp_port *mlxsw_sp_vport;
+
+		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
+								  vfid);
+		if (!mlxsw_sp_vport) {
+			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
+			goto just_remove;
+		}
+
+		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+		lag_vid = vid;
+		/* Override the physical port with the vPort. */
+		mlxsw_sp_port = mlxsw_sp_vport;
+	} else {
+		vid = fid;
+	}
+
+	adding = adding && mlxsw_sp_port->learning;
+
+do_fdb_op:
+	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
+					  adding, true);
+	if (err) {
+		if (net_ratelimit())
+			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+		return;
+	}
+
+	if (!do_notification)
+		return;
+	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
+				    mlxsw_sp_port->learning_sync,
+				    adding, mac, vid,
+				    mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
+	return;
+
+just_remove:
+	adding = false;
+	do_notification = false;
+	goto do_fdb_op;
 }
 
 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
@@ -789,6 +1342,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
 						rec_index, false);
 		break;
+	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
+		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
+						    rec_index, true);
+		break;
+	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
+		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
+						    rec_index, false);
+		break;
 	}
 }
 
@@ -812,6 +1373,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
 
 	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
 
+	mutex_lock(&mlxsw_sp->fdb_lock);
 	do {
 		mlxsw_reg_sfn_pack(sfn_pl);
 		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@@ -824,6 +1386,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
 			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
 
 	} while (num_rec);
+	mutex_unlock(&mlxsw_sp->fdb_lock);
 
 	kfree(sfn_pl);
 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@@ -838,6 +1401,7 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
 		return err;
 	}
+	mutex_init(&mlxsw_sp->fdb_lock);
 	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
 	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@@ -877,7 +1441,8 @@ int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
 	 * with VID 1.
 	 */
 	mlxsw_sp_port->pvid = 1;
-	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
+	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
+					true);
 	if (err) {
 		netdev_err(dev, "Unable to init VLANs\n");
 		return err;