Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw')
28 files changed, 3291 insertions, 1445 deletions
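Most of the register plumbing added below (the new RITR counter fields, the RICNT and MGPC registers, the polcnt and virfwd flexible actions) is expressed through the driver's MLXSW_ITEM32()/MLXSW_ITEM64() macros, which generate get/set accessors for bit fields inside a raw, big-endian register payload. As a rough orientation, here is a simplified sketch of what such an accessor pair does; this is illustrative only, not the kernel's actual item.h implementation (which also handles byte buffers, arrays and index strides):

/* Simplified model of the MLXSW_ITEM32() accessor pattern: each field is
 * described by a byte offset into the payload plus a bit shift and width,
 * and the generated helpers read-modify-write one big-endian 32-bit word.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static uint32_t item32_mask(unsigned int bits)
{
	return bits == 32 ? 0xFFFFFFFFu : (1u << bits) - 1;
}

static uint32_t item32_get(const char *payload, unsigned int offset,
			   unsigned int shift, unsigned int bits)
{
	uint32_t word;

	memcpy(&word, payload + offset, sizeof(word));
	return (ntohl(word) >> shift) & item32_mask(bits);
}

static void item32_set(char *payload, unsigned int offset,
		       unsigned int shift, unsigned int bits, uint32_t val)
{
	uint32_t mask = item32_mask(bits) << shift;
	uint32_t word;

	memcpy(&word, payload + offset, sizeof(word));
	word = ntohl(word);
	word = (word & ~mask) | ((val << shift) & mask);
	word = htonl(word);
	memcpy(payload + offset, &word, sizeof(word));
}

/* A declaration from the diff such as
 *   MLXSW_ITEM32(reg, ritr, ingress_counter_index, 0x38, 0, 24);
 * then corresponds to wrappers roughly equivalent to:
 *   mlxsw_reg_ritr_ingress_counter_index_set(p, v) -> item32_set(p, 0x38, 0, 24, v)
 *   mlxsw_reg_ritr_ingress_counter_index_get(p)    -> item32_get(p, 0x38, 0, 24)
 */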
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 6b6c30deee83..2fb8c6585ac7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -15,7 +15,8 @@ obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ spectrum_kvdl.o spectrum_acl_tcam.o \ - spectrum_acl.o spectrum_flower.o + spectrum_acl.o spectrum_flower.o \ + spectrum_cnt.o spectrum_dpipe.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o mlxsw_minimal-objs := minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index a1b48421648a..479511cf79bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -1043,13 +1043,6 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4); */ MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1); -/* cmd_mbox_sw2hw_cq_oi - * When set, overrun ignore is enabled. When set, updates of - * CQ consumer counter (poll for completion) or Request completion - * notifications (Arm CQ) DoorBells should not be rung on that CQ. - */ -MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1); - /* cmd_mbox_sw2hw_cq_st * Event delivery state machine * 0x0 - FIRED @@ -1132,11 +1125,6 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core, */ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1); -/* cmd_mbox_sw2hw_eq_oi - * When set, overrun ignore is enabled. - */ -MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1); - /* cmd_mbox_sw2hw_eq_st * Event delivery state machine * 0x0 - FIRED diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index a4c07841aaf6..affe84eb4bff 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -40,9 +40,6 @@ #include <linux/export.h> #include <linux/err.h> #include <linux/if_link.h> -#include <linux/debugfs.h> -#include <linux/seq_file.h> -#include <linux/u64_stats_sync.h> #include <linux/netdevice.h> #include <linux/completion.h> #include <linux/skbuff.h> @@ -74,23 +71,9 @@ static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock); static const char mlxsw_core_driver_name[] = "mlxsw_core"; -static struct dentry *mlxsw_core_dbg_root; - static struct workqueue_struct *mlxsw_wq; static struct workqueue_struct *mlxsw_owq; -struct mlxsw_core_pcpu_stats { - u64 trap_rx_packets[MLXSW_TRAP_ID_MAX]; - u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX]; - u64 port_rx_packets[MLXSW_PORT_MAX_PORTS]; - u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS]; - struct u64_stats_sync syncp; - u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX]; - u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS]; - u32 trap_rx_invalid; - u32 port_rx_invalid; -}; - struct mlxsw_core_port { struct devlink_port devlink_port; void *port_driver_priv; @@ -121,23 +104,48 @@ struct mlxsw_core { spinlock_t trans_list_lock; /* protects trans_list writes */ bool use_emad; } emad; - struct mlxsw_core_pcpu_stats __percpu *pcpu_stats; - struct dentry *dbg_dir; - struct { - struct debugfs_blob_wrapper vsd_blob; - struct debugfs_blob_wrapper psid_blob; - } dbg; struct { u8 *mapping; /* lag_id+port_index to local_port mapping */ } lag; struct mlxsw_res res; struct mlxsw_hwmon *hwmon; struct mlxsw_thermal *thermal; - struct mlxsw_core_port ports[MLXSW_PORT_MAX_PORTS]; + struct mlxsw_core_port *ports; + unsigned int 
max_ports; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ }; +#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40 + +static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core) +{ + /* Switch ports are numbered from 1 to queried value */ + if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT)) + mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core, + MAX_SYSTEM_PORT) + 1; + else + mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1; + + mlxsw_core->ports = kcalloc(mlxsw_core->max_ports, + sizeof(struct mlxsw_core_port), GFP_KERNEL); + if (!mlxsw_core->ports) + return -ENOMEM; + + return 0; +} + +static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core) +{ + kfree(mlxsw_core->ports); +} + +unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core) +{ + return mlxsw_core->max_ports; +} +EXPORT_SYMBOL(mlxsw_core_max_ports); + void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) { return mlxsw_core->driver_priv; @@ -703,91 +711,6 @@ err_out: * Core functions *****************/ -static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_core *mlxsw_core = file->private; - struct mlxsw_core_pcpu_stats *p; - u64 rx_packets, rx_bytes; - u64 tmp_rx_packets, tmp_rx_bytes; - u32 rx_dropped, rx_invalid; - unsigned int start; - int i; - int j; - static const char hdr[] = - " NUM RX_PACKETS RX_BYTES RX_DROPPED\n"; - - seq_printf(file, hdr); - for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) { - rx_packets = 0; - rx_bytes = 0; - rx_dropped = 0; - for_each_possible_cpu(j) { - p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); - do { - start = u64_stats_fetch_begin(&p->syncp); - tmp_rx_packets = p->trap_rx_packets[i]; - tmp_rx_bytes = p->trap_rx_bytes[i]; - } while (u64_stats_fetch_retry(&p->syncp, start)); - - rx_packets += tmp_rx_packets; - rx_bytes += tmp_rx_bytes; - rx_dropped += p->trap_rx_dropped[i]; - } - seq_printf(file, "trap %3d %12llu %12llu %10u\n", - i, rx_packets, rx_bytes, rx_dropped); - } - rx_invalid = 0; - for_each_possible_cpu(j) { - p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); - rx_invalid += p->trap_rx_invalid; - } - seq_printf(file, "trap INV %10u\n", - rx_invalid); - - for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) { - rx_packets = 0; - rx_bytes = 0; - rx_dropped = 0; - for_each_possible_cpu(j) { - p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); - do { - start = u64_stats_fetch_begin(&p->syncp); - tmp_rx_packets = p->port_rx_packets[i]; - tmp_rx_bytes = p->port_rx_bytes[i]; - } while (u64_stats_fetch_retry(&p->syncp, start)); - - rx_packets += tmp_rx_packets; - rx_bytes += tmp_rx_bytes; - rx_dropped += p->port_rx_dropped[i]; - } - seq_printf(file, "port %3d %12llu %12llu %10u\n", - i, rx_packets, rx_bytes, rx_dropped); - } - rx_invalid = 0; - for_each_possible_cpu(j) { - p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); - rx_invalid += p->port_rx_invalid; - } - seq_printf(file, "port INV %10u\n", - rx_invalid); - return 0; -} - -static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f) -{ - struct mlxsw_core *mlxsw_core = inode->i_private; - - return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core); -} - -static const struct file_operations mlxsw_core_rx_stats_dbg_ops = { - .owner = THIS_MODULE, - .open = mlxsw_core_rx_stats_dbg_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek -}; - int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) { spin_lock(&mlxsw_core_driver_list_lock); @@ -835,39 +758,13 @@ static void mlxsw_core_driver_put(const char *kind) 
spin_unlock(&mlxsw_core_driver_list_lock); } -static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core) -{ - const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; - - mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name, - mlxsw_core_dbg_root); - if (!mlxsw_core->dbg_dir) - return -ENOMEM; - debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir, - mlxsw_core, &mlxsw_core_rx_stats_dbg_ops); - mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd; - mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd); - debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir, - &mlxsw_core->dbg.vsd_blob); - mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid; - mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid); - debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir, - &mlxsw_core->dbg.psid_blob); - return 0; -} - -static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core) -{ - debugfs_remove_recursive(mlxsw_core->dbg_dir); -} - static int mlxsw_devlink_port_split(struct devlink *devlink, unsigned int port_index, unsigned int count) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= MLXSW_PORT_MAX_PORTS) + if (port_index >= mlxsw_core->max_ports) return -EINVAL; if (!mlxsw_core->driver->port_split) return -EOPNOTSUPP; @@ -879,7 +776,7 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink, { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= MLXSW_PORT_MAX_PORTS) + if (port_index >= mlxsw_core->max_ports) return -EINVAL; if (!mlxsw_core->driver->port_unsplit) return -EOPNOTSUPP; @@ -1101,18 +998,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, mlxsw_core->bus_priv = bus_priv; mlxsw_core->bus_info = mlxsw_bus_info; - mlxsw_core->pcpu_stats = - netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats); - if (!mlxsw_core->pcpu_stats) { - err = -ENOMEM; - goto err_alloc_stats; - } - err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, &mlxsw_core->res); if (err) goto err_bus_init; + err = mlxsw_ports_init(mlxsw_core); + if (err) + goto err_ports_init; + if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) { alloc_size = sizeof(u8) * @@ -1148,15 +1042,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_driver_init; } - err = mlxsw_core_debugfs_init(mlxsw_core); - if (err) - goto err_debugfs_init; - return 0; -err_debugfs_init: - if (mlxsw_core->driver->fini) - mlxsw_core->driver->fini(mlxsw_core); err_driver_init: mlxsw_thermal_fini(mlxsw_core->thermal); err_thermal_init: @@ -1167,10 +1054,10 @@ err_devlink_register: err_emad_init: kfree(mlxsw_core->lag.mapping); err_alloc_lag_mapping: + mlxsw_ports_fini(mlxsw_core); +err_ports_init: mlxsw_bus->fini(bus_priv); err_bus_init: - free_percpu(mlxsw_core->pcpu_stats); -err_alloc_stats: devlink_free(devlink); err_devlink_alloc: mlxsw_core_driver_put(device_kind); @@ -1183,15 +1070,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) const char *device_kind = mlxsw_core->bus_info->device_kind; struct devlink *devlink = priv_to_devlink(mlxsw_core); - mlxsw_core_debugfs_fini(mlxsw_core); if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); mlxsw_thermal_fini(mlxsw_core->thermal); devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); kfree(mlxsw_core->lag.mapping); + mlxsw_ports_fini(mlxsw_core); mlxsw_core->bus->fini(mlxsw_core->bus_priv); - free_percpu(mlxsw_core->pcpu_stats); 
devlink_free(devlink); mlxsw_core_driver_put(device_kind); } @@ -1639,7 +1525,6 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, { struct mlxsw_rx_listener_item *rxl_item; const struct mlxsw_rx_listener *rxl; - struct mlxsw_core_pcpu_stats *pcpu_stats; u8 local_port; bool found = false; @@ -1661,7 +1546,7 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, __func__, local_port, rx_info->trap_id); if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) || - (local_port >= MLXSW_PORT_MAX_PORTS)) + (local_port >= mlxsw_core->max_ports)) goto drop; rcu_read_lock(); @@ -1678,26 +1563,10 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, if (!found) goto drop; - pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats); - u64_stats_update_begin(&pcpu_stats->syncp); - pcpu_stats->port_rx_packets[local_port]++; - pcpu_stats->port_rx_bytes[local_port] += skb->len; - pcpu_stats->trap_rx_packets[rx_info->trap_id]++; - pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len; - u64_stats_update_end(&pcpu_stats->syncp); - rxl->func(skb, local_port, rxl_item->priv); return; drop: - if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX) - this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid); - else - this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]); - if (local_port >= MLXSW_PORT_MAX_PORTS) - this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid); - else - this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]); dev_kfree_skb(skb); } EXPORT_SYMBOL(mlxsw_core_skb_receive); @@ -1926,15 +1795,8 @@ static int __init mlxsw_core_module_init(void) err = -ENOMEM; goto err_alloc_ordered_workqueue; } - mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); - if (!mlxsw_core_dbg_root) { - err = -ENOMEM; - goto err_debugfs_create_dir; - } return 0; -err_debugfs_create_dir: - destroy_workqueue(mlxsw_owq); err_alloc_ordered_workqueue: destroy_workqueue(mlxsw_wq); return err; @@ -1942,7 +1804,6 @@ err_alloc_ordered_workqueue: static void __exit mlxsw_core_module_exit(void) { - debugfs_remove_recursive(mlxsw_core_dbg_root); destroy_workqueue(mlxsw_owq); destroy_workqueue(mlxsw_wq); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index cf38cf9027f8..7fb35395adf5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -57,6 +57,8 @@ struct mlxsw_driver; struct mlxsw_bus; struct mlxsw_bus_info; +unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core); + void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 5f337715a4da..46304ffb9449 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -567,6 +567,89 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, return oneact + MLXSW_AFA_PAYLOAD_OFFSET; } +/* VLAN Action + * ----------- + * VLAN action is used for manipulating VLANs. It can be used to implement QinQ, + * VLAN translation, change of PCP bits of the VLAN tag, push, pop and swap VLANs + * and more.
+ */ + +#define MLXSW_AFA_VLAN_CODE 0x02 +#define MLXSW_AFA_VLAN_SIZE 1 + +enum mlxsw_afa_vlan_vlan_tag_cmd { + MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, + MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG, + MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG, +}; + +enum mlxsw_afa_vlan_cmd { + MLXSW_AFA_VLAN_CMD_NOP, + MLXSW_AFA_VLAN_CMD_SET_OUTER, + MLXSW_AFA_VLAN_CMD_SET_INNER, + MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER, + MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER, + MLXSW_AFA_VLAN_CMD_SWAP, +}; + +/* afa_vlan_vlan_tag_cmd + * Tag command: push, pop, nop VLAN header. + */ +MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3); + +/* afa_vlan_vid_cmd */ +MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3); + +/* afa_vlan_vid */ +MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12); + +/* afa_vlan_ethertype_cmd */ +MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3); + +/* afa_vlan_ethertype + * Index to EtherTypes in Switch VLAN EtherType Register (SVER). + */ +MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3); + +/* afa_vlan_pcp_cmd */ +MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3); + +/* afa_vlan_pcp */ +MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3); + +static inline void +mlxsw_afa_vlan_pack(char *payload, + enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd, + enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid, + enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp, + enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype) +{ + mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd); + mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd); + mlxsw_afa_vlan_vid_set(payload, vid); + mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd); + mlxsw_afa_vlan_pcp_set(payload, pcp); + mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd); + mlxsw_afa_vlan_ethertype_set(payload, ethertype); +} + +int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, + u16 vid, u8 pcp, u8 et) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_VLAN_CODE, + MLXSW_AFA_VLAN_SIZE); + + if (!act) + return -ENOBUFS; + mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, + MLXSW_AFA_VLAN_CMD_SET_OUTER, vid, + MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp, + MLXSW_AFA_VLAN_CMD_SET_OUTER, et); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify); + /* Trap / Discard Action * --------------------- * The Trap / Discard action enables trapping / mirroring packets to the CPU @@ -677,3 +760,98 @@ err_append_action: return err; } EXPORT_SYMBOL(mlxsw_afa_block_append_fwd); + +/* Policing and Counting Action + * ---------------------------- + * Policing and Counting action is used for binding policer and counter + * to ACL rules. + */ + +#define MLXSW_AFA_POLCNT_CODE 0x08 +#define MLXSW_AFA_POLCNT_SIZE 1 + +enum mlxsw_afa_polcnt_counter_set_type { + /* No count */ + MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00, + /* Count packets and bytes */ + MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03, + /* Count only packets */ + MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05, +}; + +/* afa_polcnt_counter_set_type + * Counter set type for flow counters. + */ +MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8); + +/* afa_polcnt_counter_index + * Counter index for flow counters. 
+ */ +MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24); + +static inline void +mlxsw_afa_polcnt_pack(char *payload, + enum mlxsw_afa_polcnt_counter_set_type set_type, + u32 counter_index) +{ + mlxsw_afa_polcnt_counter_set_type_set(payload, set_type); + mlxsw_afa_polcnt_counter_index_set(payload, counter_index); +} + +int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, + u32 counter_index) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_POLCNT_CODE, + MLXSW_AFA_POLCNT_SIZE); + if (!act) + return -ENOBUFS; + mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES, + counter_index); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_counter); + +/* Virtual Router and Forwarding Domain Action + * ------------------------------------------- + * Virtual Switch action is used to manipulate the Virtual Router (VR), + * MPLS label space and the Forwarding Identifier (FID). + */ + +#define MLXSW_AFA_VIRFWD_CODE 0x0E +#define MLXSW_AFA_VIRFWD_SIZE 1 + +enum mlxsw_afa_virfwd_fid_cmd { + /* Do nothing */ + MLXSW_AFA_VIRFWD_FID_CMD_NOOP, + /* Set the Forwarding Identifier (FID) to fid */ + MLXSW_AFA_VIRFWD_FID_CMD_SET, +}; + +/* afa_virfwd_fid_cmd */ +MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3); + +/* afa_virfwd_fid + * The FID value. + */ +MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16); + +static inline void mlxsw_afa_virfwd_pack(char *payload, + enum mlxsw_afa_virfwd_fid_cmd fid_cmd, + u16 fid) +{ + mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd); + mlxsw_afa_virfwd_fid_set(payload, fid); +} + +int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_VIRFWD_CODE, + MLXSW_AFA_VIRFWD_SIZE); + if (!act) + return -ENOBUFS; + mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 43f78dcfe394..bd8b91d02880 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -62,5 +62,10 @@ void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, u8 local_port, bool in_port); +int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, + u16 vid, u8 pcp, u8 et); +int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, + u32 counter_index); +int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index e4fcba7c2af2..c75e9141e3ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -54,6 +54,8 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_DST_IP6_LO, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, + MLXSW_AFK_ELEMENT_VID, + MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_MAX, }; @@ -88,7 +90,7 @@ struct mlxsw_afk_element_info { MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \ _element, _offset, 0, _size) -/* For the purpose of the driver, define a internal storage scratchpad +/* For the purpose of the driver, define an internal storage scratchpad * that will be used to store key/mask
values. For each defined element type * define an internal storage geometry. */ @@ -98,6 +100,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6), MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16), MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), + MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index a223c85dfde0..23f7d828cf67 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -44,8 +44,6 @@ #include <linux/skbuff.h> #include <linux/if_vlan.h> #include <linux/log2.h> -#include <linux/debugfs.h> -#include <linux/seq_file.h> #include <linux/string.h> #include "pci_hw.h" @@ -57,8 +55,6 @@ static const char mlxsw_pci_driver_name[] = "mlxsw_pci"; -static struct dentry *mlxsw_pci_dbg_root; - #define mlxsw_pci_write32(mlxsw_pci, reg, val) \ iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg)) #define mlxsw_pci_read32(mlxsw_pci, reg) \ @@ -71,21 +67,6 @@ enum mlxsw_pci_queue_type { MLXSW_PCI_QUEUE_TYPE_EQ, }; -static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type) -{ - switch (q_type) { - case MLXSW_PCI_QUEUE_TYPE_SDQ: - return "sdq"; - case MLXSW_PCI_QUEUE_TYPE_RDQ: - return "rdq"; - case MLXSW_PCI_QUEUE_TYPE_CQ: - return "cq"; - case MLXSW_PCI_QUEUE_TYPE_EQ: - return "eq"; - } - BUG(); -} - #define MLXSW_PCI_QUEUE_TYPE_COUNT 4 static const u16 mlxsw_pci_doorbell_type_offset[] = { @@ -155,7 +136,6 @@ struct mlxsw_pci { u8 __iomem *hw_addr; struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT]; u32 doorbell_offset; - struct msix_entry msix_entry; struct mlxsw_core *core; struct { struct mlxsw_pci_mem_item *items; @@ -174,7 +154,6 @@ struct mlxsw_pci { } comp; } cmd; struct mlxsw_bus_info bus_info; - struct dentry *dbg_dir; }; static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) @@ -261,21 +240,11 @@ static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci) return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ); } -static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci) -{ - return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ); -} - static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci) { return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ); } -static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci) -{ - return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ); -} - static struct mlxsw_pci_queue * __mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci, enum mlxsw_pci_queue_type q_type, u8 q_num) @@ -390,26 +359,6 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num); } -static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); - struct mlxsw_pci_queue *q; - int i; - static const char hdr[] = - "NUM PROD_COUNT CONS_COUNT COUNT\n"; - - seq_printf(file, hdr); - for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) { - q = mlxsw_pci_sdq_get(mlxsw_pci, i); - spin_lock_bh(&q->lock); - seq_printf(file, "%3d %10d %10d %5d\n", - i, q->producer_counter, q->consumer_counter, - q->count); - spin_unlock_bh(&q->lock); - } - return 0; -} - static int 
mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, int index, char *frag_data, size_t frag_len, int direction) @@ -544,26 +493,6 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci, } } -static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); - struct mlxsw_pci_queue *q; - int i; - static const char hdr[] = - "NUM PROD_COUNT CONS_COUNT COUNT\n"; - - seq_printf(file, hdr); - for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) { - q = mlxsw_pci_rdq_get(mlxsw_pci, i); - spin_lock_bh(&q->lock); - seq_printf(file, "%3d %10d %10d %5d\n", - i, q->producer_counter, q->consumer_counter, - q->count); - spin_unlock_bh(&q->lock); - } - return 0; -} - static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { @@ -580,7 +509,6 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); - mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0); mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { @@ -602,27 +530,6 @@ static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); } -static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); - - struct mlxsw_pci_queue *q; - int i; - static const char hdr[] = - "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n"; - - seq_printf(file, hdr); - for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) { - q = mlxsw_pci_cq_get(mlxsw_pci, i); - spin_lock_bh(&q->lock); - seq_printf(file, "%3d %10d %10d %10d %5d\n", - i, q->consumer_counter, q->u.cq.comp_sdq_count, - q->u.cq.comp_rdq_count, q->count); - spin_unlock_bh(&q->lock); - } - return 0; -} - static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, @@ -755,7 +662,6 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, } mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ - mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0); mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { @@ -777,27 +683,6 @@ static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); } -static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data) -{ - struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private); - struct mlxsw_pci_queue *q; - int i; - static const char hdr[] = - "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n"; - - seq_printf(file, hdr); - for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) { - q = mlxsw_pci_eq_get(mlxsw_pci, i); - spin_lock_bh(&q->lock); - seq_printf(file, "%3d %10d %10d %10d %10d %5d\n", - i, q->consumer_counter, q->u.eq.ev_cmd_count, - q->u.eq.ev_comp_count, q->u.eq.ev_other_count, - q->count); - spin_unlock_bh(&q->lock); - } - return 0; -} - static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe) { mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe); @@ -868,7 +753,6 @@ struct mlxsw_pci_queue_ops { void (*fini)(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q); void (*tasklet)(unsigned long data); - int (*dbg_read)(struct seq_file *s, void *data); u16 elem_count; u8 
elem_size; }; @@ -877,7 +761,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = { .type = MLXSW_PCI_QUEUE_TYPE_SDQ, .init = mlxsw_pci_sdq_init, .fini = mlxsw_pci_sdq_fini, - .dbg_read = mlxsw_pci_sdq_dbg_read, .elem_count = MLXSW_PCI_WQE_COUNT, .elem_size = MLXSW_PCI_WQE_SIZE, }; @@ -886,7 +769,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = { .type = MLXSW_PCI_QUEUE_TYPE_RDQ, .init = mlxsw_pci_rdq_init, .fini = mlxsw_pci_rdq_fini, - .dbg_read = mlxsw_pci_rdq_dbg_read, .elem_count = MLXSW_PCI_WQE_COUNT, .elem_size = MLXSW_PCI_WQE_SIZE }; @@ -896,7 +778,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { .init = mlxsw_pci_cq_init, .fini = mlxsw_pci_cq_fini, .tasklet = mlxsw_pci_cq_tasklet, - .dbg_read = mlxsw_pci_cq_dbg_read, .elem_count = MLXSW_PCI_CQE_COUNT, .elem_size = MLXSW_PCI_CQE_SIZE }; @@ -906,7 +787,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { .init = mlxsw_pci_eq_init, .fini = mlxsw_pci_eq_fini, .tasklet = mlxsw_pci_eq_tasklet, - .dbg_read = mlxsw_pci_eq_dbg_read, .elem_count = MLXSW_PCI_EQE_COUNT, .elem_size = MLXSW_PCI_EQE_SIZE }; @@ -984,9 +864,7 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox, const struct mlxsw_pci_queue_ops *q_ops, u8 num_qs) { - struct pci_dev *pdev = mlxsw_pci->pdev; struct mlxsw_pci_queue_type_group *queue_group; - char tmp[16]; int i; int err; @@ -1003,10 +881,6 @@ static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox, } queue_group->count = num_qs; - sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type)); - debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir, - q_ops->dbg_read); - return 0; err_queue_init: @@ -1534,7 +1408,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_aqs_init; - err = request_irq(mlxsw_pci->msix_entry.vector, + err = request_irq(pci_irq_vector(pdev, 0), mlxsw_pci_eq_irq_handler, 0, mlxsw_pci->bus_info.device_kind, mlxsw_pci); if (err) { @@ -1567,7 +1441,7 @@ static void mlxsw_pci_fini(void *bus_priv) { struct mlxsw_pci *mlxsw_pci = bus_priv; - free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci); + free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci); mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); @@ -1842,8 +1716,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_sw_reset; } - err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1); - if (err) { + err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (err < 0) { dev_err(&pdev->dev, "MSI-X init failed\n"); goto err_msix_init; } @@ -1852,14 +1726,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; - mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name, - mlxsw_pci_dbg_root); - if (!mlxsw_pci->dbg_dir) { - dev_err(&pdev->dev, "Failed to create debugfs dir\n"); - err = -ENOMEM; - goto err_dbg_create_dir; - } - err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus, mlxsw_pci); if (err) { @@ -1870,9 +1736,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: - debugfs_remove_recursive(mlxsw_pci->dbg_dir); -err_dbg_create_dir: - pci_disable_msix(mlxsw_pci->pdev); + pci_free_irq_vectors(mlxsw_pci->pdev); err_msix_init: err_sw_reset: 
iounmap(mlxsw_pci->hw_addr); @@ -1892,8 +1756,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core); - debugfs_remove_recursive(mlxsw_pci->dbg_dir); - pci_disable_msix(mlxsw_pci->pdev); + pci_free_irq_vectors(mlxsw_pci->pdev); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); @@ -1916,15 +1779,11 @@ EXPORT_SYMBOL(mlxsw_pci_driver_unregister); static int __init mlxsw_pci_module_init(void) { - mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL); - if (!mlxsw_pci_dbg_root) - return -ENOMEM; return 0; } static void __exit mlxsw_pci_module_exit(void) { - debugfs_remove_recursive(mlxsw_pci_dbg_root); } module_init(mlxsw_pci_module_init); diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index 3d42146473b3..c580abba8d34 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -49,20 +49,12 @@ #define MLXSW_PORT_MID 0xd000 -#define MLXSW_PORT_MAX_PHY_PORTS 0x40 -#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1) - #define MLXSW_PORT_MAX_IB_PHY_PORTS 36 #define MLXSW_PORT_MAX_IB_PORTS (MLXSW_PORT_MAX_IB_PHY_PORTS + 1) -#define MLXSW_PORT_DEVID_BITS_OFFSET 10 -#define MLXSW_PORT_PHY_BITS_OFFSET 4 -#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) - #define MLXSW_PORT_CPU_PORT 0x0 -#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2) -#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) +#define MLXSW_PORT_DONT_CARE 0xFF #define MLXSW_PORT_MODULE_MAX_WIDTH 4 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index d9616daf8a70..83b277c8090e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4125,6 +4125,60 @@ MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16); */ MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12); +/* Shared between ingress/egress */ +enum mlxsw_reg_ritr_counter_set_type { + /* No Count. */ + MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT = 0x0, + /* Basic. Used for router interfaces, counting the following: + * - Error and Discard counters. + * - Unicast, Multicast and Broadcast counters. Sharing the + * same set of counters for the different types of traffic + * (IPv4, IPv6 and mpls). + */ + MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC = 0x9, +}; + +/* reg_ritr_ingress_counter_index + * Counter Index for flow counter. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ingress_counter_index, 0x38, 0, 24); + +/* reg_ritr_ingress_counter_set_type + * Ingress Counter Set Type for router interface counter. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ingress_counter_set_type, 0x38, 24, 8); + +/* reg_ritr_egress_counter_index + * Counter Index for flow counter. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, egress_counter_index, 0x3C, 0, 24); + +/* reg_ritr_egress_counter_set_type + * Egress Counter Set Type for router interface counter.
+ * Access: RW + */ +MLXSW_ITEM32(reg, ritr, egress_counter_set_type, 0x3C, 24, 8); + +static inline void mlxsw_reg_ritr_counter_pack(char *payload, u32 index, + bool enable, bool egress) +{ + enum mlxsw_reg_ritr_counter_set_type set_type; + + if (enable) + set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_BASIC; + else + set_type = MLXSW_REG_RITR_COUNTER_SET_TYPE_NO_COUNT; + mlxsw_reg_ritr_egress_counter_set_type_set(payload, set_type); + + if (egress) + mlxsw_reg_ritr_egress_counter_index_set(payload, index); + else + mlxsw_reg_ritr_ingress_counter_index_set(payload, index); +} + static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif) { MLXSW_REG_ZERO(ritr, payload); @@ -4141,7 +4195,8 @@ static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag, static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, enum mlxsw_reg_ritr_if_type type, - u16 rif, u16 mtu, const char *mac) + u16 rif, u16 vr_id, u16 mtu, + const char *mac) { bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL; @@ -4153,6 +4208,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); mlxsw_reg_ritr_lb_en_set(payload, 1); + mlxsw_reg_ritr_virtual_router_set(payload, vr_id); mlxsw_reg_ritr_mtu_set(payload, mtu); mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); } @@ -4285,6 +4341,129 @@ static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload, mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac); } +/* RICNT - Router Interface Counter Register + * ----------------------------------------- + * The RICNT register retrieves per-port performance counters. + */ +#define MLXSW_REG_RICNT_ID 0x800B +#define MLXSW_REG_RICNT_LEN 0x100 + +MLXSW_REG_DEFINE(ricnt, MLXSW_REG_RICNT_ID, MLXSW_REG_RICNT_LEN); + +/* reg_ricnt_counter_index + * Counter index + * Access: RW + */ +MLXSW_ITEM32(reg, ricnt, counter_index, 0x04, 0, 24); + +enum mlxsw_reg_ricnt_counter_set_type { + /* No Count. */ + MLXSW_REG_RICNT_COUNTER_SET_TYPE_NO_COUNT = 0x00, + /* Basic. Used for router interfaces, counting the following: + * - Error and Discard counters. + * - Unicast, Multicast and Broadcast counters. Sharing the + * same set of counters for the different types of traffic + * (IPv4, IPv6 and mpls). + */ + MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC = 0x09, +}; + +/* reg_ricnt_counter_set_type + * Counter Set Type for router interface counter + * Access: RW + */ +MLXSW_ITEM32(reg, ricnt, counter_set_type, 0x04, 24, 8); + +enum mlxsw_reg_ricnt_opcode { + /* Nop. Supported only for read access. */ + MLXSW_REG_RICNT_OPCODE_NOP = 0x00, + /* Clear. Setting the clr bit will reset the counter value for + * all counters of the specified Router Interface. + */ + MLXSW_REG_RICNT_OPCODE_CLEAR = 0x08, +}; + +/* reg_ricnt_opcode + * Opcode + * Access: RW + */ +MLXSW_ITEM32(reg, ricnt, op, 0x00, 28, 4); + +/* reg_ricnt_good_unicast_packets + * good unicast packets. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_unicast_packets, 0x08, 0, 64); + +/* reg_ricnt_good_multicast_packets + * good multicast packets. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_multicast_packets, 0x10, 0, 64); + +/* reg_ricnt_good_broadcast_packets + * good broadcast packets. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_broadcast_packets, 0x18, 0, 64); + +/* reg_ricnt_good_unicast_bytes + * A count of L3 data and padding octets not including L2 headers + * for good unicast frames.
+ * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_unicast_bytes, 0x20, 0, 64); + +/* reg_ricnt_good_multicast_bytes + * A count of L3 data and padding octets not including L2 headers + * for good multicast frames. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_multicast_bytes, 0x28, 0, 64); + +/* reg_ricnt_good_broadcast_bytes + * A count of L3 data and padding octets not including L2 headers + * for good broadcast frames. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, good_broadcast_bytes, 0x30, 0, 64); + +/* reg_ricnt_error_packets + * A count of errored frames that do not pass the router checks. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, error_packets, 0x38, 0, 64); + +/* reg_ricnt_discard_packets + * A count of non-errored frames that do not pass the router checks. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, discard_packets, 0x40, 0, 64); + +/* reg_ricnt_error_bytes + * A count of L3 data and padding octets not including L2 headers + * for errored frames. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, error_bytes, 0x48, 0, 64); + +/* reg_ricnt_discard_bytes + * A count of L3 data and padding octets not including L2 headers + * for non-errored frames that do not pass the router checks. + * Access: RW + */ +MLXSW_ITEM64(reg, ricnt, discard_bytes, 0x50, 0, 64); + +static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index, + enum mlxsw_reg_ricnt_opcode op) +{ + MLXSW_REG_ZERO(ricnt, payload); + mlxsw_reg_ricnt_op_set(payload, op); + mlxsw_reg_ricnt_counter_index_set(payload, index); + mlxsw_reg_ricnt_counter_set_type_set(payload, + MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC); +} + /* RALTA - Router Algorithmic LPM Tree Allocation Register * ------------------------------------------------------- * RALTA is used to allocate the LPM trees of the SHSPM method. @@ -5504,6 +5683,70 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e, mlxsw_reg_mpsc_rate_set(payload, rate); } +/* MGPC - Monitoring General Purpose Counter Set Register + * The MGPC register retrieves and sets the General Purpose Counter Set. + */ +#define MLXSW_REG_MGPC_ID 0x9081 +#define MLXSW_REG_MGPC_LEN 0x18 + +MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN); + +enum mlxsw_reg_mgpc_counter_set_type { + /* No count */ + MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUT = 0x00, + /* Count packets and bytes */ + MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03, + /* Count only packets */ + MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05, +}; + +/* reg_mgpc_counter_set_type + * Counter set type. + * Access: OP + */ +MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8); + +/* reg_mgpc_counter_index + * Counter index. + * Access: Index + */ +MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24); + +enum mlxsw_reg_mgpc_opcode { + /* Nop */ + MLXSW_REG_MGPC_OPCODE_NOP = 0x00, + /* Clear counters */ + MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08, +}; + +/* reg_mgpc_opcode + * Opcode. + * Access: OP + */ +MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4); + +/* reg_mgpc_byte_counter + * Byte counter value. + * Access: RW + */ +MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64); + +/* reg_mgpc_packet_counter + * Packet counter value.
+ * Access: RW + */ +MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64); + +static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, + enum mlxsw_reg_mgpc_opcode opcode, + enum mlxsw_reg_mgpc_counter_set_type set_type) +{ + MLXSW_REG_ZERO(mgpc, payload); + mlxsw_reg_mgpc_counter_index_set(payload, counter_index); + mlxsw_reg_mgpc_counter_set_type_set(payload, set_type); + mlxsw_reg_mgpc_opcode_set(payload, opcode); +} + /* SBPR - Shared Buffer Pools Register * ----------------------------------- * The SBPR configures and retrieves the shared buffer pools and configuration. @@ -5960,6 +6203,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rgcr), MLXSW_REG(ritr), MLXSW_REG(ratr), + MLXSW_REG(ricnt), MLXSW_REG(ralta), MLXSW_REG(ralst), MLXSW_REG(raltb), @@ -5977,6 +6221,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mpar), MLXSW_REG(mlcr), MLXSW_REG(mpsc), + MLXSW_REG(mgpc), MLXSW_REG(sbpr), MLXSW_REG(sbcm), MLXSW_REG(sbpm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index bce8c2e00630..9556d934714b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -43,11 +43,15 @@ enum mlxsw_res_id { MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, MLXSW_RES_ID_MAX_TRAP_GROUPS, + MLXSW_RES_ID_COUNTER_POOL_SIZE, MLXSW_RES_ID_MAX_SPAN, + MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES, + MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC, MLXSW_RES_ID_MAX_SYSTEM_PORT, MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, MLXSW_RES_ID_MAX_BUFFER_SIZE, + MLXSW_RES_ID_CELL_SIZE, MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, MLXSW_RES_ID_ACL_MAX_TCAM_RULES, MLXSW_RES_ID_ACL_MAX_REGIONS, @@ -59,6 +63,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, + MLXSW_RES_ID_MAX_LPM_TREES, /* Internal resources. * Determined by the SW, not queried from the HW. 
@@ -75,11 +80,15 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, + [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410, [MLXSW_RES_ID_MAX_SPAN] = 0x2420, + [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443, + [MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449, [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502, [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ + [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */ [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, @@ -91,6 +100,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, + [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, }; struct mlxsw_res { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 16484f24b7db..88357cee7679 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -66,6 +66,8 @@ #include "port.h" #include "trap.h" #include "txheader.h" +#include "spectrum_cnt.h" +#include "spectrum_dpipe.h" static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp_driver_version[] = "1.0"; @@ -138,6 +140,60 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); */ MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); +int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index, u64 *packets, + u64 *bytes) +{ + char mgpc_pl[MLXSW_REG_MGPC_LEN]; + int err; + + mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP, + MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); + if (err) + return err; + *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl); + *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl); + return 0; +} + +static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index) +{ + char mgpc_pl[MLXSW_REG_MGPC_LEN]; + + mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR, + MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl); +} + +int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, + unsigned int *p_counter_index) +{ + int err; + + err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, + p_counter_index); + if (err) + return err; + err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index); + if (err) + goto err_counter_clear; + return 0; + +err_counter_clear: + mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, + *p_counter_index); + return err; +} + +void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index) +{ + mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW, + counter_index); +} + static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -304,9 +360,10 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) return false; } -static int mlxsw_sp_span_mtu_to_buffsize(int mtu) +static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp, + int mtu) { - return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1; + return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1; } static int mlxsw_sp_span_port_mtu_update(struct 
mlxsw_sp_port *port, u16 mtu) @@ -319,8 +376,9 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) * updated according to the mtu value */ if (mlxsw_sp_span_is_egress_mirror(port)) { - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, - mlxsw_sp_span_mtu_to_buffsize(mtu)); + u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu); + + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); if (err) { netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); @@ -357,8 +415,10 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, /* if it is an egress SPAN, bind a shared buffer to it */ if (type == MLXSW_SP_SPAN_EGRESS) { - mlxsw_reg_sbib_pack(sbib_pl, port->local_port, - mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu)); + u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, + port->dev->mtu); + + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); if (err) { netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); @@ -745,19 +805,47 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) return 0; } -static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu, - bool pause_en, bool pfc_en, u16 delay) +static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, + int mtu) { - u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); + return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu); +} - delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) : - MLXSW_SP_PAUSE_DELAY; +#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ + +static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, + u16 delay) +{ + delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay, + BITS_PER_BYTE)); + return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp, + mtu); +} + +/* Maximum delay buffer needed in case of PAUSE frames, in bytes. + * Assumes 100m cable and maximum MTU. 
+ */ +#define MLXSW_SP_PAUSE_DELAY 58752 - if (pause_en || pfc_en) - mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index, - pg_size + delay, pg_size); +static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu, + u16 delay, bool pfc, bool pause) +{ + if (pfc) + return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay); + else if (pause) + return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY); + else + return 0; +} + +static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, + bool lossy) +{ + if (lossy) + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size); else - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size); + mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size, + thres); } int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, @@ -778,6 +866,8 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { bool configure = false; bool pfc = false; + bool lossy; + u16 thres; for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { if (prio_tc[j] == i) { @@ -789,7 +879,12 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, if (!configure) continue; - mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay); + + lossy = !(pfc || pause_en); + thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); + delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, + pause_en); + mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); @@ -966,8 +1061,9 @@ mlxsw_sp_port_get_stats64(struct net_device *dev, memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats)); } -int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, - u16 vid_end, bool is_member, bool untagged) +static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool is_member, bool untagged) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char *spvm_pl; @@ -984,6 +1080,26 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, return err; } +int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, + u16 vid_end, bool is_member, bool untagged) +{ + u16 vid, vid_e; + int err; + + for (vid = vid_begin; vid <= vid_end; + vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { + vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), + vid_end); + + err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, + is_member, untagged); + if (err) + return err; + } + + return 0; +} + static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) { enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; @@ -1368,7 +1484,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, tc->cls_mall); return 0; default: - return -EINVAL; + return -EOPNOTSUPP; } case TC_SETUP_CLSFLOWER: switch (tc->cls_flower->command) { @@ -1379,6 +1495,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, tc->cls_flower); return 0; + case TC_CLSFLOWER_STATS: + return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, + tc->cls_flower); default: return -EOPNOTSUPP; } @@ -1492,6 +1611,7 @@ err_port_pause_configure: struct mlxsw_sp_port_hw_stats { char str[ETH_GSTRING_LEN]; u64 (*getter)(const char *payload); + bool cells_bytes; }; static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { @@ -1612,17 +1732,11 @@ static 
struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) -static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl) -{ - u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); - - return MLXSW_SP_CELLS_TO_BYTES(transmit_queue); -} - static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { { .str = "tc_transmit_queue_tc", - .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get, + .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, + .cells_bytes = true, }, { .str = "tc_no_buffer_discard_uc_tc", @@ -1734,6 +1848,8 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev, enum mlxsw_reg_ppcnt_grp grp, int prio, u64 *data, int data_index) { + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port_hw_stats *hw_stats; char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; int i, len; @@ -1743,8 +1859,13 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev, if (err) return; mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); - for (i = 0; i < len; i++) + for (i = 0; i < len; i++) { data[data_index + i] = hw_stats[i].getter(ppcnt_pl); + if (!hw_stats[i].cells_bytes) + continue; + data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, + data[data_index + i]); + } } static void mlxsw_sp_port_get_stats(struct net_device *dev, @@ -2537,25 +2658,33 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) { int i; - for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); + kfree(mlxsw_sp->port_to_module); kfree(mlxsw_sp->ports); } static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) { + unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); u8 module, width, lane; size_t alloc_size; int i; int err; - alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; + alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports; mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); if (!mlxsw_sp->ports) return -ENOMEM; - for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { + mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL); + if (!mlxsw_sp->port_to_module) { + err = -ENOMEM; + goto err_port_to_module_alloc; + } + + for (i = 1; i < max_ports; i++) { err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, &width, &lane); if (err) @@ -2575,6 +2704,8 @@ err_port_module_info_get: for (i--; i >= 1; i--) if (mlxsw_sp_port_created(mlxsw_sp, i)) mlxsw_sp_port_remove(mlxsw_sp, i); + kfree(mlxsw_sp->port_to_module); +err_port_to_module_alloc: kfree(mlxsw_sp->ports); return err; } @@ -2877,6 +3008,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false), MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), + MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), /* L3 traps */ MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), @@ -3158,6 +3290,18 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); } +static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create); + +static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp) +{ + return 
mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true); +} + +static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false); +} + static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { @@ -3224,6 +3368,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_acl_init; } + err = mlxsw_sp_counter_pool_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); + goto err_counter_pool_init; + } + + err = mlxsw_sp_dpipe_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); + goto err_dpipe_init; + } + + err = mlxsw_sp_dummy_fid_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n"); + goto err_dummy_fid_init; + } + err = mlxsw_sp_ports_create(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); @@ -3233,6 +3395,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return 0; err_ports_create: + mlxsw_sp_dummy_fid_fini(mlxsw_sp); +err_dummy_fid_init: + mlxsw_sp_dpipe_fini(mlxsw_sp); +err_dpipe_init: + mlxsw_sp_counter_pool_fini(mlxsw_sp); +err_counter_pool_init: mlxsw_sp_acl_fini(mlxsw_sp); err_acl_init: mlxsw_sp_span_fini(mlxsw_sp); @@ -3255,6 +3423,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_dummy_fid_fini(mlxsw_sp); + mlxsw_sp_dpipe_fini(mlxsw_sp); + mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); @@ -3326,13 +3497,13 @@ bool mlxsw_sp_port_dev_check(const struct net_device *dev) return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; } -static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data) +static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data) { - struct mlxsw_sp_port **port = data; + struct mlxsw_sp_port **p_mlxsw_sp_port = data; int ret = 0; if (mlxsw_sp_port_dev_check(lower_dev)) { - *port = netdev_priv(lower_dev); + *p_mlxsw_sp_port = netdev_priv(lower_dev); ret = 1; } @@ -3341,18 +3512,18 @@ static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data) static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) { - struct mlxsw_sp_port *port; + struct mlxsw_sp_port *mlxsw_sp_port; if (mlxsw_sp_port_dev_check(dev)) return netdev_priv(dev); - port = NULL; - netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port); + mlxsw_sp_port = NULL; + netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port); - return port; + return mlxsw_sp_port; } -static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) +struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port; @@ -3362,15 +3533,16 @@ static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) { - struct mlxsw_sp_port *port; + struct mlxsw_sp_port *mlxsw_sp_port; if (mlxsw_sp_port_dev_check(dev)) return netdev_priv(dev); - port = NULL; - netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port); + mlxsw_sp_port = NULL; + netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk, + &mlxsw_sp_port); - return port; + return mlxsw_sp_port; } struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) @@ -3390,546 +3562,6 
@@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) dev_put(mlxsw_sp_port->dev); } -static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, - unsigned long event) -{ - switch (event) { - case NETDEV_UP: - if (!r) - return true; - r->ref_count++; - return false; - case NETDEV_DOWN: - if (r && --r->ref_count == 0) - return true; - /* It is possible we already removed the RIF ourselves - * if it was assigned to a netdev that is now a bridge - * or LAG slave. - */ - return false; - } - - return false; -} - -static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) -{ - int i; - - for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) - if (!mlxsw_sp->rifs[i]) - return i; - - return MLXSW_SP_INVALID_RIF; -} - -static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, - bool *p_lagged, u16 *p_system_port) -{ - u8 local_port = mlxsw_sp_vport->local_port; - - *p_lagged = mlxsw_sp_vport->lagged; - *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port; -} - -static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *l3_dev, u16 rif, - bool create) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - bool lagged = mlxsw_sp_vport->lagged; - char ritr_pl[MLXSW_REG_RITR_LEN]; - u16 system_port; - - mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, - l3_dev->mtu, l3_dev->dev_addr); - - mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); - mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, - mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); -} - -static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); - -static struct mlxsw_sp_fid * -mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) -{ - struct mlxsw_sp_fid *f; - - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (!f) - return NULL; - - f->leave = mlxsw_sp_vport_rif_sp_leave; - f->ref_count = 0; - f->dev = l3_dev; - f->fid = fid; - - return f; -} - -static struct mlxsw_sp_rif * -mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) -{ - struct mlxsw_sp_rif *r; - - r = kzalloc(sizeof(*r), GFP_KERNEL); - if (!r) - return NULL; - - INIT_LIST_HEAD(&r->nexthop_list); - INIT_LIST_HEAD(&r->neigh_list); - ether_addr_copy(r->addr, l3_dev->dev_addr); - r->mtu = l3_dev->mtu; - r->ref_count = 1; - r->dev = l3_dev; - r->rif = rif; - r->f = f; - - return r; -} - -static struct mlxsw_sp_rif * -mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *l3_dev) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - struct mlxsw_sp_fid *f; - struct mlxsw_sp_rif *r; - u16 fid, rif; - int err; - - rif = mlxsw_sp_avail_rif_get(mlxsw_sp); - if (rif == MLXSW_SP_INVALID_RIF) - return ERR_PTR(-ERANGE); - - err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true); - if (err) - return ERR_PTR(err); - - fid = mlxsw_sp_rif_sp_to_fid(rif); - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); - if (err) - goto err_rif_fdb_op; - - f = mlxsw_sp_rfid_alloc(fid, l3_dev); - if (!f) { - err = -ENOMEM; - goto err_rfid_alloc; - } - - r = mlxsw_sp_rif_alloc(rif, l3_dev, f); - if (!r) { - err = -ENOMEM; - goto err_rif_alloc; - } - - f->r = r; - mlxsw_sp->rifs[rif] = r; - - return r; - -err_rif_alloc: - kfree(f); -err_rfid_alloc: - mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); -err_rif_fdb_op: - mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); - return 
ERR_PTR(err); -} - -static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, - struct mlxsw_sp_rif *r) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - struct net_device *l3_dev = r->dev; - struct mlxsw_sp_fid *f = r->f; - u16 fid = f->fid; - u16 rif = r->rif; - - mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); - - mlxsw_sp->rifs[rif] = NULL; - f->r = NULL; - - kfree(r); - - kfree(f); - - mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); - - mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); -} - -static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *l3_dev) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - struct mlxsw_sp_rif *r; - - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); - if (!r) { - r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); - if (IS_ERR(r)) - return PTR_ERR(r); - } - - mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f); - r->f->ref_count++; - - netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid); - - return 0; -} - -static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) -{ - struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); - - netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); - - mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); - if (--f->ref_count == 0) - mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r); -} - -static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, - struct net_device *port_dev, - unsigned long event, u16 vid) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); - struct mlxsw_sp_port *mlxsw_sp_vport; - - mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (WARN_ON(!mlxsw_sp_vport)) - return -EINVAL; - - switch (event) { - case NETDEV_UP: - return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); - case NETDEV_DOWN: - mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); - break; - } - - return 0; -} - -static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, - unsigned long event) -{ - if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev)) - return 0; - - return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); -} - -static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, - struct net_device *lag_dev, - unsigned long event, u16 vid) -{ - struct net_device *port_dev; - struct list_head *iter; - int err; - - netdev_for_each_lower_dev(lag_dev, port_dev, iter) { - if (mlxsw_sp_port_dev_check(port_dev)) { - err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, - event, vid); - if (err) - return err; - } - } - - return 0; -} - -static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, - unsigned long event) -{ - if (netif_is_bridge_port(lag_dev)) - return 0; - - return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); -} - -static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev) -{ - u16 fid; - - if (is_vlan_dev(l3_dev)) - fid = vlan_dev_vlan_id(l3_dev); - else if (mlxsw_sp->master_bridge.dev == l3_dev) - fid = 1; - else - return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); - - return mlxsw_sp_fid_find(mlxsw_sp, fid); -} - -static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid) -{ - return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID : - MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; -} - -static u16 mlxsw_sp_flood_table_index_get(u16 fid) -{ - return mlxsw_sp_fid_is_vfid(fid) ? 
mlxsw_sp_fid_to_vfid(fid) : fid; -} - -static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, - bool set) -{ - enum mlxsw_flood_table_type table_type; - char *sftr_pl; - u16 index; - int err; - - sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); - if (!sftr_pl) - return -ENOMEM; - - table_type = mlxsw_sp_flood_table_type_get(fid); - index = mlxsw_sp_flood_table_index_get(fid); - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type, - 1, MLXSW_PORT_ROUTER_PORT, set); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); - - kfree(sftr_pl); - return err; -} - -static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) -{ - if (mlxsw_sp_fid_is_vfid(fid)) - return MLXSW_REG_RITR_FID_IF; - else - return MLXSW_REG_RITR_VLAN_IF; -} - -static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev, - u16 fid, u16 rif, - bool create) -{ - enum mlxsw_reg_ritr_if_type rif_type; - char ritr_pl[MLXSW_REG_RITR_LEN]; - - rif_type = mlxsw_sp_rif_type_get(fid); - mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu, - l3_dev->dev_addr); - mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); -} - -static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev, - struct mlxsw_sp_fid *f) -{ - struct mlxsw_sp_rif *r; - u16 rif; - int err; - - rif = mlxsw_sp_avail_rif_get(mlxsw_sp); - if (rif == MLXSW_SP_INVALID_RIF) - return -ERANGE; - - err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true); - if (err) - return err; - - err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); - if (err) - goto err_rif_bridge_op; - - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); - if (err) - goto err_rif_fdb_op; - - r = mlxsw_sp_rif_alloc(rif, l3_dev, f); - if (!r) { - err = -ENOMEM; - goto err_rif_alloc; - } - - f->r = r; - mlxsw_sp->rifs[rif] = r; - - netdev_dbg(l3_dev, "RIF=%d created\n", rif); - - return 0; - -err_rif_alloc: - mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); -err_rif_fdb_op: - mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); -err_rif_bridge_op: - mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); - return err; -} - -void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r) -{ - struct net_device *l3_dev = r->dev; - struct mlxsw_sp_fid *f = r->f; - u16 rif = r->rif; - - mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r); - - mlxsw_sp->rifs[rif] = NULL; - f->r = NULL; - - kfree(r); - - mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); - - mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); - - mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); - - netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); -} - -static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, - struct net_device *br_dev, - unsigned long event) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); - struct mlxsw_sp_fid *f; - - /* FID can either be an actual FID if the L3 device is the - * VLAN-aware bridge or a VLAN device on top. Otherwise, the - * L3 device is a VLAN-unaware bridge and we get a vFID. 
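
A note on the flood helpers being removed above: both the flood-table type and the index are derived from whether the FID is a vFID. A minimal standalone sketch of that mapping follows; it assumes the usual rebasing of vFIDs by MLXSW_SP_VFID_BASE (what mlxsw_sp_fid_to_vfid() does) and uses the new MLXSW_SP_DUMMY_FID upper bound this patch adds to spectrum.h:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID          4096
#define MLXSW_SP_VFID_BASE  VLAN_N_VID
#define MLXSW_SP_DUMMY_FID  15359

/* vFIDs occupy [MLXSW_SP_VFID_BASE, MLXSW_SP_DUMMY_FID) after this patch. */
static bool fid_is_vfid(unsigned int fid)
{
        return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
}

/* Flood-table index: vFIDs are rebased to zero, 802.1Q FIDs pass through. */
static unsigned int flood_index(unsigned int fid)
{
        return fid_is_vfid(fid) ? fid - MLXSW_SP_VFID_BASE : fid;
}

int main(void)
{
        printf("fid 1    -> index %u\n", flood_index(1));    /* 1 */
        printf("fid 4096 -> index %u\n", flood_index(4096)); /* 0 */
        return 0;
}

The split matters because vFIDs address a dedicated per-FID flood table that starts at zero, while VLAN FIDs use the offset-based table directly.
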
- */ - f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev); - if (WARN_ON(!f)) - return -EINVAL; - - switch (event) { - case NETDEV_UP: - return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f); - case NETDEV_DOWN: - mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); - break; - } - - return 0; -} - -static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, - unsigned long event) -{ - struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); - u16 vid = vlan_dev_vlan_id(vlan_dev); - - if (mlxsw_sp_port_dev_check(real_dev)) - return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, - vid); - else if (netif_is_lag_master(real_dev)) - return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, - vid); - else if (netif_is_bridge_master(real_dev) && - mlxsw_sp->master_bridge.dev == real_dev) - return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev, - event); - - return 0; -} - -static int mlxsw_sp_inetaddr_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; - struct net_device *dev = ifa->ifa_dev->dev; - struct mlxsw_sp *mlxsw_sp; - struct mlxsw_sp_rif *r; - int err = 0; - - mlxsw_sp = mlxsw_sp_lower_get(dev); - if (!mlxsw_sp) - goto out; - - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!mlxsw_sp_rif_should_config(r, event)) - goto out; - - if (mlxsw_sp_port_dev_check(dev)) - err = mlxsw_sp_inetaddr_port_event(dev, event); - else if (netif_is_lag_master(dev)) - err = mlxsw_sp_inetaddr_lag_event(dev, event); - else if (netif_is_bridge_master(dev)) - err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event); - else if (is_vlan_dev(dev)) - err = mlxsw_sp_inetaddr_vlan_event(dev, event); - -out: - return notifier_from_errno(err); -} - -static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif, - const char *mac, int mtu) -{ - char ritr_pl[MLXSW_REG_RITR_LEN]; - int err; - - mlxsw_reg_ritr_rif_pack(ritr_pl, rif); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); - if (err) - return err; - - mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); - mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); - mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); -} - -static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) -{ - struct mlxsw_sp *mlxsw_sp; - struct mlxsw_sp_rif *r; - int err; - - mlxsw_sp = mlxsw_sp_lower_get(dev); - if (!mlxsw_sp) - return 0; - - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!r) - return 0; - - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false); - if (err) - return err; - - err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu); - if (err) - goto err_rif_edit; - - err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true); - if (err) - goto err_rif_fdb_op; - - ether_addr_copy(r->addr, dev->dev_addr); - r->mtu = dev->mtu; - - netdev_dbg(dev, "Updated RIF=%d\n", r->rif); - - return 0; - -err_rif_fdb_op: - mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu); -err_rif_edit: - mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true); - return err; -} - static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, u16 fid) { @@ -4220,7 +3852,7 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, - u16 lag_id) + struct net_device *lag_dev, u16 lag_id) { struct mlxsw_sp_port *mlxsw_sp_vport; struct 
mlxsw_sp_fid *f; @@ -4238,6 +3870,7 @@ mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_vport->lag_id = lag_id; mlxsw_sp_vport->lagged = 1; + mlxsw_sp_vport->dev = lag_dev; } static void @@ -4254,6 +3887,7 @@ mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port) if (f) f->leave(mlxsw_sp_vport); + mlxsw_sp_vport->dev = mlxsw_sp_port->dev; mlxsw_sp_vport->lagged = 0; } @@ -4293,7 +3927,7 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->lagged = 1; lag->ref_count++; - mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id); + mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id); return 0; @@ -4403,6 +4037,56 @@ static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_vport->dev = mlxsw_sp_port->dev; } +static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool enable) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + enum mlxsw_reg_spms_state spms_state; + char *spms_pl; + u16 vid; + int err; + + spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING : + MLXSW_REG_SPMS_STATE_DISCARDING; + + spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); + if (!spms_pl) + return -ENOMEM; + mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); + + for (vid = 0; vid < VLAN_N_VID; vid++) + mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state); + + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); + kfree(spms_pl); + return err; +} + +static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); + if (err) + return err; + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, + true, false); + if (err) + goto err_port_vlan_set; + return 0; + +err_port_vlan_set: + mlxsw_sp_port_stp_set(mlxsw_sp_port, false); + return err; +} + +static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, + false, false); + mlxsw_sp_port_stp_set(mlxsw_sp_port, false); +} + static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, unsigned long event, void *ptr) { @@ -4421,7 +4105,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, upper_dev = info->upper_dev; if (!is_vlan_dev(upper_dev) && !netif_is_lag_master(upper_dev) && - !netif_is_bridge_master(upper_dev)) + !netif_is_bridge_master(upper_dev) && + !netif_is_ovs_master(upper_dev)) return -EINVAL; if (!info->linking) break; @@ -4438,6 +4123,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) return -EINVAL; + if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) + return -EINVAL; + if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -4446,8 +4135,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, upper_dev); else - mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, - upper_dev); + mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); } else if (netif_is_bridge_master(upper_dev)) { if (info->linking) err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, @@ -4461,6 +4150,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, else mlxsw_sp_port_lag_leave(mlxsw_sp_port, upper_dev); + } else if (netif_is_ovs_master(upper_dev)) { 
+ if (info->linking) + err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); + else + mlxsw_sp_port_ovs_leave(mlxsw_sp_port); } else { err = -EINVAL; WARN_ON(1); @@ -4552,8 +4246,8 @@ static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f; f = mlxsw_sp_fid_find(mlxsw_sp, fid); - if (f && f->r) - mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + if (f && f->rif) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif); if (f && --f->ref_count == 0) mlxsw_sp_fid_destroy(mlxsw_sp, f); } @@ -4564,33 +4258,40 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, struct netdev_notifier_changeupper_info *info; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; - int err; + int err = 0; mlxsw_sp = mlxsw_sp_lower_get(br_dev); if (!mlxsw_sp) return 0; - if (br_dev != mlxsw_sp->master_bridge.dev) - return 0; info = ptr; switch (event) { - case NETDEV_CHANGEUPPER: + case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; if (!is_vlan_dev(upper_dev)) - break; - if (info->linking) { - err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp, - upper_dev); - if (err) - return err; + return -EINVAL; + if (is_vlan_dev(upper_dev) && + br_dev != mlxsw_sp->master_bridge.dev) + return -EINVAL; + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (is_vlan_dev(upper_dev)) { + if (info->linking) + err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp, + upper_dev); + else + mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, + upper_dev); } else { - mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev); + err = -EINVAL; + WARN_ON(1); } break; } - return 0; + return err; } static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) @@ -4657,8 +4358,8 @@ static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, clear_bit(vfid, mlxsw_sp->vfids.mapped); list_del(&f->list); - if (f->r) - mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + if (f->rif) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif); kfree(f); @@ -4810,6 +4511,8 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, int err = 0; mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) + return 0; switch (event) { case NETDEV_PRECHANGEUPPER: @@ -4821,22 +4524,24 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, /* We can't have multiple VLAN interfaces configured on * the same port and being members in the same bridge. 
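
The notifier rework in this hunk and in the bridge/vport hunks below shares one shape: unsupported topologies are rejected in NETDEV_PRECHANGEUPPER, before the kernel commits the change, and real work happens only in NETDEV_CHANGEUPPER. A self-contained sketch of the pattern; is_vlan stands in for the is_vlan_dev()/netif_is_*() predicates the driver actually uses:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { NETDEV_PRECHANGEUPPER, NETDEV_CHANGEUPPER };

struct upper_info {
        bool is_vlan;   /* stand-in for is_vlan_dev(upper_dev) */
        bool linking;
};

/* Veto in the PRE phase, act in the commit phase. */
static int bridge_event(int event, const struct upper_info *info)
{
        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                if (!info->is_vlan)
                        return -EINVAL; /* reject before anything changed */
                break;
        case NETDEV_CHANGEUPPER:
                printf(info->linking ? "link VLAN upper\n"
                                     : "unlink VLAN upper\n");
                break;
        }
        return 0;
}

int main(void)
{
        struct upper_info bad = { .is_vlan = false, .linking = true };
        struct upper_info good = { .is_vlan = true, .linking = true };

        printf("veto: %d\n", bridge_event(NETDEV_PRECHANGEUPPER, &bad));
        return bridge_event(NETDEV_CHANGEUPPER, &good);
}

Rejecting in the PRE phase is what makes the WARN_ON(1) in the commit phase safe: by then only supported uppers can show up.
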
*/ - if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, + if (netif_is_bridge_master(upper_dev) && + !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, upper_dev)) return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (info->linking) { - if (WARN_ON(!mlxsw_sp_vport)) - return -EINVAL; - err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, - upper_dev); + if (netif_is_bridge_master(upper_dev)) { + if (info->linking) + err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, + upper_dev); + else + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); } else { - if (!mlxsw_sp_vport) - return 0; - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); + err = -EINVAL; + WARN_ON(1); } + break; } return err; @@ -4878,6 +4583,15 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, return 0; } +static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) +{ + struct netdev_notifier_changeupper_info *info = ptr; + + if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER) + return false; + return netif_is_l3_master(info->upper_dev); +} + static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -4886,6 +4600,8 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) err = mlxsw_sp_netdevice_router_port_event(dev); + else if (mlxsw_sp_is_vrf_event(event, ptr)) + err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); else if (mlxsw_sp_port_dev_check(dev)) err = mlxsw_sp_netdevice_port_event(dev, event, ptr); else if (netif_is_lag_master(dev)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 13ec85e7c392..0c23bc1e946d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -57,41 +57,21 @@ #define MLXSW_SP_VFID_BASE VLAN_N_VID #define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */ +#define MLXSW_SP_DUMMY_FID 15359 + #define MLXSW_SP_RFID_BASE 15360 -#define MLXSW_SP_INVALID_RIF 0xffff #define MLXSW_SP_MID_MAX 7000 #define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 -#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */ -#define MLXSW_SP_LPM_TREE_MAX 22 -#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN) - #define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ -#define MLXSW_SP_BYTES_PER_CELL 96 - -#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) -#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL) - #define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */ #define MLXSW_SP_KVD_GRANULARITY 128 -/* Maximum delay buffer needed in case of PAUSE frames, in cells. - * Assumes 100m cable and maximum MTU. 
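
The constants deleted in the spectrum.h hunk below baked a 96-byte cell size and derived buffer sizes into the binary. The patch replaces them with conversions against a cell size queried from the device at init (the mlxsw_sp_bytes_cells() and mlxsw_sp_cells_bytes() helpers added further down). The arithmetic itself is unchanged; 96 here is only an example value:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static unsigned int bytes_to_cells(unsigned int bytes, unsigned int cell_size)
{
        return DIV_ROUND_UP(bytes, cell_size); /* round up: partial cells count */
}

static unsigned int cells_to_bytes(unsigned int cells, unsigned int cell_size)
{
        return cells * cell_size;
}

int main(void)
{
        unsigned int cell_size = 96; /* example; real value comes from CELL_SIZE */

        printf("1518 bytes -> %u cells\n", bytes_to_cells(1518, cell_size));
        printf("16 cells   -> %u bytes\n", cells_to_bytes(16, cell_size));
        return 0;
}
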
- */ -#define MLXSW_SP_PAUSE_DELAY 612 - -#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ - -static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay) -{ - delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE)); - return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu); -} - struct mlxsw_sp_port; +struct mlxsw_sp_rif; struct mlxsw_sp_upper { struct net_device *dev; @@ -103,21 +83,10 @@ struct mlxsw_sp_fid { struct list_head list; unsigned int ref_count; struct net_device *dev; - struct mlxsw_sp_rif *r; + struct mlxsw_sp_rif *rif; u16 fid; }; -struct mlxsw_sp_rif { - struct list_head nexthop_list; - struct list_head neigh_list; - struct net_device *dev; - unsigned int ref_count; - struct mlxsw_sp_fid *f; - unsigned char addr[ETH_ALEN]; - int mtu; - u16 rif; -}; - struct mlxsw_sp_mid { struct list_head list; unsigned char addr[ETH_ALEN]; @@ -138,17 +107,7 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid) static inline bool mlxsw_sp_fid_is_vfid(u16 fid) { - return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE; -} - -static inline bool mlxsw_sp_fid_is_rfid(u16 fid) -{ - return fid >= MLXSW_SP_RFID_BASE; -} - -static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif) -{ - return MLXSW_SP_RFID_BASE + rif; + return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID; } struct mlxsw_sp_sb_pr { @@ -177,12 +136,15 @@ struct mlxsw_sp_sb_pm { #define MLXSW_SP_SB_POOL_COUNT 4 #define MLXSW_SP_SB_TC_COUNT 8 +struct mlxsw_sp_sb_port { + struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; + struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; +}; + struct mlxsw_sp_sb { struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT]; - struct { - struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; - struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; - } ports[MLXSW_PORT_MAX_PORTS]; + struct mlxsw_sp_sb_port *ports; + u32 cell_size; }; #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE) @@ -207,11 +169,9 @@ struct mlxsw_sp_fib; struct mlxsw_sp_vr { u16 id; /* virtual router ID */ - bool used; - enum mlxsw_sp_l3proto proto; u32 tb_id; /* kernel fib table id */ - struct mlxsw_sp_lpm_tree *lpm_tree; - struct mlxsw_sp_fib *fib; + unsigned int rif_count; + struct mlxsw_sp_fib *fib4; }; enum mlxsw_sp_span_type { @@ -253,12 +213,15 @@ struct mlxsw_sp_port_mall_tc_entry { }; struct mlxsw_sp_router { - struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT]; struct mlxsw_sp_vr *vrs; struct rhashtable neigh_ht; struct rhashtable nexthop_group_ht; struct rhashtable nexthop_ht; struct { + struct mlxsw_sp_lpm_tree *trees; + unsigned int tree_count; + } lpm; + struct { struct delayed_work dw; unsigned long interval; /* ms */ } neighs_update; @@ -269,6 +232,7 @@ struct mlxsw_sp_router { }; struct mlxsw_sp_acl; +struct mlxsw_sp_counter_pool; struct mlxsw_sp { struct { @@ -296,7 +260,7 @@ struct mlxsw_sp { u32 ageing_time; struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper *lags; - u8 port_to_module[MLXSW_PORT_MAX_PORTS]; + u8 *port_to_module; struct mlxsw_sp_sb sb; struct mlxsw_sp_router router; struct mlxsw_sp_acl *acl; @@ -304,6 +268,7 @@ struct mlxsw_sp { DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); } kvdl; + struct mlxsw_sp_counter_pool *counter_pool; struct { struct mlxsw_sp_span_entry *entries; int entries_count; @@ -317,6 +282,18 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id) return &mlxsw_sp->lags[lag_id]; } +static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, + u32 cells) +{ + return 
mlxsw_sp->sb.cell_size * cells; +} + +static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, + u32 bytes) +{ + return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size); +} + struct mlxsw_sp_port_pcpu_stats { u64 rx_packets; u64 rx_bytes; @@ -386,6 +363,7 @@ struct mlxsw_sp_port { }; bool mlxsw_sp_port_dev_check(const struct net_device *dev); +struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); @@ -497,19 +475,6 @@ mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, return NULL; } -static inline struct mlxsw_sp_rif * -mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev) -{ - int i; - - for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) - if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev) - return mlxsw_sp->rifs[i]; - - return NULL; -} - enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, MLXSW_SP_FLOOD_TABLE_BC, @@ -570,8 +535,6 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, bool adding); struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid); void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f); -void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r); int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, bool dwrr, u8 dwrr_weight); @@ -608,10 +571,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_router_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr); -void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r); +int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); +int mlxsw_sp_inetaddr_event(struct notifier_block *unused, + unsigned long event, void *ptr); +void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif); +int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, + struct netdev_notifier_changeupper_info *info); -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, + u32 *p_entry_index); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); @@ -620,6 +589,8 @@ struct mlxsw_sp_acl_rule_info { unsigned int priority; struct mlxsw_afk_element_values values; struct mlxsw_afa_block *act_block; + unsigned int counter_index; + bool counter_valid; }; enum mlxsw_sp_acl_profile { @@ -639,6 +610,8 @@ struct mlxsw_sp_acl_profile_ops { void *ruleset_priv, void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei); void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); + int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, + bool *activity); }; struct mlxsw_sp_acl_ops { @@ -679,6 +652,14 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct net_device *out_dev); +int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 action, u16 vid, u16 proto, u8 prio); +int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info 
*rulei); +int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u16 fid); struct mlxsw_sp_acl_rule; @@ -698,6 +679,9 @@ mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, unsigned long cookie); struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule); +int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use); int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); @@ -708,5 +692,14 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, __be16 protocol, struct tc_cls_flower_offload *f); void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct tc_cls_flower_offload *f); +int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f); +int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index, u64 *packets, + u64 *bytes); +int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, + unsigned int *p_counter_index); +void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 8a18b3aa70dc..317f7b14627f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -39,6 +39,7 @@ #include <linux/string.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> +#include <net/tc_act/tc_vlan.h> #include "reg.h" #include "core.h" @@ -49,10 +50,17 @@ #include "spectrum_acl_flex_keys.h" struct mlxsw_sp_acl { + struct mlxsw_sp *mlxsw_sp; struct mlxsw_afk *afk; struct mlxsw_afa *afa; const struct mlxsw_sp_acl_ops *ops; struct rhashtable ruleset_ht; + struct list_head rules; + struct { + struct delayed_work dw; + unsigned long interval; /* ms */ +#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000 + } rule_activity_update; unsigned long priv[0]; /* priv has to be always the last item */ }; @@ -79,9 +87,13 @@ struct mlxsw_sp_acl_ruleset { struct mlxsw_sp_acl_rule { struct rhash_head ht_node; /* Member of rule HT */ + struct list_head list; unsigned long cookie; /* HT key */ struct mlxsw_sp_acl_ruleset *ruleset; struct mlxsw_sp_acl_rule_info *rulei; + u64 last_used; + u64 last_packets; + u64 last_bytes; unsigned long priv[0]; /* priv has to be always the last item */ }; @@ -237,6 +249,27 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); } +static int +mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei) +{ + int err; + + err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index); + if (err) + return err; + rulei->counter_valid = true; + return 0; +} + +static void +mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei) +{ + rulei->counter_valid = false; + mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index); +} + struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) { @@ -335,6 +368,48 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, local_port, in_port); } +int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 action, u16 vid, u16 proto, u8 prio) +{ + u8 ethertype; + + if (action == 
TCA_VLAN_ACT_MODIFY) { + switch (proto) { + case ETH_P_8021Q: + ethertype = 0; + break; + case ETH_P_8021AD: + ethertype = 1; + break; + default: + dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n", + proto); + return -EINVAL; + } + + return mlxsw_afa_block_append_vlan_modify(rulei->act_block, + vid, prio, ethertype); + } else { + dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n"); + return -EINVAL; + } +} + +int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_afa_block_append_counter(rulei->act_block, + rulei->counter_index); +} + +int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u16 fid) +{ + return mlxsw_afa_block_append_fid_set(rulei->act_block, fid); +} + struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, @@ -358,8 +433,14 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, err = PTR_ERR(rule->rulei); goto err_rulei_create; } + + err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei); + if (err) + goto err_counter_alloc; return rule; +err_counter_alloc: + mlxsw_sp_acl_rulei_destroy(rule->rulei); err_rulei_create: kfree(rule); err_alloc: @@ -372,6 +453,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei); mlxsw_sp_acl_rulei_destroy(rule->rulei); kfree(rule); mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); @@ -393,6 +475,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rhashtable_insert; + list_add_tail(&rule->list, &mlxsw_sp->acl->rules); return 0; err_rhashtable_insert: @@ -406,6 +489,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + list_del(&rule->list); rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node, mlxsw_sp_acl_rule_ht_params); ops->rule_del(mlxsw_sp, rule->priv); @@ -426,6 +510,90 @@ mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule) return rule->rulei; } +static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + bool active; + int err; + + err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active); + if (err) + return err; + if (active) + rule->last_used = jiffies; + return 0; +} + +static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl) +{ + struct mlxsw_sp_acl_rule *rule; + int err; + + /* Protect internal structures from changes */ + rtnl_lock(); + list_for_each_entry(rule, &acl->rules, list) { + err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp, + rule); + if (err) + goto err_rule_update; + } + rtnl_unlock(); + return 0; + +err_rule_update: + rtnl_unlock(); + return err; +} + +static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl) +{ + unsigned long interval = acl->rule_activity_update.interval; + + mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, + msecs_to_jiffies(interval)); +} + +static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work) +{ + struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl, + rule_activity_update.dw.work); + int err; + + err = mlxsw_sp_acl_rules_activity_update(acl); + if (err) + 
dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity"); + + mlxsw_sp_acl_rule_activity_work_schedule(acl); +} + +int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule, + u64 *packets, u64 *bytes, u64 *last_use) + +{ + struct mlxsw_sp_acl_rule_info *rulei; + u64 current_packets; + u64 current_bytes; + int err; + + rulei = mlxsw_sp_acl_rule_rulei(rule); + err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index, + &current_packets, &current_bytes); + if (err) + return err; + + *packets = current_packets - rule->last_packets; + *bytes = current_bytes - rule->last_bytes; + *last_use = rule->last_used; + + rule->last_bytes = current_bytes; + rule->last_packets = current_packets; + + return 0; +} + #define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, @@ -434,7 +602,6 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, struct mlxsw_sp *mlxsw_sp = priv; char pefa_pl[MLXSW_REG_PEFA_LEN]; u32 kvdl_index; - int ret; int err; /* The first action set of a TCAM entry is stored directly in TCAM, @@ -443,10 +610,10 @@ static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, if (is_first) return 0; - ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE); - if (ret < 0) - return ret; - kvdl_index = ret; + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE, + &kvdl_index); + if (err) + return err; mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); if (err) @@ -475,13 +642,11 @@ static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, struct mlxsw_sp *mlxsw_sp = priv; char ppbs_pl[MLXSW_REG_PPBS_LEN]; u32 kvdl_index; - int ret; int err; - ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1); - if (ret < 0) - return ret; - kvdl_index = ret; + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); + if (err) + return err; mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); if (err) @@ -518,7 +683,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) if (!acl) return -ENOMEM; mlxsw_sp->acl = acl; - + acl->mlxsw_sp = mlxsw_sp; acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_FLEX_KEYS), mlxsw_sp_afk_blocks, @@ -541,11 +706,18 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_rhashtable_init; + INIT_LIST_HEAD(&acl->rules); err = acl_ops->init(mlxsw_sp, acl->priv); if (err) goto err_acl_ops_init; acl->ops = acl_ops; + + /* Create the delayed work for the rule activity_update */ + INIT_DELAYED_WORK(&acl->rule_activity_update.dw, + mlxsw_sp_acl_rul_activity_update_work); + acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS; + mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0); return 0; err_acl_ops_init: @@ -564,7 +736,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_acl *acl = mlxsw_sp->acl; const struct mlxsw_sp_acl_ops *acl_ops = acl->ops; + cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); acl_ops->fini(mlxsw_sp, acl->priv); + WARN_ON(!list_empty(&acl->rules)); rhashtable_destroy(&acl->ruleset_ht); mlxsw_afa_destroy(acl->afa); mlxsw_afk_destroy(acl->afk); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index 82b81cf7f4a7..af7b7bad48df 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -39,11 +39,15 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = { MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = { MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 13, 3), + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x08, 0, 12), MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16), }; @@ -65,6 +69,8 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = { + MLXSW_AFK_ELEMENT_INST_U32(VID, 0x00, 0, 12), + MLXSW_AFK_ELEMENT_INST_U32(PCP, 0x08, 29, 3), MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16), MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16), }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 7382832215fa..3a24289979d9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -561,6 +561,24 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); } +static int +mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int offset, + bool *activity) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + int err; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ, + region->tcam_region_info, offset); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); + if (err) + return err; + *activity = mlxsw_reg_ptce2_a_get(ptce2_pl); + return 0; +} + #define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U) static int @@ -940,6 +958,19 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); } +static int +mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_entry *entry, + bool *activity) +{ + struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region, + entry->parman_item.index, + activity); +} + static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, MLXSW_AFK_ELEMENT_DMAC, @@ -950,6 +981,8 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_DST_IP4, MLXSW_AFK_ELEMENT_DST_L4_PORT, MLXSW_AFK_ELEMENT_SRC_L4_PORT, + MLXSW_AFK_ELEMENT_VID, + MLXSW_AFK_ELEMENT_PCP, }; static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { @@ -1046,6 +1079,16 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); } +static int +mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp, + void *rule_priv, bool *activity) +{ + struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; + + return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry, + activity); +} + static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset), .ruleset_add = 
mlxsw_sp_acl_tcam_flower_ruleset_add, @@ -1055,6 +1098,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, + .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get, }; static const struct mlxsw_sp_acl_profile_ops * diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index a7468262f118..997189cfe7fd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -162,8 +162,8 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, } static const u16 mlxsw_sp_pbs[] = { - [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN), - [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU), + [0] = 2 * ETH_FRAME_LEN, + [9] = 2 * MLXSW_PORT_MAX_MTU, }; #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) @@ -171,20 +171,22 @@ static const u16 mlxsw_sp_pbs[] = { static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pbmc_pl[MLXSW_REG_PBMC_LEN]; int i; mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2); for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { + u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]); + if (i == MLXSW_SP_PB_UNUSED) continue; - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]); + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size); } mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0); - return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, - MLXSW_REG(pbmc), pbmc_pl); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); } static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port) @@ -209,11 +211,25 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); } -#define MLXSW_SP_SB_PR_INGRESS_SIZE \ - (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) +static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp) +{ + unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + + mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port), + GFP_KERNEL); + if (!mlxsw_sp->sb.ports) + return -ENOMEM; + return 0; +} + +static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp) +{ + kfree(mlxsw_sp->sb.ports); +} + +#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000 #define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000) -#define MLXSW_SP_SB_PR_EGRESS_SIZE \ - (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) +#define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000 #define MLXSW_SP_SB_PR(_mode, _size) \ { \ @@ -223,18 +239,17 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = { MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)), + MLXSW_SP_SB_PR_INGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)), + MLXSW_SP_SB_PR_INGRESS_MNG_SIZE), }; #define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress) static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = { - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - 
MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), @@ -251,11 +266,9 @@ static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, int err; for (i = 0; i < prs_len; i++) { - const struct mlxsw_sp_sb_pr *pr; + u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size); - pr = &prs[i]; - err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, - pr->mode, pr->size); + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size); if (err) return err; } @@ -284,7 +297,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) } static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0), + MLXSW_SP_SB_CM(10000, 8, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), @@ -293,20 +306,20 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3), + MLXSW_SP_SB_CM(20000, 1, 3), }; #define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), + MLXSW_SP_SB_CM(1500, 9, 0), MLXSW_SP_SB_CM(0, 0, 0), MLXSW_SP_SB_CM(0, 0, 0), MLXSW_SP_SB_CM(0, 0, 0), @@ -330,7 +343,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0), + MLXSW_SP_SB_CM(10000, 0, 0), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, @@ -370,13 +383,17 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, for (i = 0; i < cms_len; i++) { const struct mlxsw_sp_sb_cm *cm; + u32 min_buff; if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) continue; /* PG number 8 does not exist, skip it */ cm = &cms[i]; + /* All pools are initialized using dynamic thresholds, + * therefore 'max_buff' isn't specified in cells. 
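
To make the comment above concrete: the static CM tables now keep min_buff in bytes (10000, 1500, ...), converted to cells once per entry at init, while max_buff passes through untouched because a dynamic threshold is an index rather than a size. A hedged sketch; the struct and helper here are illustrative, not the driver's own:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

struct sb_cm {
        unsigned int min_buff; /* bytes in the table */
        unsigned int max_buff; /* dynamic-threshold index, NOT cells */
};

static void init_cms(const struct sb_cm *cms, int n, unsigned int cell_size)
{
        for (int i = 0; i < n; i++) {
                unsigned int min_cells = DIV_ROUND_UP(cms[i].min_buff, cell_size);

                /* only min_buff is converted before the register write */
                printf("cm[%d]: min=%u cells, max=%u (index)\n",
                       i, min_cells, cms[i].max_buff);
        }
}

int main(void)
{
        const struct sb_cm cms[] = { { 10000, 8 }, { 1500, 9 } };

        init_cms(cms, 2, 96 /* example cell size */);
        return 0;
}
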
+ */ + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff); err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir, - cm->min_buff, cm->max_buff, - cm->pool); + min_buff, cm->max_buff, cm->pool); if (err) return err; } @@ -484,21 +501,21 @@ struct mlxsw_sp_sb_mm { } static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), - MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), + MLXSW_SP_SB_MM(20000, 0xff, 0), }; #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) @@ -511,10 +528,15 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) { const struct mlxsw_sp_sb_mm *mc; + u32 min_buff; mc = &mlxsw_sp_sb_mms[i]; - mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff, - mc->max_buff, mc->pool); + /* All pools are initialized using dynamic thresholds, + * therefore 'max_buff' isn't specified in cells. 
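
The same per-entry conversion repeats for the multicast (SBMM) entries just below, and mlxsw_sp_buffers_init() now refuses to start without the firmware-reported CELL_SIZE and MAX_BUFFER_SIZE resources. A sketch of that fail-fast guard; the res struct is a stand-in for the driver's MLXSW_CORE_RES_VALID()/MLXSW_CORE_RES_GET() pair:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct res {
        bool valid;
        unsigned int val;
};

static int sb_init(const struct res *cell_size, const struct res *max_size)
{
        /* -EIO if firmware did not report a resource we depend on */
        if (!cell_size->valid || !max_size->valid)
                return -EIO;

        printf("cell=%u bytes, shared buffer=%u bytes\n",
               cell_size->val, max_size->val);
        return 0;
}

int main(void)
{
        struct res cell = { true, 96 }, size = { true, 16 * 1024 * 1024 };
        struct res missing = { false, 0 };

        printf("ok:   %d\n", sb_init(&cell, &size));
        printf("fail: %d\n", sb_init(&missing, &size));
        return 0;
}

The 16 MB figure above matches the MLXSW_SP_SB_SIZE constant this patch deletes in favor of the queried value.
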
+ */ + min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff); + mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff, + mc->pool); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); if (err) return err; @@ -522,32 +544,53 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) return 0; } -#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024) - int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { + u64 sb_size; int err; - err = mlxsw_sp_sb_prs_init(mlxsw_sp); + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE)) + return -EIO; + mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE); + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE)) + return -EIO; + sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE); + + err = mlxsw_sp_sb_ports_init(mlxsw_sp); if (err) return err; + err = mlxsw_sp_sb_prs_init(mlxsw_sp); + if (err) + goto err_sb_prs_init; err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); if (err) - return err; + goto err_sb_cpu_port_sb_cms_init; err = mlxsw_sp_sb_mms_init(mlxsw_sp); if (err) - return err; - return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, - MLXSW_SP_SB_SIZE, - MLXSW_SP_SB_POOL_COUNT, - MLXSW_SP_SB_POOL_COUNT, - MLXSW_SP_SB_TC_COUNT, - MLXSW_SP_SB_TC_COUNT); + goto err_sb_mms_init; + err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size, + MLXSW_SP_SB_POOL_COUNT, + MLXSW_SP_SB_POOL_COUNT, + MLXSW_SP_SB_TC_COUNT, + MLXSW_SP_SB_TC_COUNT); + if (err) + goto err_devlink_sb_register; + + return 0; + +err_devlink_sb_register: +err_sb_mms_init: +err_sb_cpu_port_sb_cms_init: +err_sb_prs_init: + mlxsw_sp_sb_ports_fini(mlxsw_sp); + return err; } void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp) { devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0); + mlxsw_sp_sb_ports_fini(mlxsw_sp); } int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) @@ -596,7 +639,7 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); pool_info->pool_type = (enum devlink_sb_pool_type) dir; - pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size); + pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size); pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode; return 0; } @@ -606,9 +649,9 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, enum devlink_sb_threshold_type threshold_type) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size); u8 pool = pool_get(pool_index); enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); - u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size); enum mlxsw_reg_sbpr_mode mode; if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) @@ -627,7 +670,7 @@ static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool, if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; - return MLXSW_SP_CELLS_TO_BYTES(max_buff); + return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff); } static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, @@ -645,7 +688,7 @@ static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, return -EINVAL; *p_max_buff = val; } else { - *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold); + *p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold); } return 0; } @@ -761,7 +804,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, masked_count = 0; for (local_port = cb_ctx.local_port_1; - local_port < 
MLXSW_PORT_MAX_PORTS; local_port++) { + local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { @@ -775,7 +818,7 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, } masked_count = 0; for (local_port = cb_ctx.local_port_1; - local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { @@ -817,7 +860,7 @@ next_batch: mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); } - for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); @@ -847,7 +890,7 @@ do_query: cb_priv); if (err) goto out; - if (local_port < MLXSW_PORT_MAX_PORTS) + if (local_port < mlxsw_core_max_ports(mlxsw_core)) goto next_batch; out: @@ -882,7 +925,7 @@ next_batch: mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); } - for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) { if (!mlxsw_sp->ports[local_port]) continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); @@ -908,7 +951,7 @@ do_query: &bulk_list, NULL, 0); if (err) goto out; - if (local_port < MLXSW_PORT_MAX_PORTS) + if (local_port < mlxsw_core_max_ports(mlxsw_core)) goto next_batch; out: @@ -932,8 +975,8 @@ int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); - *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur); - *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max); + *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur); + *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max); return 0; } @@ -951,7 +994,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); - *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur); - *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max); + *p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur); + *p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c new file mode 100644 index 000000000000..0f46775e0307 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c @@ -0,0 +1,207 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/bitops.h> + +#include "spectrum_cnt.h" + +#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096 + +struct mlxsw_sp_counter_sub_pool { + unsigned int base_index; + unsigned int size; + unsigned int entry_size; + unsigned int bank_count; +}; + +struct mlxsw_sp_counter_pool { + unsigned int pool_size; + unsigned long *usage; /* Usage bitmap */ + struct mlxsw_sp_counter_sub_pool *sub_pools; +}; + +static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = { + [MLXSW_SP_COUNTER_SUB_POOL_FLOW] = { + .bank_count = 6, + }, + [MLXSW_SP_COUNTER_SUB_POOL_RIF] = { + .bank_count = 2, + } +}; + +static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp) +{ + unsigned int total_bank_config = 0; + unsigned int pool_size; + int i; + + pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE); + /* Check config is valid, no bank over subscription */ + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) + total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count; + if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1) + return -EINVAL; + return 0; +} + +static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_counter_sub_pool *sub_pool; + + /* Prepare generic flow pool*/ + sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW]; + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES)) + return -EIO; + sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + COUNTER_SIZE_PACKETS_BYTES); + /* Prepare erif pool*/ + sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF]; + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC)) + return -EIO; + sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + COUNTER_SIZE_ROUTER_BASIC); + return 0; +} + +int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_counter_sub_pool *sub_pool; + struct mlxsw_sp_counter_pool *pool; + unsigned int base_index; + unsigned int map_size; + int i; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE)) + return -EIO; + + err = mlxsw_sp_counter_pool_validate(mlxsw_sp); + if (err) + return err; + + err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp); + if (err) + return err; + + pool = kzalloc(sizeof(*pool), GFP_KERNEL); + if (!pool) + 
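A worked example of the bank budgeting in mlxsw_sp_counter_pool_validate() above, for a hypothetical COUNTER_POOL_SIZE (the real value comes from firmware resources):

/* COUNTER_POOL_SIZE = 28672 counters = 7 full banks of 4096:
 *
 *	total_bank_config = 6 (flow) + 2 (rif) = 8
 *	pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1 = 7 + 1 = 8
 *
 * 8 > 8 is false, so the layout is accepted. The '+ 1' allows exactly
 * one sub-pool to spill into a partial final bank; the sizing loop in
 * mlxsw_sp_counter_pool_init() then truncates that sub-pool (here the
 * RIF sub-pool shrinks from 8192 to 28672 - 24576 = 4096 entries).
 */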
return -ENOMEM; + + pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE); + map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long); + + pool->usage = kzalloc(map_size, GFP_KERNEL); + if (!pool->usage) { + err = -ENOMEM; + goto err_usage_alloc; + } + + pool->sub_pools = mlxsw_sp_counter_sub_pools; + /* Allocation is based on bank count which should be + * specified for each sub pool statically. + */ + base_index = 0; + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) { + sub_pool = &pool->sub_pools[i]; + sub_pool->size = sub_pool->bank_count * + MLXSW_SP_COUNTER_POOL_BANK_SIZE; + sub_pool->base_index = base_index; + base_index += sub_pool->size; + /* The last bank can't be fully used */ + if (sub_pool->base_index + sub_pool->size > pool->pool_size) + sub_pool->size = pool->pool_size - sub_pool->base_index; + } + + mlxsw_sp->counter_pool = pool; + return 0; + +err_usage_alloc: + kfree(pool); + return err; +} + +void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; + + WARN_ON(find_first_bit(pool->usage, pool->pool_size) != + pool->pool_size); + kfree(pool->usage); + kfree(pool); +} + +int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_counter_sub_pool_id sub_pool_id, + unsigned int *p_counter_index) +{ + struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; + struct mlxsw_sp_counter_sub_pool *sub_pool; + unsigned int entry_index; + unsigned int stop_index; + int i; + + sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id]; + stop_index = sub_pool->base_index + sub_pool->size; + entry_index = sub_pool->base_index; + + entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index); + if (entry_index == stop_index) + return -ENOBUFS; + /* The sub-pools can contain non-integer number of entries + * so we must check for overflow + */ + if (entry_index + sub_pool->entry_size > stop_index) + return -ENOBUFS; + for (i = 0; i < sub_pool->entry_size; i++) + __set_bit(entry_index + i, pool->usage); + + *p_counter_index = entry_index; + return 0; +} + +void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_counter_sub_pool_id sub_pool_id, + unsigned int counter_index) +{ + struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool; + struct mlxsw_sp_counter_sub_pool *sub_pool; + int i; + + if (WARN_ON(counter_index >= pool->pool_size)) + return; + sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id]; + for (i = 0; i < sub_pool->entry_size; i++) + __clear_bit(counter_index + i, pool->usage); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h new file mode 100644 index 000000000000..fd34d0a01073 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h @@ -0,0 +1,54 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Arkadi Sharshevsky <arkdis@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
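Under the hood the pool is a single usage bitmap in which one logical counter occupies entry_size consecutive cells inside its sub-pool's [base_index, base_index + size) window. A self-contained userspace model of the same scheme (a plain bool array stands in for the kernel bitmap API; all names here are illustrative, not driver code):

#include <stdbool.h>

#define POOL_SIZE 32768

struct sub_pool { unsigned int base_index, size, entry_size; };

static bool usage[POOL_SIZE];	/* one flag per counter cell */

/* Find entry_size consecutive free cells inside the sub-pool window,
 * mark them used and return the first index, mirroring the
 * find_next_zero_bit() + __set_bit() loop in mlxsw_sp_counter_alloc(). */
static int counter_alloc(const struct sub_pool *sp, unsigned int *p_index)
{
	unsigned int stop = sp->base_index + sp->size;
	unsigned int i, j;

	for (i = sp->base_index; i + sp->entry_size <= stop; i++) {
		for (j = 0; j < sp->entry_size && !usage[i + j]; j++)
			;
		if (j < sp->entry_size)
			continue;	/* cell in use, try next offset */
		for (j = 0; j < sp->entry_size; j++)
			usage[i + j] = true;
		*p_index = i;
		return 0;
	}
	return -1;	/* the driver returns -ENOBUFS here */
}

static void counter_free(const struct sub_pool *sp, unsigned int index)
{
	for (unsigned int j = 0; j < sp->entry_size; j++)
		usage[index + j] = false;
}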
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_CNT_H +#define _MLXSW_SPECTRUM_CNT_H + +#include "spectrum.h" + +enum mlxsw_sp_counter_sub_pool_id { + MLXSW_SP_COUNTER_SUB_POOL_FLOW, + MLXSW_SP_COUNTER_SUB_POOL_RIF, +}; + +int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_counter_sub_pool_id sub_pool_id, + unsigned int *p_counter_index); +void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_counter_sub_pool_id sub_pool_id, + unsigned int counter_index); +int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c new file mode 100644 index 000000000000..ea56f6ade6b4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -0,0 +1,351 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <net/devlink.h> + +#include "spectrum.h" +#include "spectrum_dpipe.h" +#include "spectrum_router.h" + +enum mlxsw_sp_field_metadata_id { + MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT, + MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD, + MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, +}; + +static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = { + { .name = "erif_port", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT, + .bitwidth = 32, + .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX, + }, + { .name = "l3_forward", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD, + .bitwidth = 1, + }, + { .name = "l3_drop", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, + .bitwidth = 1, + }, +}; + +enum mlxsw_sp_dpipe_header_id { + MLXSW_SP_DPIPE_HEADER_METADATA, +}; + +static struct devlink_dpipe_header mlxsw_sp_dpipe_header_metadata = { + .name = "mlxsw_meta", + .id = MLXSW_SP_DPIPE_HEADER_METADATA, + .fields = mlxsw_sp_dpipe_fields_metadata, + .fields_count = ARRAY_SIZE(mlxsw_sp_dpipe_fields_metadata), +}; + +static struct devlink_dpipe_header *mlxsw_dpipe_headers[] = { + &mlxsw_sp_dpipe_header_metadata, +}; + +static struct devlink_dpipe_headers mlxsw_sp_dpipe_headers = { + .headers = mlxsw_dpipe_headers, + .headers_count = ARRAY_SIZE(mlxsw_dpipe_headers), +}; + +static int mlxsw_sp_dpipe_table_erif_actions_dump(void *priv, + struct sk_buff *skb) +{ + struct devlink_dpipe_action action = {0}; + int err; + + action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action.header = &mlxsw_sp_dpipe_header_metadata; + action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD; + + err = devlink_dpipe_action_put(skb, &action); + if (err) + return err; + + action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action.header = &mlxsw_sp_dpipe_header_metadata; + action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP; + + return devlink_dpipe_action_put(skb, &action); +} + +static int mlxsw_sp_dpipe_table_erif_matches_dump(void *priv, + struct sk_buff *skb) +{ + struct devlink_dpipe_match match = {0}; + + match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match.header = &mlxsw_sp_dpipe_header_metadata; + match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT; + + return devlink_dpipe_match_put(skb, &match); +} + +static void mlxsw_sp_erif_entry_clear(struct devlink_dpipe_entry *entry) +{ + unsigned int value_count, value_index; + struct devlink_dpipe_value *value; + + value = entry->action_values; + value_count = entry->action_values_count; + for (value_index = 0; value_index < value_count; value_index++) { + kfree(value[value_index].value); + kfree(value[value_index].mask); + } + + value = entry->match_values; + value_count = entry->match_values_count; + for (value_index = 0; value_index < value_count; value_index++) { + kfree(value[value_index].value); + kfree(value[value_index].mask); + } +} + +static void +mlxsw_sp_erif_match_action_prepare(struct devlink_dpipe_match *match, + struct devlink_dpipe_action *action) +{ + 
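The "mlxsw_meta" header above is how the driver describes its hardware pipeline to devlink: erif_port is a 32-bit field whose DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX mapping lets userspace translate hardware router-interface values back to netdevs, while l3_forward and l3_drop are single-bit action flags. Extending the model would just be another array entry; a purely hypothetical fourth field, for illustration only (not part of this patch):

static struct devlink_dpipe_field mlxsw_sp_dpipe_field_example = {
	.name = "l3_host",	/* hypothetical flag */
	.id = 3,		/* next free mlxsw_sp_field_metadata_id */
	.bitwidth = 1,
};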
action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action->header = &mlxsw_sp_dpipe_header_metadata; + action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD; + + match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match->header = &mlxsw_sp_dpipe_header_metadata; + match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT; +} + +static int mlxsw_sp_erif_entry_prepare(struct devlink_dpipe_entry *entry, + struct devlink_dpipe_value *match_value, + struct devlink_dpipe_match *match, + struct devlink_dpipe_value *action_value, + struct devlink_dpipe_action *action) +{ + entry->match_values = match_value; + entry->match_values_count = 1; + + entry->action_values = action_value; + entry->action_values_count = 1; + + match_value->match = match; + match_value->value_size = sizeof(u32); + match_value->value = kmalloc(match_value->value_size, GFP_KERNEL); + if (!match_value->value) + return -ENOMEM; + + action_value->action = action; + action_value->value_size = sizeof(u32); + action_value->value = kmalloc(action_value->value_size, GFP_KERNEL); + if (!action_value->value) + goto err_action_alloc; + return 0; + +err_action_alloc: + kfree(match_value->value); + return -ENOMEM; +} + +static int mlxsw_sp_erif_entry_get(struct mlxsw_sp *mlxsw_sp, + struct devlink_dpipe_entry *entry, + struct mlxsw_sp_rif *rif, + bool counters_enabled) +{ + u32 *action_value; + u32 *rif_value; + u64 cnt; + int err; + + /* Set Match RIF index */ + rif_value = entry->match_values->value; + *rif_value = mlxsw_sp_rif_index(rif); + entry->match_values->mapping_value = mlxsw_sp_rif_dev_ifindex(rif); + entry->match_values->mapping_valid = true; + + /* Set Action Forwarding */ + action_value = entry->action_values->value; + *action_value = 1; + + entry->counter_valid = false; + entry->counter = 0; + if (!counters_enabled) + return 0; + + entry->index = mlxsw_sp_rif_index(rif); + err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif, + MLXSW_SP_RIF_COUNTER_EGRESS, + &cnt); + if (!err) { + entry->counter = cnt; + entry->counter_valid = true; + } + return 0; +} + +static int +mlxsw_sp_table_erif_entries_dump(void *priv, bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx) +{ + struct devlink_dpipe_value match_value = {{0}}, action_value = {{0}}; + struct devlink_dpipe_action action = {0}; + struct devlink_dpipe_match match = {0}; + struct devlink_dpipe_entry entry = {0}; + struct mlxsw_sp *mlxsw_sp = priv; + unsigned int rif_count; + int i, j; + int err; + + mlxsw_sp_erif_match_action_prepare(&match, &action); + err = mlxsw_sp_erif_entry_prepare(&entry, &match_value, &match, + &action_value, &action); + if (err) + return err; + + rif_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + rtnl_lock(); + i = 0; +start_again: + err = devlink_dpipe_entry_ctx_prepare(dump_ctx); + if (err) + return err; + j = 0; + for (; i < rif_count; i++) { + if (!mlxsw_sp->rifs[i]) + continue; + err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, + mlxsw_sp->rifs[i], + counters_enabled); + if (err) + goto err_entry_get; + err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry); + if (err) { + if (err == -EMSGSIZE) { + if (!j) + goto err_entry_append; + break; + } + goto err_entry_append; + } + j++; + } + + devlink_dpipe_entry_ctx_close(dump_ctx); + if (i != rif_count) + goto start_again; + rtnl_unlock(); + + mlxsw_sp_erif_entry_clear(&entry); + return 0; +err_entry_append: +err_entry_get: + rtnl_unlock(); + mlxsw_sp_erif_entry_clear(&entry); + return err; +} + +static int mlxsw_sp_table_erif_counters_update(void *priv, bool enable) 
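mlxsw_sp_table_erif_entries_dump() above streams one entry per RIF and restarts with a fresh message whenever devlink_dpipe_entry_ctx_append() returns -EMSGSIZE on a non-empty message (j != 0); -EMSGSIZE with nothing queued is fatal, since that entry can never fit. (The early return when devlink_dpipe_entry_ctx_prepare() fails appears to leave the rtnl lock held, which looks like an oversight rather than a design point.) The batching pattern in isolation, with hypothetical msg_* helpers standing in for the devlink context calls:

#include <errno.h>

struct dump_ctx;				/* opaque message context */
int msg_prepare(struct dump_ctx *ctx);		/* hypothetical */
int msg_append(struct dump_ctx *ctx, unsigned int entry);
void msg_close(struct dump_ctx *ctx);

static int dump_all(struct dump_ctx *ctx, unsigned int count)
{
	unsigned int i = 0, in_msg;
	int err;

again:
	err = msg_prepare(ctx);
	if (err)
		return err;
	for (in_msg = 0; i < count; i++, in_msg++) {
		err = msg_append(ctx, i);
		if (err == -EMSGSIZE && in_msg > 0)
			break;		/* flush, then retry entry i */
		if (err)
			return err;	/* did not fit even when empty */
	}
	msg_close(ctx);
	if (i < count)
		goto again;		/* continue from the unsent entry */
	return 0;
}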
+{ + struct mlxsw_sp *mlxsw_sp = priv; + int i; + + rtnl_lock(); + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { + if (!mlxsw_sp->rifs[i]) + continue; + if (enable) + mlxsw_sp_rif_counter_alloc(mlxsw_sp, + mlxsw_sp->rifs[i], + MLXSW_SP_RIF_COUNTER_EGRESS); + else + mlxsw_sp_rif_counter_free(mlxsw_sp, + mlxsw_sp->rifs[i], + MLXSW_SP_RIF_COUNTER_EGRESS); + } + rtnl_unlock(); + return 0; +} + +static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = { + .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump, + .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump, + .entries_dump = mlxsw_sp_table_erif_entries_dump, + .counters_set_update = mlxsw_sp_table_erif_counters_update, +}; + +static int mlxsw_sp_dpipe_erif_table_init(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + u64 table_size; + + table_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + return devlink_dpipe_table_register(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_ERIF, + &mlxsw_sp_erif_ops, + mlxsw_sp, table_size, + false); +} + +static void mlxsw_sp_dpipe_erif_table_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + devlink_dpipe_table_unregister(devlink, MLXSW_SP_DPIPE_TABLE_NAME_ERIF); +} + +int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + int err; + + err = devlink_dpipe_headers_register(devlink, + &mlxsw_sp_dpipe_headers); + if (err) + return err; + err = mlxsw_sp_dpipe_erif_table_init(mlxsw_sp); + if (err) + goto err_erif_register; + return 0; + +err_erif_register: + devlink_dpipe_headers_unregister(priv_to_devlink(mlxsw_sp->core)); + return err; +} + +void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp); + devlink_dpipe_headers_unregister(devlink); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h new file mode 100644 index 000000000000..d2089298cba3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h @@ -0,0 +1,43 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_PIPELINE_H_ +#define _MLXSW_PIPELINE_H_ + +int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp); + +#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif" + +#endif /* _MLXSW_PIPELINE_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ae6cccc666e4..7d87e23578a3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -39,6 +39,7 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> +#include <net/tc_act/tc_vlan.h> #include "spectrum.h" #include "core_acl_flex_keys.h" @@ -55,6 +56,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (tc_no_actions(exts)) return 0; + /* Count action is inserted first */ + err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei); + if (err) + return err; + tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { if (is_tcf_gact_shot(a)) { @@ -65,6 +71,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev; + err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei, + MLXSW_SP_DUMMY_FID); + if (err) + return err; + out_dev = __dev_get_by_index(dev_net(dev), ifindex); if (out_dev == dev) out_dev = NULL; @@ -73,6 +84,15 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, out_dev); if (err) return err; + } else if (is_tcf_vlan(a)) { + u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); + u32 action = tcf_vlan_action(a); + u8 prio = tcf_vlan_push_prio(a); + u16 vid = tcf_vlan_push_vid(a); + + return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, + action, vid, + proto, prio); } else { dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); return -EOPNOTSUPP; @@ -173,7 +193,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS))) { + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); return -EOPNOTSUPP; } @@ -234,6 +255,27 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, sizeof(key->src)); } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->key); + struct flow_dissector_key_vlan *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->mask); + if (mask->vlan_id != 0) + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_VID, + key->vlan_id, + mask->vlan_id); + if (mask->vlan_priority != 0) + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_PCP, + key->vlan_priority, + mask->vlan_priority); + } + if (addr_type == 
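flower hands the driver a key/mask pair per dissector, and a zero mask means "don't care", which is why the VLAN callbacks above are guarded by mask checks before emitting VID and PCP into the ACL key. The semantics in short (values hypothetical):

/* vlan_id key = 10, mask = 0xfff	-> match VID 10 exactly
 * vlan_id key = 10, mask = 0		-> VID ignored, no key emitted
 * vlan_priority key = 5, mask = 0x7	-> match PCP 5 exactly
 */

With this in place, a VLAN-matching filter, created with something along the lines of `tc filter add dev swp1 parent ffff: protocol 802.1Q flower vlan_id 10 action drop`, can be offloaded instead of being rejected with -EOPNOTSUPP.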
FLOW_DISSECTOR_KEY_IPV4_ADDRS) mlxsw_sp_flower_parse_ipv4(rulei, f); @@ -314,3 +356,47 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); } + +int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, + struct tc_cls_flower_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + struct tc_action *a; + LIST_HEAD(actions); + u64 packets; + u64 lastuse; + u64 bytes; + int err; + + ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, + ingress, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (WARN_ON(IS_ERR(ruleset))) + return -EINVAL; + + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie); + if (!rule) + return -EINVAL; + + err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes, + &lastuse); + if (err) + goto err_rule_get_stats; + + preempt_disable(); + + tcf_exts_to_list(f->exts, &actions); + list_for_each_entry(a, &actions, list) + tcf_action_stats_update(a, bytes, packets, lastuse); + + preempt_enable(); + + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return 0; + +err_rule_get_stats: + mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index ac321e8e5c1a..26c26cd30c3d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -45,7 +45,8 @@ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE) #define MLXSW_SP_CHUNK_MAX 32 -int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count) +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, + u32 *p_entry_index) { int entry_index; int size; @@ -72,7 +73,8 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count) for (i = 0; i < type_entries; i++) set_bit(entry_index + i, mlxsw_sp->kvdl.usage); - return entry_index; + *p_entry_index = entry_index; + return 0; } return -ENOBUFS; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index bd8de6b9be71..33cec1cc1642 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -41,14 +41,184 @@ #include <linux/in6.h> #include <linux/notifier.h> #include <linux/inetdevice.h> +#include <linux/netdevice.h> #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> #include <net/ip_fib.h> +#include <net/fib_rules.h> +#include <net/l3mdev.h> #include "spectrum.h" #include "core.h" #include "reg.h" +#include "spectrum_cnt.h" +#include "spectrum_dpipe.h" +#include "spectrum_router.h" + +struct mlxsw_sp_rif { + struct list_head nexthop_list; + struct list_head neigh_list; + struct net_device *dev; + struct mlxsw_sp_fid *f; + unsigned char addr[ETH_ALEN]; + int mtu; + u16 rif_index; + u16 vr_id; + unsigned int counter_ingress; + bool counter_ingress_valid; + unsigned int counter_egress; + bool counter_egress_valid; +}; + +static unsigned int * +mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir) +{ + switch (dir) { + case MLXSW_SP_RIF_COUNTER_EGRESS: + return &rif->counter_egress; + case MLXSW_SP_RIF_COUNTER_INGRESS: + return &rif->counter_ingress; + } + return NULL; +} + +static bool +mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif, + enum 
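mlxsw_sp_kvdl_alloc() used to multiplex the allocated index and the error code into one int return value, which cannot cover a full u32 index space; the index now travels through an out-parameter and the return value is a plain 0/-errno. The call-site change this forces, as seen in the nexthop-group refresh further down:

	/* before: index and errno share the return value */
	ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
	if (ret < 0)
		goto set_trap;
	adj_index = ret;

	/* after: full u32 range available for the index */
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err)
		goto set_trap;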
mlxsw_sp_rif_counter_dir dir) +{ + switch (dir) { + case MLXSW_SP_RIF_COUNTER_EGRESS: + return rif->counter_egress_valid; + case MLXSW_SP_RIF_COUNTER_INGRESS: + return rif->counter_ingress_valid; + } + return false; +} + +static void +mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir, + bool valid) +{ + switch (dir) { + case MLXSW_SP_RIF_COUNTER_EGRESS: + rif->counter_egress_valid = valid; + break; + case MLXSW_SP_RIF_COUNTER_INGRESS: + rif->counter_ingress_valid = valid; + break; + } +} + +static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, + unsigned int counter_index, bool enable, + enum mlxsw_sp_rif_counter_dir dir) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + bool is_egress = false; + int err; + + if (dir == MLXSW_SP_RIF_COUNTER_EGRESS) + is_egress = true; + mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable, + is_egress); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir, u64 *cnt) +{ + char ricnt_pl[MLXSW_REG_RICNT_LEN]; + unsigned int *p_counter_index; + bool valid; + int err; + + valid = mlxsw_sp_rif_counter_valid_get(rif, dir); + if (!valid) + return -EINVAL; + + p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); + if (!p_counter_index) + return -EINVAL; + mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index, + MLXSW_REG_RICNT_OPCODE_NOP); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl); + if (err) + return err; + *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl); + return 0; +} + +static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp, + unsigned int counter_index) +{ + char ricnt_pl[MLXSW_REG_RICNT_LEN]; + + mlxsw_reg_ricnt_pack(ricnt_pl, counter_index, + MLXSW_REG_RICNT_OPCODE_CLEAR); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl); +} + +int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir) +{ + unsigned int *p_counter_index; + int err; + + p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); + if (!p_counter_index) + return -EINVAL; + err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF, + p_counter_index); + if (err) + return err; + + err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index); + if (err) + goto err_counter_clear; + + err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index, + *p_counter_index, true, dir); + if (err) + goto err_counter_edit; + mlxsw_sp_rif_counter_valid_set(rif, dir, true); + return 0; + +err_counter_edit: +err_counter_clear: + mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF, + *p_counter_index); + return err; +} + +void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir) +{ + unsigned int *p_counter_index; + + p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir); + if (WARN_ON(!p_counter_index)) + return; + mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index, + *p_counter_index, false, dir); + mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF, + *p_counter_index); + mlxsw_sp_rif_counter_valid_set(rif, dir, false); +} + +static struct mlxsw_sp_rif * +mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); #define 
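The RIF counter helpers above enforce a strict bring-up order: reserve an index from the RIF sub-pool, clear the hardware counter through RICNT so a previous user's stale values are never reported, bind it to the interface through RITR, and only then mark it valid; teardown runs the same steps in reverse. A usage sketch (egress packet count, as consumed by the dpipe erif table earlier):

	u64 packets;

	if (!mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif,
					MLXSW_SP_RIF_COUNTER_EGRESS) &&
	    !mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
					    MLXSW_SP_RIF_COUNTER_EGRESS,
					    &packets))
		dev_info(mlxsw_sp->bus_info->dev,
			 "rif %u egress packets: %llu\n",
			 mlxsw_sp_rif_index(rif),
			 (unsigned long long)packets);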
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \ for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT) @@ -89,12 +259,6 @@ mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1, } static void -mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage) -{ - memset(prefix_usage, 0, sizeof(*prefix_usage)); -} - -static void mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage, unsigned char prefix_len) { @@ -125,7 +289,7 @@ struct mlxsw_sp_fib_node { struct list_head entry_list; struct list_head list; struct rhash_head ht_node; - struct mlxsw_sp_vr *vr; + struct mlxsw_sp_fib *fib; struct mlxsw_sp_fib_key key; }; @@ -149,13 +313,17 @@ struct mlxsw_sp_fib_entry { struct mlxsw_sp_fib { struct rhashtable ht; struct list_head node_list; + struct mlxsw_sp_vr *vr; + struct mlxsw_sp_lpm_tree *lpm_tree; unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; struct mlxsw_sp_prefix_usage prefix_usage; + enum mlxsw_sp_l3proto proto; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; -static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) +static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr, + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_fib *fib; int err; @@ -167,6 +335,8 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) if (err) goto err_rhashtable_init; INIT_LIST_HEAD(&fib->node_list); + fib->proto = proto; + fib->vr = vr; return fib; err_rhashtable_init: @@ -177,24 +347,21 @@ err_rhashtable_init: static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib) { WARN_ON(!list_empty(&fib->node_list)); + WARN_ON(fib->lpm_tree); rhashtable_destroy(&fib->ht); kfree(fib); } static struct mlxsw_sp_lpm_tree * -mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved) +mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp) { static struct mlxsw_sp_lpm_tree *lpm_tree; int i; - for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { - lpm_tree = &mlxsw_sp->router.lpm_trees[i]; - if (lpm_tree->ref_count == 0) { - if (one_reserved) - one_reserved = false; - else - return lpm_tree; - } + for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) { + lpm_tree = &mlxsw_sp->router.lpm.trees[i]; + if (lpm_tree->ref_count == 0) + return lpm_tree; } return NULL; } @@ -248,12 +415,12 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_lpm_tree * mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_prefix_usage *prefix_usage, - enum mlxsw_sp_l3proto proto, bool one_reserved) + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_lpm_tree *lpm_tree; int err; - lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved); + lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp); if (!lpm_tree) return ERR_PTR(-EBUSY); lpm_tree->proto = proto; @@ -283,13 +450,13 @@ static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_lpm_tree * mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_prefix_usage *prefix_usage, - enum mlxsw_sp_l3proto proto, bool one_reserved) + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_lpm_tree *lpm_tree; int i; - for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { - lpm_tree = &mlxsw_sp->router.lpm_trees[i]; + for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) { + lpm_tree = &mlxsw_sp->router.lpm.trees[i]; if (lpm_tree->ref_count != 0 && lpm_tree->proto == proto && mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, @@ -297,7 +464,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, goto inc_ref_count; } lpm_tree = 
mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, - proto, one_reserved); + proto); if (IS_ERR(lpm_tree)) return lpm_tree; @@ -314,15 +481,41 @@ static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp) +#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */ + +static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_lpm_tree *lpm_tree; + u64 max_trees; int i; - for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { - lpm_tree = &mlxsw_sp->router.lpm_trees[i]; + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES)) + return -EIO; + + max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES); + mlxsw_sp->router.lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN; + mlxsw_sp->router.lpm.trees = kcalloc(mlxsw_sp->router.lpm.tree_count, + sizeof(struct mlxsw_sp_lpm_tree), + GFP_KERNEL); + if (!mlxsw_sp->router.lpm.trees) + return -ENOMEM; + + for (i = 0; i < mlxsw_sp->router.lpm.tree_count; i++) { + lpm_tree = &mlxsw_sp->router.lpm.trees[i]; lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN; } + + return 0; +} + +static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) +{ + kfree(mlxsw_sp->router.lpm.trees); +} + +static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) +{ + return !!vr->fib4; } static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@ -332,31 +525,31 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; - if (!vr->used) + if (!mlxsw_sp_vr_is_used(vr)) return vr; } return NULL; } static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vr *vr) + const struct mlxsw_sp_fib *fib) { char raltb_pl[MLXSW_REG_RALTB_LEN]; - mlxsw_reg_raltb_pack(raltb_pl, vr->id, - (enum mlxsw_reg_ralxx_protocol) vr->proto, - vr->lpm_tree->id); + mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id, + (enum mlxsw_reg_ralxx_protocol) fib->proto, + fib->lpm_tree->id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); } static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vr *vr) + const struct mlxsw_sp_fib *fib) { char raltb_pl[MLXSW_REG_RALTB_LEN]; /* Bind to tree 0 which is default */ - mlxsw_reg_raltb_pack(raltb_pl, vr->id, - (enum mlxsw_reg_ralxx_protocol) vr->proto, 0); + mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id, + (enum mlxsw_reg_ralxx_protocol) fib->proto, 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); } @@ -369,8 +562,7 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id) } static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp, - u32 tb_id, - enum mlxsw_sp_l3proto proto) + u32 tb_id) { struct mlxsw_sp_vr *vr; int i; @@ -379,69 +571,50 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { vr = &mlxsw_sp->router.vrs[i]; - if (vr->used && vr->proto == proto && vr->tb_id == tb_id) + if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id) return vr; } return NULL; } +static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, + enum mlxsw_sp_l3proto proto) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return vr->fib4; + case MLXSW_SP_L3_PROTO_IPV6: + BUG_ON(1); + } + return NULL; +} + static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, - unsigned char prefix_len, - u32 tb_id, - enum mlxsw_sp_l3proto proto) + u32 tb_id) { - struct 
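LPM trees are a scarce per-ASIC resource (MAX_LPM_TREES, with trees 0 and 1 staying reserved per MLXSW_SP_LPM_TREE_MIN), so mlxsw_sp_lpm_tree_get() first tries to share: any in-use tree whose prefix usage matches exactly gets its ref_count bumped, and only a miss allocates a free slot; mlxsw_sp_lpm_tree_put() destroys the tree when the last reference drops. The pattern reduced to its essentials (all names are placeholders):

#include <stdbool.h>

struct key;					/* stands in for prefix usage */

struct tree {
	unsigned int ref_count;
	struct key *key;
};

bool key_eq(const struct key *a, const struct key *b);	/* hypothetical */
struct tree *tree_create(const struct key *key);	/* NULL: no free slot */
void tree_destroy(struct tree *t);

static struct tree *tree_get(struct tree *trees, unsigned int count,
			     const struct key *key)
{
	struct tree *t;
	unsigned int i;

	for (i = 0; i < count; i++) {
		t = &trees[i];
		if (t->ref_count && key_eq(t->key, key)) {
			t->ref_count++;		/* share the existing tree */
			return t;
		}
	}
	t = tree_create(key);
	if (t)
		t->ref_count = 1;
	return t;
}

static void tree_put(struct tree *t)
{
	if (--t->ref_count == 0)
		tree_destroy(t);
}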
mlxsw_sp_prefix_usage req_prefix_usage; - struct mlxsw_sp_lpm_tree *lpm_tree; struct mlxsw_sp_vr *vr; - int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) return ERR_PTR(-EBUSY); - vr->fib = mlxsw_sp_fib_create(); - if (IS_ERR(vr->fib)) - return ERR_CAST(vr->fib); - - vr->proto = proto; + vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(vr->fib4)) + return ERR_CAST(vr->fib4); vr->tb_id = tb_id; - mlxsw_sp_prefix_usage_zero(&req_prefix_usage); - mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len); - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, - proto, true); - if (IS_ERR(lpm_tree)) { - err = PTR_ERR(lpm_tree); - goto err_tree_get; - } - vr->lpm_tree = lpm_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); - if (err) - goto err_tree_bind; - - vr->used = true; return vr; - -err_tree_bind: - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); -err_tree_get: - mlxsw_sp_fib_destroy(vr->fib); - - return ERR_PTR(err); } -static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vr *vr) +static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) { - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); - mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); - mlxsw_sp_fib_destroy(vr->fib); - vr->used = false; + mlxsw_sp_fib_destroy(vr->fib4); + vr->fib4 = NULL; } static int -mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, +mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib, struct mlxsw_sp_prefix_usage *req_prefix_usage) { - struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree; + struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree; struct mlxsw_sp_lpm_tree *new_tree; int err; @@ -449,7 +622,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, return 0; new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, - vr->proto, false); + fib->proto); if (IS_ERR(new_tree)) { /* We failed to get a tree according to the required * prefix usage. However, the current tree might be still good @@ -463,8 +636,8 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, } /* Prevent packet loss by overwriting existing binding */ - vr->lpm_tree = new_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); + fib->lpm_tree = new_tree; + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib); if (err) goto err_tree_bind; mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); @@ -472,53 +645,26 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, return 0; err_tree_bind: - vr->lpm_tree = lpm_tree; + fib->lpm_tree = lpm_tree; mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); return err; } -static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, - unsigned char prefix_len, - u32 tb_id, - enum mlxsw_sp_l3proto proto) +static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id) { struct mlxsw_sp_vr *vr; - int err; tb_id = mlxsw_sp_fix_tb_id(tb_id); - vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto); - if (!vr) { - vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto); - if (IS_ERR(vr)) - return vr; - } else { - struct mlxsw_sp_prefix_usage req_prefix_usage; - - mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, - &vr->fib->prefix_usage); - mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len); - /* Need to replace LPM tree in case new prefix is required. 
*/ - err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr, - &req_prefix_usage); - if (err) - return ERR_PTR(err); - } + vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id); + if (!vr) + vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id); return vr; } -static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) +static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr) { - /* Destroy virtual router entity in case the associated FIB is empty - * and allow it to be used for other tables in future. Otherwise, - * check if some prefix usage did not disappear and change tree if - * that is the case. Note that in case new, smaller tree cannot be - * allocated, the original one will be kept being used. - */ - if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage)) - mlxsw_sp_vr_destroy(mlxsw_sp, vr); - else - mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr, - &vr->fib->prefix_usage); + if (!vr->rif_count && list_empty(&vr->fib4->node_list)) + mlxsw_sp_vr_destroy(vr); } static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) @@ -627,14 +773,14 @@ static struct mlxsw_sp_neigh_entry * mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) { struct mlxsw_sp_neigh_entry *neigh_entry; - struct mlxsw_sp_rif *r; + struct mlxsw_sp_rif *rif; int err; - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); - if (!r) + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); + if (!rif) return ERR_PTR(-EINVAL); - neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif); + neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index); if (!neigh_entry) return ERR_PTR(-ENOMEM); @@ -642,7 +788,7 @@ mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) if (err) goto err_neigh_entry_insert; - list_add(&neigh_entry->rif_list_node, &r->neigh_list); + list_add(&neigh_entry->rif_list_node, &rif->neigh_list); return neigh_entry; @@ -1050,22 +1196,22 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) } static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_rif *r) + const struct mlxsw_sp_rif *rif) { char rauht_pl[MLXSW_REG_RAUHT_LEN]; mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, - r->rif, r->addr); + rif->rif_index, rif->addr); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); } static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r) + struct mlxsw_sp_rif *rif) { struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; - mlxsw_sp_neigh_rif_flush(mlxsw_sp, r); - list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list, + mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif); + list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, rif_list_node) mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); } @@ -1082,7 +1228,7 @@ struct mlxsw_sp_nexthop { */ struct rhash_head ht_node; struct mlxsw_sp_nexthop_key key; - struct mlxsw_sp_rif *r; + struct mlxsw_sp_rif *rif; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. 
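The virtual-router rework above inverts the old ownership: a VR no longer picks an LPM tree at creation time (it used to need a prefix length just to be created), it is simply "used" whenever it has a fib4 table, and mlxsw_sp_vr_put() releases it once no RIFs and no FIB nodes reference it. Tree selection now follows the FIB's prefix usage, a bitmap of prefix lengths; for example:

/* FIB holds /24 and /32 routes	-> prefix_usage = {24, 32}
 * first /16 route arrives	-> req_prefix_usage = {16, 24, 32}
 * usage no longer equals the bound tree's, so
 * mlxsw_sp_vr_lpm_tree_check() binds a tree matching the new usage,
 * or keeps the old one if the request is still a subset of it and no
 * better tree can be allocated.
 */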
*/ @@ -1109,7 +1255,7 @@ struct mlxsw_sp_nexthop_group { u16 ecmp_size; u16 count; struct mlxsw_sp_nexthop nexthops[0]; -#define nh_rif nexthops[0].r +#define nh_rif nexthops[0].rif }; static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { @@ -1171,7 +1317,7 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vr *vr, + const struct mlxsw_sp_fib *fib, u32 adj_index, u16 ecmp_size, u32 new_adj_index, u16 new_ecmp_size) @@ -1179,8 +1325,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, char raleu_pl[MLXSW_REG_RALEU_LEN]; mlxsw_reg_raleu_pack(raleu_pl, - (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id, - adj_index, ecmp_size, new_adj_index, + (enum mlxsw_reg_ralxx_protocol) fib->proto, + fib->vr->id, adj_index, ecmp_size, new_adj_index, new_ecmp_size); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl); } @@ -1190,14 +1336,14 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, u32 old_adj_index, u16 old_ecmp_size) { struct mlxsw_sp_fib_entry *fib_entry; - struct mlxsw_sp_vr *vr = NULL; + struct mlxsw_sp_fib *fib = NULL; int err; list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { - if (vr == fib_entry->fib_node->vr) + if (fib == fib_entry->fib_node->fib) continue; - vr = fib_entry->fib_node->vr; - err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr, + fib = fib_entry->fib_node->fib; + err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib, old_adj_index, old_ecmp_size, nh_grp->adj_index, @@ -1280,7 +1426,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, bool old_adj_index_valid; u32 old_adj_index; u16 old_ecmp_size; - int ret; int i; int err; @@ -1318,15 +1463,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, */ goto set_trap; - ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size); - if (ret < 0) { + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index); + if (err) { /* We ran out of KVD linear space, just set the * trap and let everything flow through kernel. 
*/ dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n"); goto set_trap; } - adj_index = ret; old_adj_index_valid = nh_grp->adj_index_valid; old_adj_index = nh_grp->adj_index; old_ecmp_size = nh_grp->ecmp_size; @@ -1399,22 +1543,22 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, } static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh, - struct mlxsw_sp_rif *r) + struct mlxsw_sp_rif *rif) { - if (nh->r) + if (nh->rif) return; - nh->r = r; - list_add(&nh->rif_list_node, &r->nexthop_list); + nh->rif = rif; + list_add(&nh->rif_list_node, &rif->nexthop_list); } static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh) { - if (!nh->r) + if (!nh->rif) return; list_del(&nh->rif_list_node); - nh->r = NULL; + nh->rif = NULL; } static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, @@ -1505,7 +1649,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, { struct net_device *dev = fib_nh->nh_dev; struct in_device *in_dev; - struct mlxsw_sp_rif *r; + struct mlxsw_sp_rif *rif; int err; nh->nh_grp = nh_grp; @@ -1514,15 +1658,18 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; + if (!dev) + return 0; + in_dev = __in_dev_get_rtnl(dev); if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && fib_nh->nh_flags & RTNH_F_LINKDOWN) return 0; - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!r) + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) return 0; - mlxsw_sp_nexthop_rif_init(nh, r); + mlxsw_sp_nexthop_rif_init(nh, rif); err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); if (err) @@ -1548,7 +1695,7 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_key key; struct mlxsw_sp_nexthop *nh; - struct mlxsw_sp_rif *r; + struct mlxsw_sp_rif *rif; if (mlxsw_sp->router.aborted) return; @@ -1558,13 +1705,13 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, if (WARN_ON_ONCE(!nh)) return; - r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev); - if (!r) + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev); + if (!rif) return; switch (event) { case FIB_EVENT_NH_ADD: - mlxsw_sp_nexthop_rif_init(nh, r); + mlxsw_sp_nexthop_rif_init(nh, rif); mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); break; case FIB_EVENT_NH_DEL: @@ -1577,11 +1724,11 @@ static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, } static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r) + struct mlxsw_sp_rif *rif) { struct mlxsw_sp_nexthop *nh, *tmp; - list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) { + list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) { mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_rif_fini(nh); mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); @@ -1699,7 +1846,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) { fib_entry->offloaded = true; - switch (fib_entry->fib_node->vr->proto) { + switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: fib_info_offload_inc(fib_entry->nh_group->key.fi); break; @@ -1711,7 +1858,7 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) static void mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) { - switch (fib_entry->fib_node->vr->proto) { + switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: fib_info_offload_dec(fib_entry->nh_group->key.fi); break; @@ -1751,8 +1898,8 @@ static int 
mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, enum mlxsw_reg_ralue_op op) { char ralue_pl[MLXSW_REG_RALUE_LEN]; + struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; enum mlxsw_reg_ralue_trap_action trap_action; u16 trap_id = 0; u32 adjacency_index = 0; @@ -1772,8 +1919,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, } mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, fib_entry->fib_node->key.prefix_len, + (enum mlxsw_reg_ralxx_protocol) fib->proto, op, + fib->vr->id, fib_entry->fib_node->key.prefix_len, *p_dip); mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, adjacency_index, ecmp_size); @@ -1784,27 +1931,28 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { - struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif; + struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif; + struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; enum mlxsw_reg_ralue_trap_action trap_action; char ralue_pl[MLXSW_REG_RALUE_LEN]; u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; u16 trap_id = 0; - u16 rif = 0; + u16 rif_index = 0; if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; - rif = r->rif; + rif_index = rif->rif_index; } else { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, fib_entry->fib_node->key.prefix_len, + (enum mlxsw_reg_ralxx_protocol) fib->proto, op, + fib->vr->id, fib_entry->fib_node->key.prefix_len, *p_dip); - mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif); + mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, + rif_index); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } @@ -1812,13 +1960,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { + struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; char ralue_pl[MLXSW_REG_RALUE_LEN]; u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; - struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr; mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) vr->proto, op, - vr->id, fib_entry->fib_node->key.prefix_len, + (enum mlxsw_reg_ralxx_protocol) fib->proto, op, + fib->vr->id, fib_entry->fib_node->key.prefix_len, *p_dip); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); @@ -1845,7 +1993,7 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, { int err = -EINVAL; - switch (fib_entry->fib_node->vr->proto) { + switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); break; @@ -1877,17 +2025,29 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, { struct fib_info *fi = fen_info->fi; - if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) { + switch (fen_info->type) { + case RTN_BROADCAST: /* fall through */ + case RTN_LOCAL: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; return 0; - } - if (fen_info->type != RTN_UNICAST) - return -EINVAL; - if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) + case RTN_UNREACHABLE: /* fall through */ + case RTN_BLACKHOLE: /* 
fall through */ + case RTN_PROHIBIT: + /* Packets hitting these routes need to be trapped, but + * can do so with a lower priority than packets directed + * at the host, so use action type local instead of trap. + */ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; - else - fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; - return 0; + return 0; + case RTN_UNICAST: + if (fi->fib_nh->nh_scope != RT_SCOPE_LINK) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + else + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; + return 0; + default: + return -EINVAL; + } } static struct mlxsw_sp_fib_entry * @@ -1996,7 +2156,7 @@ mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr, } static struct mlxsw_sp_fib_node * -mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr, +mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr, size_t addr_len, unsigned char prefix_len) { struct mlxsw_sp_fib_node *fib_node; @@ -2006,18 +2166,15 @@ mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr, return NULL; INIT_LIST_HEAD(&fib_node->entry_list); - list_add(&fib_node->list, &vr->fib->node_list); + list_add(&fib_node->list, &fib->node_list); memcpy(fib_node->key.addr, addr, addr_len); fib_node->key.prefix_len = prefix_len; - mlxsw_sp_fib_node_insert(vr->fib, fib_node); - fib_node->vr = vr; return fib_node; } static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node) { - mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node); list_del(&fib_node->list); WARN_ON(!list_empty(&fib_node->entry_list)); kfree(fib_node); @@ -2034,7 +2191,7 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node) { unsigned char prefix_len = fib_node->key.prefix_len; - struct mlxsw_sp_fib *fib = fib_node->vr->fib; + struct mlxsw_sp_fib *fib = fib_node->fib; if (fib->prefix_ref_count[prefix_len]++ == 0) mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); @@ -2043,32 +2200,98 @@ static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node) static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node) { unsigned char prefix_len = fib_node->key.prefix_len; - struct mlxsw_sp_fib *fib = fib_node->vr->fib; + struct mlxsw_sp_fib *fib = fib_node->fib; if (--fib->prefix_ref_count[prefix_len] == 0) mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); } +static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node, + struct mlxsw_sp_fib *fib) +{ + struct mlxsw_sp_prefix_usage req_prefix_usage; + struct mlxsw_sp_lpm_tree *lpm_tree; + int err; + + err = mlxsw_sp_fib_node_insert(fib, fib_node); + if (err) + return err; + fib_node->fib = fib; + + mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage); + mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len); + + if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) { + err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, + &req_prefix_usage); + if (err) + goto err_tree_check; + } else { + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, + fib->proto); + if (IS_ERR(lpm_tree)) + return PTR_ERR(lpm_tree); + fib->lpm_tree = lpm_tree; + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib); + if (err) + goto err_tree_bind; + } + + mlxsw_sp_fib_node_prefix_inc(fib_node); + + return 0; + +err_tree_bind: + fib->lpm_tree = NULL; + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); +err_tree_check: + fib_node->fib = NULL; + mlxsw_sp_fib_node_remove(fib, 
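mlxsw_sp_fib_node_init()/_fini() above are where tree ownership now lives: the first node inserted into an empty FIB allocates and binds an LPM tree, every later insert or remove re-runs the tree check, and removing the last node rebinds the VR to the default tree 0 and releases the tree. (One rough edge: when mlxsw_sp_lpm_tree_get() fails, the function returns without the mlxsw_sp_fib_node_remove() unwind, seemingly leaving the node in the hash table.) The lifecycle in short:

/* first node into an empty FIB -> lpm_tree_get() + mlxsw_sp_vr_lpm_tree_bind()
 * any later insert or remove   -> mlxsw_sp_vr_lpm_tree_check() may swap trees
 * last node removed            -> rebind VR to tree 0 + mlxsw_sp_lpm_tree_put()
 */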
fib_node); + return err; +} + +static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree; + struct mlxsw_sp_fib *fib = fib_node->fib; + + mlxsw_sp_fib_node_prefix_dec(fib_node); + + if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) { + mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); + fib->lpm_tree = NULL; + mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); + } else { + mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage); + } + + fib_node->fib = NULL; + mlxsw_sp_fib_node_remove(fib, fib_node); +} + static struct mlxsw_sp_fib_node * mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib_node *fib_node; + struct mlxsw_sp_fib *fib; struct mlxsw_sp_vr *vr; int err; - vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->dst_len, fen_info->tb_id, - MLXSW_SP_L3_PROTO_IPV4); + vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id); if (IS_ERR(vr)) return ERR_CAST(vr); + fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4); - fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst, + fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst, sizeof(fen_info->dst), fen_info->dst_len); if (fib_node) return fib_node; - fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst, + fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst, sizeof(fen_info->dst), fen_info->dst_len); if (!fib_node) { @@ -2076,22 +2299,29 @@ mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, goto err_fib_node_create; } + err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib); + if (err) + goto err_fib_node_init; + return fib_node; +err_fib_node_init: + mlxsw_sp_fib_node_destroy(fib_node); err_fib_node_create: - mlxsw_sp_vr_put(mlxsw_sp, vr); + mlxsw_sp_vr_put(vr); return ERR_PTR(err); } static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { - struct mlxsw_sp_vr *vr = fib_node->vr; + struct mlxsw_sp_vr *vr = fib_node->fib->vr; if (!list_empty(&fib_node->entry_list)) return; + mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node); mlxsw_sp_fib_node_destroy(fib_node); - mlxsw_sp_vr_put(mlxsw_sp, vr); + mlxsw_sp_vr_put(vr); } static struct mlxsw_sp_fib_entry * @@ -2236,8 +2466,6 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, if (err) goto err_fib4_node_entry_add; - mlxsw_sp_fib_node_prefix_inc(fib_node); - return 0; err_fib4_node_entry_add: @@ -2251,7 +2479,6 @@ mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; - mlxsw_sp_fib_node_prefix_dec(fib_node); mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry); mlxsw_sp_fib4_node_list_remove(fib_entry); } @@ -2340,9 +2567,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) { char ralta_pl[MLXSW_REG_RALTA_LEN]; char ralst_pl[MLXSW_REG_RALST_LEN]; - char raltb_pl[MLXSW_REG_RALTB_LEN]; - char ralue_pl[MLXSW_REG_RALUE_LEN]; - int err; + int i, err; mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4, MLXSW_SP_LPM_TREE_MIN); @@ -2355,16 +2580,33 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) if (err) return err; - mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, - MLXSW_SP_LPM_TREE_MIN); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); - if (err) - return err; + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { + struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i]; + char 
raltb_pl[MLXSW_REG_RALTB_LEN]; + char ralue_pl[MLXSW_REG_RALUE_LEN]; - mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4, - MLXSW_REG_RALUE_OP_WRITE_WRITE, 0, 0, 0); - mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); + if (!mlxsw_sp_vr_is_used(vr)) + continue; + + mlxsw_reg_raltb_pack(raltb_pl, vr->id, + MLXSW_REG_RALXX_PROTOCOL_IPV4, + MLXSW_SP_LPM_TREE_MIN); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), + raltb_pl); + if (err) + return err; + + mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4, + MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0, + 0); + mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), + ralue_pl); + if (err) + return err; + } + + return 0; } static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, @@ -2390,7 +2632,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { - switch (fib_node->vr->proto) { + switch (fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); break; @@ -2400,26 +2642,32 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, } } -static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) +static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vr *vr, + enum mlxsw_sp_l3proto proto) { + struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto); struct mlxsw_sp_fib_node *fib_node, *tmp; - struct mlxsw_sp_vr *vr; + + list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) { + bool do_break = &tmp->list == &fib->node_list; + + mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node); + if (do_break) + break; + } +} + +static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) +{ int i; for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { - vr = &mlxsw_sp->router.vrs[i]; + struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[i]; - if (!vr->used) + if (!mlxsw_sp_vr_is_used(vr)) continue; - - list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list, - list) { - bool do_break = &tmp->list == &vr->fib->node_list; - - mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node); - if (do_break) - break; - } + mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); } } @@ -2437,74 +2685,11 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n"); } -static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) -{ - char ritr_pl[MLXSW_REG_RITR_LEN]; - int err; - - mlxsw_reg_ritr_rif_pack(ritr_pl, rif); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); - if (WARN_ON_ONCE(err)) - return err; - - mlxsw_reg_ritr_enable_set(ritr_pl, false); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); -} - -void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_rif *r) -{ - mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif); - mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r); - mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r); -} - -static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) -{ - char rgcr_pl[MLXSW_REG_RGCR_LEN]; - u64 max_rifs; - int err; - - if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) - return -EIO; - - max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); - mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *), - GFP_KERNEL); - if (!mlxsw_sp->rifs) - return -ENOMEM; - - 
mlxsw_reg_rgcr_pack(rgcr_pl, true); - mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); - if (err) - goto err_rgcr_fail; - - return 0; - -err_rgcr_fail: - kfree(mlxsw_sp->rifs); - return err; -} - -static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) -{ - char rgcr_pl[MLXSW_REG_RGCR_LEN]; - int i; - - mlxsw_reg_rgcr_pack(rgcr_pl, false); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); - - for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) - WARN_ON_ONCE(mlxsw_sp->rifs[i]); - - kfree(mlxsw_sp->rifs); -} - struct mlxsw_sp_fib_event_work { struct work_struct work; union { struct fib_entry_notifier_info fen_info; + struct fib_rule_notifier_info fr_info; struct fib_nh_notifier_info fnh_info; }; struct mlxsw_sp *mlxsw_sp; @@ -2516,6 +2701,7 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; + struct fib_rule *rule; bool replace, append; int err; @@ -2539,7 +2725,10 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) break; case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: - mlxsw_sp_router_fib4_abort(mlxsw_sp); + rule = fib_work->fr_info.rule; + if (!fib4_rule_default(rule) && !rule->l3mdev) + mlxsw_sp_router_fib4_abort(mlxsw_sp); + fib_rule_put(rule); break; case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_DEL: @@ -2582,6 +2771,11 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, */ fib_info_hold(fib_work->fen_info.fi); break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_DEL: memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info)); @@ -2594,6 +2788,707 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, return NOTIFY_DONE; } +static struct mlxsw_sp_rif * +mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev) +{ + int i; + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) + if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev) + return mlxsw_sp->rifs[i]; + + return NULL; +} + +static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + int err; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (WARN_ON_ONCE(err)) + return err; + + mlxsw_reg_ritr_enable_set(ritr_pl, false); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif) +{ + mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index); + mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif); + mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif); +} + +static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, + const struct in_device *in_dev, + unsigned long event) +{ + switch (event) { + case NETDEV_UP: + if (!rif) + return true; + return false; + case NETDEV_DOWN: + if (rif && !in_dev->ifa_list && + !netif_is_l3_slave(rif->dev)) + return true; + /* It is possible we already removed the RIF ourselves + * if it was assigned to a netdev that is now a bridge + * or LAG slave. 
+ */ + return false; + } + + return false; +} + +#define MLXSW_SP_INVALID_INDEX_RIF 0xffff +static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) + if (!mlxsw_sp->rifs[i]) + return i; + + return MLXSW_SP_INVALID_INDEX_RIF; +} + +static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, + bool *p_lagged, u16 *p_system_port) +{ + u8 local_port = mlxsw_sp_vport->local_port; + + *p_lagged = mlxsw_sp_vport->lagged; + *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port; +} + +static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, + u16 vr_id, struct net_device *l3_dev, + u16 rif_index, bool create) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + bool lagged = mlxsw_sp_vport->lagged; + char ritr_pl[MLXSW_REG_RITR_LEN]; + u16 system_port; + + mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif_index, + vr_id, l3_dev->mtu, l3_dev->dev_addr); + + mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); + mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, + mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + +static u16 mlxsw_sp_rif_sp_to_fid(u16 rif_index) +{ + return MLXSW_SP_RFID_BASE + rif_index; +} + +static struct mlxsw_sp_fid * +mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) +{ + struct mlxsw_sp_fid *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + f->leave = mlxsw_sp_vport_rif_sp_leave; + f->ref_count = 0; + f->dev = l3_dev; + f->fid = fid; + + return f; +} + +static struct mlxsw_sp_rif * +mlxsw_sp_rif_alloc(u16 rif_index, u16 vr_id, struct net_device *l3_dev, + struct mlxsw_sp_fid *f) +{ + struct mlxsw_sp_rif *rif; + + rif = kzalloc(sizeof(*rif), GFP_KERNEL); + if (!rif) + return NULL; + + INIT_LIST_HEAD(&rif->nexthop_list); + INIT_LIST_HEAD(&rif->neigh_list); + ether_addr_copy(rif->addr, l3_dev->dev_addr); + rif->mtu = l3_dev->mtu; + rif->vr_id = vr_id; + rif->dev = l3_dev; + rif->rif_index = rif_index; + rif->f = f; + + return rif; +} + +u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif) +{ + return rif->rif_index; +} + +int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif) +{ + return rif->dev->ifindex; +} + +static struct mlxsw_sp_rif * +mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + u32 tb_id = l3mdev_fib_table(l3_dev); + struct mlxsw_sp_vr *vr; + struct mlxsw_sp_fid *f; + struct mlxsw_sp_rif *rif; + u16 fid, rif_index; + int err; + + rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp); + if (rif_index == MLXSW_SP_INVALID_INDEX_RIF) + return ERR_PTR(-ERANGE); + + vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? 
: RT_TABLE_MAIN); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, + rif_index, true); + if (err) + goto err_vport_rif_sp_op; + + fid = mlxsw_sp_rif_sp_to_fid(rif_index); + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); + if (err) + goto err_rif_fdb_op; + + f = mlxsw_sp_rfid_alloc(fid, l3_dev); + if (!f) { + err = -ENOMEM; + goto err_rfid_alloc; + } + + rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f); + if (!rif) { + err = -ENOMEM; + goto err_rif_alloc; + } + + if (devlink_dpipe_table_counter_enabled(priv_to_devlink(mlxsw_sp->core), + MLXSW_SP_DPIPE_TABLE_NAME_ERIF)) { + err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, + MLXSW_SP_RIF_COUNTER_EGRESS); + if (err) + netdev_dbg(mlxsw_sp_vport->dev, + "Counter alloc Failed err=%d\n", err); + } + + f->rif = rif; + mlxsw_sp->rifs[rif_index] = rif; + vr->rif_count++; + + return rif; + +err_rif_alloc: + kfree(f); +err_rfid_alloc: + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); +err_rif_fdb_op: + mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index, + false); +err_vport_rif_sp_op: + mlxsw_sp_vr_put(vr); + return ERR_PTR(err); +} + +static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, + struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id]; + struct net_device *l3_dev = rif->dev; + struct mlxsw_sp_fid *f = rif->f; + u16 rif_index = rif->rif_index; + u16 fid = f->fid; + + mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); + + mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS); + mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_INGRESS); + + vr->rif_count--; + mlxsw_sp->rifs[rif_index] = NULL; + f->rif = NULL; + + kfree(rif); + + kfree(f); + + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); + + mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, vr->id, l3_dev, rif_index, + false); + mlxsw_sp_vr_put(vr); +} + +static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); + if (!rif) { + rif = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); + if (IS_ERR(rif)) + return PTR_ERR(rif); + } + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, rif->f); + rif->f->ref_count++; + + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", rif->f->fid); + + return 0; +} + +static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->rif); +} + +static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, + struct net_device *port_dev, + unsigned long event, u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); + case NETDEV_DOWN: + mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); + break; + } + + return 0; +} + +static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, + unsigned 
long event) +{ + if (netif_is_bridge_port(port_dev) || + netif_is_lag_port(port_dev) || + netif_is_ovs_port(port_dev)) + return 0; + + return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); +} + +static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, + struct net_device *lag_dev, + unsigned long event, u16 vid) +{ + struct net_device *port_dev; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(lag_dev, port_dev, iter) { + if (mlxsw_sp_port_dev_check(port_dev)) { + err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, + event, vid); + if (err) + return err; + } + } + + return 0; +} + +static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, + unsigned long event) +{ + if (netif_is_bridge_port(lag_dev)) + return 0; + + return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); +} + +static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev) +{ + u16 fid; + + if (is_vlan_dev(l3_dev)) + fid = vlan_dev_vlan_id(l3_dev); + else if (mlxsw_sp->master_bridge.dev == l3_dev) + fid = 1; + else + return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); + + return mlxsw_sp_fid_find(mlxsw_sp, fid); +} + +static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) +{ + return mlxsw_core_max_ports(mlxsw_sp->core) + 1; +} + +static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid) +{ + return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID : + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; +} + +static u16 mlxsw_sp_flood_table_index_get(u16 fid) +{ + return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid; +} + +static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, + bool set) +{ + u8 router_port = mlxsw_sp_router_port(mlxsw_sp); + enum mlxsw_flood_table_type table_type; + char *sftr_pl; + u16 index; + int err; + + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); + if (!sftr_pl) + return -ENOMEM; + + table_type = mlxsw_sp_flood_table_type_get(fid); + index = mlxsw_sp_flood_table_index_get(fid); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type, + 1, router_port, set); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + + kfree(sftr_pl); + return err; +} + +static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) +{ + if (mlxsw_sp_fid_is_vfid(fid)) + return MLXSW_REG_RITR_FID_IF; + else + return MLXSW_REG_RITR_VLAN_IF; +} + +static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, u16 vr_id, + struct net_device *l3_dev, + u16 fid, u16 rif, + bool create) +{ + enum mlxsw_reg_ritr_if_type rif_type; + char ritr_pl[MLXSW_REG_RITR_LEN]; + + rif_type = mlxsw_sp_rif_type_get(fid); + mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, vr_id, l3_dev->mtu, + l3_dev->dev_addr); + mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + struct mlxsw_sp_fid *f) +{ + u32 tb_id = l3mdev_fib_table(l3_dev); + struct mlxsw_sp_rif *rif; + struct mlxsw_sp_vr *vr; + u16 rif_index; + int err; + + rif_index = mlxsw_sp_avail_rif_get(mlxsw_sp); + if (rif_index == MLXSW_SP_INVALID_INDEX_RIF) + return -ERANGE; + + vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? 
: RT_TABLE_MAIN); + if (IS_ERR(vr)) + return PTR_ERR(vr); + + err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true); + if (err) + goto err_port_flood_set; + + err = mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, + rif_index, true); + if (err) + goto err_rif_bridge_op; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); + if (err) + goto err_rif_fdb_op; + + rif = mlxsw_sp_rif_alloc(rif_index, vr->id, l3_dev, f); + if (!rif) { + err = -ENOMEM; + goto err_rif_alloc; + } + + f->rif = rif; + mlxsw_sp->rifs[rif_index] = rif; + vr->rif_count++; + + netdev_dbg(l3_dev, "RIF=%d created\n", rif_index); + + return 0; + +err_rif_alloc: + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); +err_rif_fdb_op: + mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index, + false); +err_rif_bridge_op: + mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); +err_port_flood_set: + mlxsw_sp_vr_put(vr); + return err; +} + +void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_vr *vr = &mlxsw_sp->router.vrs[rif->vr_id]; + struct net_device *l3_dev = rif->dev; + struct mlxsw_sp_fid *f = rif->f; + u16 rif_index = rif->rif_index; + + mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); + + vr->rif_count--; + mlxsw_sp->rifs[rif_index] = NULL; + f->rif = NULL; + + kfree(rif); + + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); + + mlxsw_sp_rif_bridge_op(mlxsw_sp, vr->id, l3_dev, f->fid, rif_index, + false); + + mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); + + mlxsw_sp_vr_put(vr); + + netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif_index); +} + +static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, + struct net_device *br_dev, + unsigned long event) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); + struct mlxsw_sp_fid *f; + + /* FID can either be an actual FID if the L3 device is the + * VLAN-aware bridge or a VLAN device on top. Otherwise, the + * L3 device is a VLAN-unaware bridge and we get a vFID. 
+ */ + f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev); + if (WARN_ON(!f)) + return -EINVAL; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f); + case NETDEV_DOWN: + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif); + break; + } + + return 0; +} + +static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, + unsigned long event) +{ + struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); + u16 vid = vlan_dev_vlan_id(vlan_dev); + + if (mlxsw_sp_port_dev_check(real_dev)) + return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, + vid); + else if (netif_is_lag_master(real_dev)) + return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, + vid); + else if (netif_is_bridge_master(real_dev) && + mlxsw_sp->master_bridge.dev == real_dev) + return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev, + event); + + return 0; +} + +static int __mlxsw_sp_inetaddr_event(struct net_device *dev, + unsigned long event) +{ + if (mlxsw_sp_port_dev_check(dev)) + return mlxsw_sp_inetaddr_port_event(dev, event); + else if (netif_is_lag_master(dev)) + return mlxsw_sp_inetaddr_lag_event(dev, event); + else if (netif_is_bridge_master(dev)) + return mlxsw_sp_inetaddr_bridge_event(dev, dev, event); + else if (is_vlan_dev(dev)) + return mlxsw_sp_inetaddr_vlan_event(dev, event); + else + return 0; +} + +int mlxsw_sp_inetaddr_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + int err = 0; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event)) + goto out; + + err = __mlxsw_sp_inetaddr_event(dev, event); +out: + return notifier_from_errno(err); +} + +static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, + const char *mac, int mtu) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + int err; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); + mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); + mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) +{ + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + int err; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + return 0; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) + return 0; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, false); + if (err) + return err; + + err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr, + dev->mtu); + if (err) + goto err_rif_edit; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, rif->f->fid, true); + if (err) + goto err_rif_fdb_op; + + ether_addr_copy(rif->addr, dev->dev_addr); + rif->mtu = dev->mtu; + + netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index); + + return 0; + +err_rif_fdb_op: + mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu); +err_rif_edit: + mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, rif->f->fid, true); + return err; +} + +static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev) +{ + struct mlxsw_sp_rif *rif; + + /* If 
netdev is already associated with a RIF, then we need to + * destroy it and create a new one with the new virtual router ID. + */ + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); + if (rif) + __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN); + + return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP); +} + +static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev) +{ + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); + if (!rif) + return; + __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN); +} + +int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, + struct netdev_notifier_changeupper_info *info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); + int err = 0; + + if (!mlxsw_sp) + return 0; + + switch (event) { + case NETDEV_PRECHANGEUPPER: + return 0; + case NETDEV_CHANGEUPPER: + if (info->linking) + err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev); + else + mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev); + break; + } + + return err; +} + static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb) { struct mlxsw_sp *mlxsw_sp = container_of(nb, struct mlxsw_sp, fib_nb); @@ -2606,6 +3501,48 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb) mlxsw_sp_router_fib_flush(mlxsw_sp); } +static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) +{ + char rgcr_pl[MLXSW_REG_RGCR_LEN]; + u64 max_rifs; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) + return -EIO; + + max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + mlxsw_sp->rifs = kcalloc(max_rifs, sizeof(struct mlxsw_sp_rif *), + GFP_KERNEL); + if (!mlxsw_sp->rifs) + return -ENOMEM; + + mlxsw_reg_rgcr_pack(rgcr_pl, true); + mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); + if (err) + goto err_rgcr_fail; + + return 0; + +err_rgcr_fail: + kfree(mlxsw_sp->rifs); + return err; +} + +static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) +{ + char rgcr_pl[MLXSW_REG_RGCR_LEN]; + int i; + + mlxsw_reg_rgcr_pack(rgcr_pl, false); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) + WARN_ON_ONCE(mlxsw_sp->rifs[i]); + + kfree(mlxsw_sp->rifs); +} + int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { int err; @@ -2625,7 +3562,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_nexthop_group_ht_init; - mlxsw_sp_lpm_init(mlxsw_sp); + err = mlxsw_sp_lpm_init(mlxsw_sp); + if (err) + goto err_lpm_init; + err = mlxsw_sp_vrs_init(mlxsw_sp); if (err) goto err_vrs_init; @@ -2647,6 +3587,8 @@ err_register_fib_notifier: err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); err_vrs_init: + mlxsw_sp_lpm_fini(mlxsw_sp); +err_lpm_init: rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht); err_nexthop_group_ht_init: rhashtable_destroy(&mlxsw_sp->router.nexthop_ht); @@ -2660,6 +3602,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_fib_notifier(&mlxsw_sp->fib_nb); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); + mlxsw_sp_lpm_fini(mlxsw_sp); rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht); rhashtable_destroy(&mlxsw_sp->router.nexthop_ht); __mlxsw_sp_router_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h new file mode 100644 index 000000000000..c3095fef6697 --- /dev/null +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -0,0 +1,58 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _MLXSW_ROUTER_H_ +#define _MLXSW_ROUTER_H_ + +#include "spectrum.h" + +enum mlxsw_sp_rif_counter_dir { + MLXSW_SP_RIF_COUNTER_INGRESS, + MLXSW_SP_RIF_COUNTER_EGRESS, +}; + +u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); +int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); +int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir, + u64 *cnt); +void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir); +int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif, + enum mlxsw_sp_rif_counter_dir dir); + +#endif /* _MLXSW_ROUTER_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 598727d578c1..0d8411f1f954 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -568,8 +568,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) list_del(&f->list); - if (f->r) - mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + if (f->rif) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif); kfree(f); @@ -745,27 +745,6 @@ err_port_allow_untagged_set: return err; } -static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid_begin, u16 vid_end, bool is_member, - bool untagged) -{ - u16 vid, vid_e; - int err; - - for (vid = vid_begin; vid <= vid_end; - vid += MLXSW_REG_SPVM_REC_MAX_COUNT) { - vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1), - vid_end); - - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, - is_member, untagged); - if (err) - return err; - } - - return 0; -} - static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool learn_enable) @@ -804,8 +783,8 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, return err; } - err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, - true, flag_untagged); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end, + true, flag_untagged); if (err) { netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin, vid_end); @@ -863,8 +842,8 @@ err_port_vid_learning_set: if (old_pvid != mlxsw_sp_port->pvid) mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); err_port_pvid_set: - __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, - false); + mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end, + false, false); err_port_vlans_set: mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); return err; @@ -1012,7 +991,7 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add); if (clear_all_ports) { - for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) if (mlxsw_sp->ports[i]) mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); } @@ -1171,8 +1150,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, if (pvid >= vid_begin && pvid <= vid_end) mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); - __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, - false); + mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end, + false, false); mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c 
b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index ec1e886d4566..3b0f72455681 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1321,7 +1321,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
 {
 	int i;
 
-	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sx->core); i++)
 		if (mlxsw_sx_port_created(mlxsw_sx, i))
 			mlxsw_sx_port_remove(mlxsw_sx, i);
 	kfree(mlxsw_sx->ports);
@@ -1329,17 +1329,18 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
 
 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
 {
+	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);
 	size_t alloc_size;
 	u8 module, width;
 	int i;
 	int err;
 
-	alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+	alloc_size = sizeof(struct mlxsw_sx_port *) * max_ports;
 	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
 	if (!mlxsw_sx->ports)
 		return -ENOMEM;
 
-	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+	for (i = 1; i < max_ports; i++) {
 		err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
 						    &width);
 		if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 02ea48b15eb5..e008fdbed20f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -55,6 +55,7 @@ enum {
 	MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
 	MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
 	MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
+	MLXSW_TRAP_ID_FID_MISS = 0x3D,
 	MLXSW_TRAP_ID_ARPBC = 0x50,
 	MLXSW_TRAP_ID_ARPUC = 0x51,
 	MLXSW_TRAP_ID_MTUERROR = 0x52,
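A few notes on recurring patterns in the hunks above, each with a small sketch. All helper names introduced in the sketches (item_destroy(), step_a(), obj_get() and friends) are placeholders, not driver or kernel API, except where noted.

mlxsw_sp_vr_fib_flush() iterates with list_for_each_entry_safe() and still computes a do_break flag by hand. The _safe variant only copes with the current entry being freed; here, flushing the last node can drop the node's final reference and, through mlxsw_sp_fib4_node_put() and mlxsw_sp_vr_put(), release the containing FIB together with the list head itself, so the loop must not touch the list again after that last flush. The flag is therefore computed while tmp is still safe to inspect, and the loop breaks right after the call. A kernel-style sketch of the idiom:

	#include <linux/list.h>
	#include <linux/types.h>

	struct item {
		struct list_head list;
	};

	/* May free more than 'item' itself, including the list head. */
	void item_destroy(struct item *item);

	static void items_flush(struct list_head *head)
	{
		struct item *item, *tmp;

		list_for_each_entry_safe(item, tmp, head, list) {
			/* Decide whether this is the last element while
			 * 'tmp' and 'head' are still guaranteed valid.
			 */
			bool do_break = &tmp->list == head;

			item_destroy(item);
			if (do_break)
				break;
		}
	}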
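The FIB_EVENT_RULE_ADD/DEL handling is the policy-routing escape hatch: the ASIC models only the default FIB rules, plus l3mdev rules (which map onto the per-VRF virtual routers), so the check !fib4_rule_default(rule) && !rule->l3mdev catches every rule the hardware cannot express, and mlxsw_sp_router_fib4_abort() then flushes the offloaded tables and traps routed traffic to the CPU. The deferral around it follows a common shape: the notifier runs in atomic context, so it takes a reference with fib_rule_get() before queueing the work, and the work item drops it with fib_rule_put() once the rule has been examined. A generic sketch of that hand-off, with a placeholder refcounted object standing in for the fib_rule:

	#include <linux/notifier.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct obj;
	void obj_get(struct obj *obj);
	void obj_put(struct obj *obj);
	void process(struct obj *obj);

	struct event_work {
		struct work_struct work;
		struct obj *obj;
	};

	static void event_work_fn(struct work_struct *work)
	{
		struct event_work *ew = container_of(work, struct event_work,
						     work);

		process(ew->obj);	/* process context: sleeping is fine */
		obj_put(ew->obj);	/* release the reference taken below */
		kfree(ew);
	}

	static int event_notifier_cb(struct notifier_block *nb,
				     unsigned long event, void *ptr)
	{
		struct event_work *ew;

		ew = kzalloc(sizeof(*ew), GFP_ATOMIC); /* atomic context */
		if (!ew)
			return NOTIFY_BAD;

		ew->obj = ptr;
		obj_get(ew->obj);	/* keep it alive until the work runs */
		INIT_WORK(&ew->work, event_work_fn);
		schedule_work(&ew->work);

		return NOTIFY_DONE;
	}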
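Both mlxsw_sp_vport_rif_sp_create() and mlxsw_sp_rif_bridge_create() pick the virtual router from the netdev's FIB table: l3mdev_fib_table() returns the VRF's table ID for an l3mdev slave and 0 otherwise, and tb_id ? : RT_TABLE_MAIN falls back to the main table using the GNU C conditional with omitted middle operand (a ? : b means a ? a : b, with a evaluated once). A user-space sketch of just that operator, with made-up table IDs (RT_TABLE_MAIN is 254 in the rtnetlink uAPI):

	#include <stdio.h>

	#define RT_TABLE_MAIN 254 /* include/uapi/linux/rtnetlink.h */

	int main(void)
	{
		unsigned int tb_id_vrf = 10; /* device enslaved to a VRF */
		unsigned int tb_id_none = 0; /* l3mdev_fib_table() == 0 */

		/* GNU extension: a ? : b evaluates a once and yields a
		 * when it is non-zero, otherwise b.
		 */
		printf("%u\n", tb_id_vrf ? : RT_TABLE_MAIN);  /* 10  */
		printf("%u\n", tb_id_none ? : RT_TABLE_MAIN); /* 254 */
		return 0;
	}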
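The RIF constructors unwind errors in the usual kernel shape: each err_* label is named for the step that failed and undoes, in reverse order, only the steps that had already succeeded. mlxsw_sp_vport_rif_sp_create(), for instance, falls from err_rif_alloc through err_rfid_alloc and err_rif_fdb_op to err_vport_rif_sp_op, ending in mlxsw_sp_vr_put(). The skeleton, reduced to placeholder steps:

	static int step_a(void) { return 0; }
	static int step_b(void) { return 0; }
	static int step_c(void) { return 0; }
	static void undo_b(void) { }
	static void undo_a(void) { }

	static int example_create(void)
	{
		int err;

		err = step_a();
		if (err)
			return err;	/* nothing to unwind yet */

		err = step_b();
		if (err)
			goto err_step_b;

		err = step_c();
		if (err)
			goto err_step_c;

		return 0;

	err_step_c:			/* label named for the failed step */
		undo_b();
	err_step_b:
		undo_a();
		return err;
	}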
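mlxsw_sp_inetaddr_event() reports failure back through the notifier chain with notifier_from_errno(): err == 0 maps to NOTIFY_OK, a negative errno to NOTIFY_STOP_MASK | (NOTIFY_BAD - err), and a caller of the chain can recover the errno with notifier_to_errno(). A minimal sketch of a handler using that convention:

	#include <linux/notifier.h>

	static int example_inetaddr_cb(struct notifier_block *nb,
				       unsigned long event, void *ptr)
	{
		int err = 0;

		/* ... program the hardware; on failure set err to
		 * -ENOMEM, -EIO, ...
		 */

		/* 0 becomes NOTIFY_OK; a negative errno becomes
		 * NOTIFY_STOP_MASK | (NOTIFY_BAD - err), decodable on
		 * the calling side via notifier_to_errno().
		 */
		return notifier_from_errno(err);
	}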
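Finally, the spectrum_switchdev.c and switchx2.c hunks share one theme with the rest of the series: port arrays are sized by mlxsw_core_max_ports(), that is, by what the probed device actually reports, instead of the compile-time MLXSW_PORT_MAX_PORTS, so one driver binary serves ASICs with different port counts. A sketch of the allocation in that style; the function name is a placeholder, and kcalloc() is shown as an illustrative alternative to the kzalloc(sizeof(ptr) * n) in the hunk above, since both zero the array so empty slots read as NULL:

	static int example_ports_create(struct mlxsw_sx *mlxsw_sx)
	{
		unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sx->core);

		/* Array of pointers, one slot per possible local port;
		 * slot 0 stays unused, ports are numbered from 1.
		 */
		mlxsw_sx->ports = kcalloc(max_ports,
					  sizeof(struct mlxsw_sx_port *),
					  GFP_KERNEL);
		if (!mlxsw_sx->ports)
			return -ENOMEM;
		return 0;
	}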