Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw')
30 files changed, 2149 insertions, 1030 deletions
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index d56eea310509..f4d9c9975ac3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -76,6 +76,8 @@ config MLXSW_SPECTRUM
 	depends on PSAMPLE || PSAMPLE=n
 	depends on BRIDGE || BRIDGE=n
 	depends on IPV6 || IPV6=n
+	depends on NET_IPGRE || NET_IPGRE=n
+	depends on IPV6_GRE || IPV6_GRE=n
 	select PARMAN
 	select MLXFW
 	default m
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 9463c3fa254f..0cadcabfe86f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -20,7 +20,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
 				   spectrum_cnt.o spectrum_fid.o \
 				   spectrum_ipip.o spectrum_acl_flex_actions.o \
 				   spectrum_mr.o spectrum_mr_tcam.o \
-				   spectrum_qdisc.o
+				   spectrum_qdisc.o spectrum_span.o
 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB)	+= spectrum_dcb.o
 mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
 obj-$(CONFIG_MLXSW_MINIMAL)	+= mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 3529b545675d..93ea56620a24 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1008,6 +1008,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 	const char *device_kind = mlxsw_bus_info->device_kind;
 	struct mlxsw_core *mlxsw_core;
 	struct mlxsw_driver *mlxsw_driver;
+	struct mlxsw_res *res;
 	size_t alloc_size;
 	int err;
 
@@ -1032,8 +1033,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
 	mlxsw_core->bus_priv = bus_priv;
 	mlxsw_core->bus_info = mlxsw_bus_info;
 
-	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
-			      &mlxsw_core->res);
+	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
+	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
 	if (err)
 		goto err_bus_init;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 5ddafd74dc00..092d39399f3c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -235,8 +235,7 @@ struct mlxsw_config_profile {
 		used_max_pkey:1,
 		used_ar_sec:1,
 		used_adaptive_routing_group_cap:1,
-		used_kvd_split_data:1; /* indicate for the kvd's values */
-
+		used_kvd_sizes:1;
 	u8	max_vepa_channels;
 	u16	max_mid;
 	u16	max_pgt;
@@ -256,10 +255,8 @@ struct mlxsw_config_profile {
 	u16	adaptive_routing_group_cap;
 	u8	arn;
 	u32	kvd_linear_size;
-	u16	kvd_hash_granularity;
 	u8	kvd_hash_single_parts;
 	u8	kvd_hash_double_parts;
-	u8	resource_query_enable;
 	struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
 };
 
@@ -316,6 +313,7 @@ struct mlxsw_driver {
 			     u64 *p_linear_size);
 	u8 txhdr_len;
 	const struct mlxsw_config_profile *profile;
+	bool res_query_enabled;
 };
 
 int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -326,14 +324,14 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
 
 bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
 			  enum mlxsw_res_id res_id);
 
-#define MLXSW_CORE_RES_VALID(res, short_res_id)			\
-	mlxsw_core_res_valid(res, MLXSW_RES_ID_##short_res_id)
+#define MLXSW_CORE_RES_VALID(mlxsw_core, short_res_id)			\
+	mlxsw_core_res_valid(mlxsw_core, MLXSW_RES_ID_##short_res_id)
 
 u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
 		       enum mlxsw_res_id res_id);
 
-#define MLXSW_CORE_RES_GET(res, short_res_id)			\
-	mlxsw_core_res_get(res, MLXSW_RES_ID_##short_res_id)
+#define MLXSW_CORE_RES_GET(mlxsw_core, short_res_id)			\
+	mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id)
 
 #define MLXSW_BUS_F_TXRX	BIT(0)
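The firmware resource query is now opted into per driver rather than via the config profile: core passes a non-NULL res pointer to the bus only when the driver sets the new flag, and the PCI bus code (mlxsw_pci_resources_query() further down) returns early on NULL. A minimal sketch of the new contract; the driver name is hypothetical:

	/* Sketch (not part of the patch): a driver opts in to the firmware
	 * resource query via the new flag; drivers that leave it false get
	 * profile-driven configuration only.
	 */
	static struct mlxsw_driver example_driver = {	/* hypothetical driver */
		.kind			= "example",
		.profile		= &example_config_profile,
		.res_query_enabled	= true,	/* core passes &mlxsw_core->res to bus init */
	};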
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 996dc099cd58..3c0d882ba183 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1,6 +1,6 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -849,7 +849,6 @@ struct mlxsw_afa_mirror {
 	struct mlxsw_afa_resource resource;
 	int span_id;
 	u8 local_in_port;
-	u8 local_out_port;
 	bool ingress;
 };
 
@@ -859,7 +858,7 @@ mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
 {
 	block->afa->ops->mirror_del(block->afa->ops_priv,
 				    mirror->local_in_port,
-				    mirror->local_out_port,
+				    mirror->span_id,
 				    mirror->ingress);
 	kfree(mirror);
 }
@@ -875,9 +874,8 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
 }
 
 static struct mlxsw_afa_mirror *
-mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
-			u8 local_in_port, u8 local_out_port,
-			bool ingress)
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
+			const struct net_device *out_dev, bool ingress)
 {
 	struct mlxsw_afa_mirror *mirror;
 	int err;
@@ -887,13 +885,12 @@ mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
 		return ERR_PTR(-ENOMEM);
 
 	err = block->afa->ops->mirror_add(block->afa->ops_priv,
-					  local_in_port, local_out_port,
+					  local_in_port, out_dev,
 					  ingress, &mirror->span_id);
 	if (err)
 		goto err_mirror_add;
 
 	mirror->ingress = ingress;
-	mirror->local_out_port = local_out_port;
 	mirror->local_in_port = local_in_port;
 	mirror->resource.destructor = mlxsw_afa_mirror_destructor;
 	mlxsw_afa_resource_add(block, &mirror->resource);
@@ -920,13 +917,13 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
 }
 
 int
-mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
-			      u8 local_in_port, u8 local_out_port, bool ingress)
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
+			      const struct net_device *out_dev, bool ingress)
 {
 	struct mlxsw_afa_mirror *mirror;
 	int err;
 
-	mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+	mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
 					 ingress);
 	if (IS_ERR(mirror))
 		return PTR_ERR(mirror);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index b91f2b0829b0..3a155d104384 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -36,6 +36,7 @@
 #define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
 
 #include <linux/types.h>
+#include <linux/netdevice.h>
 
 struct mlxsw_afa;
 struct mlxsw_afa_block;
@@ -48,9 +49,10 @@ struct mlxsw_afa_ops {
 	void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
 	int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
 	void (*counter_index_put)(void *priv, unsigned int counter_index);
-	int (*mirror_add)(void *priv, u8 locol_in_port, u8 local_out_port,
+	int (*mirror_add)(void *priv, u8 local_in_port,
+			  const struct net_device *out_dev,
 			  bool ingress, int *p_span_id);
-	void (*mirror_del)(void *priv, u8 locol_in_port, u8 local_out_port,
+	void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
 			   bool ingress);
 };
 
@@ -71,7 +73,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
 int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
 					    u16 trap_id);
 int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
				  u8 local_in_port,
+				  const struct net_device *out_dev,
 				  bool ingress);
 int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
			       u8 local_port, bool in_port);
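The mirror callbacks now identify the destination by net_device on add and by SPAN agent id on delete, instead of a local out port in both directions. A sketch of the resulting call pattern (variable names illustrative):

	/* Sketch: out_dev may be any netdev the SPAN code can resolve (a
	 * front-panel port, or with this series a GRE tunnel device);
	 * mirror_add returns the allocated SPAN agent id, which later keys
	 * the delete.
	 */
	int span_id;
	int err;

	err = ops->mirror_add(ops_priv, local_in_port, out_dev, ingress, &span_id);
	if (err)
		return err;
	/* ... later, on rule teardown ... */
	ops->mirror_del(ops_priv, local_in_port, span_id, ingress);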
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index ab710e37af99..84185f8dfbae 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -218,32 +218,32 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
 	switch (attr_type) {
 	case MLXSW_HWMON_ATTR_TYPE_TEMP:
 		mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_show;
-		mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+		mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
 		snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
 			 "temp%u_input", num + 1);
 		break;
 	case MLXSW_HWMON_ATTR_TYPE_TEMP_MAX:
 		mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_temp_max_show;
-		mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+		mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
 		snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
 			 "temp%u_highest", num + 1);
 		break;
 	case MLXSW_HWMON_ATTR_TYPE_TEMP_RST:
 		mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_temp_rst_store;
-		mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR;
+		mlxsw_hwmon_attr->dev_attr.attr.mode = 0200;
 		snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
 			 "temp%u_reset_history", num + 1);
 		break;
 	case MLXSW_HWMON_ATTR_TYPE_FAN_RPM:
 		mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_rpm_show;
-		mlxsw_hwmon_attr->dev_attr.attr.mode = S_IRUGO;
+		mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
 		snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
 			 "fan%u_input", num + 1);
 		break;
 	case MLXSW_HWMON_ATTR_TYPE_PWM:
 		mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show;
 		mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store;
-		mlxsw_hwmon_attr->dev_attr.attr.mode = S_IWUSR | S_IRUGO;
+		mlxsw_hwmon_attr->dev_attr.attr.mode = 0644;
 		snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
 			 "pwm%u", num + 1);
 		break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 85faa87bf42d..3a9381977d6d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1015,16 +1015,14 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
 }
 
 static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
-				     struct mlxsw_res *res,
-				     u8 query_enabled)
+				     struct mlxsw_res *res)
 {
 	int index, i;
 	u64 data;
 	u16 id;
 	int err;
 
-	/* Not all the versions support resources query */
-	if (!query_enabled)
+	if (!res)
 		return 0;
 
 	mlxsw_cmd_mbox_zero(mbox);
@@ -1164,7 +1162,7 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
 		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
 			mbox, profile->adaptive_routing_group_cap);
 	}
-	if (MLXSW_RES_VALID(res, KVD_SIZE)) {
+	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
 		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
 		if (err)
 			return err;
@@ -1376,8 +1374,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
 	if (err)
 		goto err_boardinfo;
 
-	err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res,
-					profile->resource_query_enable);
+	err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res);
 	if (err)
 		goto err_query_resources;
@@ -1519,8 +1516,7 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 			      u8 *p_status)
 {
 	struct mlxsw_pci *mlxsw_pci = bus_priv;
-	dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
-	dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
 	bool evreq = mlxsw_pci->cmd.nopoll;
 	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
 	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
@@ -1532,11 +1528,15 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
 	if (err)
 		return err;
 
-	if (in_mbox)
+	if (in_mbox) {
 		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+	}
 	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
 	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
 
+	if (out_mbox)
+		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
 	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
 	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0e08be41c8e0..6218231e379e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,11 +1,11 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -2872,6 +2872,14 @@ static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
 
 MLXSW_REG_DEFINE(ptys, MLXSW_REG_PTYS_ID, MLXSW_REG_PTYS_LEN);
 
+/* an_disable_admin
+ * Auto negotiation disable administrative configuration
+ * 0 - Device doesn't support AN disable.
+ * 1 - Device supports AN disable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, an_disable_admin, 0x00, 30, 1);
+
 /* reg_ptys_local_port
 * Local port number.
 * Access: Index
@@ -3000,12 +3008,13 @@ MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16);
 MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
 
 static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port,
-					   u32 proto_admin)
+					   u32 proto_admin, bool autoneg)
 {
 	MLXSW_REG_ZERO(ptys, payload);
 	mlxsw_reg_ptys_local_port_set(payload, local_port);
 	mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
 	mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+	mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg);
 }
 
 static inline void mlxsw_reg_ptys_eth_unpack(char *payload,
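mlxsw_reg_ptys_eth_pack() now also drives the an_disable_admin bit. A sketch of forcing a fixed speed with autonegotiation off (local variables illustrative):

	/* Sketch: autoneg=false sets an_disable_admin, so the forced
	 * proto_admin speed takes effect without negotiation;
	 * autoneg=true leaves AN enabled.
	 */
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, local_port, eth_proto_new, false);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);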
@@ -4216,6 +4225,12 @@ MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
 */
 MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1);
 
+/* reg_ritr_ipv6_mc
+ * IPv6 multicast routing enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_mc, 0x00, 26, 1);
+
 enum mlxsw_reg_ritr_if_type {
 	/* VLAN interface. */
 	MLXSW_REG_RITR_VLAN_IF,
@@ -4281,6 +4296,14 @@ MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
 */
 MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1);
 
+/* reg_ritr_ipv6_mc_fe
+ * IPv6 Multicast Forwarding Enable.
+ * When disabled, forwarding is blocked but local traffic (traps and IP to me)
+ * will be enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_mc_fe, 0x04, 26, 1);
+
 /* reg_ritr_lb_en
 * Loop-back filter enable for unicast packets.
 * If the flag is set then loop-back filter for unicast packets is
@@ -4504,12 +4527,14 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
 	mlxsw_reg_ritr_ipv4_set(payload, 1);
 	mlxsw_reg_ritr_ipv6_set(payload, 1);
 	mlxsw_reg_ritr_ipv4_mc_set(payload, 1);
+	mlxsw_reg_ritr_ipv6_mc_set(payload, 1);
 	mlxsw_reg_ritr_type_set(payload, type);
 	mlxsw_reg_ritr_op_set(payload, op);
 	mlxsw_reg_ritr_rif_set(payload, rif);
 	mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
 	mlxsw_reg_ritr_ipv6_fe_set(payload, 1);
 	mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1);
+	mlxsw_reg_ritr_ipv6_mc_fe_set(payload, 1);
 	mlxsw_reg_ritr_lb_en_set(payload, 1);
 	mlxsw_reg_ritr_virtual_router_set(payload, vr_id);
 	mlxsw_reg_ritr_mtu_set(payload, mtu);
@@ -6293,30 +6318,34 @@ MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1);
 */
 MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16);
 
-/* reg_rmft2_dip4
- * Destination IPv4 address
+/* reg_rmft2_dip{4,6}
+ * Destination IPv4/6 address
 * Access: RW
 */
+MLXSW_ITEM_BUF(reg, rmft2, dip6, 0x10, 16);
 MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32);
 
-/* reg_rmft2_dip4_mask
+/* reg_rmft2_dip{4,6}_mask
 * A bit that is set directs the TCAM to compare the corresponding bit in key. A
 * bit that is clear directs the TCAM to ignore the corresponding bit in key.
 * Access: RW
 */
+MLXSW_ITEM_BUF(reg, rmft2, dip6_mask, 0x20, 16);
 MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32);
 
-/* reg_rmft2_sip4
- * Source IPv4 address
+/* reg_rmft2_sip{4,6}
+ * Source IPv4/6 address
 * Access: RW
 */
+MLXSW_ITEM_BUF(reg, rmft2, sip6, 0x30, 16);
 MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32);
 
-/* reg_rmft2_sip4_mask
+/* reg_rmft2_sip{4,6}_mask
 * A bit that is set directs the TCAM to compare the corresponding bit in key. A
 * bit that is clear directs the TCAM to ignore the corresponding bit in key.
 * Access: RW
 */
+MLXSW_ITEM_BUF(reg, rmft2, sip6_mask, 0x40, 16);
 MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32);
 
 /* reg_rmft2_flexible_action_set
@@ -6334,26 +6363,52 @@ MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80,
 			       MLXSW_REG_FLEX_ACTION_SET_LEN);
 
 static inline void
-mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
-			  enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
-			  u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
-			  const char *flexible_action_set)
+mlxsw_reg_rmft2_common_pack(char *payload, bool v, u16 offset,
+			    u16 virtual_router,
+			    enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+			    const char *flex_action_set)
 {
 	MLXSW_REG_ZERO(rmft2, payload);
 	mlxsw_reg_rmft2_v_set(payload, v);
-	mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
 	mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE);
 	mlxsw_reg_rmft2_offset_set(payload, offset);
 	mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router);
 	mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask);
 	mlxsw_reg_rmft2_irif_set(payload, irif);
+	if (flex_action_set)
+		mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
+							      flex_action_set);
+}
+
+static inline void
+mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+			  enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+			  u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask,
+			  const char *flexible_action_set)
+{
+	mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
+				    irif_mask, irif, flexible_action_set);
+	mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4);
 	mlxsw_reg_rmft2_dip4_set(payload, dip4);
 	mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask);
 	mlxsw_reg_rmft2_sip4_set(payload, sip4);
 	mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask);
-	if (flexible_action_set)
-		mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload,
-							      flexible_action_set);
+}
+
+static inline void
+mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
+			  enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif,
+			  struct in6_addr dip6, struct in6_addr dip6_mask,
+			  struct in6_addr sip6, struct in6_addr sip6_mask,
+			  const char *flexible_action_set)
+{
+	mlxsw_reg_rmft2_common_pack(payload, v, offset, virtual_router,
+				    irif_mask, irif, flexible_action_set);
+	mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV6);
+	mlxsw_reg_rmft2_dip6_memcpy_to(payload, (void *)&dip6);
+	mlxsw_reg_rmft2_dip6_mask_memcpy_to(payload, (void *)&dip6_mask);
+	mlxsw_reg_rmft2_sip6_memcpy_to(payload, (void *)&sip6);
+	mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
 }
 
 /* MFCR - Management Fan Control Register
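mlxsw_reg_rmft2_ipv6_pack() mirrors the IPv4 variant with in6_addr keys and masks. A sketch of programming an IPv6 (S, G) multicast route entry; the group/source values and the IRIF-mask enum value are placeholders taken from the surrounding definitions:

	/* Sketch: an all-ones mask requests an exact match on that field;
	 * an all-zeros mask (e.g. the source of a (*, G) route) ignores it.
	 */
	struct in6_addr exact_mask;
	struct in6_addr any = IN6ADDR_ANY_INIT;

	memset(&exact_mask, 0xff, sizeof(exact_mask));
	mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, rule_offset, vr_id,
				  MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
				  group, exact_mask, any, any,
				  flexible_action_set);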
@@ -6772,8 +6827,104 @@ MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
 */
 MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
 
+enum mlxsw_reg_mpat_span_type {
+	/* Local SPAN Ethernet.
+	 * The original packet is not encapsulated.
+	 */
+	MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0,
+
+	/* Encapsulated Remote SPAN Ethernet L3 GRE.
+	 * The packet is encapsulated with GRE header.
+	 */
+	MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3 = 0x3,
+};
+
+/* reg_mpat_span_type
+ * SPAN type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4);
+
+/* Remote SPAN - Ethernet VLAN
+ * - - - - - - - - - - - - - -
+ */
+
+/* reg_mpat_eth_rspan_vid
+ * Encapsulation header VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_vid, 0x18, 0, 12);
+
+/* Encapsulated Remote SPAN - Ethernet L2
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_version {
+	MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER = 15,
+};
+
+/* reg_mpat_eth_rspan_version
+ * RSPAN mirror header version.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_version, 0x10, 18, 4);
+
+/* reg_mpat_eth_rspan_mac
+ * Destination MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_mac, 0x12, 6);
+
+/* reg_mpat_eth_rspan_tp
+ * Tag Packet. Indicates whether the mirroring header should be VLAN tagged.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_tp, 0x18, 16, 1);
+
+/* Encapsulated Remote SPAN - Ethernet L3
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_protocol {
+	MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4,
+	MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6,
+};
+
+/* reg_mpat_eth_rspan_protocol
+ * SPAN encapsulation protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_protocol, 0x18, 24, 4);
+
+/* reg_mpat_eth_rspan_ttl
+ * Encapsulation header Time-to-Live/HopLimit.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_ttl, 0x1C, 4, 8);
+
+/* reg_mpat_eth_rspan_smac
+ * Source MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_smac, 0x22, 6);
+
+/* reg_mpat_eth_rspan_dip*
+ * Destination IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_dip4, 0x4C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_dip6, 0x40, 16);
+
+/* reg_mpat_eth_rspan_sip*
+ * Source IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_sip4, 0x5C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_sip6, 0x50, 16);
+
 static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
-				       u16 system_port, bool e)
+				       u16 system_port, bool e,
+				       enum mlxsw_reg_mpat_span_type span_type)
 {
 	MLXSW_REG_ZERO(mpat, payload);
 	mlxsw_reg_mpat_pa_id_set(payload, pa_id);
@@ -6781,6 +6932,49 @@ static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
 	mlxsw_reg_mpat_e_set(payload, e);
 	mlxsw_reg_mpat_qos_set(payload, 1);
 	mlxsw_reg_mpat_be_set(payload, 1);
+	mlxsw_reg_mpat_span_type_set(payload, span_type);
+}
+
+static inline void mlxsw_reg_mpat_eth_rspan_pack(char *payload, u16 vid)
+{
+	mlxsw_reg_mpat_eth_rspan_vid_set(payload, vid);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l2_pack(char *payload,
+				 enum mlxsw_reg_mpat_eth_rspan_version version,
+				 const char *mac,
+				 bool tp)
+{
+	mlxsw_reg_mpat_eth_rspan_version_set(payload, version);
+	mlxsw_reg_mpat_eth_rspan_mac_memcpy_to(payload, mac);
+	mlxsw_reg_mpat_eth_rspan_tp_set(payload, tp);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(char *payload, u8 ttl,
+				      const char *smac,
+				      u32 sip, u32 dip)
+{
+	mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+	mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+	mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+				    MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4);
+	mlxsw_reg_mpat_eth_rspan_sip4_set(payload, sip);
+	mlxsw_reg_mpat_eth_rspan_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
+				      const char *smac,
+				      struct in6_addr sip, struct in6_addr dip)
+{
+	mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+	mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+	mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+				    MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6);
+	mlxsw_reg_mpat_eth_rspan_sip6_memcpy_to(payload, (void *)&sip);
+	mlxsw_reg_mpat_eth_rspan_dip6_memcpy_to(payload, (void *)&dip);
 }
 
 /* MPAR - Monitoring Port Analyzer Register
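The new helpers layer encapsulation configuration on top of a basic MPAT entry. A sketch of setting up a GRE-encapsulated (ERSPAN-style) mirror agent; TTL, MACs and IP addresses are placeholders that the SPAN code would derive from the resolved route and neighbour:

	/* Sketch: local SPAN keeps MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH and
	 * needs no rspan packing; L3 GRE mirroring layers VID/L2/L3 on top.
	 */
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int err;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, dest_local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 dmac, false);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, ttl, smac, sip4, dip4);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);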
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index bf400c75fcc8..53fffd09d133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1,6 +1,6 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
@@ -71,11 +71,12 @@
 #include "spectrum_cnt.h"
 #include "spectrum_dpipe.h"
 #include "spectrum_acl_flex_actions.h"
+#include "spectrum_span.h"
 #include "../mlxfw/mlxfw.h"
 
 #define MLXSW_FWREV_MAJOR 13
-#define MLXSW_FWREV_MINOR 1530
-#define MLXSW_FWREV_SUBMINOR 152
+#define MLXSW_FWREV_MINOR 1620
+#define MLXSW_FWREV_SUBMINOR 192
 #define MLXSW_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
 
 #define MLXSW_SP_FW_FILENAME \
@@ -487,347 +488,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 }
 
-static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
-{
-	int i;
-
-	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
-		return -EIO;
-
-	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
-							  MAX_SPAN);
-	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
-					 sizeof(struct mlxsw_sp_span_entry),
-					 GFP_KERNEL);
-	if (!mlxsw_sp->span.entries)
-		return -ENOMEM;
-
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
-		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
-
-	return 0;
-}
-
-static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
-{
-	int i;
-
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
-		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
-	}
-	kfree(mlxsw_sp->span.entries);
-}
-
-static struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
-{
-	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
-	struct mlxsw_sp_span_entry *span_entry;
-	char mpat_pl[MLXSW_REG_MPAT_LEN];
-	u8 local_port = port->local_port;
-	int index;
-	int i;
-	int err;
-
-	/* find a free entry to use */
-	index = -1;
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		if (!mlxsw_sp->span.entries[i].used) {
-			index = i;
-			span_entry = &mlxsw_sp->span.entries[i];
-			break;
-		}
-	}
-	if (index < 0)
-		return NULL;
-
-	/* create a new port analayzer entry for local_port */
-	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
-	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
-	if (err)
-		return NULL;
-
-	span_entry->used = true;
-	span_entry->id = index;
-	span_entry->ref_count = 1;
-	span_entry->local_port = local_port;
-	return span_entry;
-}
-
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
-					struct mlxsw_sp_span_entry *span_entry)
-{
-	u8 local_port = span_entry->local_port;
-	char mpat_pl[MLXSW_REG_MPAT_LEN];
-	int pa_id = span_entry->id;
-
-	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
-	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
-	span_entry->used = false;
-}
-
-struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
-{
-	int i;
-
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
-		if (curr->used && curr->local_port == local_port)
-			return curr;
-	}
-	return NULL;
-}
-
-static struct mlxsw_sp_span_entry
-*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
-{
-	struct mlxsw_sp_span_entry *span_entry;
-
-	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
-					      port->local_port);
-	if (span_entry) {
-		/* Already exists, just take a reference */
-		span_entry->ref_count++;
-		return span_entry;
-	}
-
-	return mlxsw_sp_span_entry_create(port);
-}
-
-static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
-				   struct mlxsw_sp_span_entry *span_entry)
-{
-	WARN_ON(!span_entry->ref_count);
-	if (--span_entry->ref_count == 0)
-		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
-	return 0;
-}
-
-static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
-{
-	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
-	struct mlxsw_sp_span_inspected_port *p;
-	int i;
-
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
-		list_for_each_entry(p, &curr->bound_ports_list, list)
-			if (p->local_port == port->local_port &&
-			    p->type == MLXSW_SP_SPAN_EGRESS)
-				return true;
-	}
-
-	return false;
-}
-
-static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
-					 int mtu)
-{
-	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
-}
-
-static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
-{
-	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
-	char sbib_pl[MLXSW_REG_SBIB_LEN];
-	int err;
-
-	/* If port is egress mirrored, the shared buffer size should be
-	 * updated according to the mtu value
-	 */
-	if (mlxsw_sp_span_is_egress_mirror(port)) {
-		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
-
-		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
-		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
-		if (err) {
-			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
-				    enum mlxsw_sp_span_type type,
-				    struct mlxsw_sp_port *port,
-				    bool bind)
-{
-	struct mlxsw_sp_span_inspected_port *p;
-
-	list_for_each_entry(p, &span_entry->bound_ports_list, list)
-		if (type == p->type &&
-		    port->local_port == p->local_port &&
-		    bind == p->bound)
-			return p;
-	return NULL;
-}
-
-static int
-mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
-				  struct mlxsw_sp_span_entry *span_entry,
-				  enum mlxsw_sp_span_type type,
-				  bool bind)
-{
-	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
-	char mpar_pl[MLXSW_REG_MPAR_LEN];
-	int pa_id = span_entry->id;
-
-	/* bind the port to the SPAN entry */
-	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
-			    (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-}
-
-static int
-mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
-				 struct mlxsw_sp_span_entry *span_entry,
-				 enum mlxsw_sp_span_type type,
-				 bool bind)
-{
-	struct mlxsw_sp_span_inspected_port *inspected_port;
-	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
-	char sbib_pl[MLXSW_REG_SBIB_LEN];
-	int i;
-	int err;
-
-	/* A given (source port, direction) can only be bound to one analyzer,
-	 * so if a binding is requested, check for conflicts.
-	 */
-	if (bind)
-		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-			struct mlxsw_sp_span_entry *curr =
-				&mlxsw_sp->span.entries[i];
-
-			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
-								port, bind))
-				return -EEXIST;
-		}
-
-	/* if it is an egress SPAN, bind a shared buffer to it */
-	if (type == MLXSW_SP_SPAN_EGRESS) {
-		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
-							     port->dev->mtu);
-
-		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
-		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
-		if (err) {
-			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
-			return err;
-		}
-	}
-
-	if (bind) {
-		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
-							true);
-		if (err)
-			goto err_port_bind;
-	}
-
-	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
-	if (!inspected_port) {
-		err = -ENOMEM;
-		goto err_inspected_port_alloc;
-	}
-	inspected_port->local_port = port->local_port;
-	inspected_port->type = type;
-	inspected_port->bound = bind;
-	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
-
-	return 0;
-
-err_inspected_port_alloc:
-	if (bind)
-		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
-						  false);
-err_port_bind:
-	if (type == MLXSW_SP_SPAN_EGRESS) {
-		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
-		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
-	}
-	return err;
-}
-
-static void
-mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
-				 struct mlxsw_sp_span_entry *span_entry,
-				 enum mlxsw_sp_span_type type,
-				 bool bind)
-{
-	struct mlxsw_sp_span_inspected_port *inspected_port;
-	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
-	char sbib_pl[MLXSW_REG_SBIB_LEN];
-
-	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
-							     port, bind);
-	if (!inspected_port)
-		return;
-
-	if (bind)
-		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
-						  false);
-	/* remove the SBIB buffer if it was egress SPAN */
-	if (type == MLXSW_SP_SPAN_EGRESS) {
-		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
-		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
-	}
-
-	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
-
-	list_del(&inspected_port->list);
-	kfree(inspected_port);
-}
-
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
-			     struct mlxsw_sp_port *to,
-			     enum mlxsw_sp_span_type type, bool bind)
-{
-	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
-	struct mlxsw_sp_span_entry *span_entry;
-	int err;
-
-	span_entry = mlxsw_sp_span_entry_get(to);
-	if (!span_entry)
-		return -ENOENT;
-
-	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
-		   span_entry->id);
-
-	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
-	if (err)
-		goto err_port_bind;
-
-	return 0;
-
-err_port_bind:
-	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
-	return err;
-}
-
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
-			      enum mlxsw_sp_span_type type, bool bind)
-{
-	struct mlxsw_sp_span_entry *span_entry;
-
-	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
-					      destination_port);
-	if (!span_entry) {
-		netdev_err(from->dev, "no span entry found\n");
-		return;
-	}
-
-	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
-		   span_entry->id);
-	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
-}
-
 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				    bool enable, u32 rate)
 {
@@ -1380,6 +1040,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
 		xstats->tail_drop[i] =
 			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
 	}
+
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
+						  i, ppcnt_pl);
+		if (err)
+			continue;
+
+		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
+		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
+	}
 }
 
 static void update_stats_cache(struct work_struct *work)
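The xstats now also carry per-traffic-class TX counters taken from the per-prio PPCNT group. A sketch of how a consumer can turn two snapshots into per-class statistics (the *_base naming is illustrative, not from this patch):

	/* Sketch: PPCNT counters are free-running, so per-qdisc stats are
	 * computed as deltas against a snapshot taken at qdisc setup time.
	 */
	u64 tx_packets = xstats->tx_packets[tclass] - xstats_base->tx_packets[tclass];
	u64 tx_bytes = xstats->tx_bytes[tclass] - xstats_base->tx_bytes[tclass];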
@@ -1604,7 +1274,6 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 			      bool ingress)
 {
 	enum mlxsw_sp_span_type span_type;
-	struct mlxsw_sp_port *to_port;
 	struct net_device *to_dev;
 
 	to_dev = tcf_mirred_dev(a);
@@ -1613,17 +1282,10 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -EINVAL;
 	}
 
-	if (!mlxsw_sp_port_dev_check(to_dev)) {
-		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
-		return -EOPNOTSUPP;
-	}
-	to_port = netdev_priv(to_dev);
-
-	mirror->to_local_port = to_port->local_port;
 	mirror->ingress = ingress;
 	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
-					true);
+	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+					true, &mirror->span_id);
 }
 
 static void
@@ -1634,7 +1296,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	span_type = mirror->ingress ?
 			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
 				 span_type, true);
 }
 
@@ -2728,7 +2390,7 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
 	int err;
 
 	autoneg = mlxsw_sp_port->link.autoneg;
-	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
 	if (err)
 		return err;
@@ -2762,7 +2424,7 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
 	bool autoneg;
 	int err;
 
-	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false);
 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
 	if (err)
 		return err;
@@ -2780,7 +2442,7 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
 	}
 
 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
-				eth_proto_new);
+				eth_proto_new, autoneg);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
 	if (err)
 		return err;
@@ -2991,7 +2653,7 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
 
 	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
 	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
-				eth_proto_admin);
+				eth_proto_admin, mlxsw_sp_port->link.autoneg);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
 }
 
@@ -3718,6 +3380,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
 	MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
 	/* Multicast Router Traps */
 	MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false),
+	MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
 	MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
 	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
 	MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
@@ -4021,14 +3684,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
 		goto err_afa_init;
 	}
 
+	err = mlxsw_sp_span_init(mlxsw_sp);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+		goto err_span_init;
+	}
+
+	/* Initialize router after SPAN is initialized, so that the FIB and
+	 * neighbor event handlers can issue SPAN respin.
+	 */
 	err = mlxsw_sp_router_init(mlxsw_sp);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
 		goto err_router_init;
 	}
 
-	/* Initialize netdevice notifier after router is initialized, so that
-	 * the event handler can use router structures.
+	/* Initialize netdevice notifier after router and SPAN is initialized,
+	 * so that the event handler can use router structures and call SPAN
+	 * respin.
 	 */
 	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
 	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
@@ -4037,12 +3710,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
 		goto err_netdev_notifier;
 	}
 
-	err = mlxsw_sp_span_init(mlxsw_sp);
-	if (err) {
-		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
-		goto err_span_init;
-	}
-
 	err = mlxsw_sp_acl_init(mlxsw_sp);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
@@ -4068,12 +3735,12 @@ err_ports_create:
 err_dpipe_init:
 	mlxsw_sp_acl_fini(mlxsw_sp);
 err_acl_init:
-	mlxsw_sp_span_fini(mlxsw_sp);
-err_span_init:
 	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
 err_netdev_notifier:
 	mlxsw_sp_router_fini(mlxsw_sp);
 err_router_init:
+	mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
 	mlxsw_sp_afa_fini(mlxsw_sp);
 err_afa_init:
 	mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -4099,9 +3766,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 	mlxsw_sp_ports_remove(mlxsw_sp);
 	mlxsw_sp_dpipe_fini(mlxsw_sp);
 	mlxsw_sp_acl_fini(mlxsw_sp);
-	mlxsw_sp_span_fini(mlxsw_sp);
 	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
 	mlxsw_sp_router_fini(mlxsw_sp);
+	mlxsw_sp_span_fini(mlxsw_sp);
 	mlxsw_sp_afa_fini(mlxsw_sp);
 	mlxsw_sp_counter_pool_fini(mlxsw_sp);
 	mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -4113,12 +3780,8 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
 }
 
 static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
-	.used_max_vepa_channels		= 1,
-	.max_vepa_channels		= 0,
 	.used_max_mid			= 1,
 	.max_mid			= MLXSW_SP_MID_MAX,
-	.used_max_pgt			= 1,
-	.max_pgt			= 0,
 	.used_flood_tables		= 1,
 	.used_flood_mode		= 1,
 	.flood_mode			= 3,
@@ -4130,8 +3793,7 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
 	.max_ib_mc			= 0,
 	.used_max_pkey			= 1,
 	.max_pkey			= 0,
-	.used_kvd_split_data		= 1,
-	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
+	.used_kvd_sizes			= 1,
 	.kvd_hash_single_parts		= 59,
 	.kvd_hash_double_parts		= 41,
 	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
@@ -4141,73 +3803,8 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
 			.type = MLXSW_PORT_SWID_TYPE_ETH,
 		}
 	},
-	.resource_query_enable		= 1,
 };
 
-static bool
-mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
-					   u64 size)
-{
-	const struct mlxsw_config_profile *profile;
-
-	profile = &mlxsw_sp_config_profile;
-	if (size % profile->kvd_hash_granularity) {
-		NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
-		return false;
-	}
-	return true;
-}
-
-static int
-mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
-				    struct netlink_ext_ack *extack)
-{
-	NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
-	return -EINVAL;
-}
-
-static int
-mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
-					   struct netlink_ext_ack *extack)
-{
-	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int
-mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
-						struct netlink_ext_ack *extack)
-{
-	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-
-	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
-		return -EINVAL;
-
-	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
-		NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int
-mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
-						struct netlink_ext_ack *extack)
-{
-	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-
-	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
-		return -EINVAL;
-
-	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
-		NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
-		return -EINVAL;
-	}
-	return 0;
-}
-
 static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
 {
 	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -4216,23 +3813,10 @@ static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
 	return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
 }
 
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
-	.size_validate = mlxsw_sp_resource_kvd_size_validate,
-};
-
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
-	.size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
+static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
 	.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
 };
 
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
-	.size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
-};
-
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
-	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
-};
-
 static void
 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
 				      struct devlink_resource_size_params *kvd_size_params,
@@ -4291,17 +3875,16 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 
 	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
-					true, kvd_size,
-					MLXSW_SP_RESOURCE_KVD,
+					kvd_size, MLXSW_SP_RESOURCE_KVD,
 					DEVLINK_RESOURCE_ID_PARENT_TOP,
 					&kvd_size_params,
-					&mlxsw_sp_resource_kvd_ops);
+					NULL);
 	if (err)
 		return err;
 
 	linear_size = profile->kvd_linear_size;
 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
-					false, linear_size,
+					linear_size,
 					MLXSW_SP_RESOURCE_KVD_LINEAR,
 					MLXSW_SP_RESOURCE_KVD,
 					&linear_size_params,
@@ -4309,27 +3892,31 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
 	if (err)
 		return err;
 
+	err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
+	if (err)
+		return err;
+
 	double_size = kvd_size - linear_size;
 	double_size *= profile->kvd_hash_double_parts;
 	double_size /= profile->kvd_hash_double_parts +
 		       profile->kvd_hash_single_parts;
-	double_size = rounddown(double_size, profile->kvd_hash_granularity);
+	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
-					false, double_size,
+					double_size,
 					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
 					MLXSW_SP_RESOURCE_KVD,
 					&hash_double_size_params,
-					&mlxsw_sp_resource_kvd_hash_double_ops);
+					NULL);
 	if (err)
 		return err;
 
 	single_size = kvd_size - double_size - linear_size;
 	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
-					false, single_size,
+					single_size,
 					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
 					MLXSW_SP_RESOURCE_KVD,
 					&hash_single_size_params,
-					&mlxsw_sp_resource_kvd_hash_single_ops);
+					NULL);
 	if (err)
 		return err;
 
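devlink_resource_register() is called here without the old top-hierarchy flag and, for statically sized resources, without validation ops. A sketch of registering a nested resource under KVD linear in this style; the size variable and size-params names are illustrative, while the name/id constants follow the new spectrum.h definitions:

	/* Sketch: parenthood is now expressed purely via the parent resource
	 * id (DEVLINK_RESOURCE_ID_PARENT_TOP for roots); resources without
	 * runtime occupancy reporting pass NULL ops.
	 */
	err = devlink_resource_register(devlink,
					MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
					singles_size,
					MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
					MLXSW_SP_RESOURCE_KVD_LINEAR,
					&singles_size_params, NULL);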
@@ -4346,8 +3933,7 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
 	int err;
 
 	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
-	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
-	    !profile->used_kvd_split_data)
+	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
 		return -EIO;
 
 	/* The hash part is what left of the kvd without the
@@ -4373,7 +3959,7 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
 		double_size /= profile->kvd_hash_double_parts +
 			       profile->kvd_hash_single_parts;
 		*p_double_size = rounddown(double_size,
-					   profile->kvd_hash_granularity);
+					   MLXSW_SP_KVD_GRANULARITY);
 	}
 
 	err = devlink_resource_size_get(devlink,
@@ -4415,6 +4001,7 @@ static struct mlxsw_driver mlxsw_sp_driver = {
 	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
 	.txhdr_len			= MLXSW_TXHDR_LEN,
 	.profile			= &mlxsw_sp_config_profile,
+	.res_query_enabled		= true,
 };
 
 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
@@ -4583,13 +4170,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
 	u16 lag_id;
 
 	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
-		NL_SET_ERR_MSG(extack,
-			       "spectrum: Exceeded number of supported LAG devices");
+		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
 		return false;
 	}
 	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
-		NL_SET_ERR_MSG(extack,
-			       "spectrum: LAG device using unsupported Tx type");
+		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
 		return false;
 	}
 	return true;
@@ -4831,8 +4416,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 		    !netif_is_lag_master(upper_dev) &&
 		    !netif_is_bridge_master(upper_dev) &&
 		    !netif_is_ovs_master(upper_dev)) {
-			NL_SET_ERR_MSG(extack,
-				       "spectrum: Unknown upper device type");
+			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
 			return -EINVAL;
 		}
 		if (!info->linking)
@@ -4841,8 +4425,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 		    (!netif_is_bridge_master(upper_dev) ||
 		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
 							  upper_dev))) {
-			NL_SET_ERR_MSG(extack,
-				       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
+			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
 			return -EINVAL;
 		}
 		if (netif_is_lag_master(upper_dev) &&
@@ -4850,24 +4433,20 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 						 info->upper_info, extack))
 			return -EINVAL;
 		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
-			NL_SET_ERR_MSG(extack,
-				       "spectrum: Master device is a LAG master and this device has a VLAN");
+			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
 			return -EINVAL;
 		}
 		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
 		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
-			NL_SET_ERR_MSG(extack,
-				       "spectrum: Can not put a VLAN on a LAG port");
+			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
 			return -EINVAL;
 		}
 		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
-			NL_SET_ERR_MSG(extack,
-				       "spectrum: Master device is an OVS master and this device has a VLAN");
+			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
 			return -EINVAL;
 		}
 		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
-			NL_SET_ERR_MSG(extack,
-				       "spectrum: Can not put a VLAN on an OVS port");
+			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
 			return -EINVAL;
 		}
 		break;
@@ -4980,7 +4559,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
 	case NETDEV_PRECHANGEUPPER:
 		upper_dev = info->upper_dev;
 		if (!netif_is_bridge_master(upper_dev)) {
-			NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
+			NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
 			return -EINVAL;
 		}
 		if (!info->linking)
@@ -4989,7 +4568,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
 		    (!netif_is_bridge_master(upper_dev) ||
 		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
 							  upper_dev))) {
-			NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
+			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
 			return -EINVAL;
 		}
 		break;
@@ -5067,10 +4646,18 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
 				    unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct mlxsw_sp_span_entry *span_entry;
 	struct mlxsw_sp *mlxsw_sp;
 	int err = 0;
 
 	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+	if (event == NETDEV_UNREGISTER) {
+		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
+		if (span_entry)
+			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
+	}
+	mlxsw_sp_span_respin(mlxsw_sp);
+
 	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
 		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
 						       event, ptr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 92064db2ae44..82820ba43728 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -70,16 +70,23 @@
 #define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
 #define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
 #define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
 
 enum mlxsw_sp_resource_id {
-	MLXSW_SP_RESOURCE_KVD,
+	MLXSW_SP_RESOURCE_KVD = 1,
 	MLXSW_SP_RESOURCE_KVD_LINEAR,
 	MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
 	MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+	MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+	MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+	MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
 };
 
 struct mlxsw_sp_port;
 struct mlxsw_sp_rif;
+struct mlxsw_sp_span_entry;
 
 struct mlxsw_sp_upper {
 	struct net_device *dev;
@@ -111,35 +118,13 @@ struct mlxsw_sp_mid {
 	unsigned long *ports_in_mid; /* bits array */
 };
 
-enum mlxsw_sp_span_type {
-	MLXSW_SP_SPAN_EGRESS,
-	MLXSW_SP_SPAN_INGRESS
-};
-
-struct mlxsw_sp_span_inspected_port {
-	struct list_head list;
-	enum mlxsw_sp_span_type type;
-	u8 local_port;
-
-	/* Whether this is a directly bound mirror (port-to-port) or an ACL.
-	 */
-	bool bound;
-};
-
-struct mlxsw_sp_span_entry {
-	u8 local_port;
-	bool used;
-	struct list_head bound_ports_list;
-	int ref_count;
-	int id;
-};
-
 enum mlxsw_sp_port_mall_action_type {
 	MLXSW_SP_PORT_MALL_MIRROR,
 	MLXSW_SP_PORT_MALL_SAMPLE,
 };
 
 struct mlxsw_sp_port_mall_mirror_tc_entry {
-	u8 to_local_port;
+	int span_id;
 	bool ingress;
 };
 
@@ -226,6 +211,8 @@ struct mlxsw_sp_port_xstats {
 	u64 wred_drop[TC_MAX_QUEUE];
 	u64 tail_drop[TC_MAX_QUEUE];
 	u64 backlog[TC_MAX_QUEUE];
+	u64 tx_bytes[IEEE_8021QAZ_MAX_TCS];
+	u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
 };
 
 struct mlxsw_sp_port {
@@ -263,6 +250,7 @@ struct mlxsw_sp_port {
 	struct mlxsw_sp_port_sample *sample;
 	struct list_head vlans_list;
 	struct mlxsw_sp_qdisc *root_qdisc;
+	struct mlxsw_sp_qdisc *tclass_qdiscs;
 	unsigned acl_rule_count;
 	struct mlxsw_sp_acl_block *ing_acl_block;
 	struct mlxsw_sp_acl_block *eg_acl_block;
@@ -400,16 +388,6 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
-			     struct mlxsw_sp_port *to,
-			     enum mlxsw_sp_span_type type,
-			     bool bind);
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
-			      u8 destination_port,
-			      enum mlxsw_sp_span_type type,
-			      bool bind);
-struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
 
 /* spectrum_dcb.c */
 #ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -465,6 +443,7 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
 				   unsigned int entry_count,
 				   unsigned int *p_alloc_size);
 u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
 
 struct mlxsw_sp_acl_rule_info {
 	unsigned int priority;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 92d90ed7207e..79b1fa27a9a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -160,6 +160,13 @@ bool mlxsw_sp_acl_block_disabled(struct mlxsw_sp_acl_block *block)
 	return block->disable_count;
 }
 
+static bool
+mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
+{
+	/* We hold a reference on ruleset ourselves */
+	return ruleset->ref_count == 2;
+}
+
 static int
 mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
 			  struct mlxsw_sp_acl_block *block,
@@ -341,21 +348,8 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_ht_insert;
 
-	if (!chain_index) {
-		/* We only need ruleset with chain index 0, the implicit one,
-		 * to be directly bound to device. The rest of the rulesets
-		 * are bound by "Goto action set".
-		 */
-		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
-		if (err)
-			goto err_ruleset_bind;
-	}
-
 	return ruleset;
 
-err_ruleset_bind:
-	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
-			       mlxsw_sp_acl_ruleset_ht_params);
 err_ht_insert:
 	ops->ruleset_del(mlxsw_sp, ruleset->priv);
 err_ops_ruleset_add:
@@ -369,12 +363,8 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
 					 struct mlxsw_sp_acl_ruleset *ruleset)
 {
 	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
-	struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
-	u32 chain_index = ruleset->ht_key.chain_index;
 	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
 
-	if (!chain_index)
-		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, block);
 	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
 			       mlxsw_sp_acl_ruleset_ht_params);
 	ops->ruleset_del(mlxsw_sp, ruleset->priv);
@@ -577,7 +567,6 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
 				  struct net_device *out_dev)
 {
 	struct mlxsw_sp_acl_block_binding *binding;
-	struct mlxsw_sp_port *out_port;
 	struct mlxsw_sp_port *in_port;
 
 	if (!list_is_singular(&block->binding_list))
@@ -586,16 +575,10 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
 	binding = list_first_entry(&block->binding_list,
 				   struct mlxsw_sp_acl_block_binding, list);
 	in_port = binding->mlxsw_sp_port;
-	if (!mlxsw_sp_port_dev_check(out_dev))
-		return -EINVAL;
-
-	out_port = netdev_priv(out_dev);
-	if (out_port->mlxsw_sp != mlxsw_sp)
-		return -EINVAL;
 
 	return mlxsw_afa_block_append_mirror(rulei->act_block,
 					     in_port->local_port,
-					     out_port->local_port,
+					     out_dev,
 					     binding->ingress);
 }
 
@@ -700,10 +683,25 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
 	if (err)
 		goto err_rhashtable_insert;
 
+	if (!ruleset->ht_key.chain_index &&
+	    mlxsw_sp_acl_ruleset_is_singular(ruleset)) {
+		/* We only need ruleset with chain index 0, the implicit
+		 * one, to be directly bound to device. The rest of the
+		 * rulesets are bound by "Goto action set".
+		 */
+		err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset,
+						      ruleset->ht_key.block);
+		if (err)
+			goto err_ruleset_block_bind;
+	}
+
 	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
 	ruleset->ht_key.block->rule_count++;
 	return 0;
 
+err_ruleset_block_bind:
+	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+			       mlxsw_sp_acl_rule_ht_params);
 err_rhashtable_insert:
 	ops->rule_del(mlxsw_sp, rule->priv);
 	return err;
@@ -717,6 +715,10 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
 
 	ruleset->ht_key.block->rule_count--;
 	list_del(&rule->list);
+	if (!ruleset->ht_key.chain_index &&
+	    mlxsw_sp_acl_ruleset_is_singular(ruleset))
+		mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
+						  ruleset->ht_key.block);
 	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
 			       mlxsw_sp_acl_rule_ht_params);
 	ops->rule_del(mlxsw_sp, rule->priv);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 6ca6894125f0..510ce48d87f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -1,6 +1,6 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com> * @@ -35,6 +35,7 @@ #include "spectrum_acl_flex_actions.h" #include "core_acl_flex_actions.h" +#include "spectrum_span.h" #define MLXSW_SP_KVDL_ACT_EXT_SIZE 1 @@ -125,40 +126,23 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index) } static int -mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port, +mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, + const struct net_device *out_dev, bool ingress, int *p_span_id) { - struct mlxsw_sp_port *in_port, *out_port; - struct mlxsw_sp_span_entry *span_entry; + struct mlxsw_sp_port *in_port; struct mlxsw_sp *mlxsw_sp = priv; enum mlxsw_sp_span_type type; - int err; type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; - out_port = mlxsw_sp->ports[local_out_port]; in_port = mlxsw_sp->ports[local_in_port]; - err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false); - if (err) - return err; - - span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port); - if (!span_entry) { - err = -ENOENT; - goto err_span_entry_find; - } - - *p_span_id = span_entry->id; - return 0; - -err_span_entry_find: - mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false); - return err; + return mlxsw_sp_span_mirror_add(in_port, out_dev, type, + false, p_span_id); } static void -mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port, - bool ingress) +mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) { struct mlxsw_sp *mlxsw_sp = priv; struct mlxsw_sp_port *in_port; @@ -167,7 +151,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port, type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; in_port = mlxsw_sp->ports[local_in_port]; - mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false); + mlxsw_sp_span_mirror_del(in_port, span_id, type, false); } static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h index 2726192836ad..bd6d552d95b9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h @@ -33,8 +33,8 @@ * POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H -#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H +#ifndef _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H +#define _MLXSW_SPECTRUM_ACL_FLEX_ACTIONS_H #include "spectrum.h" diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index c6e180c2be1e..ad1b548e3cac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -228,10 +228,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, if (err) return err; - err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); - if (err) - goto err_group_update; - err = rhashtable_init(&group->chunk_ht, &mlxsw_sp_acl_tcam_chunk_ht_params); if (err) @@ -240,7 +236,6 @@ mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, return 0; err_rhashtable_init: -err_group_update: mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 7502e53447bd..98d896c14b87 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Petr Machata <petrm@mellanox.com> + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -33,126 +33,125 @@ */ #include <net/ip_tunnels.h> +#include <net/ip6_tunnel.h> #include "spectrum_ipip.h" struct ip_tunnel_parm -mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev) +mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev) { struct ip_tunnel *tun = netdev_priv(ol_dev); return tun->parms; } -static bool mlxsw_sp_ipip_parms_has_ikey(struct ip_tunnel_parm parms) +struct __ip6_tnl_parm +mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev) +{ + struct ip6_tnl *tun = netdev_priv(ol_dev); + + return tun->parms; +} + +static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms) { return !!(parms.i_flags & TUNNEL_KEY); } -static bool mlxsw_sp_ipip_parms_has_okey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms) { return !!(parms.o_flags & TUNNEL_KEY); } -static u32 mlxsw_sp_ipip_parms_ikey(struct ip_tunnel_parm parms) +static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms) { - return mlxsw_sp_ipip_parms_has_ikey(parms) ? + return mlxsw_sp_ipip_parms4_has_ikey(parms) ? be32_to_cpu(parms.i_key) : 0; } -static u32 mlxsw_sp_ipip_parms_okey(struct ip_tunnel_parm parms) +static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms) { - return mlxsw_sp_ipip_parms_has_okey(parms) ? + return mlxsw_sp_ipip_parms4_has_okey(parms) ? 
be32_to_cpu(parms.o_key) : 0; } -static __be32 mlxsw_sp_ipip_parms_saddr4(struct ip_tunnel_parm parms) +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms) { - return parms.iph.saddr; + return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms_saddr(enum mlxsw_sp_l3proto proto, - struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms) { - switch (proto) { - case MLXSW_SP_L3_PROTO_IPV4: - return (union mlxsw_sp_l3addr) { - .addr4 = mlxsw_sp_ipip_parms_saddr4(parms), - }; - case MLXSW_SP_L3_PROTO_IPV6: - break; - } - - WARN_ON(1); - return (union mlxsw_sp_l3addr) { - .addr4 = 0, - }; + return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr }; } -static __be32 mlxsw_sp_ipip_parms_daddr4(struct ip_tunnel_parm parms) +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms) { - return parms.iph.daddr; + return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms_daddr(enum mlxsw_sp_l3proto proto, - struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms) +{ + return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr }; +} + +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) { + struct ip_tunnel_parm parms4; + struct __ip6_tnl_parm parms6; + switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: - return (union mlxsw_sp_l3addr) { - .addr4 = mlxsw_sp_ipip_parms_daddr4(parms), - }; + parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + return mlxsw_sp_ipip_parms4_saddr(parms4); case MLXSW_SP_L3_PROTO_IPV6: - break; + parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); + return mlxsw_sp_ipip_parms6_saddr(parms6); } WARN_ON(1); - return (union mlxsw_sp_l3addr) { - .addr4 = 0, - }; -} - -static bool mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev) -{ - return mlxsw_sp_ipip_parms_has_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev)); + return (union mlxsw_sp_l3addr) {0}; } -static bool mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev) +static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) { - return mlxsw_sp_ipip_parms_has_okey(mlxsw_sp_ipip_netdev_parms(ol_dev)); -} -static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev) -{ - return mlxsw_sp_ipip_parms_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev)); -} + struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); -static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev) -{ - return mlxsw_sp_ipip_parms_okey(mlxsw_sp_ipip_netdev_parms(ol_dev)); + return mlxsw_sp_ipip_parms4_daddr(parms4).addr4; } -union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { - return mlxsw_sp_ipip_parms_saddr(proto, - mlxsw_sp_ipip_netdev_parms(ol_dev)); -} + struct ip_tunnel_parm parms4; + struct __ip6_tnl_parm parms6; -static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) -{ - return mlxsw_sp_ipip_parms_daddr4(mlxsw_sp_ipip_netdev_parms(ol_dev)); + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + return mlxsw_sp_ipip_parms4_daddr(parms4); + case MLXSW_SP_L3_PROTO_IPV6: + parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); + return mlxsw_sp_ipip_parms6_daddr(parms6); + } + + WARN_ON(1); + return (union mlxsw_sp_l3addr) {0}; } -static 
union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, - const struct net_device *ol_dev) +bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) { - return mlxsw_sp_ipip_parms_daddr(proto, - mlxsw_sp_ipip_netdev_parms(ol_dev)); + union mlxsw_sp_l3addr naddr = {0}; + + return !memcmp(&addr, &naddr, sizeof(naddr)); } static int @@ -176,12 +175,17 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp, u32 tunnel_index, struct mlxsw_sp_ipip_entry *ipip_entry) { - bool has_ikey = mlxsw_sp_ipip_netdev_has_ikey(ipip_entry->ol_dev); u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); - u32 ikey = mlxsw_sp_ipip_netdev_ikey(ipip_entry->ol_dev); char rtdp_pl[MLXSW_REG_RTDP_LEN]; + struct ip_tunnel_parm parms; unsigned int type_check; + bool has_ikey; u32 daddr4; + u32 ikey; + + parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); + has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms); + ikey = mlxsw_sp_ipip_parms4_ikey(parms); mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); @@ -243,15 +247,14 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto, { union mlxsw_sp_l3addr saddr = mlxsw_sp_ipip_netdev_saddr(proto, ol_dev); union mlxsw_sp_l3addr daddr = mlxsw_sp_ipip_netdev_daddr(proto, ol_dev); - union mlxsw_sp_l3addr naddr = {0}; /* Tunnels with unset local or remote address are valid in Linux and * used for lightweight tunnels (LWT) and Non-Broadcast Multi-Access * (NBMA) tunnels. In principle these can be offloaded, but the driver * currently doesn't support this. So punt. */ - return memcmp(&saddr, &naddr, sizeof(naddr)) && - memcmp(&daddr, &naddr, sizeof(naddr)); + return !mlxsw_sp_l3addr_is_zero(saddr) && + !mlxsw_sp_l3addr_is_zero(daddr); } static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp, @@ -273,14 +276,15 @@ static struct mlxsw_sp_rif_ipip_lb_config mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev) { + struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; - lb_ipipt = mlxsw_sp_ipip_netdev_has_okey(ol_dev) ? + lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ? 
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; return (struct mlxsw_sp_rif_ipip_lb_config){ .lb_ipipt = lb_ipipt, - .okey = mlxsw_sp_ipip_netdev_okey(ol_dev), + .okey = mlxsw_sp_ipip_parms4_okey(parms), .ul_protocol = MLXSW_SP_L3_PROTO_IPV4, .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4, ol_dev), @@ -300,16 +304,12 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, bool update_nhs = false; int err = 0; - new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev); + new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); - new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4, - new_parms); - old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4, - ipip_entry->parms); - new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4, - new_parms); - old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4, - ipip_entry->parms); + new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms); + old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4); + new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms); + old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4); if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) { u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev); @@ -326,14 +326,14 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, } update_tunnel = true; - } else if ((mlxsw_sp_ipip_parms_okey(ipip_entry->parms) != - mlxsw_sp_ipip_parms_okey(new_parms)) || - ipip_entry->parms.link != new_parms.link) { + } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) != + mlxsw_sp_ipip_parms4_okey(new_parms)) || + ipip_entry->parms4.link != new_parms.link) { update_tunnel = true; } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) { update_nhs = true; - } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) != - mlxsw_sp_ipip_parms_ikey(new_parms)) { + } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) != + mlxsw_sp_ipip_parms4_ikey(new_parms)) { update_decap = true; } @@ -350,7 +350,7 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, false, false, false, extack); - ipip_entry->parms = new_parms; + ipip_entry->parms4 = new_parms; return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 04b08d9d76e9..6909d867bb59 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -1,7 +1,7 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Petr Machata <petrm@mellanox.com> + * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -37,14 +37,19 @@ #include "spectrum_router.h" #include <net/ip_fib.h> +#include <linux/if_tunnel.h> struct ip_tunnel_parm -mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev); +mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); +struct __ip6_tnl_parm +mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev); union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev); +bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr); + enum mlxsw_sp_ipip_type { MLXSW_SP_IPIP_TYPE_GRE4, MLXSW_SP_IPIP_TYPE_MAX, @@ -56,7 +61,9 @@ struct mlxsw_sp_ipip_entry { struct mlxsw_sp_rif_ipip_lb *ol_lb; struct mlxsw_sp_fib_entry *decap_fib_entry; struct list_head ipip_list_node; - struct ip_tunnel_parm parms; + union { + struct ip_tunnel_parm parms4; + }; }; struct mlxsw_sp_ipip_ops { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index 55f9d2d70f9e..8796db44dcc3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -55,24 +55,47 @@ #define MLXSW_SP_KVDL_LARGE_CHUNKS_END \ (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1) -#define MLXSW_SP_CHUNK_MAX 32 -#define MLXSW_SP_LARGE_CHUNK_MAX 512 +#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1 +#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32 +#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512 struct mlxsw_sp_kvdl_part_info { unsigned int part_index; unsigned int start_index; unsigned int end_index; unsigned int alloc_size; + enum mlxsw_sp_resource_id resource_id; }; +enum mlxsw_sp_kvdl_part_id { + MLXSW_SP_KVDL_PART_ID_SINGLE, + MLXSW_SP_KVDL_PART_ID_CHUNKS, + MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS, +}; + +#define MLXSW_SP_KVDL_PART_INFO(id) \ +[MLXSW_SP_KVDL_PART_ID_##id] = { \ + .start_index = MLXSW_SP_KVDL_##id##_BASE, \ + .end_index = MLXSW_SP_KVDL_##id##_END, \ + .alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \ + .resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \ +} + +static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = { + MLXSW_SP_KVDL_PART_INFO(SINGLE), + MLXSW_SP_KVDL_PART_INFO(CHUNKS), + MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS), +}; + +#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info) + struct mlxsw_sp_kvdl_part { - struct list_head list; - const struct mlxsw_sp_kvdl_part_info *info; + struct mlxsw_sp_kvdl_part_info info; unsigned long usage[0]; /* Entries */ }; struct mlxsw_sp_kvdl { - struct list_head parts_list; + struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN]; }; static struct mlxsw_sp_kvdl_part * @@ -80,11 +103,13 @@ mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl, unsigned int alloc_size) { struct mlxsw_sp_kvdl_part *part, *min_part = NULL; + int i; - list_for_each_entry(part, &kvdl->parts_list, list) { - if (alloc_size <= part->info->alloc_size && + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (alloc_size <= part->info.alloc_size && (!min_part || - part->info->alloc_size <= min_part->info->alloc_size)) + part->info.alloc_size <= min_part->info.alloc_size)) min_part = part; } @@ -95,10 +120,12 @@ static struct mlxsw_sp_kvdl_part * mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index) { struct 
mlxsw_sp_kvdl_part *part; + int i; - list_for_each_entry(part, &kvdl->parts_list, list) { - if (kvdl_index >= part->info->start_index && - kvdl_index <= part->info->end_index) + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { + part = kvdl->parts[i]; + if (kvdl_index >= part->info.start_index && + kvdl_index <= part->info.end_index) return part; } @@ -122,7 +149,7 @@ mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info, static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, u32 *p_kvdl_index) { - const struct mlxsw_sp_kvdl_part_info *info = part->info; + const struct mlxsw_sp_kvdl_part_info *info = &part->info; unsigned int entry_index, nr_entries; nr_entries = (info->end_index - info->start_index + 1) / @@ -132,8 +159,7 @@ static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, return -ENOBUFS; __set_bit(entry_index, part->usage); - *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(part->info, - entry_index); + *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index); return 0; } @@ -141,10 +167,10 @@ static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part, u32 kvdl_index) { + const struct mlxsw_sp_kvdl_part_info *info = &part->info; unsigned int entry_index; - entry_index = mlxsw_sp_kvdl_index_entry_index(part->info, - kvdl_index); + entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index); __clear_bit(entry_index, part->usage); }
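
The alloc/free pair above is a plain bitmap allocator: each bit of usage[] stands for one alloc_size-wide block of the part, the first clear bit is claimed, and the bit number is scaled back into a global KVDL index. A self-contained userspace sketch of the same bookkeeping, under an assumed part geometry (the names and sizes below are illustrative, not the driver's):

#include <limits.h>
#include <stdio.h>

#define START_INDEX 0u		/* assumed part layout */
#define END_INDEX 255u
#define ALLOC_SIZE 32u		/* entries per block, like the "chunks" part */

#define NR_SLOTS ((END_INDEX - START_INDEX + 1) / ALLOC_SIZE)
#define BITS_PER_WORD (CHAR_BIT * sizeof(unsigned long))

static unsigned long usage[(NR_SLOTS + BITS_PER_WORD - 1) / BITS_PER_WORD];

static int slot_alloc(unsigned int *kvdl_index)
{
	unsigned int i;

	for (i = 0; i < NR_SLOTS; i++) {
		unsigned long *word = &usage[i / BITS_PER_WORD];
		unsigned long bit = 1UL << (i % BITS_PER_WORD);

		if (!(*word & bit)) {	/* first zero bit wins */
			*word |= bit;	/* __set_bit() */
			/* entry index -> KVDL index, as in the driver */
			*kvdl_index = START_INDEX + i * ALLOC_SIZE;
			return 0;
		}
	}
	return -1;	/* -ENOBUFS in the driver */
}

static void slot_free(unsigned int kvdl_index)
{
	unsigned int i = (kvdl_index - START_INDEX) / ALLOC_SIZE;

	/* __clear_bit() */
	usage[i / BITS_PER_WORD] &= ~(1UL << (i % BITS_PER_WORD));
}

int main(void)
{
	unsigned int idx;

	if (!slot_alloc(&idx))
		printf("got KVDL index %u\n", idx);	/* prints 0 */
	slot_free(idx);
	return 0;
}

@@ -183,135 +209,212 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(part)) return PTR_ERR(part); - *p_alloc_size = part->info->alloc_size; + *p_alloc_size = part->info.alloc_size; return 0; } -static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = { - { - .part_index = 0, - .start_index = MLXSW_SP_KVDL_SINGLE_BASE, - .end_index = MLXSW_SP_KVDL_SINGLE_END, - .alloc_size = 1, - }, - { - .part_index = 1, - .start_index = MLXSW_SP_KVDL_CHUNKS_BASE, - .end_index = MLXSW_SP_KVDL_CHUNKS_END, - .alloc_size = MLXSW_SP_CHUNK_MAX, - }, - { - .part_index = 2, - .start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE, - .end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END, - .alloc_size = MLXSW_SP_LARGE_CHUNK_MAX, - }, -}; - -static struct mlxsw_sp_kvdl_part * -mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index) +static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part, + struct mlxsw_sp_kvdl_part *part_prev, + unsigned int size) { - struct mlxsw_sp_kvdl_part *part; - list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) { - if (part->info->part_index == part_index) - return part; + if (!part_prev) { + part->info.end_index = size - 1; + } else { + part->info.start_index = part_prev->info.end_index + 1; + part->info.end_index = part->info.start_index + size - 1; } - - return NULL; } -static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, - unsigned int part_index) +static struct mlxsw_sp_kvdl_part * +mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_kvdl_part_info *info, + struct mlxsw_sp_kvdl_part *part_prev) { - const struct mlxsw_sp_kvdl_part_info *info; + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_kvdl_part *part; + bool need_update = true; unsigned int nr_entries; size_t usage_size; + u64 resource_size; + int err; - info = &kvdl_parts_info[part_index]; + err = devlink_resource_size_get(devlink, info->resource_id, + &resource_size); + if (err) { + need_update = false; + resource_size = 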
info->end_index - info->start_index + 1; + } - nr_entries = (info->end_index - info->start_index + 1) / - info->alloc_size; + nr_entries = div_u64(resource_size, info->alloc_size); usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); if (!part) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - part->info = info; - list_add(&part->list, &mlxsw_sp->kvdl->parts_list); + memcpy(&part->info, info, sizeof(part->info)); - return 0; + if (need_update) + mlxsw_sp_kvdl_part_update(part, part_prev, resource_size); + return part; } -static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp, - unsigned int part_index) +static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part) { - struct mlxsw_sp_kvdl_part *part; - - part = mlxsw_sp_kvdl_part_find(mlxsw_sp, part_index); - if (!part) - return; - - list_del(&part->list); kfree(part); } static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp) { + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; + const struct mlxsw_sp_kvdl_part_info *info; + struct mlxsw_sp_kvdl_part *part_prev = NULL; int err, i; - INIT_LIST_HEAD(&mlxsw_sp->kvdl->parts_list); - - for (i = 0; i < ARRAY_SIZE(kvdl_parts_info); i++) { - err = mlxsw_sp_kvdl_part_init(mlxsw_sp, i); - if (err) + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) { + info = &mlxsw_sp_kvdl_parts_info[i]; + kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info, + part_prev); + if (IS_ERR(kvdl->parts[i])) { + err = PTR_ERR(kvdl->parts[i]); goto err_kvdl_part_init; + } + part_prev = kvdl->parts[i]; } - return 0; err_kvdl_part_init: for (i--; i >= 0; i--) - mlxsw_sp_kvdl_part_fini(mlxsw_sp, i); + mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); return err; } static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp) { + struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl; int i; - for (i = ARRAY_SIZE(kvdl_parts_info) - 1; i >= 0; i--) - mlxsw_sp_kvdl_part_fini(mlxsw_sp, i); + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) + mlxsw_sp_kvdl_part_fini(kvdl->parts[i]); } static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part) { + const struct mlxsw_sp_kvdl_part_info *info = &part->info; unsigned int nr_entries; int bit = -1; u64 occ = 0; - nr_entries = (part->info->end_index - - part->info->start_index + 1) / - part->info->alloc_size; + nr_entries = (info->end_index - + info->start_index + 1) / + info->alloc_size; while ((bit = find_next_bit(part->usage, nr_entries, bit + 1)) < nr_entries) - occ += part->info->alloc_size; + occ += info->alloc_size; return occ; } u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp) { - struct mlxsw_sp_kvdl_part *part; u64 occ = 0; + int i; - list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) - occ += mlxsw_sp_kvdl_part_occ(part); + for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) + occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]); return occ; } +static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE]; + return mlxsw_sp_kvdl_part_occ(part); +} + +static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS]; + return 
mlxsw_sp_kvdl_part_occ(part); +} + +static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS]; + return mlxsw_sp_kvdl_part_occ(part); +} + +static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = { + .occ_get = mlxsw_sp_kvdl_single_occ_get, +}; + +static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = { + .occ_get = mlxsw_sp_kvdl_chunks_occ_get, +}; + +static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = { + .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get, +}; + +int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + static struct devlink_resource_size_params size_params; + u32 kvdl_max_size; + int err; + + kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) - + MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE); + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES, + MLXSW_SP_KVDL_SINGLE_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params, + &mlxsw_sp_kvdl_single_ops); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS, + MLXSW_SP_KVDL_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params, + &mlxsw_sp_kvdl_chunks_ops); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, 0, kvdl_max_size, + MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE, + MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS, + MLXSW_SP_RESOURCE_KVD_LINEAR, + &size_params, + &mlxsw_sp_kvdl_chunks_large_ops); + return err; +}
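
The occ_get callbacks and registrations above are what expose each KVDL part as a nested devlink resource with live occupancy. Assuming the registered name strings resolve to the paths shown below (the exact strings come from the MLXSW_SP_RESOURCE_NAME_* definitions, which are not part of this hunk, and the PCI address is only an example), inspecting and resizing a part from userspace would look roughly like:

$ devlink resource show pci/0000:03:00.0
$ devlink resource set pci/0000:03:00.0 path /kvd/linear/singles size 16384
$ devlink dev reload pci/0000:03:00.0

The new size only takes effect on reload, which is when mlxsw_sp_kvdl_part_init() re-reads it through devlink_resource_size_get(), as seen in the earlier hunk.

+ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_kvdl *kvdl; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index d20b143de3b4..a82539609d49 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -33,6 +33,7 @@ */ #include <linux/rhashtable.h> +#include <net/ipv6.h> #include "spectrum_mr.h" #include "spectrum_router.h" @@ -47,6 +48,11 @@ struct mlxsw_sp_mr { /* priv has to be always the last item */ }; +struct mlxsw_sp_mr_vif; +struct mlxsw_sp_mr_vif_ops { + bool (*is_regular)(const struct mlxsw_sp_mr_vif *vif); +}; + struct mlxsw_sp_mr_vif { struct net_device *dev; const struct mlxsw_sp_rif *rif; @@ -61,6 +67,9 @@ struct mlxsw_sp_mr_vif { * instance is used as an ingress VIF */ struct list_head route_ivif_list; + + /* Protocol specific operations for a VIF */ + const struct mlxsw_sp_mr_vif_ops *ops; }; struct mlxsw_sp_mr_route_vif_entry { @@ -70,6 +79,17 @@ struct mlxsw_sp_mr_route_vif_entry { struct mlxsw_sp_mr_route *mr_route; }; +struct mlxsw_sp_mr_table; +struct mlxsw_sp_mr_table_ops { + bool (*is_route_valid)(const struct 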
mlxsw_sp_mr_table *mr_table, + const struct mr_mfc *mfc); + void (*key_create)(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_key *key, + struct mr_mfc *mfc); + bool (*is_route_starg)(const struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_mr_route *mr_route); +}; + struct mlxsw_sp_mr_table { struct list_head node; enum mlxsw_sp_l3proto proto; @@ -78,6 +98,7 @@ struct mlxsw_sp_mr_table { struct mlxsw_sp_mr_vif vifs[MAXVIFS]; struct list_head route_list; struct rhashtable route_ht; + const struct mlxsw_sp_mr_table_ops *ops; char catchall_route_priv[0]; /* catchall_route_priv has to be always the last item */ }; @@ -88,7 +109,7 @@ struct mlxsw_sp_mr_route { struct mlxsw_sp_mr_route_key key; enum mlxsw_sp_mr_route_action route_action; u16 min_mtu; - struct mfc_cache *mfc4; + struct mr_mfc *mfc; void *route_priv; const struct mlxsw_sp_mr_table *mr_table; /* A list of route_vif_entry structs that point to the egress VIFs */ @@ -104,14 +125,9 @@ static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = { .automatic_shrinking = true, }; -static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif) -{ - return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER)); -} - static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif) { - return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif; + return vif->ops->is_regular(vif) && vif->dev && vif->rif; } static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif) @@ -122,18 +138,9 @@ static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif) static bool mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route) { - vifi_t ivif; + vifi_t ivif = mr_route->mfc->mfc_parent; - switch (mr_route->mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - ivif = mr_route->mfc4->mfc_parent; - return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255; - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } - return false; + return mr_route->mfc->mfc_un.res.ttls[ivif] != 255; } static int @@ -149,19 +156,6 @@ mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route) return valid_evifs; } -static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route) -{ - switch (mr_route->mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY); - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } - return false; -} - static enum mlxsw_sp_mr_route_action mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route) { @@ -174,7 +168,8 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route) /* The kernel does not match a (*,G) route that the ingress interface is * not one of the egress interfaces, so trap these kind of routes. */ - if (mlxsw_sp_mr_route_starg(mr_route) && + if (mr_route->mr_table->ops->is_route_starg(mr_route->mr_table, + mr_route) && !mlxsw_sp_mr_route_ivif_in_evifs(mr_route)) return MLXSW_SP_MR_ROUTE_ACTION_TRAP; @@ -195,25 +190,11 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route) static enum mlxsw_sp_mr_route_prio mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route) { - return mlxsw_sp_mr_route_starg(mr_route) ? + return mr_route->mr_table->ops->is_route_starg(mr_route->mr_table, + mr_route) ? 
MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG; } -static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table, - struct mlxsw_sp_mr_route_key *key, - const struct mfc_cache *mfc) -{ - bool starg = (mfc->mfc_origin == htonl(INADDR_ANY)); - - memset(key, 0, sizeof(*key)); - key->vrid = mr_table->vr_id; - key->proto = mr_table->proto; - key->group.addr4 = mfc->mfc_mcastgrp; - key->group_mask.addr4 = htonl(0xffffffff); - key->source.addr4 = mfc->mfc_origin; - key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff); -} - static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route, struct mlxsw_sp_mr_vif *mr_vif) { @@ -343,8 +324,8 @@ static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table, } static struct mlxsw_sp_mr_route * -mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc) +mlxsw_sp_mr_route_create(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc) { struct mlxsw_sp_mr_route_vif_entry *rve, *tmp; struct mlxsw_sp_mr_route *mr_route; @@ -356,12 +337,13 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, if (!mr_route) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&mr_route->evif_list); - mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc); /* Find min_mtu and link iVIF and eVIFs */ mr_route->min_mtu = ETH_MAX_MTU; - ipmr_cache_hold(mfc); - mr_route->mfc4 = mfc; + mr_cache_hold(mfc); + mr_route->mfc = mfc; + mr_table->ops->key_create(mr_table, &mr_route->key, mr_route->mfc); + mr_route->mr_table = mr_table; for (i = 0; i < MAXVIFS; i++) { if (mfc->mfc_un.res.ttls[i] != 255) { @@ -374,59 +356,38 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, mr_route->min_mtu = mr_table->vifs[i].dev->mtu; } } - mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]); + mlxsw_sp_mr_route_ivif_link(mr_route, + &mr_table->vifs[mfc->mfc_parent]); mr_route->route_action = mlxsw_sp_mr_route_action(mr_route); return mr_route; err: - ipmr_cache_put(mfc); + mr_cache_put(mfc); list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node) mlxsw_sp_mr_route_evif_unlink(rve); kfree(mr_route); return ERR_PTR(err); } -static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table, - struct mlxsw_sp_mr_route *mr_route) +static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route *mr_route) { struct mlxsw_sp_mr_route_vif_entry *rve, *tmp; mlxsw_sp_mr_route_ivif_unlink(mr_route); - ipmr_cache_put(mr_route->mfc4); + mr_cache_put(mr_route->mfc); list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node) mlxsw_sp_mr_route_evif_unlink(rve); kfree(mr_route); } -static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table, - struct mlxsw_sp_mr_route *mr_route) -{ - switch (mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - mlxsw_sp_mr_route4_destroy(mr_table, mr_route); - break; - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } -} - static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route, bool offload) { - switch (mr_route->mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - if (offload) - mr_route->mfc4->mfc_flags |= MFC_OFFLOAD; - else - mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD; - break; - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } + if (offload) + mr_route->mfc->mfc_flags |= MFC_OFFLOAD; + else + mr_route->mfc->mfc_flags &= ~MFC_OFFLOAD; } static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route) @@ 
-448,25 +409,18 @@ static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table, mlxsw_sp_mr_route_destroy(mr_table, mr_route); } -int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc, bool replace) +int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc, bool replace) { struct mlxsw_sp_mr_route *mr_orig_route = NULL; struct mlxsw_sp_mr_route *mr_route; int err; - /* If the route is a (*,*) route, abort, as these kind of routes are - * used for proxy routes. - */ - if (mfc->mfc_origin == htonl(INADDR_ANY) && - mfc->mfc_mcastgrp == htonl(INADDR_ANY)) { - dev_warn(mr_table->mlxsw_sp->bus_info->dev, - "Offloading proxy routes is not supported.\n"); + if (!mr_table->ops->is_route_valid(mr_table, mfc)) return -EINVAL; - } /* Create a new route */ - mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc); + mr_route = mlxsw_sp_mr_route_create(mr_table, mfc); if (IS_ERR(mr_route)) return PTR_ERR(mr_route); @@ -511,7 +465,7 @@ int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, &mr_orig_route->ht_node, mlxsw_sp_mr_route_ht_params); list_del(&mr_orig_route->node); - mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route); + mlxsw_sp_mr_route_destroy(mr_table, mr_orig_route); } mlxsw_sp_mr_mfc_offload_update(mr_route); @@ -524,17 +478,17 @@ err_rhashtable_insert: list_del(&mr_route->node); err_no_orig_route: err_duplicate_route: - mlxsw_sp_mr_route4_destroy(mr_table, mr_route); + mlxsw_sp_mr_route_destroy(mr_table, mr_route); return err; } -void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc) +void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc) { struct mlxsw_sp_mr_route *mr_route; struct mlxsw_sp_mr_route_key key; - mlxsw_sp_mr_route4_key(mr_table, &key, mfc); + mr_table->ops->key_create(mr_table, &key, mfc); mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key, mlxsw_sp_mr_route_ht_params); if (mr_route) @@ -839,6 +793,125 @@ void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table, } } +/* Protocol specific functions */ +static bool +mlxsw_sp_mr_route4_validate(const struct mlxsw_sp_mr_table *mr_table, + const struct mr_mfc *c) +{ + struct mfc_cache *mfc = (struct mfc_cache *) c; + + /* If the route is a (*,*) route, abort, as these kind of routes are + * used for proxy routes. + */ + if (mfc->mfc_origin == htonl(INADDR_ANY) && + mfc->mfc_mcastgrp == htonl(INADDR_ANY)) { + dev_warn(mr_table->mlxsw_sp->bus_info->dev, + "Offloading proxy routes is not supported.\n"); + return false; + } + return true; +} + +static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_key *key, + struct mr_mfc *c) +{ + const struct mfc_cache *mfc = (struct mfc_cache *) c; + bool starg; + + starg = (mfc->mfc_origin == htonl(INADDR_ANY)); + + memset(key, 0, sizeof(*key)); + key->vrid = mr_table->vr_id; + key->proto = MLXSW_SP_L3_PROTO_IPV4; + key->group.addr4 = mfc->mfc_mcastgrp; + key->group_mask.addr4 = htonl(0xffffffff); + key->source.addr4 = mfc->mfc_origin; + key->source_mask.addr4 = htonl(starg ? 
0 : 0xffffffff); +} + +static bool mlxsw_sp_mr_route4_starg(const struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_mr_route *mr_route) +{ + return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY); +} + +static bool mlxsw_sp_mr_vif4_is_regular(const struct mlxsw_sp_mr_vif *vif) +{ + return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER)); +} + +static bool +mlxsw_sp_mr_route6_validate(const struct mlxsw_sp_mr_table *mr_table, + const struct mr_mfc *c) +{ + struct mfc6_cache *mfc = (struct mfc6_cache *) c; + + /* If the route is a (*,*) route, abort, as these kind of routes are + * used for proxy routes. + */ + if (ipv6_addr_any(&mfc->mf6c_origin) && + ipv6_addr_any(&mfc->mf6c_mcastgrp)) { + dev_warn(mr_table->mlxsw_sp->bus_info->dev, + "Offloading proxy routes is not supported.\n"); + return false; + } + return true; +} + +static void mlxsw_sp_mr_route6_key(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_key *key, + struct mr_mfc *c) +{ + const struct mfc6_cache *mfc = (struct mfc6_cache *) c; + + memset(key, 0, sizeof(*key)); + key->vrid = mr_table->vr_id; + key->proto = MLXSW_SP_L3_PROTO_IPV6; + key->group.addr6 = mfc->mf6c_mcastgrp; + memset(&key->group_mask.addr6, 0xff, sizeof(key->group_mask.addr6)); + key->source.addr6 = mfc->mf6c_origin; + if (!ipv6_addr_any(&mfc->mf6c_origin)) + memset(&key->source_mask.addr6, 0xff, + sizeof(key->source_mask.addr6)); +} + +static bool mlxsw_sp_mr_route6_starg(const struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_mr_route *mr_route) +{ + return ipv6_addr_any(&mr_route->key.source_mask.addr6); +} + +static bool mlxsw_sp_mr_vif6_is_regular(const struct mlxsw_sp_mr_vif *vif) +{ + return !(vif->vif_flags & MIFF_REGISTER); +} + +static struct +mlxsw_sp_mr_vif_ops mlxsw_sp_mr_vif_ops_arr[] = { + { + .is_regular = mlxsw_sp_mr_vif4_is_regular, + }, + { + .is_regular = mlxsw_sp_mr_vif6_is_regular, + }, +}; + +static struct +mlxsw_sp_mr_table_ops mlxsw_sp_mr_table_ops_arr[] = { + { + .is_route_valid = mlxsw_sp_mr_route4_validate, + .key_create = mlxsw_sp_mr_route4_key, + .is_route_starg = mlxsw_sp_mr_route4_starg, + }, + { + .is_route_valid = mlxsw_sp_mr_route6_validate, + .key_create = mlxsw_sp_mr_route6_key, + .is_route_starg = mlxsw_sp_mr_route6_starg, + }, + +};
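
The two ops arrays above are the crux of the refactor: every switch on mr_table->proto earlier in this file collapses into a single table lookup performed once, when the table or VIF is set up. A standalone sketch of the dispatch pattern, with illustrative names rather than the driver's types:

#include <stdbool.h>
#include <stdio.h>

enum l3proto { L3_PROTO_IPV4, L3_PROTO_IPV6, L3_PROTO_MAX };

struct route;

struct table_ops {
	bool (*is_route_starg)(const struct route *route);
};

struct route {
	const struct table_ops *ops;	/* selected once at creation time */
	bool source_is_any;
};

static bool route4_starg(const struct route *route)
{
	return route->source_is_any;	/* v4: source mask == INADDR_ANY */
}

static bool route6_starg(const struct route *route)
{
	return route->source_is_any;	/* v6: ipv6_addr_any(source mask) */
}

static const struct table_ops table_ops_arr[L3_PROTO_MAX] = {
	[L3_PROTO_IPV4] = { .is_route_starg = route4_starg },
	[L3_PROTO_IPV6] = { .is_route_starg = route6_starg },
};

int main(void)
{
	struct route rt = {
		.ops = &table_ops_arr[L3_PROTO_IPV6],
		.source_is_any = true,
	};

	/* one indirect call; no per-call-site switch on the protocol */
	printf("(*,G)? %d\n", rt.ops->is_route_starg(&rt));
	return 0;
}

Supporting a further protocol then means adding one array entry instead of touching every call site.

+ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, u32 vr_id, enum mlxsw_sp_l3proto proto) @@ -847,6 +920,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, .key = { .vrid = vr_id, + .proto = proto, }, .value = { .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP, @@ -865,6 +939,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, mr_table->vr_id = vr_id; mr_table->mlxsw_sp = mlxsw_sp; mr_table->proto = proto; + mr_table->ops = &mlxsw_sp_mr_table_ops_arr[proto]; INIT_LIST_HEAD(&mr_table->route_list); err = rhashtable_init(&mr_table->route_ht, @@ -875,6 +950,7 @@ struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MAXVIFS; i++) { INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list); INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list); + mr_table->vifs[i].ops = &mlxsw_sp_mr_vif_ops_arr[proto]; } err = mr->mr_ops->route_create(mlxsw_sp, mr->priv, @@ -941,18 +1017,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp, mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets, &bytes); - switch (mr_route->mr_table->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - if 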
(mr_route->mfc4->mfc_un.res.pkt != packets) - mr_route->mfc4->mfc_un.res.lastuse = jiffies; - mr_route->mfc4->mfc_un.res.pkt = packets; - mr_route->mfc4->mfc_un.res.bytes = bytes; - break; - case MLXSW_SP_L3_PROTO_IPV6: - /* fall through */ - default: - WARN_ON_ONCE(1); - } + if (mr_route->mfc->mfc_un.res.pkt != packets) + mr_route->mfc->mfc_un.res.lastuse = jiffies; + mr_route->mfc->mfc_un.res.pkt = packets; + mr_route->mfc->mfc_un.res.bytes = bytes; } static void mlxsw_sp_mr_stats_update(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h index 5d26a122af49..7c864a86811d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h @@ -36,6 +36,7 @@ #define _MLXSW_SPECTRUM_MCROUTER_H #include <linux/mroute.h> +#include <linux/mroute6.h> #include "spectrum_router.h" #include "spectrum.h" @@ -109,10 +110,10 @@ struct mlxsw_sp_mr_table; int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_mr_ops *mr_ops); void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc, bool replace); -void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table, - struct mfc_cache *mfc); +int mlxsw_sp_mr_route_add(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc, bool replace); +void mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table, + struct mr_mfc *mfc); int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table, struct net_device *dev, vifi_t vif_index, unsigned long vif_flags, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 4c7f32d4288d..4f4c0d311883 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -51,7 +51,7 @@ struct mlxsw_sp_mr_tcam_region { }; struct mlxsw_sp_mr_tcam { - struct mlxsw_sp_mr_tcam_region ipv4_tcam_region; + struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX]; }; /* This struct maps to one RIGR2 register entry */ @@ -316,20 +316,37 @@ static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, mlxsw_afa_block_first_set(afa_block)); break; case MLXSW_SP_L3_PROTO_IPV6: - default: - WARN_ON_ONCE(1); + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + key->group.addr6, + key->group_mask.addr6, + key->source.addr6, + key->source_mask.addr6, + mlxsw_afa_block_first_set(afa_block)); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); } static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid, + struct mlxsw_sp_mr_route_key *key, struct parman_item *parman_item) { + struct in6_addr zero_addr = IN6ADDR_ANY_INIT; char rmft2_pl[MLXSW_REG_RMFT2_LEN]; - mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid, - 0, 0, 0, 0, 0, 0, NULL); + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, + vrid, 0, 0, 0, 0, 0, 0, NULL); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index, + vrid, 0, 0, zero_addr, zero_addr, + zero_addr, zero_addr, NULL); + break; + } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); } @@ -353,27 +370,30 @@ mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, return 0; } +static struct 
mlxsw_sp_mr_tcam_region * +mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam, + enum mlxsw_sp_l3proto proto) +{ + return &mr_tcam->tcam_regions[proto]; +} + static int mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam, struct mlxsw_sp_mr_tcam_route *route, enum mlxsw_sp_mr_route_prio prio) { - struct parman_prio *parman_prio = NULL; + struct mlxsw_sp_mr_tcam_region *tcam_region; int err; - switch (route->key.proto) { - case MLXSW_SP_L3_PROTO_IPV4: - parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio]; - err = parman_item_add(mr_tcam->ipv4_tcam_region.parman, - parman_prio, &route->parman_item); - if (err) - return err; - break; - case MLXSW_SP_L3_PROTO_IPV6: - default: - WARN_ON_ONCE(1); - } - route->parman_prio = parman_prio; + tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, + route->key.proto); + err = parman_item_add(tcam_region->parman, + &tcam_region->parman_prios[prio], + &route->parman_item); + if (err) + return err; + + route->parman_prio = &tcam_region->parman_prios[prio]; return 0; } @@ -381,15 +401,13 @@ static void mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam, struct mlxsw_sp_mr_tcam_route *route) { - switch (route->key.proto) { - case MLXSW_SP_L3_PROTO_IPV4: - parman_item_remove(mr_tcam->ipv4_tcam_region.parman, - route->parman_prio, &route->parman_item); - break; - case MLXSW_SP_L3_PROTO_IPV6: - default: - WARN_ON_ONCE(1); - } + struct mlxsw_sp_mr_tcam_region *tcam_region; + + tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam, + route->key.proto); + + parman_item_remove(tcam_region->parman, + route->parman_prio, &route->parman_item); } static int @@ -462,7 +480,7 @@ static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_tcam *mr_tcam = priv; mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid, - &route->parman_item); + &route->key, &route->parman_item); mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); @@ -806,21 +824,42 @@ mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) { struct mlxsw_sp_mr_tcam *mr_tcam = priv; + struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; + u32 rtar_key; + int err; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) || !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) return -EIO; - return mlxsw_sp_mr_tcam_region_init(mlxsw_sp, - &mr_tcam->ipv4_tcam_region, - MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST); + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST; + err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, + &region[MLXSW_SP_L3_PROTO_IPV4], + rtar_key); + if (err) + return err; + + rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST; + err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp, + &region[MLXSW_SP_L3_PROTO_IPV6], + rtar_key); + if (err) + goto err_ipv6_region_init; + + return 0; + +err_ipv6_region_init: + mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]); + return err; } static void mlxsw_sp_mr_tcam_fini(void *priv) { struct mlxsw_sp_mr_tcam *mr_tcam = priv; + struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0]; - mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region); + mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV6]); + mlxsw_sp_mr_tcam_region_fini(&region[MLXSW_SP_L3_PROTO_IPV4]); } const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
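
Worth noting in the hunks above: with the per-protocol regions in an array indexed by enum mlxsw_sp_l3proto, init walks the protocols in order while the error path and fini tear them down in reverse. A minimal standalone sketch of that ordering, with illustrative names:

#include <stdio.h>

enum { PROTO_IPV4, PROTO_IPV6 };

static int region_init(int proto)
{
	printf("init region %d\n", proto);
	return 0;	/* pretend the register writes succeeded */
}

static void region_fini(int proto)
{
	printf("fini region %d\n", proto);
}

static int tcam_init(void)
{
	int err;

	err = region_init(PROTO_IPV4);
	if (err)
		return err;
	err = region_init(PROTO_IPV6);
	if (err)
		goto err_ipv6_region_init;
	return 0;

err_ipv6_region_init:
	region_fini(PROTO_IPV4);	/* undo only what succeeded */
	return err;
}

static void tcam_fini(void)
{
	/* reverse order of tcam_init() */
	region_fini(PROTO_IPV6);
	region_fini(PROTO_IPV4);
}

int main(void)
{
	if (!tcam_init())
		tcam_fini();
	return 0;
}

diff --git 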
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 0b7670459051..91262b0573e3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -42,6 +42,8 @@ #include "reg.h" #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1) +#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \ + MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1)) enum mlxsw_sp_qdisc_type { MLXSW_SP_QDISC_NO_QDISC, @@ -76,6 +78,7 @@ struct mlxsw_sp_qdisc_ops { struct mlxsw_sp_qdisc { u32 handle; u8 tclass_num; + u8 prio_bitmap; union { struct red_stats red; } xstats_base; @@ -99,6 +102,44 @@ mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle, mlxsw_sp_qdisc->handle == handle; } +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent, + bool root_only) +{ + int tclass, child_index; + + if (parent == TC_H_ROOT) + return mlxsw_sp_port->root_qdisc; + + if (root_only || !mlxsw_sp_port->root_qdisc || + !mlxsw_sp_port->root_qdisc->ops || + TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle || + TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS) + return NULL; + + child_index = TC_H_MIN(parent); + tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index); + return &mlxsw_sp_port->tclass_qdiscs[tclass]; +} + +static struct mlxsw_sp_qdisc * +mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle) +{ + int i; + + if (mlxsw_sp_port->root_qdisc->handle == handle) + return mlxsw_sp_port->root_qdisc; + + if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC) + return NULL; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle) + return &mlxsw_sp_port->tclass_qdiscs[i]; + + return NULL; +}
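
The two macros and mlxsw_sp_qdisc_find() above encode the band-to-traffic-class mapping: prio band 0 is the highest-priority band, while the switch schedules the numerically highest traffic class first, so bands map in reverse; child class minor numbers under the root qdisc are 1-based, hence the child - 1. A worked userspace example of the arithmetic:

#include <stdio.h>

#define MAX_TCS 8	/* IEEE_8021QAZ_MAX_TCS */
#define BAND_TO_TCLASS(band) (MAX_TCS - (band) - 1)
#define CHILD_TO_TCLASS(child) BAND_TO_TCLASS((child) - 1)

int main(void)
{
	int band;

	for (band = 0; band < MAX_TCS; band++)
		printf("band %d -> tclass %d (child minor %d)\n",
		       band, BAND_TO_TCLASS(band), band + 1);
	/* band 0 -> tclass 7 ... band 7 -> tclass 0,
	 * and CHILD_TO_TCLASS(1) == BAND_TO_TCLASS(0) == 7
	 */
	return 0;
}

+ static int mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) @@ -185,6 +226,23 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, return -EOPNOTSUPP; } +static void +mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats, + u8 prio_bitmap, u64 *tx_packets, + u64 *tx_bytes) +{ + int i; + + *tx_packets = 0; + *tx_bytes = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (prio_bitmap & BIT(i)) { + *tx_packets += xstats->tx_packets[i]; + *tx_bytes += xstats->tx_bytes[i]; + } + } +} + static int mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port, int tclass_num, u32 min, u32 max, @@ -230,17 +288,16 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, u8 tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; - struct rtnl_link_stats64 *stats; struct red_stats *red_base; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; - stats = &mlxsw_sp_port->periodic_hw_stats.stats; stats_base = &mlxsw_sp_qdisc->stats_base; red_base = &mlxsw_sp_qdisc->xstats_base.red; - stats_base->tx_packets = stats->tx_packets; - stats_base->tx_bytes = stats->tx_bytes; - + mlxsw_sp_qdisc_bstats_per_priority_get(xstats, + mlxsw_sp_qdisc->prio_bitmap, + &stats_base->tx_packets, + &stats_base->tx_bytes); red_base->prob_mark = xstats->ecn; red_base->prob_drop = xstats->wred_drop[tclass_num]; red_base->pdrop = xstats->tail_drop[tclass_num]; @@ -255,6 +312,12 @@ static int mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { + struct mlxsw_sp_qdisc *root_qdisc = 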
mlxsw_sp_port->root_qdisc; + + if (root_qdisc != mlxsw_sp_qdisc) + root_qdisc->stats_base.backlog -= + mlxsw_sp_qdisc->stats_base.backlog; + return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, mlxsw_sp_qdisc->tclass_num); } @@ -319,6 +382,7 @@ mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port, backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc->stats_base.backlog); p->qstats->backlog -= backlog; + mlxsw_sp_qdisc->stats_base.backlog = 0; } static int @@ -357,14 +421,16 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u8 tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; - struct rtnl_link_stats64 *stats; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; - stats = &mlxsw_sp_port->periodic_hw_stats.stats; stats_base = &mlxsw_sp_qdisc->stats_base; - tx_bytes = stats->tx_bytes - stats_base->tx_bytes; - tx_packets = stats->tx_packets - stats_base->tx_packets; + mlxsw_sp_qdisc_bstats_per_priority_get(xstats, + mlxsw_sp_qdisc->prio_bitmap, + &tx_packets, &tx_bytes); + tx_bytes = tx_bytes - stats_base->tx_bytes; + tx_packets = tx_packets - stats_base->tx_packets; + overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - stats_base->overlimits; drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - @@ -406,11 +472,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - if (p->parent != TC_H_ROOT) + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false); + if (!mlxsw_sp_qdisc) return -EOPNOTSUPP; - mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc; - if (p->command == TC_RED_REPLACE) return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle, mlxsw_sp_qdisc, @@ -441,9 +506,13 @@ mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port, { int i; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, MLXSW_SP_PORT_DEFAULT_TCLASS); + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, + &mlxsw_sp_port->tclass_qdiscs[i]); + mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0; + } return 0; } @@ -467,16 +536,41 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, void *params) { struct tc_prio_qopt_offload_params *p = params; - int tclass, i; + struct mlxsw_sp_qdisc *child_qdisc; + int tclass, i, band, backlog; + u8 old_priomap; int err; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]); - err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass); - if (err) - return err; + for (band = 0; band < p->bands; band++) { + tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); + child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass]; + old_priomap = child_qdisc->prio_bitmap; + child_qdisc->prio_bitmap = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (p->priomap[i] == band) { + child_qdisc->prio_bitmap |= BIT(i); + if (BIT(i) & old_priomap) + continue; + err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, + i, tclass); + if (err) + return err; + } + } + if (old_priomap != child_qdisc->prio_bitmap && + child_qdisc->ops && child_qdisc->ops->clean_stats) { + backlog = child_qdisc->stats_base.backlog; + child_qdisc->ops->clean_stats(mlxsw_sp_port, + child_qdisc); + child_qdisc->stats_base.backlog = backlog; + } + } + for (; band < IEEE_8021QAZ_MAX_TCS; band++) { + tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band); + child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass]; + 
child_qdisc->prio_bitmap = 0; + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc); } - return 0; } @@ -513,6 +607,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { drops += xstats->tail_drop[i]; + drops += xstats->wred_drop[i]; backlog += xstats->backlog[i]; } drops = drops - stats_base->drops; @@ -548,8 +643,10 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, stats_base->tx_bytes = stats->tx_bytes; stats_base->drops = 0; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { stats_base->drops += xstats->tail_drop[i]; + stats_base->drops += xstats->wred_drop[i]; + } mlxsw_sp_qdisc->stats_base.backlog = 0; } @@ -564,15 +661,48 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = { .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats, }; +/* Grafting is not supported in mlxsw. It will result in un-offloading of the + * grafted qdisc as well as the qdisc in the qdisc new location. + * (However, if the graft is to the location where the qdisc is already at, it + * will be ignored completely and won't cause un-offloading). + */ +static int +mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct tc_prio_qopt_offload_graft_params *p) +{ + int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band); + struct mlxsw_sp_qdisc *old_qdisc; + + /* Check if the grafted qdisc is already in its "new" location. If so - + * nothing needs to be done. + */ + if (p->band < IEEE_8021QAZ_MAX_TCS && + mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle) + return 0; + + /* See if the grafted qdisc is already offloaded on any tclass. If so, + * unoffload it. + */ + old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, + p->child_handle); + if (old_qdisc) + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc); + + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, + &mlxsw_sp_port->tclass_qdiscs[tclass_num]); + return -EOPNOTSUPP; +} + int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_prio_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - if (p->parent != TC_H_ROOT) + mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true); + if (!mlxsw_sp_qdisc) return -EOPNOTSUPP; - mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc; if (p->command == TC_PRIO_REPLACE) return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle, mlxsw_sp_qdisc, @@ -589,6 +719,9 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, case TC_PRIO_STATS: return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, &p->stats); + case TC_PRIO_GRAFT: + return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc, + &p->graft_params); default: return -EOPNOTSUPP; } @@ -596,17 +729,36 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) { - mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc), - GFP_KERNEL); - if (!mlxsw_sp_port->root_qdisc) - return -ENOMEM; + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; + int i; + mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL); + if (!mlxsw_sp_qdisc) + goto err_root_qdisc_init; + + mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc; + mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff; mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; + mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc) * IEEE_8021QAZ_MAX_TCS, + GFP_KERNEL); + if (!mlxsw_sp_qdisc) + goto 
err_tclass_qdiscs_init; + + mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i; + return 0; + +err_tclass_qdiscs_init: + kfree(mlxsw_sp_port->root_qdisc); +err_root_qdisc_init: + return -ENOMEM; } void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port) { + kfree(mlxsw_sp_port->tclass_qdiscs); kfree(mlxsw_sp_port->root_qdisc); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 997e24dcb053..1904c0323d39 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1,10 +1,10 @@ /* * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c - * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved. * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com> - * Copyright (c) 2017 Petr Machata <petrm@mellanox.com> + * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -70,6 +70,7 @@ #include "spectrum_mr.h" #include "spectrum_mr_tcam.h" #include "spectrum_router.h" +#include "spectrum_span.h" struct mlxsw_sp_fib; struct mlxsw_sp_vr; @@ -466,7 +467,7 @@ struct mlxsw_sp_vr { unsigned int rif_count; struct mlxsw_sp_fib *fib4; struct mlxsw_sp_fib *fib6; - struct mlxsw_sp_mr_table *mr4_table; + struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX]; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; @@ -710,7 +711,9 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) { - return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table; + return !!vr->fib4 || !!vr->fib6 || + !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] || + !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]; } static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@ -788,7 +791,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, u32 tb_id, struct netlink_ext_ack *extack) { - struct mlxsw_sp_mr_table *mr4_table; + struct mlxsw_sp_mr_table *mr4_table, *mr6_table; struct mlxsw_sp_fib *fib4; struct mlxsw_sp_fib *fib6; struct mlxsw_sp_vr *vr; @@ -796,7 +799,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) { - NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers"); return ERR_PTR(-EBUSY); } fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); @@ -811,15 +814,25 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(mr4_table)) { err = PTR_ERR(mr4_table); - goto err_mr_table_create; + goto err_mr4_table_create; } + mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, + MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(mr6_table)) { + err = PTR_ERR(mr6_table); + goto err_mr6_table_create; + } + vr->fib4 = fib4; vr->fib6 = fib6; - vr->mr4_table = mr4_table; + vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table; + vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table; vr->tb_id = tb_id; return vr; -err_mr_table_create: 
+err_mr6_table_create: + mlxsw_sp_mr_table_destroy(mr4_table); +err_mr4_table_create: mlxsw_sp_fib_destroy(mlxsw_sp, fib6); err_fib6_create: mlxsw_sp_fib_destroy(mlxsw_sp, fib4); @@ -829,8 +842,10 @@ err_fib6_create: static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) { - mlxsw_sp_mr_table_destroy(vr->mr4_table); - vr->mr4_table = NULL; + mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]); + vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL; + mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]); + vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL; mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); vr->fib6 = NULL; mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); @@ -853,7 +868,8 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) { if (!vr->rif_count && list_empty(&vr->fib4->node_list) && list_empty(&vr->fib6->node_list) && - mlxsw_sp_mr_table_empty(vr->mr4_table)) + mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) && + mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6])) mlxsw_sp_vr_destroy(mlxsw_sp, vr); } @@ -1024,9 +1040,11 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_ipip_type ipipt, struct net_device *ol_dev) { + const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_ipip_entry *ret = NULL; + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL); if (!ipip_entry) return ERR_PTR(-ENOMEM); @@ -1040,7 +1058,15 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, ipip_entry->ipipt = ipipt; ipip_entry->ol_dev = ol_dev; - ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev); + + switch (ipip_ops->ul_proto) { + case MLXSW_SP_L3_PROTO_IPV4: + ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + break; + case MLXSW_SP_L3_PROTO_IPV6: + WARN_ON(1); + break; + } return ipip_entry; @@ -2371,6 +2397,8 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) read_unlock_bh(&n->lock); rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + entry_connected = nud_state & NUD_VALID && !dead; neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); if (!entry_connected && !neigh_entry) @@ -2468,7 +2496,8 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, mlxsw_core_schedule_work(&net_work->work); mlxsw_sp_port_dev_put(mlxsw_sp_port); break; - case NETEVENT_MULTIPATH_HASH_UPDATE: + case NETEVENT_IPV4_MPATH_HASH_UPDATE: + case NETEVENT_IPV6_MPATH_HASH_UPDATE: net = ptr; if (!net_eq(net, &init_net)) @@ -5415,10 +5444,20 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, return 0; } +static struct mlxsw_sp_mr_table * +mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family) +{ + if (family == RTNL_FAMILY_IPMR) + return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]; + else + return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]; +} + static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp, struct mfc_entry_notifier_info *men_info, bool replace) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr; if (mlxsw_sp->router->aborted) @@ -5428,12 +5467,14 @@ static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(vr)) return PTR_ERR(vr); - return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family); + return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace); } static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp, struct 
mfc_entry_notifier_info *men_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr; if (mlxsw_sp->router->aborted) @@ -5443,7 +5484,8 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp, if (WARN_ON(!vr)) return; - mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family); + mlxsw_sp_mr_route_del(mrt, men_info->mfc); mlxsw_sp_vr_put(mlxsw_sp, vr); } @@ -5451,6 +5493,7 @@ static int mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp, struct vif_entry_notifier_info *ven_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_rif *rif; struct mlxsw_sp_vr *vr; @@ -5461,8 +5504,9 @@ mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(vr)) return PTR_ERR(vr); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev); - return mlxsw_sp_mr_vif_add(vr->mr4_table, ven_info->dev, + return mlxsw_sp_mr_vif_add(mrt, ven_info->dev, ven_info->vif_index, ven_info->vif_flags, rif); } @@ -5471,6 +5515,7 @@ static void mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, struct vif_entry_notifier_info *ven_info) { + struct mlxsw_sp_mr_table *mrt; struct mlxsw_sp_vr *vr; if (mlxsw_sp->router->aborted) @@ -5480,7 +5525,8 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, if (WARN_ON(!vr)) return; - mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index); + mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family); + mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index); mlxsw_sp_vr_put(mlxsw_sp, vr); } @@ -5572,7 +5618,7 @@ static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) { - int i; + int i, j; for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; @@ -5580,7 +5626,8 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp_vr_is_used(vr)) continue; - mlxsw_sp_mr_table_flush(vr->mr4_table); + for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++) + mlxsw_sp_mr_table_flush(vr->mr_table[j]); mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); /* If virtual router was only used for IPv4, then it's no @@ -5630,6 +5677,8 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) /* Protect internal structures from changes */ rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_APPEND: /* fall through */ @@ -5672,6 +5721,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) int err; rtnl_lock(); + mlxsw_sp_span_respin(mlxsw_sp); + switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_ADD: @@ -5715,11 +5766,11 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) replace); if (err) mlxsw_sp_router_fib_abort(mlxsw_sp); - ipmr_cache_put(fib_work->men_info.mfc); + mr_cache_put(fib_work->men_info.mfc); break; case FIB_EVENT_ENTRY_DEL: mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info); - ipmr_cache_put(fib_work->men_info.mfc); + mr_cache_put(fib_work->men_info.mfc); break; case FIB_EVENT_VIF_ADD: err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp, @@ -5799,7 +5850,7 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); - 
ipmr_cache_hold(fib_work->men_info.mfc); + mr_cache_hold(fib_work->men_info.mfc); break; case FIB_EVENT_VIF_ADD: /* fall through */ case FIB_EVENT_VIF_DEL: @@ -5841,10 +5892,14 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event, if (!ipmr_rule_default(rule) && !rule->l3mdev) err = -1; break; + case RTNL_FAMILY_IP6MR: + if (!ip6mr_rule_default(rule) && !rule->l3mdev) + err = -1; + break; } if (err < 0) - NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload"); + NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload"); return err; } @@ -5860,7 +5915,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, if (!net_eq(info->net, &init_net) || (info->family != AF_INET && info->family != AF_INET6 && - info->family != RTNL_FAMILY_IPMR)) + info->family != RTNL_FAMILY_IPMR && + info->family != RTNL_FAMILY_IP6MR)) return NOTIFY_DONE; router = container_of(nb, struct mlxsw_sp_router, fib_nb); @@ -5890,6 +5946,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); mlxsw_sp_router_fib6_event(fib_work, info); break; + case RTNL_FAMILY_IP6MR: case RTNL_FAMILY_IPMR: INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work); mlxsw_sp_router_fibmr_event(fib_work, info); @@ -6071,7 +6128,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif; struct mlxsw_sp_vr *vr; u16 rif_index; - int err; + int i, err; type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); ops = mlxsw_sp->router->rif_ops_arr[type]; @@ -6083,7 +6140,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); if (err) { - NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces"); + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces"); goto err_rif_index_alloc; } @@ -6111,9 +6168,11 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_configure; - err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif); - if (err) - goto err_mr_rif_add; + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) { + err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif); + if (err) + goto err_mr_rif_add; + } mlxsw_sp_rif_counters_alloc(rif); mlxsw_sp->router->rifs[rif_index] = rif; @@ -6121,6 +6180,8 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, return rif; err_mr_rif_add: + for (i--; i >= 0; i--) + mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); ops->deconfigure(rif); err_configure: if (fid) @@ -6140,13 +6201,15 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; struct mlxsw_sp_fid *fid = rif->fid; struct mlxsw_sp_vr *vr; + int i; mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); vr = &mlxsw_sp->router->vrs[rif->vr_id]; mlxsw_sp->router->rifs[rif->rif_index] = NULL; mlxsw_sp_rif_counters_free(rif); - mlxsw_sp_mr_rif_del(vr->mr4_table, rif); + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) + mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); ops->deconfigure(rif); if (fid) /* Loopback RIFs are not associated with a FID. */ @@ -6553,13 +6616,16 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) if (rif->mtu != dev->mtu) { struct mlxsw_sp_vr *vr; + int i; /* The RIF is relevant only to its mr_table instance, as unlike * unicast routing, in multicast routing a RIF cannot be shared * between several multicast routing tables. 
*/ vr = &mlxsw_sp->router->vrs[rif->vr_id]; - mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu); + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) + mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i], + rif, dev->mtu); } ether_addr_copy(rif->addr, dev->dev_addr); @@ -7037,13 +7103,25 @@ static void mlxsw_sp_mp4_hash_init(char *recr2_pl) static void mlxsw_sp_mp6_hash_init(char *recr2_pl) { + bool only_l3 = !ip6_multipath_hash_policy(&init_net); + mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP); mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP); mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl); mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl); - mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL); mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER); + if (only_l3) { + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_IPV6_FLOW_LABEL); + } else { + mlxsw_sp_mp_hash_header_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_EN_IPV6); + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_SPORT); + mlxsw_sp_mp_hash_field_set(recr2_pl, + MLXSW_REG_RECR2_TCP_UDP_DPORT); + } } static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 1fb82246ce96..a01edcf56797 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -41,6 +41,7 @@ enum mlxsw_sp_l3proto { MLXSW_SP_L3_PROTO_IPV4, MLXSW_SP_L3_PROTO_IPV6, +#define MLXSW_SP_L3_PROTO_MAX (MLXSW_SP_L3_PROTO_IPV6 + 1) }; union mlxsw_sp_l3addr { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c new file mode 100644 index 000000000000..65a77708ff61 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -0,0 +1,824 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * Copyright (c) 2018 Petr Machata <petrm@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/list.h> +#include <net/arp.h> +#include <net/gre.h> +#include <net/ndisc.h> +#include <net/ip6_tunnel.h> + +#include "spectrum.h" +#include "spectrum_span.h" +#include "spectrum_ipip.h" + +int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN)) + return -EIO; + + mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_SPAN); + mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, + sizeof(struct mlxsw_sp_span_entry), + GFP_KERNEL); + if (!mlxsw_sp->span.entries) + return -ENOMEM; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + INIT_LIST_HEAD(&curr->bound_ports_list); + curr->id = i; + } + + return 0; +} + +void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + WARN_ON_ONCE(!list_empty(&curr->bound_ports_list)); + } + kfree(mlxsw_sp->span.entries); +} + +static int +mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + sparmsp->dest_port = netdev_priv(to_dev); + return 0; +} + +static int +mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_port *dest_port = sparms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + /* Create a new port analyzer entry for local_port.
*/ + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, + MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_reg_mpat_span_type span_type) +{ + struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure_common(span_entry, + MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = { + .can_handle = mlxsw_sp_port_dev_check, + .parms = mlxsw_sp_span_entry_phys_parms, + .configure = mlxsw_sp_span_entry_phys_configure, + .deconfigure = mlxsw_sp_span_entry_phys_deconfigure, +}; + +static int mlxsw_sp_span_dmac(struct neigh_table *tbl, + const void *pkey, + struct net_device *l3edev, + unsigned char dmac[ETH_ALEN]) +{ + struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev); + int err = 0; + + if (!neigh) { + neigh = neigh_create(tbl, pkey, l3edev); + if (IS_ERR(neigh)) + return PTR_ERR(neigh); + } + + neigh_event_send(neigh, NULL); + + read_lock_bh(&neigh->lock); + if ((neigh->nud_state & NUD_VALID) && !neigh->dead) + memcpy(dmac, neigh->ha, ETH_ALEN); + else + err = -ENOENT; + read_unlock_bh(&neigh->lock); + + neigh_release(neigh); + return err; +} + +static int +mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp) +{ + sparmsp->dest_port = NULL; + return 0; +} + +static __maybe_unused int +mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, + union mlxsw_sp_l3addr saddr, + union mlxsw_sp_l3addr daddr, + union mlxsw_sp_l3addr gw, + __u8 ttl, + struct neigh_table *tbl, + struct mlxsw_sp_span_parms *sparmsp) +{ + unsigned char dmac[ETH_ALEN]; + + if (mlxsw_sp_l3addr_is_zero(gw)) + gw = daddr; + + if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) || + mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac)) + return mlxsw_sp_span_entry_unoffloadable(sparmsp); + + sparmsp->dest_port = netdev_priv(l3edev); + sparmsp->ttl = ttl; + memcpy(sparmsp->dmac, dmac, ETH_ALEN); + memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN); + sparmsp->saddr = saddr; + sparmsp->daddr = daddr; + return 0; +} + +#if IS_ENABLED(CONFIG_NET_IPGRE) +static struct net_device * +mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, + __be32 *saddrp, __be32 *daddrp) +{ + struct ip_tunnel *tun = netdev_priv(to_dev); + struct net_device *dev = NULL; + struct ip_tunnel_parm parms; + struct rtable *rt = NULL; + struct flowi4 fl4; + + /* We assume "dev" stays valid after rt is put. 
*/ + ASSERT_RTNL(); + + parms = mlxsw_sp_ipip_netdev_parms4(to_dev); + ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp, + 0, 0, parms.link, tun->fwmark); + + rt = ip_route_output_key(tun->net, &fl4); + if (IS_ERR(rt)) + return NULL; + + if (rt->rt_type != RTN_UNICAST) + goto out; + + dev = rt->dst.dev; + *saddrp = fl4.saddr; + *daddrp = rt->rt_gateway; + +out: + ip_rt_put(rt); + return dev; +} + +static int +mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); + union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr }; + union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr }; + bool inherit_tos = tparm.iph.tos & 0x1; + bool inherit_ttl = !tparm.iph.ttl; + union mlxsw_sp_l3addr gw = daddr; + struct net_device *l3edev; + + if (!(to_dev->flags & IFF_UP) || + /* Reject tunnels with GRE keys, checksums, etc. */ + tparm.i_flags || tparm.o_flags || + /* Require a fixed TTL and a TOS copied from the mirrored packet. */ + inherit_ttl || !inherit_tos || + /* A destination address may not be "any". */ + mlxsw_sp_l3addr_is_zero(daddr)) + return mlxsw_sp_span_entry_unoffloadable(sparmsp); + + l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4); + return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw, + tparm.iph.ttl, + &arp_tbl, sparmsp); +} + +static int +mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_port *dest_port = sparms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + /* Create a new port analyzer entry for local_port. */ + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, + MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, + sparms.dmac, false); + mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, + sparms.ttl, sparms.smac, + be32_to_cpu(sparms.saddr.addr4), + be32_to_cpu(sparms.daddr.addr4)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure_common(span_entry, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); +} + +static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = { + .can_handle = is_gretap_dev, + .parms = mlxsw_sp_span_entry_gretap4_parms, + .configure = mlxsw_sp_span_entry_gretap4_configure, + .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure, +}; +#endif + +#if IS_ENABLED(CONFIG_IPV6_GRE) +static struct net_device * +mlxsw_sp_span_gretap6_route(const struct net_device *to_dev, + struct in6_addr *saddrp, + struct in6_addr *daddrp) +{ + struct ip6_tnl *t = netdev_priv(to_dev); + struct flowi6 fl6 = t->fl.u.ip6; + struct net_device *dev = NULL; + struct dst_entry *dst; + struct rt6_info *rt6; + + /* We assume "dev" stays valid after dst is released.
*/ + ASSERT_RTNL(); + + fl6.flowi6_mark = t->parms.fwmark; + if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr)) + return NULL; + + dst = ip6_route_output(t->net, NULL, &fl6); + if (!dst || dst->error) + goto out; + + rt6 = container_of(dst, struct rt6_info, dst); + + dev = dst->dev; + *saddrp = fl6.saddr; + *daddrp = rt6->rt6i_gateway; + +out: + dst_release(dst); + return dev; +} + +static int +mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev); + bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS; + union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr }; + union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr }; + bool inherit_ttl = !tparm.hop_limit; + union mlxsw_sp_l3addr gw = daddr; + struct net_device *l3edev; + + if (!(to_dev->flags & IFF_UP) || + /* Reject tunnels with GRE keys, checksums, etc. */ + tparm.i_flags || tparm.o_flags || + /* Require a fixed TTL and a TOS copied from the mirrored packet. */ + inherit_ttl || !inherit_tos || + /* A destination address may not be "any". */ + mlxsw_sp_l3addr_is_zero(daddr)) + return mlxsw_sp_span_entry_unoffloadable(sparmsp); + + l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6); + return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw, + tparm.hop_limit, + &nd_tbl, sparmsp); +} + +static int +mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_port *dest_port = sparms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + /* Create a new port analyzer entry for local_port.
*/ + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, + MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, + sparms.dmac, false); + mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac, + sparms.saddr.addr6, + sparms.daddr.addr6); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure_common(span_entry, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = { + .can_handle = is_ip6gretap_dev, + .parms = mlxsw_sp_span_entry_gretap6_parms, + .configure = mlxsw_sp_span_entry_gretap6_configure, + .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure, +}; +#endif + +static const +struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { + &mlxsw_sp_span_entry_ops_phys, +#if IS_ENABLED(CONFIG_NET_IPGRE) + &mlxsw_sp_span_entry_ops_gretap4, +#endif +#if IS_ENABLED(CONFIG_IPV6_GRE) + &mlxsw_sp_span_entry_ops_gretap6, +#endif +}; + +static int +mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + return mlxsw_sp_span_entry_unoffloadable(sparmsp); +} + +static int +mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + return 0; +} + +static void +mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ +} + +static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = { + .parms = mlxsw_sp_span_entry_nop_parms, + .configure = mlxsw_sp_span_entry_nop_configure, + .deconfigure = mlxsw_sp_span_entry_nop_deconfigure, +}; + +static void +mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + if (sparms.dest_port) { + if (sparms.dest_port->mlxsw_sp != mlxsw_sp) { + netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance", + sparms.dest_port->dev->name); + sparms.dest_port = NULL; + } else if (span_entry->ops->configure(span_entry, sparms)) { + netdev_err(span_entry->to_dev, "Failed to offload mirror to %s", + sparms.dest_port->dev->name); + sparms.dest_port = NULL; + } + } + + span_entry->parms = sparms; +} + +static void +mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + if (span_entry->parms.dest_port) + span_entry->ops->deconfigure(span_entry); +} + +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + const struct mlxsw_sp_span_entry_ops *ops, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_span_entry *span_entry = NULL; + int i; + + /* find a free entry to use */ + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + if (!mlxsw_sp->span.entries[i].ref_count) { + span_entry = &mlxsw_sp->span.entries[i]; + break; + } + } + if (!span_entry) + return NULL; + + span_entry->ops = ops; + span_entry->ref_count = 1; + span_entry->to_dev = to_dev; + mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms); + + return span_entry; +} + +static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure(span_entry); +} + +struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev) 
+{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + if (curr->ref_count && curr->to_dev == to_dev) + return curr; + } + return NULL; +} + +void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure(span_entry); + span_entry->ops = &mlxsw_sp_span_entry_ops_nop; +} + +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id) +{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + if (curr->ref_count && curr->id == span_id) + return curr; + } + return NULL; +} + +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + const struct mlxsw_sp_span_entry_ops *ops, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_span_entry *span_entry; + + span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev); + if (span_entry) { + /* Already exists, just take a reference */ + span_entry->ref_count++; + return span_entry; + } + + return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms); +} + +static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry) +{ + WARN_ON(!span_entry->ref_count); + if (--span_entry->ref_count == 0) + mlxsw_sp_span_entry_destroy(span_entry); + return 0; +} + +static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + struct mlxsw_sp_span_inspected_port *p; + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + list_for_each_entry(p, &curr->bound_ports_list, list) + if (p->local_port == port->local_port && + p->type == MLXSW_SP_SPAN_EGRESS) + return true; + } + + return false; +} + +static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp, + int mtu) +{ + return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1; +} + +int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int err; + + /* If port is egress mirrored, the shared buffer size should be + * updated according to the mtu value + */ + if (mlxsw_sp_span_is_egress_mirror(port)) { + u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu); + + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); + return err; + } + } + + return 0; +} + +static struct mlxsw_sp_span_inspected_port * +mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + struct mlxsw_sp_port *port, + bool bind) +{ + struct mlxsw_sp_span_inspected_port *p; + + list_for_each_entry(p, &span_entry->bound_ports_list, list) + if (type == p->type && + port->local_port == p->local_port && + bind == p->bound) + return p; + return NULL; +} + +static int +mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + bool bind) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char mpar_pl[MLXSW_REG_MPAR_LEN]; + int pa_id = span_entry->id; + + /* bind the port to the SPAN entry */ + 
mlxsw_reg_mpar_pack(mpar_pl, port->local_port, + (enum mlxsw_reg_mpar_i_e)type, bind, pa_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); +} + +static int +mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + bool bind) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int i; + int err; + + /* A given (source port, direction) can only be bound to one analyzer, + * so if a binding is requested, check for conflicts. + */ + if (bind) + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = + &mlxsw_sp->span.entries[i]; + + if (mlxsw_sp_span_entry_bound_port_find(curr, type, + port, bind)) + return -EEXIST; + } + + /* if it is an egress SPAN, bind a shared buffer to it */ + if (type == MLXSW_SP_SPAN_EGRESS) { + u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, + port->dev->mtu); + + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); + return err; + } + } + + if (bind) { + err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + true); + if (err) + goto err_port_bind; + } + + inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL); + if (!inspected_port) { + err = -ENOMEM; + goto err_inspected_port_alloc; + } + inspected_port->local_port = port->local_port; + inspected_port->type = type; + inspected_port->bound = bind; + list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); + + return 0; + +err_inspected_port_alloc: + if (bind) + mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + false); +err_port_bind: + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + return err; +} + +static void +mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type, + bool bind) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + + inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type, + port, bind); + if (!inspected_port) + return; + + if (bind) + mlxsw_sp_span_inspected_port_bind(port, span_entry, type, + false); + /* remove the SBIB buffer if it was egress SPAN */ + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + + list_del(&inspected_port->list); + kfree(inspected_port); +} + +static const struct mlxsw_sp_span_entry_ops * +mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i) + if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev)) + return mlxsw_sp_span_entry_types[i]; + + return NULL; +} + +int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + const struct net_device *to_dev, + enum mlxsw_sp_span_type type, bool bind, + int *p_span_id) +{ + struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; + const struct mlxsw_sp_span_entry_ops *ops; + struct mlxsw_sp_span_parms sparms = {NULL}; + struct mlxsw_sp_span_entry *span_entry; + int err; + + 
ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev); + if (!ops) { + netdev_err(to_dev, "Cannot mirror to %s", to_dev->name); + return -EOPNOTSUPP; + } + + err = ops->parms(to_dev, &sparms); + if (err) + return err; + + span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); + if (!span_entry) + return -ENOENT; + + netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", + span_entry->id); + + err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind); + if (err) + goto err_port_bind; + + *p_span_id = span_entry->id; + return 0; + +err_port_bind: + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + return err; +} + +void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id, + enum mlxsw_sp_span_type type, bool bind) +{ + struct mlxsw_sp_span_entry *span_entry; + + span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id); + if (!span_entry) { + netdev_err(from->dev, "no span entry found\n"); + return; + } + + netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", + span_entry->id); + mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind); +} + +void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp) +{ + int i; + int err; + + ASSERT_RTNL(); + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + struct mlxsw_sp_span_parms sparms = {NULL}; + + if (!curr->ref_count) + continue; + + err = curr->ops->parms(curr->to_dev, &sparms); + if (err) + continue; + + if (memcmp(&sparms, &curr->parms, sizeof(sparms))) { + mlxsw_sp_span_entry_deconfigure(curr); + mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms); + } + } +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h new file mode 100644 index 000000000000..4b87ec20e658 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -0,0 +1,107 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_SPAN_H +#define _MLXSW_SPECTRUM_SPAN_H + +#include <linux/types.h> +#include <linux/if_ether.h> + +#include "spectrum_router.h" + +struct mlxsw_sp; +struct mlxsw_sp_port; + +enum mlxsw_sp_span_type { + MLXSW_SP_SPAN_EGRESS, + MLXSW_SP_SPAN_INGRESS +}; + +struct mlxsw_sp_span_inspected_port { + struct list_head list; + enum mlxsw_sp_span_type type; + u8 local_port; + + /* Whether this is a directly bound mirror (port-to-port) or an ACL. */ + bool bound; +}; + +struct mlxsw_sp_span_parms { + struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */ + unsigned int ttl; + unsigned char dmac[ETH_ALEN]; + unsigned char smac[ETH_ALEN]; + union mlxsw_sp_l3addr daddr; + union mlxsw_sp_l3addr saddr; +}; + +struct mlxsw_sp_span_entry_ops; + +struct mlxsw_sp_span_entry { + const struct net_device *to_dev; + const struct mlxsw_sp_span_entry_ops *ops; + struct mlxsw_sp_span_parms parms; + struct list_head bound_ports_list; + int ref_count; + int id; +}; + +struct mlxsw_sp_span_entry_ops { + bool (*can_handle)(const struct net_device *to_dev); + int (*parms)(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp); + int (*configure)(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms); + void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry); +}; + +int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp); + +int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + const struct net_device *to_dev, + enum mlxsw_sp_span_type type, + bool bind, int *p_span_id); +void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id, + enum mlxsw_sp_span_type type, bool bind); +struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev); + +void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry); + +int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 161bcdc012f0..c11c9a635866 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1844,7 +1844,7 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; if (is_vlan_dev(bridge_port->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge"); + NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge"); return -EINVAL; } @@ -1907,20 +1907,16 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct netlink_ext_ack *extack) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + struct net_device *dev = bridge_port->dev; u16 vid; - if 
(!is_vlan_dev(bridge_port->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge"); - return -EINVAL; - } - vid = vlan_dev_vlan_id(bridge_port->dev); - + vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return -EINVAL; if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) { - NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port"); + NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port"); return -EINVAL; } @@ -1937,8 +1933,10 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - u16 vid = vlan_dev_vlan_id(bridge_port->dev); + struct net_device *dev = bridge_port->dev; + u16 vid; + vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return; diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c index ab7a29846bfa..c698ec4fd9d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -510,7 +510,6 @@ static const struct mlxsw_config_profile mlxsw_sib_config_profile = { .type = MLXSW_PORT_SWID_TYPE_IB, } }, - .resource_query_enable = 0, }; static struct mlxsw_driver mlxsw_sib_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index f3c29bbf07e2..a655c5850aa6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -789,7 +789,7 @@ mlxsw_sx_port_get_link_ksettings(struct net_device *dev, u32 supported, advertising, lp_advertising; int err; - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to get proto"); @@ -879,7 +879,7 @@ mlxsw_sx_port_set_link_ksettings(struct net_device *dev, mlxsw_sx_to_ptys_advert_link(advertising) : mlxsw_sx_to_ptys_speed(speed); - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0); + mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0, false); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to get proto"); @@ -897,7 +897,7 @@ mlxsw_sx_port_set_link_ksettings(struct net_device *dev, return 0; mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, - eth_proto_new); + eth_proto_new, true); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); if (err) { netdev_err(dev, "Failed to set proto admin"); @@ -1029,7 +1029,7 @@ mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width) eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed); mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, - eth_proto_admin); + eth_proto_admin, true); return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl); } @@ -1706,7 +1706,6 @@ static const struct mlxsw_config_profile mlxsw_sx_config_profile = { .type = MLXSW_PORT_SWID_TYPE_IB, } }, - .resource_query_enable = 0, }; static struct mlxsw_driver mlxsw_sx_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h 
index ec6cef8267ae..399e9d6993f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -77,6 +77,7 @@ enum { MLXSW_TRAP_ID_IPV6_DHCP = 0x69, MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, + MLXSW_TRAP_ID_IPV6_PIM = 0x79, MLXSW_TRAP_ID_IPV4_BGP = 0x88, MLXSW_TRAP_ID_IPV6_BGP = 0x89, MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
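
A note on the priomap handling in mlxsw_sp_qdisc_prio_replace() above: each PRIO band is backed by one hardware traffic class, and the patch records in each child qdisc's prio_bitmap which 802.1p priorities feed that band, reprogramming the priority-to-tclass mapping only for priorities that actually moved. The following userspace sketch models that recomputation; the reversed band-to-tclass macro is an assumption for illustration (in the driver it is hidden behind MLXSW_SP_PRIO_BAND_TO_TCLASS), and priomap corresponds to the priomap argument of tc's prio qdisc.

    #include <stdio.h>

    #define MAX_TCS 8 /* IEEE_8021QAZ_MAX_TCS */
    /* Assumed band-to-tclass reversal, for illustration only. */
    #define BAND_TO_TCLASS(band) (MAX_TCS - (band) - 1)

    int main(void)
    {
            /* e.g. tc ... prio bands 3 priomap 2 2 1 1 0 0 0 0 */
            unsigned char priomap[MAX_TCS] = { 2, 2, 1, 1, 0, 0, 0, 0 };
            int bands = 3;
            unsigned int prio_bitmap;
            int band, i;

            for (band = 0; band < bands; band++) {
                    prio_bitmap = 0;
                    /* Collect every priority mapped to this band. */
                    for (i = 0; i < MAX_TCS; i++)
                            if (priomap[i] == band)
                                    prio_bitmap |= 1u << i;
                    printf("band %d -> tclass %d, prio_bitmap 0x%02x\n",
                           band, BAND_TO_TCLASS(band), prio_bitmap);
            }
            return 0;
    }

For the priomap above this prints bitmaps 0xf0, 0x0c and 0x03 for bands 0, 1 and 2; bands past p->bands get an empty bitmap and their child qdiscs are destroyed, matching the second loop in the patch.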
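The SPAN "respin" hook that the router code now calls after FIB and neighbour events boils down to: re-resolve each active mirror's egress parameters and reprogram the hardware only when they differ from what is currently installed. Below is a minimal, compilable model of that loop; the types and the resolve_parms() stub are simplified stand-ins, not the driver's structures.

    #include <stdio.h>
    #include <string.h>

    struct span_parms { int dest_port; unsigned int ttl; };

    struct span_entry {
            int ref_count;
            struct span_parms parms;
    };

    /* Stand-in for ops->parms(): re-resolve where mirrored traffic
     * should egress after a routing or neighbour change. */
    static int resolve_parms(const struct span_entry *e,
                             struct span_parms *out)
    {
            (void)e;
            out->dest_port = 2; /* pretend the route now egresses port 2 */
            out->ttl = 64;
            return 0;
    }

    static void respin(struct span_entry *entries, int count)
    {
            for (int i = 0; i < count; i++) {
                    struct span_entry *curr = &entries[i];
                    struct span_parms sparms = { 0 };

                    if (!curr->ref_count)
                            continue;
                    if (resolve_parms(curr, &sparms))
                            continue;
                    /* Reconfigure only when the resolution changed. */
                    if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
                            printf("entry %d: reconfigure to port %d\n",
                                   i, sparms.dest_port);
                            curr->parms = sparms;
                    }
            }
    }

    int main(void)
    {
            struct span_entry entries[2] = {
                    { .ref_count = 1, .parms = { .dest_port = 1, .ttl = 64 } },
                    { .ref_count = 0 },
            };

            respin(entries, 2);
            return 0;
    }

The memcmp-based comparison mirrors mlxsw_sp_span_respin() itself, which also compares whole parms structures rather than individual fields.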
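For the egress-mirror buffer sizing used by mlxsw_sp_span_port_mtu_update() and mlxsw_sp_span_inspected_port_add(), the reservation is bytes-to-cells(mtu * 5 / 2) + 1 cells, i.e. roughly two and a half MTUs plus one spare cell. A worked example follows, assuming a 96-byte shared-buffer cell purely for illustration; in the driver the cell size is a device property behind mlxsw_sp_bytes_cells().

    #include <stdio.h>

    #define CELL_SIZE 96 /* illustrative assumption, not queried */

    /* Round-up division, like the driver's bytes-to-cells helper. */
    static unsigned int bytes_to_cells(unsigned int bytes)
    {
            return (bytes + CELL_SIZE - 1) / CELL_SIZE;
    }

    /* Mirrors mlxsw_sp_span_mtu_to_buffsize(): 2.5x MTU plus one cell. */
    static unsigned int mtu_to_buffsize(unsigned int mtu)
    {
            return bytes_to_cells(mtu * 5 / 2) + 1;
    }

    int main(void)
    {
            printf("MTU 1518 -> %u cells\n", mtu_to_buffsize(1518)); /* 41 */
            printf("MTU 9216 -> %u cells\n", mtu_to_buffsize(9216)); /* 241 */
            return 0;
    }

With a 1518-byte MTU this comes to 41 cells under the assumed cell size, which is why the patch recomputes the SBIB reservation both when a port becomes an egress mirror and on every later MTU change.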