Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h              |  44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c        |   8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c       | 160
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c          | 318
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c           |  35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c           |  11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c          |  96
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h          |   5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h          |   1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c   | 124
10 files changed, 472 insertions, 330 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d616f698e155..d0c1bf5441d8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -75,11 +75,11 @@ #define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */ /* max 16 qps */ #define i40e_default_queues_per_vmdq(pf) \ - (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1) + (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ #define i40e_pf_get_max_q_per_tc(pf) \ - (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64) + (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64) #define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #define I40E_MAX_AQ_BUF_SIZE 4096 @@ -401,6 +401,27 @@ struct i40e_pf { struct timer_list service_timer; struct work_struct service_task; + u64 hw_features; +#define I40E_HW_RSS_AQ_CAPABLE BIT_ULL(0) +#define I40E_HW_128_QP_RSS_CAPABLE BIT_ULL(1) +#define I40E_HW_ATR_EVICT_CAPABLE BIT_ULL(2) +#define I40E_HW_WB_ON_ITR_CAPABLE BIT_ULL(3) +#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(4) +#define I40E_HW_NO_PCI_LINK_CHECK BIT_ULL(5) +#define I40E_HW_100M_SGMII_CAPABLE BIT_ULL(6) +#define I40E_HW_NO_DCB_SUPPORT BIT_ULL(7) +#define I40E_HW_USE_SET_LLDP_MIB BIT_ULL(8) +#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT_ULL(9) +#define I40E_HW_PTP_L4_CAPABLE BIT_ULL(10) +#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(11) +#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT_ULL(12) +#define I40E_HW_HAVE_CRT_RETIMER BIT_ULL(13) +#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT_ULL(14) +#define I40E_HW_PHY_CONTROLS_LEDS BIT_ULL(15) +#define I40E_HW_STOP_FW_LLDP BIT_ULL(16) +#define I40E_HW_PORT_ID_VALID BIT_ULL(17) +#define I40E_HW_RESTART_AUTONEG BIT_ULL(18) + u64 flags; #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) @@ -420,33 +441,14 @@ struct i40e_pf { #define I40E_FLAG_PTP BIT_ULL(25) #define I40E_FLAG_MFP_ENABLED BIT_ULL(26) #define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27) -#define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) #define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) -#define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31) -#define I40E_FLAG_HW_ATR_EVICT_CAPABLE BIT_ULL(32) -#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33) -#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34) -#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35) #define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37) -#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) #define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) -#define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) -#define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) -#define I40E_FLAG_100M_SGMII_CAPABLE BIT_ULL(43) -#define I40E_FLAG_RESTART_AUTONEG BIT_ULL(44) -#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45) -#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46) -#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) -#define I40E_FLAG_PHY_CONTROLS_LEDS BIT_ULL(48) -#define I40E_FLAG_PF_MAC BIT_ULL(50) #define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) -#define I40E_FLAG_HAVE_CRT_RETIMER BIT_ULL(52) -#define I40E_FLAG_PTP_L4_CAPABLE BIT_ULL(53) #define I40E_FLAG_CLIENT_RESET BIT_ULL(54) #define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) #define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56) -#define I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(57) #define I40E_FLAG_LEGACY_RX BIT_ULL(58) struct i40e_client_instance *cinst; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c 
b/drivers/net/ethernet/intel/i40e/i40e_common.c index 8e082a946411..111426ba5fbc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -328,9 +328,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, len = buf_len; /* write the full 16-byte chunks */ if (hw->debug_mask & mask) { - char prefix[20]; + char prefix[27]; - snprintf(prefix, 20, + snprintf(prefix, sizeof(prefix), "i40e %02x:%02x.%x: \t0x", hw->bus.bus_id, hw->bus.device, @@ -2529,6 +2529,10 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) if (status) return status; + hw->phy.link_info.req_fec_info = + abilities.fec_cfg_curr_mod_ext_info & + (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS); + memcpy(hw->phy.link_info.module_type, &abilities.module_type, sizeof(hw->phy.link_info.module_type)); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9692a5294fa3..05e89864f781 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -271,7 +271,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, *advertising |= ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) *advertising |= ADVERTISED_1000baseT_Full; - if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { *supported |= SUPPORTED_100baseT_Full; *advertising |= ADVERTISED_100baseT_Full; } @@ -340,12 +340,12 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, *advertising |= ADVERTISED_20000baseKR2_Full; } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { - if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER)) + if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) *supported |= SUPPORTED_10000baseKR_Full | SUPPORTED_Autoneg; *advertising |= ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER)) + if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) *advertising |= ADVERTISED_10000baseKR_Full; } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { @@ -356,12 +356,12 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, *advertising |= ADVERTISED_10000baseKX4_Full; } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { - if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER)) + if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) *supported |= SUPPORTED_1000baseKX_Full | SUPPORTED_Autoneg; *advertising |= ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - if (!(pf->flags & I40E_FLAG_HAVE_CRT_RETIMER)) + if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) *advertising |= ADVERTISED_1000baseKX_Full; } if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR || @@ -474,7 +474,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) advertising |= ADVERTISED_1000baseT_Full; - if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { supported |= SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) @@ -1091,7 +1091,7 @@ static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; u32 *reg_buf = p; - int i, j, ri; + unsigned int i, j, ri; u32 reg; /* Tell ethtool which driver-version-specific regs output we have. 
@@ -1550,9 +1550,9 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + unsigned int j; int i = 0; char *p; - int j; struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); unsigned int start; @@ -1637,7 +1637,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; char *p = (char *)data; - int i; + unsigned int i; switch (stringset) { case ETH_SS_TEST: @@ -1765,7 +1765,7 @@ static int i40e_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); - if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE) + if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | @@ -2005,7 +2005,7 @@ static int i40e_set_phys_id(struct net_device *netdev, switch (state) { case ETHTOOL_ID_ACTIVE: - if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) { + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { pf->led_status = i40e_led_get(hw); } else { i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL); @@ -2015,19 +2015,19 @@ static int i40e_set_phys_id(struct net_device *netdev, } return blink_freq; case ETHTOOL_ID_ON: - if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) i40e_led_set(hw, 0xf, false); else ret = i40e_led_set_phy(hw, true, pf->led_status, 0); break; case ETHTOOL_ID_OFF: - if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) i40e_led_set(hw, 0x0, false); else ret = i40e_led_set_phy(hw, false, pf->led_status, 0); break; case ETHTOOL_ID_INACTIVE: - if (!(pf->flags & I40E_FLAG_PHY_CONTROLS_LEDS)) { + if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { i40e_led_set(hw, pf->led_status, false); } else { ret = i40e_led_set_phy(hw, false, pf->led_status, @@ -2194,14 +2194,29 @@ static int __i40e_set_coalesce(struct net_device *netdev, int queue) { struct i40e_netdev_priv *np = netdev_priv(netdev); + u16 intrl_reg, cur_rx_itr, cur_tx_itr; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u16 intrl_reg; int i; if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) vsi->work_limit = ec->tx_max_coalesced_frames_irq; + if (queue < 0) { + cur_rx_itr = vsi->rx_rings[0]->rx_itr_setting; + cur_tx_itr = vsi->tx_rings[0]->tx_itr_setting; + } else if (queue < vsi->num_queue_pairs) { + cur_rx_itr = vsi->rx_rings[queue]->rx_itr_setting; + cur_tx_itr = vsi->tx_rings[queue]->tx_itr_setting; + } else { + netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", + vsi->num_queue_pairs - 1); + return -EINVAL; + } + + cur_tx_itr &= ~I40E_ITR_DYNAMIC; + cur_rx_itr &= ~I40E_ITR_DYNAMIC; + /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); @@ -2214,15 +2229,34 @@ static int __i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - if (ec->rx_coalesce_usecs == 0) { - if (ec->use_adaptive_rx_coalesce) - netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || - (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) { - netif_info(pf, drv, 
netdev, "Invalid value, rx-usecs range is 0-8160\n"); - return -EINVAL; + if (ec->rx_coalesce_usecs != cur_rx_itr && + ec->use_adaptive_rx_coalesce) { + netif_info(pf, drv, netdev, "RX interrupt moderation cannot be changed if adaptive-rx is enabled.\n"); + return -EINVAL; } + if (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); + return -EINVAL; + } + + if (ec->tx_coalesce_usecs != cur_tx_itr && + ec->use_adaptive_tx_coalesce) { + netif_info(pf, drv, netdev, "TX interrupt moderation cannot be changed if adaptive-tx is enabled.\n"); + return -EINVAL; + } + + if (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)) { + netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); + return -EINVAL; + } + + if (ec->use_adaptive_rx_coalesce && !cur_rx_itr) + ec->rx_coalesce_usecs = I40E_MIN_ITR << 1; + + if (ec->use_adaptive_tx_coalesce && !cur_tx_itr) + ec->tx_coalesce_usecs = I40E_MIN_ITR << 1; + intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { @@ -2230,27 +2264,14 @@ static int __i40e_set_coalesce(struct net_device *netdev, vsi->int_rate_limit); } - if (ec->tx_coalesce_usecs == 0) { - if (ec->use_adaptive_tx_coalesce) - netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || - (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) { - netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); - return -EINVAL; - } - /* rx and tx usecs has per queue value. If user doesn't specify the queue, * apply to all queues. */ if (queue < 0) { for (i = 0; i < vsi->num_queue_pairs; i++) i40e_set_itr_per_queue(vsi, ec, i); - } else if (queue < vsi->num_queue_pairs) { - i40e_set_itr_per_queue(vsi, ec, queue); } else { - netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", - vsi->num_queue_pairs - 1); - return -EINVAL; + i40e_set_itr_per_queue(vsi, ec, queue); } return 0; @@ -2727,22 +2748,22 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) switch (nfc->flow_type) { case TCP_V4_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); break; case TCP_V6_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); break; case UDP_V4_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); @@ -2751,7 +2772,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) break; case UDP_V6_FLOW: flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) hena |= 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); @@ -4069,23 +4090,26 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u64 changed_flags; + u64 orig_flags, new_flags, changed_flags; u32 i, j; - changed_flags = pf->flags; + orig_flags = READ_ONCE(pf->flags); + new_flags = orig_flags; for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { const struct i40e_priv_flags *priv_flags; priv_flags = &i40e_gstrings_priv_flags[i]; - if (priv_flags->read_only) - continue; - if (flags & BIT(i)) - pf->flags |= priv_flags->flag; + new_flags |= priv_flags->flag; else - pf->flags &= ~(priv_flags->flag); + new_flags &= ~(priv_flags->flag); + + /* If this is a read-only flag, it can't be changed */ + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; } if (pf->hw.pf_id != 0) @@ -4096,18 +4120,40 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) priv_flags = &i40e_gl_gstrings_priv_flags[j]; - if (priv_flags->read_only) - continue; - if (flags & BIT(i + j)) - pf->flags |= priv_flags->flag; + new_flags |= priv_flags->flag; else - pf->flags &= ~(priv_flags->flag); + new_flags &= ~(priv_flags->flag); + + /* If this is a read-only flag, it can't be changed */ + if (priv_flags->read_only && + ((orig_flags ^ new_flags) & ~BIT(i))) + return -EOPNOTSUPP; } flags_complete: - /* check for flags that changed */ - changed_flags ^= pf->flags; + /* Before we finalize any flag changes, we need to perform some + * checks to ensure that the changes are supported and safe. + */ + + /* ATR eviction is not supported on all devices */ + if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) && + !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)) + return -EOPNOTSUPP; + + /* Compare and exchange the new flags into place. If we failed, that + * is if cmpxchg64 returns anything but the old value, this means that + * something else has modified the flags variable since we copied it + * originally. We'll just punt with an error and log something in the + * message buffer. + */ + if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) { + dev_warn(&pf->pdev->dev, + "Unable to update pf->flags as it was modified by another thread...\n"); + return -EAGAIN; + } + + changed_flags = orig_flags ^ new_flags; /* Process any additional changes needed as a result of flag changes. * The changed_flags value reflects the list of bits that were @@ -4121,10 +4167,6 @@ flags_complete: set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); } - /* Only allow ATR evict on hardware that is capable of handling it */ - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) - pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED; - if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { u16 sw_flags = 0, valid_flags = 0; int ret; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2db93d3f6d23..6498da8806cb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2595,9 +2595,20 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid) { int err; - if (!vid || vsi->info.pvid) + if (vsi->info.pvid) return -EINVAL; + /* The network stack will attempt to add VID=0, with the intention to + * receive priority tagged packets with a VLAN of 0. 
Our HW receives + * these packets by default when configured to receive untagged + * packets, so we don't need to add a filter for this case. + * Additionally, HW interprets adding a VID=0 filter as meaning to + * receive *only* tagged traffic and stops receiving untagged traffic. + * Thus, we do not want to actually add a filter for VID=0 + */ + if (!vid) + return 0; + /* Locked once because all functions invoked below iterates list*/ spin_lock_bh(&vsi->mac_filter_hash_lock); err = i40e_add_vlan_all_mac(vsi, vid); @@ -2674,15 +2685,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, if (vid >= VLAN_N_VID) return -EINVAL; - /* If the network stack called us with vid = 0 then - * it is asking to receive priority tagged packets with - * vlan id 0. Our HW receives them by default when configured - * to receive untagged packets so there is no need to add an - * extra filter for vlan 0 tagged packets. - */ - if (vid) - ret = i40e_vsi_add_vlan(vsi, vid); - + ret = i40e_vsi_add_vlan(vsi, vid); if (!ret) set_bit(vid, vsi->active_vlans); @@ -2714,44 +2717,6 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, } /** - * i40e_macaddr_init - explicitly write the mac address filters - * - * @vsi: pointer to the vsi - * @macaddr: the MAC address - * - * This is needed when the macaddr has been obtained by other - * means than the default, e.g., from Open Firmware or IDPROM. - * Returns 0 on success, negative on failure - **/ -static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) -{ - int ret; - struct i40e_aqc_add_macvlan_element_data element; - - ret = i40e_aq_mac_address_write(&vsi->back->hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - macaddr, NULL); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "Addr change for VSI failed: %d\n", ret); - return -EADDRNOTAVAIL; - } - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, macaddr); - element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); - ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "add filter failed err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), - i40e_aq_str(&vsi->back->hw, - vsi->back->hw.aq.asq_last_status)); - } - return ret; -} - -/** * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up * @vsi: the vsi being brought back up **/ @@ -2909,22 +2874,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; - cpumask_var_t mask; if (!ring->q_vector || !ring->netdev) return; - /* Single TC mode enable XPS */ - if (vsi->tc_config.numtc <= 1) { - if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) - netif_set_xps_queue(ring->netdev, - &ring->q_vector->affinity_mask, - ring->queue_index); - } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - /* Disable XPS to allow selection based on TC */ - bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); - netif_set_xps_queue(ring->netdev, mask, ring->queue_index); - free_cpumask_var(mask); + if ((vsi->tc_config.numtc <= 1) && + !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + netif_set_xps_queue(ring->netdev, + get_cpu_mask(ring->q_vector->v_idx), + ring->queue_index); } /* schedule our worker thread which will take care of @@ -3203,19 +3161,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) **/ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) { - struct i40e_pf *pf = vsi->back; - int err; - if (vsi->netdev) 
i40e_set_rx_mode(vsi->netdev); - - if (!!(pf->flags & I40E_FLAG_PF_MAC)) { - err = i40e_macaddr_init(vsi, pf->hw.mac.addr); - if (err) { - dev_warn(&pf->pdev->dev, - "could not set up macaddr; err %d\n", err); - } - } } /** @@ -3495,7 +3442,7 @@ static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify, struct i40e_q_vector *q_vector = container_of(notify, struct i40e_q_vector, affinity_notify); - q_vector->affinity_mask = *mask; + cpumask_copy(&q_vector->affinity_mask, mask); } /** @@ -3559,8 +3506,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + /* get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's ok to use here. + */ + irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); } vsi->irqs_ready = true; @@ -4342,7 +4291,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) /* clear the affinity notifier in the IRQ descriptor */ irq_set_affinity_notifier(irq_num, NULL); - /* clear the affinity_mask in the IRQ descriptor */ + /* remove our suggested affinity mask for this IRQ */ irq_set_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); free_irq(irq_num, vsi->q_vectors[i]); @@ -4773,7 +4722,7 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf) { struct net_device *netdev; struct i40e_vsi *vsi; - int i; + unsigned int i; /* Only for LAN VSI */ vsi = pf->vsi[pf->lan_vsi]; @@ -5350,7 +5299,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) int err = 0; /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ - if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT) + if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) goto out; /* Get the initial DCB configuration */ @@ -5400,6 +5349,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) char *speed = "Unknown"; char *fc = "Unknown"; char *fec = ""; + char *req_fec = ""; char *an = ""; new_speed = vsi->back->hw.phy.link_info.link_speed; @@ -5461,6 +5411,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) } if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { + req_fec = ", Requested FEC: None"; fec = ", FEC: None"; an = ", Autoneg: False"; @@ -5473,10 +5424,22 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) else if (vsi->back->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) fec = ", FEC: CL108 RS-FEC"; + + /* 'CL108 RS-FEC' should be displayed when RS is requested, or + * both RS and FC are requested + */ + if (vsi->back->hw.phy.link_info.req_fec_info & + (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) { + if (vsi->back->hw.phy.link_info.req_fec_info & + I40E_AQ_REQUEST_FEC_RS) + req_fec = ", Requested FEC: CL108 RS-FEC"; + else + req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R"; + } } - netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n", - speed, fec, an, fc); + netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n", + speed, req_fec, fec, an, fc); } /** @@ -5656,16 +5619,17 @@ exit: return ret; } -static int __i40e_setup_tc(struct net_device *netdev, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) { - if 
(tc->type != TC_SETUP_MQPRIO) - return -EINVAL; + struct tc_mqprio_qopt *mqprio = type_data; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (type != TC_SETUP_MQPRIO) + return -EOPNOTSUPP; - return i40e_setup_tc(netdev, tc->mqprio->num_tc); + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return i40e_setup_tc(netdev, mqprio->num_tc); } /** @@ -7331,7 +7295,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) wr32(hw, I40E_REG_MSS, val); } - if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { + if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) @@ -7520,6 +7484,18 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } +static const char *i40e_tunnel_name(struct i40e_udp_port_config *port) +{ + switch (port->type) { + case UDP_TUNNEL_TYPE_VXLAN: + return "vxlan"; + case UDP_TUNNEL_TYPE_GENEVE: + return "geneve"; + default: + return "unknown"; + } +} + /** * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters * @pf: board private structure @@ -7565,14 +7541,14 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { - dev_dbg(&pf->pdev->dev, - "%s %s port %d, index %d failed, err %s aq_err %s\n", - pf->udp_ports[i].type ? "vxlan" : "geneve", - port ? "add" : "delete", - port, i, - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, + "%s %s port %d, index %d failed, err %s aq_err %s\n", + i40e_tunnel_name(&pf->udp_ports[i]), + port ? "add" : "delete", + port, i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); pf->udp_ports[i].port = 0; } } @@ -7957,7 +7933,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->tx_itr_setting = pf->tx_itr_default; vsi->tx_rings[i] = ring++; @@ -7974,7 +7950,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->count = vsi->num_desc; ring->size = 0; ring->dcb_tc = 0; - if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->tx_itr_setting = pf->tx_itr_default; @@ -8261,7 +8237,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) q_vector->vsi = vsi; q_vector->v_idx = v_idx; - cpumask_set_cpu(cpu, &q_vector->affinity_mask); + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); if (vsi->netdev) netif_napi_add(vsi->netdev, &q_vector->napi, @@ -8510,7 +8486,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) u8 *lut; int ret; - if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)) + if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) return 0; if (!vsi->rss_size) @@ -8640,7 +8616,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) return i40e_config_rss_aq(vsi, seed, lut, lut_size); else return i40e_config_rss_reg(vsi, seed, lut, lut_size); @@ -8659,7 +8635,7 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { struct i40e_pf *pf = vsi->back; - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) 
return i40e_get_rss_aq(vsi, seed, lut, lut_size); else return i40e_get_rss_reg(vsi, seed, lut, lut_size); @@ -8987,25 +8963,56 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } + if (pf->hw.mac.type == I40E_MAC_X722) { + pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE | + I40E_HW_128_QP_RSS_CAPABLE | + I40E_HW_ATR_EVICT_CAPABLE | + I40E_HW_WB_ON_ITR_CAPABLE | + I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_HW_NO_PCI_LINK_CHECK | + I40E_HW_USE_SET_LLDP_MIB | + I40E_HW_GENEVE_OFFLOAD_CAPABLE | + I40E_HW_PTP_L4_CAPABLE | + I40E_HW_WOL_MC_MAGIC_PKT_WAKE | + I40E_HW_OUTER_UDP_CSUM_CAPABLE); + +#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03 + if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) != + I40E_FDEVICT_PCTYPE_DEFAULT) { + dev_warn(&pf->pdev->dev, + "FD EVICT PCTYPES are not right, disable FD HW EVICT\n"); + pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE; + } + } else if ((pf->hw.aq.api_maj_ver > 1) || + ((pf->hw.aq.api_maj_ver == 1) && + (pf->hw.aq.api_min_ver > 4))) { + /* Supported in FW API version higher than 1.4 */ + pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE; + } + + /* Enable HW ATR eviction if possible */ + if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE) + pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; + if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || (pf->hw.aq.fw_maj_ver < 4))) { - pf->flags |= I40E_FLAG_RESTART_AUTONEG; + pf->hw_features |= I40E_HW_RESTART_AUTONEG; /* No DCB support for FW < v4.33 */ - pf->flags |= I40E_FLAG_NO_DCB_SUPPORT; + pf->hw_features |= I40E_HW_NO_DCB_SUPPORT; } /* Disable FW LLDP if FW < v4.3 */ if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4))) - pf->flags |= I40E_FLAG_STOP_FW_LLDP; + pf->hw_features |= I40E_HW_STOP_FW_LLDP; /* Use the FW Set LLDP MIB API if FW > v4.40 */ if ((pf->hw.mac.type == I40E_MAC_XL710) && (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || (pf->hw.aq.fw_maj_ver >= 5))) - pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; + pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; @@ -9028,29 +9035,6 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_MAX_VF_COUNT); } #endif /* CONFIG_PCI_IOV */ - if (pf->hw.mac.type == I40E_MAC_X722) { - pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE - | I40E_FLAG_128_QP_RSS_CAPABLE - | I40E_FLAG_HW_ATR_EVICT_CAPABLE - | I40E_FLAG_OUTER_UDP_CSUM_CAPABLE - | I40E_FLAG_WB_ON_ITR_CAPABLE - | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE - | I40E_FLAG_NO_PCI_LINK_CHECK - | I40E_FLAG_USE_SET_LLDP_MIB - | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE - | I40E_FLAG_PTP_L4_CAPABLE - | I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE; - } else if ((pf->hw.aq.api_maj_ver > 1) || - ((pf->hw.aq.api_maj_ver == 1) && - (pf->hw.aq.api_min_ver > 4))) { - /* Supported in FW API version higher than 1.4 */ - pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - } - - /* Enable HW ATR eviction if possible */ - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) - pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; - pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; @@ -9231,7 +9215,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; break; case UDP_TUNNEL_TYPE_GENEVE: - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE)) return; pf->udp_ports[next_idx].type = 
I40E_AQC_TUNNEL_TYPE_NGE; break; @@ -9298,7 +9282,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; - if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) + if (!(pf->hw_features & I40E_HW_PORT_ID_VALID)) return -EOPNOTSUPP; ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); @@ -9589,6 +9573,7 @@ static int i40e_xdp(struct net_device *dev, return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); + xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; default: return -EINVAL; @@ -9675,7 +9660,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_RXCSUM | 0; - if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)) + if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE)) netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; @@ -9714,8 +9699,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) i40e_add_mac_filter(vsi, mac_addr); spin_unlock_bh(&vsi->mac_filter_hash_lock); } else { - /* relate the VSI_VMDQ name to the VSI_MAIN name */ - snprintf(netdev->name, IFNAMSIZ, "%sv%%d", + /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we + * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to + * the end, which is 4 bytes long, so force truncation of the + * original name by IFNAMSIZ - 4 + */ + snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", + IFNAMSIZ - 4, pf->vsi[pf->lan_vsi]->netdev->name); random_ether_addr(mac_addr); @@ -9756,8 +9746,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) /* MTU range: 68 - 9706 */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = I40E_MAX_RXBUFFER - - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; return 0; } @@ -9890,13 +9879,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { + /* Single TC condition is not fatal, + * message and continue + */ dev_info(&pf->pdev->dev, "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n", enabled_tc, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - ret = -ENOENT; } } break; @@ -10387,17 +10378,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, switch (vsi->type) { /* setup the netdev if needed */ case I40E_VSI_MAIN: - /* Apply relevant filters if a platform-specific mac - * address was selected. 
- */ - if (!!(pf->flags & I40E_FLAG_PF_MAC)) { - ret = i40e_macaddr_init(vsi, pf->hw.mac.addr); - if (ret) { - dev_warn(&pf->pdev->dev, - "could not set up macaddr; err %d\n", - ret); - } - } case I40E_VSI_VMDQ2: ret = i40e_config_netdev(vsi); if (ret) @@ -10434,7 +10414,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, break; } - if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && + if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && (vsi->type == I40E_VSI_VMDQ2)) { ret = i40e_vsi_config_rss(vsi); } @@ -11443,7 +11423,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ - if (pf->flags & I40E_FLAG_STOP_FW_LLDP) { + if (pf->hw_features & I40E_HW_STOP_FW_LLDP) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, NULL); } @@ -11460,7 +11440,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) - pf->flags |= I40E_FLAG_PORT_ID_VALID; + pf->hw_features |= I40E_HW_PORT_ID_VALID; pci_set_drvdata(pdev, pf); pci_save_state(pdev); @@ -11576,7 +11556,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(hw, I40E_REG_MSS, val); } - if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { + if (pf->hw_features & I40E_HW_RESTART_AUTONEG) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) @@ -11663,7 +11643,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * and will report PCI Gen 1 x 1 by default so don't bother * checking them. */ - if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) { + if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) { char speed[PCI_SPEED_SIZE] = "Unknown"; char width[PCI_WIDTH_SIZE] = "Unknown"; @@ -11734,9 +11714,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) - pf->flags |= I40E_FLAG_PHY_CONTROLS_LEDS; + pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS; if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) - pf->flags |= I40E_FLAG_HAVE_CRT_RETIMER; + pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER; /* print a string summarizing features */ i40e_print_features(pf); @@ -12048,7 +12028,7 @@ static void i40e_shutdown(struct pci_dev *pdev) */ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); i40e_prep_for_reset(pf, false); @@ -12080,7 +12060,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); - if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) + if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); i40e_prep_for_reset(pf, false); @@ -12089,7 +12069,10 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); i40e_stop_misc_vector(pf); - + if (pf->msix_entries) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } retval = pci_save_state(pdev); if (retval) return retval; @@ -12129,6 +12112,15 @@ static int i40e_resume(struct pci_dev *pdev) /* handling the reset will rebuild the device state */ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { clear_bit(__I40E_DOWN, pf->state); + if (pf->msix_entries) { + err = request_irq(pf->msix_entries[0].vector, + i40e_intr, 0, pf->int_name, pf); + if (err) { + dev_err(&pf->pdev->dev, + "request_irq for %s failed: %d\n", + pf->int_name, err); + } + } i40e_reset_and_rebuild(pf, false, false); } @@ -12168,12 +12160,14 @@ static int __init i40e_init_module(void) i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); - /* we will see if single thread per module is enough for now, - * it can't be any worse than using the system workqueue which - * was already single threaded + /* There is no need to throttle the number of active tasks because + * each device limits its own task using a state bit for scheduling + * the service task, and the device tasks do not interfere with each + * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM + * since we need to be able to guarantee forward progress even under + * memory pressure. */ - i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - i40e_driver_name); + i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 26d7e9fe6220..57505b1df98d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -134,8 +134,25 @@ i40e_i40e_acquire_nvm_exit: **/ void i40e_release_nvm(struct i40e_hw *hw) { - if (!hw->nvm.blank_nvm_mode) - i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + i40e_status ret_code = I40E_SUCCESS; + u32 total_delay = 0; + + if (hw->nvm.blank_nvm_mode) + return; + + ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + + /* there are some rare cases when trying to release the resource + * results in an admin Q timeout, so handle them correctly + */ + while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && + (total_delay < hw->aq.asq_cmd_timeout)) { + usleep_range(1000, 2000); + ret_code = i40e_aq_release_resource(hw, + I40E_NVM_RESOURCE_ID, + 0, NULL); + total_delay++; + } } /** @@ -757,6 +774,15 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } + /* Acquire lock to prevent race condition where adminq_task + * can execute after i40e_nvmupd_nvm_read/write but before state + * variables (nvm_wait_opcode, nvm_release_on_done) are updated. + * + * During NVMUpdate, it is observed that lock could be held for + * ~5ms for most commands. However lock is held for ~60ms for + * NVMUPD_CSUM_LCB command. 
+ */ + mutex_lock(&hw->aq.arq_mutex); switch (hw->nvmupd_state) { case I40E_NVMUPD_STATE_INIT: status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); @@ -777,7 +803,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, */ if (cmd->offset == 0xffff) { i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode); - return 0; + status = 0; + goto exit; } status = I40E_ERR_NOT_READY; @@ -792,6 +819,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, *perrno = -ESRCH; break; } +exit: + mutex_unlock(&hw->aq.arq_mutex); return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 1a0be835fa06..d8456c381c99 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -158,13 +158,12 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now, then; + struct timespec64 now; - then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); i40e_ptp_read(pf, &now); - now = timespec64_add(now, then); + timespec64_add_ns(&now, delta); i40e_ptp_write(pf, (const struct timespec64 *)&now); mutex_unlock(&pf->tmreg_lock); @@ -570,7 +569,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE)) + if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) return -ERANGE; pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK | @@ -584,7 +583,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - if (!(pf->flags & I40E_FLAG_PTP_L4_CAPABLE)) + if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) return -ERANGE; /* fall through */ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -593,7 +592,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, pf->ptp_rx = true; tsyntype = I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK | I40E_PRTTSYN_CTL1_TSYNTYPE_V2; - if (pf->flags & I40E_FLAG_PTP_L4_CAPABLE) { + if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) { tsyntype |= I40E_PRTTSYN_CTL1_UDP_ENA_MASK; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; } else { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2194960d5855..1519dfb851d0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -860,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this @@ -959,19 +959,31 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; - struct i40e_q_vector *qv = rc->ring->q_vector; u32 new_itr = rc->itr; int bytes_per_int; - int usecs; + unsigned int usecs, estimated_usecs; if (rc->total_packets == 0 || !rc->itr) return false; + usecs = (rc->itr << 1) * 
ITR_COUNTDOWN_START; + bytes_per_int = rc->total_bytes / usecs; + + /* The calculations in this algorithm depend on interrupts actually + * firing at the ITR rate. This may not happen if the packet rate is + * really low, or if we've been napi polling. Check to make sure + * that's not the case before we continue. + */ + estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update); + if (estimated_usecs > usecs) { + new_latency_range = I40E_LOW_LATENCY; + goto reset_latency; + } + /* simple throttlerate management * 0-10MB/s lowest (50000 ints/s) * 10-20MB/s low (20000 ints/s) * 20-1249MB/s bulk (18000 ints/s) - * > 40000 Rx packets per second (8000 ints/s) * * The math works out because the divisor is in 10^(-6) which * turns the bytes/us input value into MB/s values, but @@ -979,9 +991,6 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) * are in 2 usec increments in the ITR registers, and make sure * to use the smoothed values that the countdown timer gives us. */ - usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; - bytes_per_int = rc->total_bytes / usecs; - switch (new_latency_range) { case I40E_LOWEST_LATENCY: if (bytes_per_int > 10) @@ -994,24 +1003,13 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: - case I40E_ULTRA_LATENCY: default: if (bytes_per_int <= 20) new_latency_range = I40E_LOW_LATENCY; break; } - /* this is to adjust RX more aggressively when streaming small - * packets. The value of 40000 was picked as it is just beyond - * what the hardware can receive per second if in low latency - * mode. - */ -#define RX_ULTRA_PACKET_RATE 40000 - - if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) && - (&qv->rx == rc)) - new_latency_range = I40E_ULTRA_LATENCY; - +reset_latency: rc->latency_range = new_latency_range; switch (new_latency_range) { @@ -1024,21 +1022,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) case I40E_BULK_LATENCY: new_itr = I40E_ITR_18K; break; - case I40E_ULTRA_LATENCY: - new_itr = I40E_ITR_8K; - break; default: break; } rc->total_bytes = 0; rc->total_packets = 0; + rc->last_itr_update = jiffies; if (new_itr != rc->itr) { rc->itr = new_itr; return true; } - return false; } @@ -2065,7 +2060,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false, xdp_xmit = false; - while (likely(total_rx_packets < budget)) { + while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; struct xdp_buff xdp; @@ -2198,7 +2193,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ - return failure ? budget : total_rx_packets; + return failure ? 
budget : (int)total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) @@ -2243,6 +2238,12 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, int idx = q_vector->v_idx; int rx_itr_setting, tx_itr_setting; + /* If we don't have MSIX, then we only need to re-enable icr0 */ + if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { + i40e_irq_dynamic_enable_icr0(vsi->back, false); + return; + } + vector = (q_vector->v_idx + vsi->base_vector); /* avoid dynamic calculation if in countdown mode OR if @@ -2363,7 +2364,6 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* If work not completed, return budget and polling will return */ if (!clean_complete) { - const cpumask_t *aff_mask = &q_vector->affinity_mask; int cpu_id = smp_processor_id(); /* It is possible that the interrupt affinity has changed but, @@ -2373,15 +2373,22 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * continue to poll, otherwise we must stop polling so the * interrupt can move to the correct cpu. */ - if (likely(cpumask_test_cpu(cpu_id, aff_mask) || - !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) { + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + + /* Force an interrupt */ + i40e_force_wb(vsi, q_vector); + + /* Return budget-1 so that polling stops */ + return budget - 1; + } tx_only: - if (arm_wb) { - q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40e_enable_wb_on_itr(vsi, q_vector); - } - return budget; + if (arm_wb) { + q_vector->tx.ring[0].tx_stats.tx_force_wb++; + i40e_enable_wb_on_itr(vsi, q_vector); } + return budget; } if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) @@ -2390,16 +2397,7 @@ tx_only: /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete_done(napi, work_done); - /* If we're prematurely stopping polling to fix the interrupt - * affinity we want to make sure polling starts back up so we - * issue a call to i40e_force_wb which triggers a SW interrupt. - */ - if (!clean_complete) - i40e_force_wb(vsi, q_vector); - else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) - i40e_irq_dynamic_enable_icr0(vsi->back, false); - else - i40e_update_enable_itr(vsi, q_vector); + i40e_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } @@ -2453,9 +2451,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, hlen = (hdr.network[0] & 0x0F) << 2; l4_proto = hdr.ipv4->protocol; } else { - hlen = hdr.network - skb->data; - l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); - hlen -= hdr.network - skb->data; + /* find the start of the innermost ipv6 header */ + unsigned int inner_hlen = hdr.network - skb->data; + unsigned int h_offset = inner_hlen; + + /* this function updates h_offset to the end of the header */ + l4_proto = + ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); + /* hlen will contain our best estimate of the tcp header */ + hlen = h_offset - inner_hlen; } if (l4_proto != IPPROTO_TCP) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index b288d58313a6..2f848bc5e391 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -112,7 +112,7 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) #define i40e_pf_get_default_rss_hena(pf) \ - (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ + (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) /* Supported Rx Buffer Sizes (a multiple of 128) */ @@ -130,6 +130,7 @@ enum i40e_dyn_idx_t { * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ #define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) #define i40e_rx_desc i40e_32byte_rx_desc #define I40E_RX_DMA_ATTR \ @@ -453,7 +454,6 @@ enum i40e_latency_range { I40E_LOWEST_LATENCY = 0, I40E_LOW_LATENCY = 1, I40E_BULK_LATENCY = 2, - I40E_ULTRA_LATENCY = 3, }; struct i40e_ring_container { @@ -461,6 +461,7 @@ struct i40e_ring_container { struct i40e_ring *ring; unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ + unsigned long last_itr_update; /* jiffies of last ITR update */ u16 count; enum i40e_latency_range latency_range; u16 itr; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 3a18ed13edc4..fd4bbdd88b57 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -185,6 +185,7 @@ struct i40e_link_status { enum i40e_aq_link_speed link_speed; u8 link_info; u8 an_info; + u8 req_fec_info; u8 fec_info; u8 ext_info; u8 loopback; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ecbe40ea8ffe..4d1e670f490e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1528,54 +1528,54 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN; - vfres->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2; + vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN; if (i40e_vf_client_capable(pf, vf->vf_id) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { - if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && + if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; else - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; } - if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - vfres->vf_offload_flags |= + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP; - if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && + if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) && (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + vfres->vf_cap_flags |= 
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", vf->vf_id); - ret = I40E_ERR_PARAM; + aq_ret = I40E_ERR_PARAM; goto err; } - vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; } - if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { + if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) { if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - vfres->vf_offload_flags |= + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; } @@ -1741,16 +1741,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, NULL); } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - aq_ret = 0; - if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { - aq_ret = - i40e_aq_set_vsi_uc_promisc_on_vlan(hw, - vsi->seid, - alluni, - f->vlan, - NULL); - aq_err = pf->hw.aq.asq_last_status; - } + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, + vsi->seid, + alluni, + f->vlan, + NULL); + aq_err = pf->hw.aq.asq_last_status; if (aq_ret) dev_err(&pf->pdev->dev, "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", @@ -1760,7 +1758,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, - allmulti, NULL, + alluni, NULL, true); aq_err = pf->hw.aq.asq_last_status; if (aq_ret) { @@ -2532,6 +2530,60 @@ err: } /** + * i40e_vc_enable_vlan_stripping + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Enable vlan header stripping for the VF + **/ +static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg, + u16 msglen) +{ + struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + i40e_vlan_stripping_enable(vsi); + + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, + aq_ret); +} + +/** + * i40e_vc_disable_vlan_stripping + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Disable vlan header stripping for the VF + **/ +static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg, + u16 msglen) +{ + struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx]; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + i40e_vlan_stripping_disable(vsi); + + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, + aq_ret); +} + +/** * i40e_vc_process_vf_msg * @pf: pointer to the PF structure * @vf_id: source VF id @@ -2650,6 +2702,12 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_SET_RSS_HENA: ret = i40e_vc_set_rss_hena(vf, msg, msglen); break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen); + break; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen); + break; case VIRTCHNL_OP_UNKNOWN: default: @@ -2764,7 +2822,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, 
int vf_id, u8 *mac) spin_unlock_bh(&vsi->mac_filter_hash_lock); - dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ if (i40e_sync_vsi_filters(vsi)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -2772,7 +2829,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } ether_addr_copy(vf->default_lan_addr.addr, mac); - vf->pf_set_mac = true; + + if (is_zero_ether_addr(mac)) { + vf->pf_set_mac = false; + dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); + } else { + vf->pf_set_mac = true; + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", + mac, vf_id); + } + /* Force the VF driver stop so it has to reload with new MAC address */ i40e_vc_disable_vf(pf, vf); dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); |