Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/i40e/i40e_main.c | 1352
1 file changed, 640 insertions, 712 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 82a95cc2c8ee..d5c9c9e06ff5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -32,6 +32,12 @@
 #include "i40e.h"
 #include "i40e_diag.h"
 #include <net/udp_tunnel.h>
+/* All i40e tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
 
 const char i40e_driver_name[] = "i40e";
 static const char i40e_driver_string[] =
@@ -39,9 +45,9 @@ static const char i40e_driver_string[] =
 
 #define DRV_KERN "-k"
 
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 14
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -50,13 +56,16 @@ static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporatio
 
 /* a bit of forward declarations */
 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
-static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
 static int i40e_add_vsi(struct i40e_vsi *vsi);
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static int i40e_reset(struct i40e_pf *pf);
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -286,8 +295,8 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
  **/
 void i40e_service_event_schedule(struct i40e_pf *pf)
 {
-	if (!test_bit(__I40E_DOWN, &pf->state) &&
-	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+	if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
+	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
 		queue_work(i40e_wq, &pf->service_task);
 }
 
@@ -299,11 +308,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
  * device is munged, not just the one netdev port, so go for the full
  * reset.
  **/
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev)
-#else
 static void i40e_tx_timeout(struct net_device *netdev)
-#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -372,13 +377,13 @@ static void i40e_tx_timeout(struct net_device *netdev)
 
 	switch (pf->tx_timeout_recovery_level) {
 	case 1:
-		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 		break;
 	case 2:
-		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
 		break;
 	case 3:
-		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
 		break;
 	default:
 		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
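Editor's note: the CREATE_TRACE_POINTS hunk above follows the standard kernel tracepoint convention — the trace header may be included from any number of files, but exactly one translation unit must instantiate the tracepoints. A minimal sketch of the pattern; the example event and its fields are illustrative, not taken from i40e_trace.h:

/* In exactly one .c file (here i40e_main.c): instantiate tracepoints. */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

/* Every other user includes the header without the define:
 *     #include "i40e_trace.h"
 *
 * The header itself is built on TRACE_EVENT(); a hypothetical event
 * would look roughly like:
 *
 * TRACE_EVENT(i40e_example,
 *	TP_PROTO(unsigned int qid),
 *	TP_ARGS(qid),
 *	TP_STRUCT__entry(__field(unsigned int, qid)),
 *	TP_fast_assign(__entry->qid = qid;),
 *	TP_printk("qid %u", __entry->qid));
 */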
@@ -408,10 +413,7 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the service task.
  **/
-#ifndef I40E_FCOE
-static
-#endif
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
+static void i40e_get_netdev_stats_struct(struct net_device *netdev,
				  struct rtnl_link_stats64 *stats)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -420,7 +422,7 @@ void i40e_get_netdev_stats_struct(struct net_device *netdev,
 	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
 	int i;
 
-	if (test_bit(__I40E_DOWN, &vsi->state))
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 		return;
 
 	if (!vsi->tx_rings)
@@ -723,55 +725,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
 	veb->stat_offsets_loaded = true;
 }
 
-#ifdef I40E_FCOE
-/**
- * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
- * @vsi: the VSI that is capable of doing FCoE
- **/
-static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
-{
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
-	struct i40e_fcoe_stats *ofs;
-	struct i40e_fcoe_stats *fs;	/* device's eth stats */
-	int idx;
-
-	if (vsi->type != I40E_VSI_FCOE)
-		return;
-
-	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
-	fs = &vsi->fcoe_stats;
-	ofs = &vsi->fcoe_stats_offsets;
-
-	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
-			   vsi->fcoe_stat_offsets_loaded,
-			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
-	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
-			   vsi->fcoe_stat_offsets_loaded,
-			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
-	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
-			   vsi->fcoe_stat_offsets_loaded,
-			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
-	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
-			   vsi->fcoe_stat_offsets_loaded,
-			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
-	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
-			   vsi->fcoe_stat_offsets_loaded,
-			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
-	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
-			   vsi->fcoe_stat_offsets_loaded,
-			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
-	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
-			   vsi->fcoe_stat_offsets_loaded,
-			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
-	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
-			   vsi->fcoe_stat_offsets_loaded,
-			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
-
-	vsi->fcoe_stat_offsets_loaded = true;
-}
-
-#endif
 /**
  * i40e_update_vsi_stats - Update the vsi statistics counters.
  * @vsi: the VSI to be updated
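Editor's note: the recurring `&pf->state` → `pf->state` and `__I40E_DOWN` → `__I40E_VSI_DOWN` substitutions throughout this diff come from turning the single `unsigned long state` word into a bitmap array, so the address-of operator disappears at every call site. A before/after sketch with stand-in names (the enum members here are illustrative, not the driver's):

#include <linux/bitops.h>

/* Stand-in for the real state enum; the terminating size member is
 * an assumption used only to size the bitmap. */
enum example_state {
	__EX_VSI_DOWN,
	__EX_RESET_RECOVERY_PENDING,
	__EX_STATE_SIZE,
};

struct old_pf {
	unsigned long state;	/* one word: test_bit(b, &pf->state) */
};

struct new_pf {
	/* bitmap array: test_bit(b, pf->state) -- no '&'; the array
	 * decays to the unsigned long * that test_bit() expects, and
	 * the state space can now grow past BITS_PER_LONG flags. */
	DECLARE_BITMAP(state, __EX_STATE_SIZE);
};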
@@ -790,7 +743,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	struct i40e_eth_stats *oes;
 	struct i40e_eth_stats *es;	/* device's eth stats */
 	u32 tx_restart, tx_busy;
-	u64 tx_lost_interrupt;
 	struct i40e_ring *p;
 	u32 rx_page, rx_buf;
 	u64 bytes, packets;
@@ -801,8 +753,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	u64 tx_p, tx_b;
 	u16 q;
 
-	if (test_bit(__I40E_DOWN, &vsi->state) ||
-	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
+	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
+	    test_bit(__I40E_CONFIG_BUSY, pf->state))
 		return;
 
 	ns = i40e_get_vsi_stats_struct(vsi);
@@ -816,7 +768,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	rx_b = rx_p = 0;
 	tx_b = tx_p = 0;
 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
-	tx_lost_interrupt = 0;
 	rx_page = 0;
 	rx_buf = 0;
 	rcu_read_lock();
@@ -835,7 +786,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 		tx_busy += p->tx_stats.tx_busy;
 		tx_linearize += p->tx_stats.tx_linearize;
 		tx_force_wb += p->tx_stats.tx_force_wb;
-		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
 
 		/* Rx queue is part of the same block as Tx queue */
 		p = &p[1];
@@ -854,7 +804,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	vsi->tx_busy = tx_busy;
 	vsi->tx_linearize = tx_linearize;
 	vsi->tx_force_wb = tx_force_wb;
-	vsi->tx_lost_interrupt = tx_lost_interrupt;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
 
@@ -1101,13 +1050,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
-	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
 		nsd->fd_sb_status = true;
 	else
 		nsd->fd_sb_status = false;
 
 	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
-	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
 		nsd->fd_atr_status = true;
 	else
 		nsd->fd_atr_status = false;
@@ -1129,9 +1078,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 
 	i40e_update_pf_stats(pf);
 	i40e_update_vsi_stats(vsi);
-#ifdef I40E_FCOE
-	i40e_update_fcoe_stats(vsi);
-#endif
 }
 
 /**
@@ -1400,7 +1346,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
-		if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
+		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
@@ -1562,11 +1508,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
  *
  * Returns 0 on success, negative on failure
  **/
-#ifdef I40E_FCOE
-int i40e_set_mac(struct net_device *netdev, void *p)
-#else
 static int i40e_set_mac(struct net_device *netdev, void *p)
-#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -1583,8 +1525,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 		return 0;
 	}
 
-	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
 		return -EADDRNOTAVAIL;
 
 	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
@@ -1626,17 +1568,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
  *
  * Setup VSI queue mapping for enabled traffic classes.
  **/
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
-			      struct i40e_vsi_context *ctxt,
-			      u8 enabled_tc,
-			      bool is_add)
-#else
 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
-#endif
 {
 	struct i40e_pf *pf = vsi->back;
 	u16 sections = 0;
@@ -1686,11 +1621,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			qcount = min_t(int, pf->alloc_rss_size,
				       num_tc_qps);
			break;
-#ifdef I40E_FCOE
-		case I40E_VSI_FCOE:
-			qcount = num_tc_qps;
-			break;
-#endif
		case I40E_VSI_FDIR:
		case I40E_VSI_SRIOV:
		case I40E_VSI_VMDQ2:
@@ -1800,11 +1730,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
  * i40e_set_rx_mode - NDO callback to set the netdev filters
  * @netdev: network interface device structure
  **/
-#ifdef I40E_FCOE
-void i40e_set_rx_mode(struct net_device *netdev)
-#else
 static void i40e_set_rx_mode(struct net_device *netdev)
-#endif
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -1883,19 +1809,12 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
 static struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
 {
-	while (next) {
-		next = hlist_entry(next->hlist.next,
-				   typeof(struct i40e_new_mac_filter),
-				   hlist);
-
-		/* keep going if we found a broadcast filter */
-		if (next && is_broadcast_ether_addr(next->f->macaddr))
-			continue;
-
-		break;
+	hlist_for_each_entry_continue(next, hlist) {
+		if (!is_broadcast_ether_addr(next->f->macaddr))
+			return next;
 	}
 
-	return next;
+	return NULL;
 }
 
 /**
@@ -2001,7 +1920,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
 
	if (fcnt != num_add) {
		*promisc_changed = true;
-		set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
@@ -2084,7 +2003,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 	struct i40e_aqc_add_macvlan_element_data *add_list;
 	struct i40e_aqc_remove_macvlan_element_data *del_list;
 
-	while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
 	pf = vsi->back;
@@ -2220,8 +2139,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
		num_add = 0;
 
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
-			if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
-				     &vsi->state)) {
+			if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
+				     vsi->state)) {
				new->state = I40E_FILTER_FAILED;
				continue;
			}
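Editor's note: the i40e_next_filter() rewrite above swaps an open-coded hlist_entry() walk (which must re-check for NULL at every step) for the stock list iterator. A sketch of the same skip-unwanted-nodes idiom on a hypothetical struct:

#include <linux/list.h>

struct item {
	struct hlist_node node;
	bool is_broadcast;
};

/* Resume iteration at the element *after* 'pos' and return the next
 * element that is not a broadcast entry, or NULL at end of list. */
static struct item *next_unicast(struct item *pos)
{
	hlist_for_each_entry_continue(pos, node) {
		if (!pos->is_broadcast)
			return pos;
	}
	return NULL;
}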
@@ -2308,20 +2227,20 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
-	if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+	if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
	    !promisc_changed && !failed_filters &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
-		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		promisc_changed = true;
		vsi->promisc_threshold = 0;
	}
 
	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
-		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}
 
@@ -2346,12 +2265,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
	}
 
	if ((changed_flags & IFF_PROMISC) ||
	    (promisc_changed &&
-	     test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
+	     test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))) {
		bool cur_promisc;
 
		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
-			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
-					&vsi->state));
+			       test_bit(__I40E_VSI_OVERFLOW_PROMISC,
+					vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
@@ -2434,7 +2353,7 @@ out:
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
 
-	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;
 
 err_no_memory:
@@ -2446,7 +2365,7 @@ err_no_memory_locked:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
-	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
 }
 
@@ -2487,13 +2406,15 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 {
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
 
	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
-	i40e_notify_client_of_l2_param_changes(vsi);
+	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+		      I40E_FLAG_CLIENT_L2_CHANGE);
	return 0;
 }
 
@@ -2707,13 +2628,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
  *
  * net_device_ops implementation for adding vlan ids
  **/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
-			 __always_unused __be16 proto, u16 vid)
-#else
 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
-#endif
 {
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
@@ -2744,13 +2660,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
  *
  * net_device_ops implementation for removing vlan ids
  **/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
-			  __always_unused __be16 proto, u16 vid)
-#else
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
-#endif
 {
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
@@ -2920,9 +2831,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
-	i40e_fcoe_setup_ddp_resources(vsi);
-#endif
	return err;
 }
@@ -2942,9 +2850,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
-	i40e_fcoe_free_ddp_resources(vsi);
-#endif
 }
 
 /**
@@ -3015,9 +2920,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
-#ifdef I40E_FCOE
-	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
@@ -3098,7 +3000,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 
	ring->rx_buf_len = vsi->rx_buf_len;
 
-	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;
@@ -3120,9 +3023,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
-#ifdef I40E_FCOE
-	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;
@@ -3144,6 +3044,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
		return -ENOMEM;
	}
 
+	/* configure Rx buffer alignment */
+	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+		clear_ring_build_skb_enabled(ring);
+	else
+		set_ring_build_skb_enabled(ring);
+
	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);
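Editor's note: the rx_ctx.dbuff change above is a rounding fix. The old code pre-ALIGN()ed rx_buf_len so a plain right shift was exact; with that alignment removed (see the next hunk), the round-up moves into the dbuff computation itself, since a truncating shift would under-report the buffer to hardware. Worked numbers, assuming the new 1536-byte buffer with NET_IP_ALIGN == 2:

#define DBUFF_SHIFT	7		/* dbuff is in 128-byte units */
#define BUF_LEN		(1536 - 2)	/* 1534 bytes */

/* old: truncates, under-reports the buffer                    */
/*   1534 >> 7                  == 11  ->  11 * 128 == 1408    */
/* new: rounds up, covers the full buffer                      */
/*   DIV_ROUND_UP(1534, 1 << 7) == 12  ->  12 * 128 == 1536    */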
@@ -3181,27 +3087,21 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
	int err = 0;
	u16 i;
 
-	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
-		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
-			       + ETH_FCS_LEN + VLAN_HLEN;
-	else
-		vsi->max_frame = I40E_RXBUFFER_2048;
-
-	vsi->rx_buf_len = I40E_RXBUFFER_2048;
-
-#ifdef I40E_FCOE
-	/* setup rx buffer for FCoE */
-	if ((vsi->type == I40E_VSI_FCOE) &&
-	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
-		vsi->rx_buf_len = I40E_RXBUFFER_3072;
-		vsi->max_frame = I40E_RXBUFFER_3072;
+	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+		vsi->max_frame = I40E_MAX_RXBUFFER;
+		vsi->rx_buf_len = I40E_RXBUFFER_2048;
+#if (PAGE_SIZE < 8192)
+	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+	} else {
+		vsi->max_frame = I40E_MAX_RXBUFFER;
+		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}
-#endif /* I40E_FCOE */
 
-	/* round up for the chip's needs */
-	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
-
	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
@@ -3281,6 +3181,12 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;
 
+	/* Reset FDir counters as we're replaying all existing filters */
+	pf->fd_tcp4_filter_cnt = 0;
+	pf->fd_udp4_filter_cnt = 0;
+	pf->fd_sctp4_filter_cnt = 0;
+	pf->fd_ip4_filter_cnt = 0;
+
	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
@@ -3705,29 +3611,29 @@ static irqreturn_t i40e_intr(int irq, void *data)
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
-		if (!test_bit(__I40E_DOWN, &pf->state))
+		if (!test_bit(__I40E_VSI_DOWN, pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}
 
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
-		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}
 
	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
-		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	}
 
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
-		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+		set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	}
 
	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
-		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
-			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
@@ -3738,7 +3644,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
-			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
+			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
		}
	}
 
@@ -3771,7 +3677,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
	    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
	    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
		dev_info(&pf->pdev->dev, "device will be reset\n");
-		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}
	ena_mask &= ~icr0_remaining;
@@ -3781,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-	if (!test_bit(__I40E_DOWN, &pf->state)) {
+	if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf, false);
	}
@@ -3993,11 +3899,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
  * This is used by netconsole to send skbs without having to re-enable
  * interrupts. It's not called while the normal interrupt routine is executing.
  **/
-#ifdef I40E_FCOE
-void i40e_netpoll(struct net_device *netdev)
-#else
 static void i40e_netpoll(struct net_device *netdev)
-#endif
 {
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
@@ -4005,7 +3907,7 @@ static void i40e_netpoll(struct net_device *netdev)
	int i;
 
	/* if interface is down do nothing */
-	if (test_bit(__I40E_DOWN, &vsi->state))
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;
 
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -4017,6 +3919,8 @@ static void i40e_netpoll(struct net_device *netdev)
 }
 #endif
 
+#define I40E_QTX_ENA_WAIT_COUNT 50
+
 /**
  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
  * @pf: the PF being configured
@@ -4047,6 +3951,50 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
 }
 
 /**
+ * i40e_control_tx_q - Start or stop a particular Tx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u32 tx_reg;
+	int i;
+
+	/* warn the TX unit of coming changes */
+	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+	if (!enable)
+		usleep_range(10, 20);
+
+	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+			break;
+		usleep_range(1000, 2000);
+	}
+
+	/* Skip if the queue is already in the requested state */
+	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+		return;
+
+	/* turn on/off the queue */
+	if (enable) {
+		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
+		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+	} else {
+		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+	}
+
+	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+}
+
+/**
  * i40e_vsi_control_tx - Start or stop a VSI's rings
  * @vsi: the VSI being configured
  * @enable: start or stop the rings
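Editor's note: i40e_control_tx_q() above (and its Rx twin below) poll the same REQ/STAT handshake before flipping a queue: hardware mirrors the requested enable bit into the status bit once the previous transition completes. A condensed sketch of that wait, reusing the register symbols from the hunk above:

/* Wait until a previous enable/disable request has been absorbed:
 * the REQ bit the driver wrote must match the STAT bit hardware
 * reports, otherwise a new write could race the ongoing change. */
for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
	tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
	if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
	    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
		break;			/* handshake settled */
	usleep_range(1000, 2000);	/* give hardware time */
}
/* Waiting for the *new* state is deliberately left to the caller
 * (i40e_pf_txq_wait()), so batched callers can overlap the waits. */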
@@ -4054,41 +4002,11 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 {
	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
-	int i, j, pf_q, ret = 0;
-	u32 tx_reg;
+	int i, pf_q, ret = 0;
 
	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-
-		/* warn the TX unit of coming changes */
-		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
-		if (!enable)
-			usleep_range(10, 20);
-
-		for (j = 0; j < 50; j++) {
-			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
-			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
-				break;
-			usleep_range(1000, 2000);
-		}
-		/* Skip if the queue is already in the requested state */
-		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-			continue;
-
-		/* turn on/off the queue */
-		if (enable) {
-			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
-			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
-		} else {
-			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
-		}
-
-		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
-		/* No waiting for the Tx queue to disable */
-		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
-			continue;
+		i40e_control_tx_q(pf, pf_q, enable);
 
		/* wait for the change to finish */
		ret = i40e_pf_txq_wait(pf, pf_q, enable);
@@ -4100,8 +4018,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
		}
	}
 
-	if (hw->revision_id == 0)
-		mdelay(50);
	return ret;
 }
 
@@ -4135,6 +4051,43 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
 }
 
 /**
+ * i40e_control_rx_q - Start or stop a particular Rx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u32 rx_reg;
+	int i;
+
+	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+			break;
+		usleep_range(1000, 2000);
+	}
+
+	/* Skip if the queue is already in the requested state */
+	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+		return;
+
+	/* turn on/off the queue */
+	if (enable)
+		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+	else
+		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+
+	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+}
+
+/**
  * i40e_vsi_control_rx - Start or stop a VSI's rings
  * @vsi: the VSI being configured
  * @enable: start or stop the rings
@@ -4142,33 +4095,11 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 {
	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
-	int i, j, pf_q, ret = 0;
-	u32 rx_reg;
+	int i, pf_q, ret = 0;
 
	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-		for (j = 0; j < 50; j++) {
-			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
-			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
-			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
-				break;
-			usleep_range(1000, 2000);
-		}
-
-		/* Skip if the queue is already in the requested state */
-		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-			continue;
-
-		/* turn on/off the queue */
-		if (enable)
-			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
-		else
-			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
-		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
-		/* No waiting for the Tx queue to disable */
-		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
-			continue;
+		i40e_control_rx_q(pf, pf_q, enable);
 
		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
@@ -4180,6 +4111,12 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
		}
	}
 
+	/* Due to HW errata, on Rx disable only, the register can indicate done
+	 * before it really is. Needs 50ms to be sure
+	 */
+	if (!enable)
+		mdelay(50);
+
	return ret;
 }
 
@@ -4206,6 +4143,10 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
  **/
 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
 {
+	/* When port TX is suspended, don't wait */
+	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
+		return i40e_vsi_stop_rings_no_wait(vsi);
+
	/* do rx first for enable and last for disable
	 * Ignore return value, we need to shutdown whatever we can
	 */
@@ -4214,6 +4155,29 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
 }
 
 /**
+ * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
+ * @vsi: the VSI being shutdown
+ *
+ * This function stops all the rings for a VSI but does not delay to verify
+ * that rings have been disabled. It is expected that the caller is shutting
+ * down multiple VSIs at once and will delay together for all the VSIs after
+ * initiating the shutdown. This is particularly useful for shutting down lots
+ * of VFs together. Otherwise, a large delay can be incurred while configuring
+ * each VSI in serial.
+ **/
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
+{
+	struct i40e_pf *pf = vsi->back;
+	int i, pf_q;
+
+	pf_q = vsi->base_queue;
+	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+		i40e_control_tx_q(pf, pf_q, false);
+		i40e_control_rx_q(pf, pf_q, false);
+	}
+}
+
+/**
  * i40e_vsi_free_irq - Free the irq association with the OS
  * @vsi: the VSI being configured
  **/
@@ -4471,17 +4435,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
  **/
 static void i40e_vsi_close(struct i40e_vsi *vsi)
 {
-	bool reset = false;
-
-	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+	struct i40e_pf *pf = vsi->back;
+
+	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
-	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-		reset = true;
-	i40e_notify_client_of_netdev_close(vsi, reset);
+	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+		pf->flags |= I40E_FLAG_CLIENT_RESET;
 }
 
 /**
@@ -4490,18 +4453,10 @@ static void i40e_vsi_close(struct i40e_vsi *vsi)
  **/
 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
 {
-	if (test_bit(__I40E_DOWN, &vsi->state))
-		return;
-
-	/* No need to disable FCoE VSI when Tx suspended */
-	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
-	    vsi->type == I40E_VSI_FCOE) {
-		dev_dbg(&vsi->back->pdev->dev,
-			 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
+	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;
-	}
 
-	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
@@ -4514,10 +4469,9 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
  **/
 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
 {
-	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;
 
-	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
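Editor's note: i40e_vsi_stop_rings_no_wait() exists so a caller tearing down many VSIs (e.g. all VFs) pays the settling delay once instead of per VSI. A sketch of the intended call pattern; `vsi_list` and `num_vsis` are hypothetical stand-ins for the caller's bookkeeping:

/* Initiate shutdown on every VSI first... */
for (i = 0; i < num_vsis; i++)
	i40e_vsi_stop_rings_no_wait(vsi_list[i]);

/* ...then absorb one worst-case settling time for all of them,
 * e.g. the 50 ms Rx-disable erratum delay that
 * i40e_vsi_control_rx() otherwise pays per VSI. */
mdelay(50);

/* Finally verify the disables, again one pass per VSI: */
for (i = 0; i < num_vsis; i++)
	i40e_vsi_wait_queues_disabled(vsi_list[i]);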
@@ -4552,21 +4506,20 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
	}
 }
 
-#ifdef CONFIG_I40E_DCB
 /**
  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
  * @vsi: the VSI being configured
  *
- * This function waits for the given VSI's queues to be disabled.
+ * Wait until all queues on a given VSI have been disabled.
  **/
-static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
 {
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;
 
	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-		/* Check and wait for the disable status of the queue */
+		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
@@ -4574,11 +4527,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
				 vsi->seid, pf_q);
			return ret;
		}
-	}
-
-	pf_q = vsi->base_queue;
-	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-		/* Check and wait for the disable status of the queue */
+		/* Check and wait for the Tx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
@@ -4591,6 +4540,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
	return 0;
 }
 
+#ifdef CONFIG_I40E_DCB
 /**
  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
  * @pf: the PF
@@ -4603,8 +4553,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
	int v, ret = 0;
 
	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
-		/* No need to wait for FCoE VSI queues */
-		if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+		if (pf->vsi[v]) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
@@ -4622,16 +4571,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
  * @vsi: Pointer to VSI struct
  *
  * This function checks specified queue for given VSI. Detects hung condition.
- * Sets hung bit since it is two step process. Before next run of service task
- * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
- * hung condition remain unchanged and during subsequent run, this function
- * issues SW interrupt to recover from hung condition.
+ * We proactively detect hung TX queues by checking if interrupts are disabled
+ * but there are pending descriptors. If it appears hung, attempt to recover
+ * by triggering a SW interrupt.
 **/
 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 {
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf *pf;
-	u32 head, val, tx_pending_hw;
+	u32 val, tx_pending;
	int i;
 
	pf = vsi->back;
@@ -4657,47 +4605,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
-	head = i40e_get_head(tx_ring);
-
-	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
-
-	/* HW is done executing descriptors, updated HEAD write back,
-	 * but SW hasn't processed those descriptors. If interrupt is
-	 * not generated from this point ON, it could result into
-	 * dev_watchdog detecting timeout on those netdev_queue,
-	 * hence proactively trigger SW interrupt.
-	 */
-	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
-		/* NAPI Poll didn't run and clear since it was set */
-		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
-				       &tx_ring->q_vector->hung_detected)) {
-			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
-				    vsi->seid, q_idx, tx_pending_hw,
-				    tx_ring->next_to_clean, head,
-				    tx_ring->next_to_use,
-				    readl(tx_ring->tail));
-			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
-				    vsi->seid, q_idx, val);
-			i40e_force_wb(vsi, tx_ring->q_vector);
-		} else {
-			/* First Chance - detected possible hung */
-			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
-				&tx_ring->q_vector->hung_detected);
-		}
-	}
+	tx_pending = i40e_get_tx_pending(tx_ring);
 
-	/* This is the case where we have interrupts missing,
-	 * so the tx_pending in HW will most likely be 0, but we
-	 * will have tx_pending in SW since the WB happened but the
-	 * interrupt got lost.
+	/* Interrupts are disabled and TX pending is non-zero,
+	 * trigger the SW interrupt (don't wait). Worst case
+	 * there will be one extra interrupt which may result
+	 * into not cleaning any queues because queues are cleaned.
	 */
-	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
-	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
-		local_bh_disable();
-		if (napi_reschedule(&tx_ring->q_vector->napi))
-			tx_ring->tx_stats.tx_lost_interrupt++;
-		local_bh_enable();
-	}
+	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+		i40e_force_wb(vsi, tx_ring->q_vector);
 }
 
 /**
@@ -4721,8 +4637,8 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf)
		return;
 
	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
-	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
-	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
+	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return;
 
	/* Make sure type is MAIN VSI */
@@ -5228,20 +5144,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
			continue;
 
		/* - Enable all TCs for the LAN VSI
-#ifdef I40E_FCOE
-		 * - For FCoE VSI only enable the TC configured
-		 *   as per the APP TLV
-#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
-#ifdef I40E_FCOE
-		if (pf->vsi[v]->type == I40E_VSI_FCOE)
-			tc_map = i40e_get_fcoe_tc_map(pf);
-#endif /* #ifdef I40E_FCOE */
 
		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
@@ -5277,7 +5185,7 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
-		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}
 
@@ -5308,10 +5216,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
	    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
		dev_info(&pf->pdev->dev,
			 "DCBX offload is not supported or is disabled for this PF.\n");
-
-		if (pf->flags & I40E_FLAG_MFP_ENABLED)
-			goto out;
-
	} else {
		/* When status is not DISABLED then DCBX in FW */
		pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
@@ -5449,7 +5353,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
	if (err)
		return err;
 
-	clear_bit(__I40E_DOWN, &vsi->state);
+	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);
 
@@ -5472,13 +5376,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
-		pf->fd_add_err = pf->fd_atr_cnt = 0;
-		if (pf->fd_tcp_rule > 0) {
-			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
-			if (I40E_DEBUG_FD & pf->hw.debug_mask)
-				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
-			pf->fd_tcp_rule = 0;
-		}
+		pf->fd_add_err = 0;
+		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}
 
@@ -5503,12 +5402,12 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
	struct i40e_pf *pf = vsi->back;
 
	WARN_ON(in_interrupt());
-	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);
 
	i40e_up(vsi);
-	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+	clear_bit(__I40E_CONFIG_BUSY, pf->state);
 }
 
 /**
@@ -5535,7 +5434,7 @@ void i40e_down(struct i40e_vsi *vsi)
	int i;
 
	/* It is assumed that the caller of this function
-	 * sets the vsi->state __I40E_DOWN bit.
+	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
@@ -5550,8 +5449,6 @@ void i40e_down(struct i40e_vsi *vsi)
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
 
-	i40e_notify_client_of_netdev_close(vsi, false);
-
 }
 
 /**
@@ -5612,17 +5509,15 @@ exit:
	return ret;
 }
 
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
-		    struct tc_to_netdev *tc)
-#else
 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
			   struct tc_to_netdev *tc)
-#endif
 {
-	if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;
-	return i40e_setup_tc(netdev, tc->tc);
+
+	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+	return i40e_setup_tc(netdev, tc->mqprio->num_tc);
 }
 
 /**
@@ -5645,8 +5540,8 @@ int i40e_open(struct net_device *netdev)
	int err;
 
	/* disallow open during test or if eeprom is broken */
-	if (test_bit(__I40E_TESTING, &pf->state) ||
-	    test_bit(__I40E_BAD_EEPROM, &pf->state))
+	if (test_bit(__I40E_TESTING, pf->state) ||
+	    test_bit(__I40E_BAD_EEPROM, pf->state))
		return -EBUSY;
 
	netif_carrier_off(netdev);
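Editor's note: the __i40e_setup_tc() change above tracks the mqprio API rework of this kernel generation — `tc_to_netdev` now carries a `tc_mqprio_qopt`, and the driver reports its offload mode instead of checking `handle`. A sketch of the shape of such a handler; the two `example_*` function names are hypothetical:

static int example_setup_tc(struct net_device *netdev, u32 handle,
			    __be16 proto, struct tc_to_netdev *tc)
{
	/* Only the mqprio qdisc is offloaded here */
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	/* Advertise full TC offload back to the stack */
	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	/* Program the hardware with the requested TC count */
	return example_hw_config_tcs(netdev, tc->mqprio->num_tc);
}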
@@ -5675,6 +5570,8 @@ int i40e_open(struct net_device *netdev)
  * Finish initialization of the VSI.
  *
  * Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
  **/
 int i40e_vsi_open(struct i40e_vsi *vsi)
 {
@@ -5738,7 +5635,7 @@ err_setup_rx:
 err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
-		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
 
	return err;
 }
@@ -5753,6 +5650,7 @@ err_setup_tx:
 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
 {
	struct i40e_fdir_filter *filter;
+	struct i40e_flex_pit *pit_entry, *tmp;
	struct hlist_node *node2;
 
	hlist_for_each_entry_safe(filter, node2,
@@ -5760,7 +5658,43 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
+
+	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
+		list_del(&pit_entry->list);
+		kfree(pit_entry);
+	}
+	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+
+	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
+		list_del(&pit_entry->list);
+		kfree(pit_entry);
+	}
+	INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
	pf->fdir_pf_active_filters = 0;
+	pf->fd_tcp4_filter_cnt = 0;
+	pf->fd_udp4_filter_cnt = 0;
+	pf->fd_sctp4_filter_cnt = 0;
+	pf->fd_ip4_filter_cnt = 0;
+
+	/* Reprogram the default input set for TCP/IPv4 */
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+	/* Reprogram the default input set for UDP/IPv4 */
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+	/* Reprogram the default input set for SCTP/IPv4 */
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+	/* Reprogram the default input set for Other/IPv4 */
+	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
 }
 
 /**
@@ -5787,12 +5721,14 @@ int i40e_close(struct net_device *netdev)
  * i40e_do_reset - Start a PF or Core Reset sequence
  * @pf: board private structure
  * @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  *
  * The essential difference in resets is that the PF Reset
  * doesn't clear the packet buffers, doesn't reset the PE
  * firmware, and doesn't bother the other PFs on the chip.
 **/
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
 {
	u32 val;
 
@@ -5838,7 +5774,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
-		i40e_handle_reset_warning(pf);
+		i40e_handle_reset_warning(pf, lock_acquired);
 
	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;
@@ -5850,10 +5786,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
			struct i40e_vsi *vsi = pf->vsi[v];
 
			if (vsi != NULL &&
-			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+					       vsi->state))
				i40e_vsi_reinit_locked(pf->vsi[v]);
-				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
-			}
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;
@@ -5864,10 +5799,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
			struct i40e_vsi *vsi = pf->vsi[v];
 
			if (vsi != NULL &&
-			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
-				set_bit(__I40E_DOWN, &vsi->state);
+			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+					       vsi->state)) {
+				set_bit(__I40E_VSI_DOWN, vsi->state);
				i40e_down(vsi);
-				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}
	} else {
@@ -6007,7 +5942,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
 
-	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+	set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);
 
@@ -6016,7 +5951,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 
	ret = i40e_resume_port_tx(pf);
 
-	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;
@@ -6025,12 +5960,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
-		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
-		/* Notify the client for the DCB changes */
-		i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
+		pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+			      I40E_FLAG_CLIENT_L2_CHANGE);
	}
 
 exit:
@@ -6047,7 +5982,7 @@ exit:
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
 {
	rtnl_lock();
-	i40e_do_reset(pf, reset_flags);
+	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
 }
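Editor's note: several hunks above fold a test_bit()+clear_bit() pair into one test_and_clear_bit(). Besides being shorter, the combined form is a single atomic read-modify-write, so two contexts can never both observe the bit set and act twice. The idiom in isolation (`state` and `WORK_PENDING` are illustrative stand-ins):

/* Racy: two tasks can both see the bit set before either clears it. */
if (test_bit(WORK_PENDING, state)) {
	clear_bit(WORK_PENDING, state);
	do_work();			/* may run twice */
}

/* Atomic: exactly one caller wins the bit and does the work. */
if (test_and_clear_bit(WORK_PENDING, state))
	do_work();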
@@ -6140,34 +6075,33 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;
 
-	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;
 
-	/* Check if, FD SB or ATR was auto disabled and if there is enough room
-	 * to re-enable
-	 */
+	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
-		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
-			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
-			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+		if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
+			pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
+			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}
 
-	/* Wait for some more space to be available to turn on ATR. We also
-	 * must check that no existing ntuple rules for TCP are in effect
+	/* We should wait for even more space before re-enabling ATR.
+	 * Additionally, we cannot enable ATR as long as we still have TCP SB
+	 * rules active.
	 */
-	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
-		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
-		    (pf->fd_tcp_rule == 0)) {
-			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-			if (I40E_DEBUG_FD & pf->hw.debug_mask)
+	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
+	    (pf->fd_tcp4_filter_cnt == 0)) {
+		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
+			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
		}
	}
@@ -6218,7 +6152,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
	}
 
	pf->fd_flush_timestamp = jiffies;
-	pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+	pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6237,9 +6171,9 @@
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-		if (!disable_atr)
-			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
+		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
@@ -6269,10 +6203,10 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
 {
 
	/* if interface is down do nothing */
-	if (test_bit(__I40E_DOWN, &pf->state))
+	if (test_bit(__I40E_VSI_DOWN, pf->state))
		return;
 
-	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);
 
	i40e_fdir_check_and_reenable(pf);
@@ -6286,14 +6220,11 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
 **/
 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
 {
-	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
+	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;
 
	switch (vsi->type) {
	case I40E_VSI_MAIN:
-#ifdef I40E_FCOE
-	case I40E_VSI_FCOE:
-#endif
		if (!vsi->netdev || !vsi->netdev_registered)
			break;
@@ -6382,11 +6313,11 @@ static void i40e_link_event(struct i40e_pf *pf)
 
	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
-	    (test_bit(__I40E_DOWN, &vsi->state) ||
+	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;
 
-	if (!test_bit(__I40E_DOWN, &vsi->state))
+	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_print_link_message(vsi, new_link);
 
	/* Notify the base of the switch tree connected to
@@ -6413,8 +6344,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
	int i;
 
	/* if interface is down do nothing */
-	if (test_bit(__I40E_DOWN, &pf->state) ||
-	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
+	if (test_bit(__I40E_VSI_DOWN, pf->state) ||
+	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;
 
	/* make sure we don't do these things too often */
@@ -6452,44 +6383,44 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 {
	u32 reset_flags = 0;
 
-	rtnl_lock();
-	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
-		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
	}
-	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
-		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
	}
-	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
-		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}
-	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
-		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
	}
-	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
-		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
+	if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
+		reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
+		clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
	}
 
	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
-	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
-		i40e_handle_reset_warning(pf);
-		goto unlock;
+	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+		i40e_prep_for_reset(pf, false);
+		i40e_reset(pf);
+		i40e_rebuild(pf, false, false);
	}
 
	/* If we're already down or resetting, just bail */
	if (reset_flags &&
-	    !test_bit(__I40E_DOWN, &pf->state) &&
-	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
-		i40e_do_reset(pf, reset_flags);
-
-unlock:
-	rtnl_unlock();
+	    !test_bit(__I40E_VSI_DOWN, pf->state) &&
+	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
+		rtnl_lock();
+		i40e_do_reset(pf, reset_flags, true);
+		rtnl_unlock();
+	}
 }
 
 /**
@@ -6534,7 +6465,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
	u32 val;
 
	/* Do not run clean AQ when PF reset fails */
-	if (test_bit(__I40E_RESET_FAILED, &pf->state))
+	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;
 
	/* check for error indications */
@@ -6635,9 +6566,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
				 opcode);
			break;
		}
-	} while (pending && (i++ < pf->adminq_work_limit));
+	} while (i++ < pf->adminq_work_limit);
+
+	if (i < pf->adminq_work_limit)
+		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
 
-	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -6662,13 +6595,13 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
-			set_bit(__I40E_BAD_EEPROM, &pf->state);
+			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}
 
-	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
-		clear_bit(__I40E_BAD_EEPROM, &pf->state);
+		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
 }
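Editor's note: the adminq loop change above is subtle — the old loop could stop with work still pending yet unconditionally cleared __I40E_ADMINQ_EVENT_PENDING, dropping events. The new form runs up to the work limit and only clears the bit when it finished early, i.e. actually drained the queue. The budget idiom in isolation (all names are stand-ins):

int i = 0;

do {
	if (!fetch_one_event())		/* queue empty? */
		break;
	handle_event();
} while (i++ < work_limit);

/* Only declare "nothing pending" if we stopped because the queue
 * drained, not because the budget ran out; otherwise leave the bit
 * set so the service task gets rescheduled to finish the backlog. */
if (i < work_limit)
	clear_bit(EVENT_PENDING, state);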
@@ -6975,17 +6908,19 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 /**
  * i40e_prep_for_reset - prep for the core to reset
  * @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  *
  * Close up the VFs and other things in prep for PF Reset.
  **/
-static void i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
 {
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;
 
-	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
-	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);
@@ -6993,7 +6928,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
	/* quiesce the VSIs and their queues that are not already DOWN */
+	/* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
+	if (!lock_acquired)
+		rtnl_lock();
	i40e_pf_quiesce_all_vsi(pf);
+	if (!lock_acquired)
+		rtnl_unlock();
 
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
@@ -7028,31 +6968,41 @@ static void i40e_send_version(struct i40e_pf *pf)
 }
 
 /**
- * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
  * @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
 **/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+static int i40e_reset(struct i40e_pf *pf)
 {
	struct i40e_hw *hw = &pf->hw;
-	u8 set_fc_aq_fail = 0;
	i40e_status ret;
-	u32 val;
-	u32 v;
 
-	/* Now we wait for GRST to settle out.
-	 * We don't have to delete the VEBs or VSIs from the hw switch
-	 * because the reset will make them disappear.
-	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
-		set_bit(__I40E_RESET_FAILED, &pf->state);
-		goto clear_recovery;
+		set_bit(__I40E_RESET_FAILED, pf->state);
+		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+	} else {
+		pf->pfr_count++;
	}
-	pf->pfr_count++;
+	return ret;
+}
+
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+	struct i40e_hw *hw = &pf->hw;
+	u8 set_fc_aq_fail = 0;
+	i40e_status ret;
+	u32 val;
+	int v;
 
-	if (test_bit(__I40E_DOWN, &pf->state))
+	if (test_bit(__I40E_VSI_DOWN, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -7066,7 +7016,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
	}
 
	/* re-verify the eeprom if we just had an EMP reset */
-	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
+	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);
 
	i40e_clear_pxe_mode(hw);
@@ -7075,8 +7025,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
		goto end_core_reset;
 
	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
-				hw->func_caps.num_rx_qp,
-				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
@@ -7095,14 +7044,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
		/* Continue without DCB enabled */
	}
 #endif /* CONFIG_I40E_DCB */
-#ifdef I40E_FCOE
-	i40e_init_pf_fcoe(pf);
-
-#endif
	/* do basic switch setup */
+	if (!lock_acquired)
+		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
-		goto end_core_reset;
+		goto end_unlock;
 
	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
@@ -7173,7 +7120,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
-			goto end_core_reset;
+			goto end_unlock;
		}
	}
 
@@ -7216,18 +7163,45 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);
 
-	if (pf->num_alloc_vfs) {
-		for (v = 0; v < pf->num_alloc_vfs; v++)
-			i40e_reset_vf(&pf->vf[v], true);
-	}
+	/* Release the RTNL lock before we start resetting VFs */
+	if (!lock_acquired)
+		rtnl_unlock();
+
+	i40e_reset_all_vfs(pf, true);
 
	/* tell the firmware that we're starting */
	i40e_send_version(pf);
 
+	/* We've already released the lock, so don't do it again */
+	goto end_core_reset;
+
+end_unlock:
+	if (!lock_acquired)
+		rtnl_unlock();
 end_core_reset:
-	clear_bit(__I40E_RESET_FAILED, &pf->state);
+	clear_bit(__I40E_RESET_FAILED, pf->state);
 clear_recovery:
-	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+				   bool lock_acquired)
+{
+	int ret;
+
+	/* Now we wait for GRST to settle out.
+	 * We don't have to delete the VEBs or VSIs from the hw switch
+	 * because the reset will make them disappear.
	 */
+	ret = i40e_reset(pf);
+	if (!ret)
+		i40e_rebuild(pf, reinit, lock_acquired);
 }
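Editor's note: the reset path is now split into i40e_prep_for_reset() / i40e_reset() / i40e_rebuild(), each threading a `lock_acquired` flag so rtnl is taken only when the caller doesn't already hold it. The pattern in miniature (the function below is a sketch, not the driver's code):

static void do_netdev_surgery(struct i40e_pf *pf, bool lock_acquired)
{
	/* Take rtnl only if our caller hasn't already */
	if (!lock_acquired)
		rtnl_lock();

	/* ... touch netdev structures under rtnl here ... */

	if (!lock_acquired)
		rtnl_unlock();
}

/* e.g. i40e_do_reset_safe() holds rtnl itself and passes true, while
 * the service-task reset path passes false and lets the helpers lock. */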
**/ -static void i40e_handle_reset_warning(struct i40e_pf *pf) +static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired) { - i40e_prep_for_reset(pf); - i40e_reset_and_rebuild(pf, false); + i40e_prep_for_reset(pf, lock_acquired); + i40e_reset_and_rebuild(pf, false, lock_acquired); } /** @@ -7258,7 +7234,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) u32 reg; int i; - if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) + if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state)) return; /* find what triggered the MDD event */ @@ -7310,7 +7286,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) } /* Queue belongs to the PF, initiate a reset */ if (pf_mdd_detected) { - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + set_bit(__I40E_PF_RESET_REQUESTED, pf->state); i40e_service_event_schedule(pf); } } @@ -7339,12 +7315,12 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) "Too many MDD events on VF %d, disabled\n", i); dev_info(&pf->pdev->dev, "Use PF Control I/F to re-enable the VF\n"); - set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states); + set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); } } /* re-enable mdd interrupt cause */ - clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); + clear_bit(__I40E_MDD_EVENT_PENDING, pf->state); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); @@ -7352,6 +7328,23 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) } /** + * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters + * @pf: board private structure + **/ +static void i40e_sync_udp_filters(struct i40e_pf *pf) +{ + int i; + + /* loop through and set pending bit for all active UDP filters */ + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + if (pf->udp_ports[i].port) + pf->pending_udp_bitmap |= BIT_ULL(i); + } + + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; +} + +/** * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW * @pf: board private structure **/ @@ -7359,7 +7352,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; i40e_status ret; - __be16 port; + u16 port; int i; if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC)) @@ -7370,7 +7363,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->pending_udp_bitmap & BIT_ULL(i)) { pf->pending_udp_bitmap &= ~BIT_ULL(i); - port = pf->udp_ports[i].index; + port = pf->udp_ports[i].port; if (port) ret = i40e_aq_add_udp_tunnel(hw, port, pf->udp_ports[i].type, @@ -7383,11 +7376,11 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) "%s %s port %d, index %d failed, err %s aq_err %s\n", pf->udp_ports[i].type ? "vxlan" : "geneve", port ? 
"add" : "delete", - ntohs(port), i, + port, i, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - pf->udp_ports[i].index = 0; + pf->udp_ports[i].port = 0; } } } @@ -7405,11 +7398,10 @@ static void i40e_service_task(struct work_struct *work) unsigned long start_time = jiffies; /* don't bother with service tasks if a reset is in progress */ - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) return; - } - if (test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) + if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state)) return; i40e_detect_recover_hung(pf); @@ -7419,23 +7411,34 @@ static void i40e_service_task(struct work_struct *work) i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); - i40e_client_subtask(pf); + if (pf->flags & I40E_FLAG_CLIENT_RESET) { + /* Client subtask will reopen next time through. */ + i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true); + pf->flags &= ~I40E_FLAG_CLIENT_RESET; + } else { + i40e_client_subtask(pf); + if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) { + i40e_notify_client_of_l2_param_changes( + pf->vsi[pf->lan_vsi]); + pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE; + } + } i40e_sync_filters_subtask(pf); i40e_sync_udp_filters_subtask(pf); i40e_clean_adminq_subtask(pf); /* flush memory to make sure state is correct before next watchdog */ smp_mb__before_atomic(); - clear_bit(__I40E_SERVICE_SCHED, &pf->state); + clear_bit(__I40E_SERVICE_SCHED, pf->state); /* If the tasks have taken longer than one timer cycle or there * is more work to be done, reschedule the service task now * rather than wait for the timer to tick again. */ if (time_after(jiffies, (start_time + pf->service_timer_period)) || - test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || - test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || - test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) + test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) || + test_bit(__I40E_MDD_EVENT_PENDING, pf->state) || + test_bit(__I40E_VFLR_EVENT_PENDING, pf->state)) i40e_service_event_schedule(pf); } @@ -7492,15 +7495,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) I40E_REQ_DESCRIPTOR_MULTIPLE); break; -#ifdef I40E_FCOE - case I40E_VSI_FCOE: - vsi->alloc_queue_pairs = pf->num_fcoe_qps; - vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, - I40E_REQ_DESCRIPTOR_MULTIPLE); - vsi->num_q_vectors = pf->num_fcoe_msix; - break; - -#endif /* I40E_FCOE */ default: WARN_ON(1); return -ENODATA; @@ -7593,7 +7587,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) } vsi->type = type; vsi->back = pf; - set_bit(__I40E_DOWN, &vsi->state); + set_bit(__I40E_VSI_DOWN, vsi->state); vsi->flags = 0; vsi->idx = vsi_idx; vsi->int_rate_limit = 0; @@ -7817,6 +7811,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) static int i40e_init_msix(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; + int cpus, extra_vectors; int vectors_left; int v_budget, i; int v_actual; @@ -7835,9 +7830,6 @@ static int i40e_init_msix(struct i40e_pf *pf) * - assumes symmetric Tx/Rx pairing * - The number of VMDq pairs * - The CPU count within the NUMA node if iWARP is enabled -#ifdef I40E_FCOE - * - The number of FCOE qps. -#endif * Once we count this up, try the request. 
 *
 * If we can't get what we want, we'll simplify to nearly nothing
@@ -7852,10 +7844,16 @@
 		vectors_left--;
 	}
 
-	/* reserve vectors for the main PF traffic queues */
-	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+	/* reserve some vectors for the main PF traffic queues. Initially we
+	 * only reserve at most 50% of the available vectors, in the case that
+	 * the number of online CPUs is large. This ensures that we can enable
+	 * extra features as well. Once we've enabled the other features, we
+	 * will use any remaining vectors to reach as close as we can to the
+	 * number of online CPUs.
+	 */
+	cpus = num_online_cpus();
+	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
 	vectors_left -= pf->num_lan_msix;
-	v_budget += pf->num_lan_msix;
 
 	/* reserve one vector for sideband flow director */
 	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -7868,20 +7866,6 @@
 		}
 	}
 
-#ifdef I40E_FCOE
-	/* can we reserve enough for FCoE? */
-	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-		if (!vectors_left)
-			pf->num_fcoe_msix = 0;
-		else if (vectors_left >= pf->num_fcoe_qps)
-			pf->num_fcoe_msix = pf->num_fcoe_qps;
-		else
-			pf->num_fcoe_msix = 1;
-		v_budget += pf->num_fcoe_msix;
-		vectors_left -= pf->num_fcoe_msix;
-	}
-
-#endif
 	/* can we reserve enough for iWARP? */
 	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
 		iwarp_requested = pf->num_iwarp_msix;
@@ -7918,6 +7902,23 @@
 		}
 	}
 
+	/* On systems with a large number of SMP cores, we previously limited
+	 * the number of vectors for num_lan_msix to be at most 50% of the
+	 * available vectors, to allow for other features. Now, we add back
+	 * the remaining vectors. However, we ensure that the total
+	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
+	 * calculate the number of vectors we can add without going over the
+	 * cap of CPUs. For systems with a small number of CPUs this will be
+	 * zero.
+	 */
+	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+	pf->num_lan_msix += extra_vectors;
+	vectors_left -= extra_vectors;
+
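Worked numbers make the two-phase vector budget easier to follow; a standalone sketch (plain C, not driver code) assuming 16 online CPUs and 20 vectors left after the miscellaneous vector:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int cpus = 16;		/* assumed num_online_cpus() */
	int vectors_left = 20;	/* assumed budget after the misc vector */
	int num_lan_msix, extra_vectors;

	/* phase 1: take at most half, leaving room for other features */
	num_lan_msix = min_int(cpus, vectors_left / 2);	/* 10 */
	vectors_left -= num_lan_msix;			/* 10 left */

	vectors_left -= 1;	/* e.g. one sideband flow director vector */

	/* phase 2: add back what is left, capped at the CPU count */
	extra_vectors = min_int(cpus - num_lan_msix, vectors_left);
	num_lan_msix += extra_vectors;	/* 10 + 6 = 16 */
	vectors_left -= extra_vectors;	/* 3 left for VMDq etc. */

	printf("num_lan_msix=%d vectors_left=%d\n", num_lan_msix,
	       vectors_left);
	return 0;
}

+	WARN(vectors_left < 0,
+	     "Calculation of remaining vectors underflowed. 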
This is an accounting bug when determining total MSI-X vectors.\n"); + + v_budget += pf->num_lan_msix; pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!pf->msix_entries) @@ -7958,10 +7959,6 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_vsis = 1; pf->num_vmdq_qps = 1; -#ifdef I40E_FCOE - pf->num_fcoe_qps = 0; - pf->num_fcoe_msix = 0; -#endif /* partition out the remaining vectors */ switch (vec) { @@ -7975,13 +7972,6 @@ static int i40e_init_msix(struct i40e_pf *pf) } else { pf->num_lan_msix = 2; } -#ifdef I40E_FCOE - /* give one vector to FCoE */ - if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - pf->num_lan_msix = 1; - pf->num_fcoe_msix = 1; - } -#endif break; default: if (pf->flags & I40E_FLAG_IWARP_ENABLED) { @@ -8001,13 +7991,6 @@ static int i40e_init_msix(struct i40e_pf *pf) (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), pf->num_lan_msix); pf->num_lan_qps = pf->num_lan_msix; -#ifdef I40E_FCOE - /* give one vector to FCoE */ - if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - pf->num_fcoe_msix = 1; - vec--; - } -#endif break; } } @@ -8028,13 +8011,6 @@ static int i40e_init_msix(struct i40e_pf *pf) dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_IWARP_ENABLED; } -#ifdef I40E_FCOE - - if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { - dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); - pf->flags &= ~I40E_FLAG_FCOE_ENABLED; - } -#endif i40e_debug(&pf->hw, I40E_DEBUG_INIT, "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", pf->num_lan_msix, @@ -8133,9 +8109,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_IWARP_ENABLED | -#ifdef I40E_FCOE - I40E_FLAG_FCOE_ENABLED | -#endif I40E_FLAG_RSS_ENABLED | I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED | @@ -8196,7 +8169,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) /* Only request the irq if this is the first time through, and * not when we're rebuilding after a Reset */ - if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { + if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { err = request_irq(pf->msix_entries[0].vector, i40e_intr, 0, pf->int_name, pf); if (err) { @@ -8368,13 +8341,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, if (vsi->type == I40E_VSI_MAIN) { for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), - seed_dw[i]); + wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); } else if (vsi->type == I40E_VSI_SRIOV) { for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, - I40E_VFQF_HKEY1(i, vf_id), - seed_dw[i]); + wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]); } else { dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); } @@ -8392,9 +8362,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) return -EINVAL; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, - I40E_VFQF_HLUT1(i, vf_id), - lut_dw[i]); + wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]); } else { dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); } @@ -8522,9 +8490,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); /* Determine the RSS size of the VSI */ - if (!vsi->rss_size) - vsi->rss_size = min_t(int, pf->alloc_rss_size, - 
vsi->num_queue_pairs);
+	if (!vsi->rss_size) {
+		u16 qcount;
+
+		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+	}
 	if (!vsi->rss_size)
 		return -EINVAL;
 
@@ -8558,6 +8529,7 @@
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
@@ -8570,12 +8542,14 @@
 	new_rss_size = min_t(int, queue_count, pf->rss_size_max);
 
 	if (queue_count != vsi->num_queue_pairs) {
+		u16 qcount;
+
 		vsi->req_queue_pairs = queue_count;
-		i40e_prep_for_reset(pf);
+		i40e_prep_for_reset(pf, true);
 
 		pf->alloc_rss_size = new_rss_size;
 
-		i40e_reset_and_rebuild(pf, true);
+		i40e_reset_and_rebuild(pf, true, true);
 
 		/* Discard the user configured hash keys and lut, if less
 		 * queues are enabled.
@@ -8587,8 +8561,8 @@
 		}
 
 		/* Reset vsi->rss_size, as number of enabled queues changed */
-		vsi->rss_size = min_t(int, pf->alloc_rss_size,
-				      vsi->num_queue_pairs);
+		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
 
 		i40e_pf_config_rss(pf);
 	}
@@ -8821,10 +8795,6 @@
 		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
 	}
 
-#ifdef I40E_FCOE
-	i40e_init_pf_fcoe(pf);
-
-#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
 	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
 		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -8851,9 +8821,9 @@
 	    (pf->hw.aq.api_min_ver > 4))) {
 		/* Supported in FW API version higher than 1.4 */
 		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
-		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+		pf->flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
 	} else {
-		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+		pf->flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
 	}
 
 	pf->eeprom_version = 0xDEAD;
@@ -8913,16 +8883,16 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
 			need_reset = true;
 			i40e_fdir_filter_exit(pf);
 		}
-		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+		pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
+			       I40E_FLAG_FD_SB_AUTO_DISABLED);
 
 		/* reset fd counters */
-		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
-		pf->fdir_pf_active_filters = 0;
+		pf->fd_add_err = 0;
+		pf->fd_atr_cnt = 0;
+
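With the separate auto_disable_flags field gone, "auto disabled" becomes just another bit in pf->flags. A standalone sketch of that pattern (plain C; the flag values are illustrative, not the driver's):

#include <stdio.h>

#define FLAG_FD_ATR_ENABLED		(1u << 0)
#define FLAG_FD_ATR_AUTO_DISABLED	(1u << 1)

int main(void)
{
	unsigned int flags = FLAG_FD_ATR_ENABLED;

	/* a runtime condition (e.g. filter table full) auto-disables ATR */
	flags |= FLAG_FD_ATR_AUTO_DISABLED;

	/* re-enable path, mirroring the hunk below: clear the auto-disable
	 * bit and report only if the feature itself is still enabled */
	if (flags & FLAG_FD_ATR_AUTO_DISABLED) {
		flags &= ~FLAG_FD_ATR_AUTO_DISABLED;
		if (flags & FLAG_FD_ATR_ENABLED)
			printf("ATR re-enabled.\n");
	}
	return 0;
}

 		/* if ATR was auto disabled it can be re-enabled.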
*/ - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { - pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - if (I40E_DEBUG_FD & pf->hw.debug_mask) + if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) { + pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED; + if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + (I40E_DEBUG_FD & pf->hw.debug_mask)) dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); } } @@ -8955,6 +8925,7 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi) * i40e_set_features - set the netdev feature flags * @netdev: ptr to the netdev being adjusted * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() **/ static int i40e_set_features(struct net_device *netdev, netdev_features_t features) @@ -8978,7 +8949,7 @@ static int i40e_set_features(struct net_device *netdev, need_reset = i40e_set_ntuple(pf, features); if (need_reset) - i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true); return 0; } @@ -8990,12 +8961,12 @@ static int i40e_set_features(struct net_device *netdev, * * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found **/ -static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) +static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) { u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { - if (pf->udp_ports[i].index == port) + if (pf->udp_ports[i].port == port) return i; } @@ -9013,7 +8984,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - __be16 port = ti->port; + u16 port = ntohs(ti->port); u8 next_idx; u8 idx; @@ -9021,8 +8992,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "port %d already offloaded\n", - ntohs(port)); + netdev_info(netdev, "port %d already offloaded\n", port); return; } @@ -9031,7 +9001,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", - ntohs(port)); + port); return; } @@ -9049,7 +9019,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, } /* New port: add it and mark its index in the bitmap */ - pf->udp_ports[next_idx].index = port; + pf->udp_ports[next_idx].port = port; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; } @@ -9065,7 +9035,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - __be16 port = ti->port; + u16 port = ntohs(ti->port); u8 idx; idx = i40e_get_udp_port_idx(pf, port); @@ -9090,14 +9060,14 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, /* if port exists, set it to 0 (mark for deletion) * and make it pending */ - pf->udp_ports[idx].index = 0; + pf->udp_ports[idx].port = 0; pf->pending_udp_bitmap |= BIT_ULL(idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; return; not_found: netdev_warn(netdev, "UDP port %d was not found, not deleting\n", - ntohs(port)); + port); } static int i40e_get_phys_port_id(struct net_device *netdev, @@ -9174,6 +9144,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * is to change the mode then that 
requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
+ *
+ * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
@@ -9229,7 +9201,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
 			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
 		else
 			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
+			      true);
 		break;
 	}
 }
@@ -9352,10 +9325,6 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_poll_controller	= i40e_netpoll,
 #endif
 	.ndo_setup_tc		= __i40e_setup_tc,
-#ifdef I40E_FCOE
-	.ndo_fcoe_enable	= i40e_fcoe_enable,
-	.ndo_fcoe_disable	= i40e_fcoe_disable,
-#endif
 	.ndo_set_features	= i40e_set_features,
 	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
 	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
@@ -9388,6 +9357,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	u8 broadcast[ETH_ALEN];
 	u8 mac_addr[ETH_ALEN];
 	int etherdev_size;
+	netdev_features_t hw_enc_features;
+	netdev_features_t hw_features;
 
 	etherdev_size = sizeof(struct i40e_netdev_priv);
 	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
@@ -9398,52 +9369,59 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 	np = netdev_priv(netdev);
 	np->vsi = vsi;
 
-	netdev->hw_enc_features |= NETIF_F_SG |
-				   NETIF_F_IP_CSUM |
-				   NETIF_F_IPV6_CSUM |
-				   NETIF_F_HIGHDMA |
-				   NETIF_F_SOFT_FEATURES |
-				   NETIF_F_TSO |
-				   NETIF_F_TSO_ECN |
-				   NETIF_F_TSO6 |
-				   NETIF_F_GSO_GRE |
-				   NETIF_F_GSO_GRE_CSUM |
-				   NETIF_F_GSO_IPXIP4 |
-				   NETIF_F_GSO_IPXIP6 |
-				   NETIF_F_GSO_UDP_TUNNEL |
-				   NETIF_F_GSO_UDP_TUNNEL_CSUM |
-				   NETIF_F_GSO_PARTIAL |
-				   NETIF_F_SCTP_CRC |
-				   NETIF_F_RXHASH |
-				   NETIF_F_RXCSUM |
-				   0;
+	hw_enc_features = NETIF_F_SG |
+			  NETIF_F_IP_CSUM |
+			  NETIF_F_IPV6_CSUM |
+			  NETIF_F_HIGHDMA |
+			  NETIF_F_SOFT_FEATURES |
+			  NETIF_F_TSO |
+			  NETIF_F_TSO_ECN |
+			  NETIF_F_TSO6 |
+			  NETIF_F_GSO_GRE |
+			  NETIF_F_GSO_GRE_CSUM |
+			  NETIF_F_GSO_PARTIAL |
+			  NETIF_F_GSO_IPXIP4 |
+			  NETIF_F_GSO_IPXIP6 |
+			  NETIF_F_GSO_UDP_TUNNEL |
+			  NETIF_F_GSO_UDP_TUNNEL_CSUM |
+			  NETIF_F_SCTP_CRC |
+			  NETIF_F_RXHASH |
+			  NETIF_F_RXCSUM |
+			  0;
 
 	if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
 		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
 
+	netdev->hw_enc_features |= hw_enc_features;
+
 	/* record features VLANs can make use of */
-	netdev->vlan_features |= netdev->hw_enc_features |
-				 NETIF_F_TSO_MANGLEID;
+	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
 	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
 		netdev->hw_features |= NETIF_F_NTUPLE;
 
+	hw_features = hw_enc_features |
+		      NETIF_F_HW_VLAN_CTAG_TX |
+		      NETIF_F_HW_VLAN_CTAG_RX;
 
-	netdev->hw_features |= netdev->hw_enc_features |
-			       NETIF_F_HW_VLAN_CTAG_TX |
-			       NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features |= hw_features;
 
-	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
 	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
 
 	if (vsi->type == I40E_VSI_MAIN) {
 		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
 		ether_addr_copy(mac_addr, hw->mac.perm_addr);
 
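Deriving every feature set from the single hw_enc_features mask, as the hunk above now does, keeps the netdev's vlan_features, hw_features and features consistent. A standalone sketch of that composition (plain C; made-up bit values, not the real NETIF_F_* flags):

#include <stdio.h>

#define F_SG		(1u << 0)	/* illustrative stand-ins */
#define F_CSUM		(1u << 1)
#define F_TSO		(1u << 2)
#define F_VLAN_TX	(1u << 3)
#define F_VLAN_RX	(1u << 4)
#define F_VLAN_FILTER	(1u << 5)

int main(void)
{
	unsigned int hw_enc_features = F_SG | F_CSUM | F_TSO;
	unsigned int vlan_features, hw_features, features;

	vlan_features = hw_enc_features;	/* VLANs inherit the base set */
	hw_features = hw_enc_features | F_VLAN_TX | F_VLAN_RX;
	features = hw_features | F_VLAN_FILTER;	/* enabled by default */

	printf("vlan=%#x hw=%#x features=%#x\n",
	       vlan_features, hw_features, features);
	return 0;
}

-		/* The following steps are necessary to prevent reception
-		 * of tagged packets - some older NVM configurations load a
-		 * default a MAC-VLAN filter that accepts any tagged packet
-		 * which must be replaced by a normal filter.
+		/* The following steps are necessary for two reasons. 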
First, + * some older NVM configurations load a default MAC-VLAN + * filter that will accept any tagged packet, and we want to + * replace this with a normal filter. Additionally, it is + * possible our MAC address was provided by the platform using + * Open Firmware or similar. + * + * Thus, we need to remove the default filter and install one + * specific to the MAC address. */ i40e_rm_default_mac_filter(vsi, mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -9489,9 +9465,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->netdev_ops = &i40e_netdev_ops; netdev->watchdog_timeo = 5 * HZ; i40e_set_ethtool_ops(netdev); -#ifdef I40E_FCOE - i40e_fcoe_config_netdev(netdev, vsi); -#endif /* MTU range: 68 - 9706 */ netdev->min_mtu = ETH_MIN_MTU; @@ -9715,16 +9688,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; -#ifdef I40E_FCOE - case I40E_VSI_FCOE: - ret = i40e_fcoe_vsi_init(vsi, &ctxt); - if (ret) { - dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); - return ret; - } - break; - -#endif /* I40E_FCOE */ case I40E_VSI_IWARP: /* send down message to iWARP */ break; @@ -9751,7 +9714,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) } vsi->active_filters = 0; - clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); spin_lock_bh(&vsi->mac_filter_hash_lock); /* If macvlan filters already exist, force them to get loaded */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { @@ -9804,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) return -ENODEV; } if (vsi == pf->vsi[pf->lan_vsi] && - !test_bit(__I40E_DOWN, &pf->state)) { + !test_bit(__I40E_VSI_DOWN, pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } @@ -10141,7 +10104,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, } } case I40E_VSI_VMDQ2: - case I40E_VSI_FCOE: ret = i40e_config_netdev(vsi); if (ret) goto err_netdev; @@ -10789,6 +10751,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_ptp_init(pf); + /* repopulate tunnel port filters */ + i40e_sync_udp_filters(pf); + return ret; } @@ -10801,9 +10766,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) int queues_left; pf->num_lan_qps = 0; -#ifdef I40E_FCOE - pf->num_fcoe_qps = 0; -#endif /* Find the max queues to be put into basic use. 
We'll always be * using TC0, whether or not DCB is running, and TC0 will get the @@ -10820,9 +10782,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) /* make sure all the fancies are disabled */ pf->flags &= ~(I40E_FLAG_RSS_ENABLED | I40E_FLAG_IWARP_ENABLED | -#ifdef I40E_FCOE - I40E_FLAG_FCOE_ENABLED | -#endif I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE | @@ -10839,9 +10798,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->flags &= ~(I40E_FLAG_RSS_ENABLED | I40E_FLAG_IWARP_ENABLED | -#ifdef I40E_FCOE - I40E_FLAG_FCOE_ENABLED | -#endif I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_ENABLED | @@ -10862,22 +10818,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left -= pf->num_lan_qps; } -#ifdef I40E_FCOE - if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - if (I40E_DEFAULT_FCOE <= queues_left) { - pf->num_fcoe_qps = I40E_DEFAULT_FCOE; - } else if (I40E_MINIMUM_FCOE <= queues_left) { - pf->num_fcoe_qps = I40E_MINIMUM_FCOE; - } else { - pf->num_fcoe_qps = 0; - pf->flags &= ~I40E_FLAG_FCOE_ENABLED; - dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); - } - - queues_left -= pf->num_fcoe_qps; - } - -#endif if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (queues_left > 1) { queues_left -= 1; /* save 1 queue for FD */ @@ -10909,9 +10849,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left); -#ifdef I40E_FCOE - dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); -#endif } /** @@ -10978,10 +10915,6 @@ static void i40e_print_features(struct i40e_pf *pf) i += snprintf(&buf[i], REMAIN(i), " Geneve"); if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); -#ifdef I40E_FCOE - if (pf->flags & I40E_FLAG_FCOE_ENABLED) - i += snprintf(&buf[i], REMAIN(i), " FCOE"); -#endif if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) i += snprintf(&buf[i], REMAIN(i), " VEB"); else @@ -10994,20 +10927,18 @@ static void i40e_print_features(struct i40e_pf *pf) /** * i40e_get_platform_mac_addr - get platform-specific MAC address - * * @pdev: PCI device information struct * @pf: board private structure * - * Look up the MAC address in Open Firmware on systems that support it, - * and use IDPROM on SPARC if no OF address is found. On return, the - * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value - * has been selected. + * Look up the MAC address for the device. First we'll try + * eth_platform_get_mac_address, which will check Open Firmware, or arch + * specific fallback. Otherwise, we'll default to the stored value in + * firmware. 
**/ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) { - pf->flags &= ~I40E_FLAG_PF_MAC; - if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) - pf->flags |= I40E_FLAG_PF_MAC; + if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) + i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); } /** @@ -11072,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } pf->next_vsi = 0; pf->pdev = pdev; - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); hw = &pf->hw; hw->back = pf; @@ -11098,6 +11029,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.bus_id = pdev->bus->number; pf->instance = pfs_found; + INIT_LIST_HEAD(&pf->l3_flex_pit_list); + INIT_LIST_HEAD(&pf->l4_flex_pit_list); + /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ @@ -11196,8 +11130,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, - hw->func_caps.num_rx_qp, - pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); + hw->func_caps.num_rx_qp, 0, 0); if (err) { dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); goto err_init_lan_hmc; @@ -11219,9 +11152,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_aq_stop_lldp(hw, true, NULL); } - i40e_get_mac_addr(hw, hw->mac.addr); /* allow a platform config to override the HW addr */ i40e_get_platform_mac_addr(pdev, pf); + if (!is_valid_ether_addr(hw->mac.addr)) { dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); err = -EIO; @@ -11232,18 +11165,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_get_port_mac_addr(hw, hw->mac.port_addr); if (is_valid_ether_addr(hw->mac.port_addr)) pf->flags |= I40E_FLAG_PORT_ID_VALID; -#ifdef I40E_FCOE - err = i40e_get_san_mac_addr(hw, hw->mac.san_addr); - if (err) - dev_info(&pdev->dev, - "(non-fatal) SAN MAC retrieval failed: %d\n", err); - if (!is_valid_ether_addr(hw->mac.san_addr)) { - dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n", - hw->mac.san_addr); - ether_addr_copy(hw->mac.san_addr, hw->mac.addr); - } - dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); -#endif /* I40E_FCOE */ pci_set_drvdata(pdev, pf); pci_save_state(pdev); @@ -11261,8 +11182,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); - clear_bit(__I40E_SERVICE_SCHED, &pf->state); - pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; + clear_bit(__I40E_SERVICE_SCHED, pf->state); /* NVM bit on means WoL disabled for the port */ i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); @@ -11300,7 +11220,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && - !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + !test_bit(__I40E_BAD_EEPROM, pf->state)) { if (pci_num_vf(pdev)) pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; } @@ -11373,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * before setting up the misc vector or we get a race and the vector * ends up disabled forever. 
*/ - clear_bit(__I40E_DOWN, &pf->state); + clear_bit(__I40E_VSI_DOWN, pf->state); /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. In case of legacy and MSI @@ -11393,7 +11313,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && - !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + !test_bit(__I40E_BAD_EEPROM, pf->state)) { /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; @@ -11434,16 +11354,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) round_jiffies(jiffies + pf->service_timer_period)); /* add this PF to client device list and launch a client service task */ - err = i40e_lan_add_device(pf); - if (err) - dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", - err); - -#ifdef I40E_FCOE - /* create FCoE interface */ - i40e_fcoe_vsi_setup(pf); + if (pf->flags & I40E_FLAG_IWARP_ENABLED) { + err = i40e_lan_add_device(pf); + if (err) + dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", + err); + } -#endif #define PCI_SPEED_SIZE 8 #define PCI_WIDTH_SIZE 8 /* Devices on the IOSF bus do not have this information @@ -11531,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Unwind what we've done if something failed in the setup */ err_vsis: - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); i40e_clear_interrupt_scheme(pf); kfree(pf->vsi); err_switch_setup: @@ -11582,13 +11499,18 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); /* no more scheduling of any task */ - set_bit(__I40E_SUSPENDED, &pf->state); - set_bit(__I40E_DOWN, &pf->state); + set_bit(__I40E_SUSPENDED, pf->state); + set_bit(__I40E_VSI_DOWN, pf->state); if (pf->service_timer.data) del_timer_sync(&pf->service_timer); if (pf->service_task.func) cancel_work_sync(&pf->service_task); + /* Client close must be called explicitly here because the timer + * has been stopped. 
+	 */
+	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
 	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
 		i40e_free_vfs(pf);
 		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -11615,10 +11537,11 @@ static void i40e_remove(struct pci_dev *pdev)
 	i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
 	/* remove attached clients */
-	ret_code = i40e_lan_del_device(pf);
-	if (ret_code) {
-		dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
-			 ret_code);
+	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+		ret_code = i40e_lan_del_device(pf);
+		if (ret_code)
+			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+				 ret_code);
 	}
 
 	/* shutdown and destroy the HMC */
@@ -11685,9 +11608,9 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
 	}
 
 	/* shutdown all operations */
-	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+	if (!test_bit(__I40E_SUSPENDED, pf->state)) {
 		rtnl_lock();
-		i40e_prep_for_reset(pf);
+		i40e_prep_for_reset(pf, true);
 		rtnl_unlock();
 	}
 
@@ -11752,11 +11675,11 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 
 	dev_dbg(&pdev->dev, "%s\n", __func__);
-	if (test_bit(__I40E_SUSPENDED, &pf->state))
+	if (test_bit(__I40E_SUSPENDED, pf->state))
 		return;
 
 	rtnl_lock();
-	i40e_handle_reset_warning(pf);
+	i40e_handle_reset_warning(pf, true);
 	rtnl_unlock();
 }
 
@@ -11816,10 +11739,10 @@ static void i40e_shutdown(struct pci_dev *pdev)
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 	struct i40e_hw *hw = &pf->hw;
 
-	set_bit(__I40E_SUSPENDED, &pf->state);
-	set_bit(__I40E_DOWN, &pf->state);
+	set_bit(__I40E_SUSPENDED, pf->state);
+	set_bit(__I40E_VSI_DOWN, pf->state);
 	rtnl_lock();
-	i40e_prep_for_reset(pf);
+	i40e_prep_for_reset(pf, true);
 	rtnl_unlock();
 
 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11829,11 +11752,16 @@ static void i40e_shutdown(struct pci_dev *pdev)
 	cancel_work_sync(&pf->service_task);
 	i40e_fdir_teardown(pf);
 
+	/* Client close must be called explicitly here because the timer
+	 * has been stopped.
+	 */
+	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
 	if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
 		i40e_enable_mc_magic_wake(pf);
 
 	rtnl_lock();
-	i40e_prep_for_reset(pf);
+	i40e_prep_for_reset(pf, true);
 	rtnl_unlock();
 
 	wr32(hw, I40E_PFPM_APM,
@@ -11860,14 +11788,14 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct i40e_hw *hw = &pf->hw;
 	int retval = 0;
 
-	set_bit(__I40E_SUSPENDED, &pf->state);
-	set_bit(__I40E_DOWN, &pf->state);
+	set_bit(__I40E_SUSPENDED, pf->state);
+	set_bit(__I40E_VSI_DOWN, pf->state);
 
 	if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
 		i40e_enable_mc_magic_wake(pf);
 
 	rtnl_lock();
-	i40e_prep_for_reset(pf);
+	i40e_prep_for_reset(pf, true);
 	rtnl_unlock();
 
 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11912,10 +11840,10 @@ static int i40e_resume(struct pci_dev *pdev)
 	pci_wake_from_d3(pdev, false);
 
 	/* handling the reset will rebuild the device state */
-	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
-		clear_bit(__I40E_DOWN, &pf->state);
+	if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
+		clear_bit(__I40E_VSI_DOWN, pf->state);
 		rtnl_lock();
-		i40e_reset_and_rebuild(pf, false);
+		i40e_reset_and_rebuild(pf, false, true);
 		rtnl_unlock();
 	}
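For reference, the UDP tunnel port bookkeeping earlier in this diff (i40e_sync_udp_filters(), i40e_udp_tunnel_add()/i40e_udp_tunnel_del()) reduces to a small state machine: ports are kept in host order once ntohs() is applied at the ndo entry points, a port of 0 marks a slot for deletion, and a pending bitmap drives the next sync pass. A standalone model (plain C, not driver code; sizes and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 16

static uint16_t ports[MAX_PORTS];	/* host-order ports; 0 = empty/delete */
static uint64_t pending_bitmap;		/* slots the next sync must push */

static void port_add(uint16_t port)
{
	for (int i = 0; i < MAX_PORTS; i++) {
		if (!ports[i]) {
			ports[i] = port;
			pending_bitmap |= 1ULL << i;
			return;
		}
	}
}

static void port_del(uint16_t port)
{
	for (int i = 0; i < MAX_PORTS; i++) {
		if (ports[i] == port) {
			ports[i] = 0;		/* mark for deletion */
			pending_bitmap |= 1ULL << i;
			return;
		}
	}
}

static void sync_pass(void)
{
	for (int i = 0; i < MAX_PORTS; i++) {
		if (pending_bitmap & (1ULL << i)) {
			pending_bitmap &= ~(1ULL << i);
			printf("%s port %u, index %d\n",
			       ports[i] ? "add" : "delete", ports[i], i);
		}
	}
}

int main(void)
{
	port_add(4789);		/* vxlan */
	port_add(6081);		/* geneve */
	sync_pass();
	port_del(4789);
	sync_pass();
	return 0;
}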