Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_main.c')
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 115
1 file changed, 90 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b5dcd15ced36..94feea3b2599 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -419,10 +419,10 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
@@ -472,10 +472,10 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
if (!ring)
continue;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
@@ -897,10 +897,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
@@ -915,10 +915,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
@@ -935,10 +935,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
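
The five hunks above are one mechanical conversion: the 64-bit stats reader helpers no longer come in _irq flavours, so the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() pair is used everywhere. A minimal sketch of the resulting reader/writer pairing, with the writer side assumed to follow the usual u64_stats_update_begin()/end() shape used in the driver's clean-up paths (the sketch_* names are illustrative, not from the driver):

    #include <linux/u64_stats_sync.h>

    /* Reader: loop until a consistent packets/bytes snapshot is observed. */
    static void sketch_read_ring_stats(struct i40e_ring *ring,
                                       u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&ring->syncp);
                    *packets = ring->stats.packets;
                    *bytes = ring->stats.bytes;
            } while (u64_stats_fetch_retry(&ring->syncp, start));
    }

    /* Writer: assumed shape of the update side guarding the same counters. */
    static void sketch_add_ring_stats(struct i40e_ring *ring,
                                      u64 packets, u64 bytes)
    {
            u64_stats_update_begin(&ring->syncp);
            ring->stats.packets += packets;
            ring->stats.bytes += bytes;
            u64_stats_update_end(&ring->syncp);
    }
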
@@ -3694,6 +3694,24 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
}
/**
+ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
+ *
+ * @vsi: VSI to calculate rx_buf_len from
+ */
+static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
+{
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return I40E_RXBUFFER_2048;
+
+#if (PAGE_SIZE < 8192)
+ if (!I40E_2K_TOO_SMALL_WITH_PADDING && vsi->netdev->mtu <= ETH_DATA_LEN)
+ return I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+
+ return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
+}
+
+/**
* i40e_vsi_configure_rx - Configure the VSI for Rx
* @vsi: the VSI being configured
*
@@ -3704,20 +3722,14 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
- vsi->max_frame = I40E_MAX_RXBUFFER;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
+ vsi->max_frame = I40E_MAX_RXBUFFER;
+ vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
+
#if (PAGE_SIZE < 8192)
- } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
+ vsi->netdev->mtu <= ETH_DATA_LEN)
vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
- } else {
- vsi->max_frame = I40E_MAX_RXBUFFER;
- vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
- I40E_RXBUFFER_2048;
- }
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
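
The buffer-length policy that used to be spelled out inline in i40e_vsi_configure_rx() now lives in i40e_calculate_vsi_rx_buf_len(), so a caller can compute it before the rings have been configured (the XDP MTU check further down is the other user). A worked example of what the helper returns on a 4 KiB-page build, assuming the usual constant values from the driver headers (I40E_RXBUFFER_1536 = 1536, I40E_RXBUFFER_2048 = 2048, I40E_RXBUFFER_3072 = 3072):

    no netdev attached, or I40E_FLAG_LEGACY_RX set          -> 2048
    MTU <= ETH_DATA_LEN and 2 KiB still fits with padding   -> 1536 - NET_IP_ALIGN
    otherwise (jumbo MTU, or padding makes 2 KiB too small) -> 3072

On builds with PAGE_SIZE >= 8192 the small-MTU branch is compiled out and the non-legacy cases resolve to I40E_RXBUFFER_2048.
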
@@ -4123,6 +4135,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
}
/* register for affinity change notifications */
+ q_vector->irq_num = irq_num;
q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
q_vector->affinity_notify.release = i40e_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
@@ -10655,6 +10668,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
}
/**
+ * i40e_clean_xps_state - clean xps state for every tx_ring
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i])
+ clear_bit(__I40E_TX_XPS_INIT_DONE,
+ vsi->tx_rings[i]->state);
+}
+
+/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
@@ -10678,8 +10706,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
i40e_pf_quiesce_all_vsi(pf);
for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
+ if (pf->vsi[v]) {
+ i40e_clean_xps_state(pf->vsi[v]);
pf->vsi[v]->seid = 0;
+ }
}
i40e_shutdown_adminq(&pf->hw);
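
Clearing __I40E_TX_XPS_INIT_DONE while quiescing for reset lets the XPS queue mapping be programmed again when the rings come back up, instead of being skipped because the bit is still set from before the reset. A sketch of the consuming side, with the body assumed from the driver's usual XPS setup during Tx ring configuration (the sketch_ name and the exact CPU-spread policy are illustrative):

    /* Runs when a Tx ring is (re)configured; the clear_bit() above re-arms it. */
    static void sketch_config_xps_tx_ring(struct i40e_ring *ring)
    {
            int cpu;

            if (!ring->q_vector || !ring->netdev)
                    return;

            /* Program XPS only once per INIT_DONE cycle, preserving user setup. */
            if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
                    return;

            cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
            netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
                                ring->queue_index);
    }
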
@@ -12920,6 +12950,29 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
}
/**
+ * i40e_set_loopback - turn on/off loopback mode on underlying PF
+ * @vsi: ptr to VSI
+ * @ena: flag to indicate the on/off setting
+ */
+static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena)
+{
+ bool if_running = netif_running(vsi->netdev) &&
+ !test_and_set_bit(__I40E_VSI_DOWN, vsi->state);
+ int ret;
+
+ if (if_running)
+ i40e_down(vsi);
+
+ ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
+ if (ret)
+ netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
+ if (if_running)
+ i40e_up(vsi);
+
+ return ret;
+}
+
+/**
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -12959,6 +13012,9 @@ static int i40e_set_features(struct net_device *netdev,
if (need_reset)
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+ if ((features ^ netdev->features) & NETIF_F_LOOPBACK)
+ return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+
return 0;
}
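
Together with the NETIF_F_LOOPBACK bit added to hw_features further down, this makes MAC loopback a runtime-toggleable netdev feature (for example via ethtool -K <iface> loopback on, where <iface> stands in for the port's interface name). When the interface is running, the helper brings the VSI down around i40e_aq_set_mac_loopback() so the setting is not flipped underneath active queues, then brings it back up.
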
@@ -13265,7 +13321,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
int i;
/* Don't allow frames that span over multiple buffers */
- if (frame_size > vsi->rx_buf_len) {
+ if (frame_size > i40e_calculate_vsi_rx_buf_len(vsi)) {
NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
return -EINVAL;
}
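
Checking the frame size against i40e_calculate_vsi_rx_buf_len() rather than the cached vsi->rx_buf_len evaluates the limit the Rx path will actually program, which also gives a meaningful answer when the interface is down and rx_buf_len has not been written yet.
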
@@ -13721,7 +13777,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
- netdev->hw_features |= hw_features;
+ netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
@@ -16644,6 +16700,8 @@ static struct pci_driver i40e_driver = {
**/
static int __init i40e_init_module(void)
{
+ int err;
+
pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
@@ -16661,7 +16719,14 @@ static int __init i40e_init_module(void)
}
i40e_dbg_init();
- return pci_register_driver(&i40e_driver);
+ err = pci_register_driver(&i40e_driver);
+ if (err) {
+ destroy_workqueue(i40e_wq);
+ i40e_dbg_exit();
+ return err;
+ }
+
+ return 0;
}
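
pci_register_driver() can fail, and previously the workqueue and the debugfs state set up earlier in i40e_init_module() were never unwound on that path. A sketch of the same tail written in the kernel's usual goto-unwind style, purely as an alternative structuring of the fix above (teardown in reverse order of setup):

    err = pci_register_driver(&i40e_driver);
    if (err)
            goto err_pci_register;

    return 0;

    err_pci_register:
            i40e_dbg_exit();
            destroy_workqueue(i40e_wq);
            return err;
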
module_init(i40e_init_module);