Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 105
1 file changed, 69 insertions, 36 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d39cba214320..f1dbdf26d8e1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -58,6 +58,7 @@
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
 #include <net/vxlan.h>
+#include <net/mpls.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -75,7 +76,7 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
 			      "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "5.0.0-k"
+#define DRV_VERSION "5.1.0-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
 				"Copyright (c) 1999-2016 Intel Corporation.";
@@ -1451,7 +1452,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
 					IXGBE_DCA_CTRL_DCA_MODE_CB2);
 			break;
 		}
-		/* Fall Through since DCA is disabled. */
+		/* fall through - DCA is disabled. */
 	case DCA_PROVIDER_REMOVE:
 		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
 			dca_remove_requester(dev);
@@ -2232,6 +2233,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
+		/* fallthrough */
 	case XDP_ABORTED:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		/* fallthrough -- handle aborts by dropping packet */
@@ -2638,8 +2640,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
 	if (test_bit(__IXGBE_DOWN, &adapter->state))
 		return;
 
-	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-	    !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
+	if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
 		return;
 
 	adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
@@ -3105,23 +3106,23 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	unsigned int ri = 0, ti = 0;
 	int vector, err;
-	int ri = 0, ti = 0;
 
 	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
 		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
 		struct msix_entry *entry = &adapter->msix_entries[vector];
 
 		if (q_vector->tx.ring && q_vector->rx.ring) {
-			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "TxRx", ri++);
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "%s-TxRx-%u", netdev->name, ri++);
 			ti++;
 		} else if (q_vector->rx.ring) {
-			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "rx", ri++);
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "%s-rx-%u", netdev->name, ri++);
 		} else if (q_vector->tx.ring) {
-			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
-				 "%s-%s-%d", netdev->name, "tx", ti++);
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "%s-tx-%u", netdev->name, ti++);
 		} else {
 			/* skip this unused q_vector */
 			continue;
@@ -3802,6 +3803,9 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 			mrqc = IXGBE_MRQC_VMDQRSS32EN;
 		else
 			mrqc = IXGBE_MRQC_VMDQRSS64EN;
+
+		/* Enable L3/L4 for Tx Switched packets */
+		mrqc |= IXGBE_MRQC_L3L4TXSWEN;
 	} else {
 		if (tcs > 4)
 			mrqc = IXGBE_MRQC_RTRSS8TCEN;
@@ -4174,7 +4178,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
 	case ixgbe_mac_x550em_a:
 		if (adapter->num_vfs)
 			rdrxctl |= IXGBE_RDRXCTL_PSP;
-		/* fall through for older HW */
+		/* fall through */
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 		/* Disable RSC for ACK packets */
@@ -6183,8 +6187,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 	if (!tx_ring->tx_buffer_info)
 		goto err;
 
-	u64_stats_init(&tx_ring->syncp);
-
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -6278,8 +6280,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
-	u64_stats_init(&rx_ring->syncp);
-
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
@@ -6886,6 +6886,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
 		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
 		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
+		/* fall through */
 	case ixgbe_mac_82599EB:
 		for (i = 0; i < 16; i++)
 			adapter->hw_rx_no_dma_resources +=
@@ -7634,6 +7635,7 @@ static void ixgbe_service_task(struct work_struct *work)
 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
 		ixgbe_ptp_overflow_check(adapter);
 		ixgbe_ptp_rx_hang(adapter);
+		ixgbe_ptp_tx_hang(adapter);
 	}
 	ixgbe_service_event_complete(adapter);
 }
@@ -7667,7 +7669,10 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	if (err < 0)
 		return err;
 
-	ip.hdr = skb_network_header(skb);
+	if (eth_p_mpls(first->protocol))
+		ip.hdr = skb_inner_network_header(skb);
+	else
+		ip.hdr = skb_network_header(skb);
 	l4.hdr = skb_checksum_start(skb);
 
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
@@ -7871,9 +7876,9 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
 		       IXGBE_TXD_CMD_RS)
 
-static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
-			 struct ixgbe_tx_buffer *first,
-			 const u8 hdr_len)
+static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
+			struct ixgbe_tx_buffer *first,
+			const u8 hdr_len)
 {
 	struct sk_buff *skb = first->skb;
 	struct ixgbe_tx_buffer *tx_buffer;
@@ -8000,7 +8005,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 		mmiowb();
 	}
 
-	return;
+	return 0;
 dma_error:
 	dev_err(tx_ring->dev, "TX DMA map failed\n");
 	tx_buffer = &tx_ring->tx_buffer_info[i];
@@ -8030,6 +8035,8 @@ dma_error:
 	first->skb = NULL;
 
 	tx_ring->next_to_use = i;
+
+	return -1;
 }
 
 static void ixgbe_atr(struct ixgbe_ring *ring,
@@ -8205,6 +8212,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
 			break;
+		/* fall through */
 	default:
 		return fallback(dev, skb);
 	}
@@ -8330,16 +8338,19 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	protocol = vlan_get_protocol(skb);
 
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-	    adapter->ptp_clock &&
-	    !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
-				   &adapter->state)) {
-		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
-
-		/* schedule check for Tx timestamp */
-		adapter->ptp_tx_skb = skb_get(skb);
-		adapter->ptp_tx_start = jiffies;
-		schedule_work(&adapter->ptp_tx_work);
+	    adapter->ptp_clock) {
+		if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
+					   &adapter->state)) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+
+			/* schedule check for Tx timestamp */
+			adapter->ptp_tx_skb = skb_get(skb);
+			adapter->ptp_tx_start = jiffies;
+			schedule_work(&adapter->ptp_tx_work);
+		} else {
+			adapter->tx_hwtstamp_skipped++;
+		}
 	}
 
 	skb_tx_timestamp(skb);
@@ -8402,13 +8413,21 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #ifdef IXGBE_FCOE
 xmit_fcoe:
 #endif /* IXGBE_FCOE */
-	ixgbe_tx_map(tx_ring, first, hdr_len);
+	if (ixgbe_tx_map(tx_ring, first, hdr_len))
+		goto cleanup_tx_timestamp;
 
 	return NETDEV_TX_OK;
 
 out_drop:
 	dev_kfree_skb_any(first->skb);
 	first->skb = NULL;
+cleanup_tx_timestamp:
+	if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) {
+		dev_kfree_skb_any(adapter->ptp_tx_skb);
+		adapter->ptp_tx_skb = NULL;
+		cancel_work_sync(&adapter->ptp_tx_work);
+		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
+	}
 
 	return NETDEV_TX_OK;
 }
@@ -9195,11 +9214,14 @@ free_jump:
 	return err;
 }
 
-static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
-			    struct tc_to_netdev *tc)
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
+			    __be16 proto, struct tc_to_netdev *tc)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 
+	if (chain_index)
+		return -EOPNOTSUPP;
+
 	if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
 	    tc->type == TC_SETUP_CLSU32) {
 		switch (tc->cls_u32->command) {
@@ -9793,6 +9815,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 		return ixgbe_xdp_setup(dev, xdp->prog);
 	case XDP_QUERY_PROG:
 		xdp->prog_attached = !!(adapter->xdp_prog);
+		xdp->prog_id = adapter->xdp_prog ?
+			adapter->xdp_prog->aux->id : 0;
 		return 0;
 	default:
 		return -EINVAL;
@@ -9929,6 +9953,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 			/* only support first port */
 			if (hw->bus.func != 0)
 				break;
+			/* fall through */
 		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
 		case IXGBE_SUBDEV_ID_82599_SFP:
 		case IXGBE_SUBDEV_ID_82599_RNDC:
@@ -10191,7 +10216,11 @@ skip_sriov:
 
 	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
 	netdev->hw_enc_features |= netdev->vlan_features;
-	netdev->mpls_features |= NETIF_F_HW_CSUM;
+	netdev->mpls_features |= NETIF_F_SG |
+				 NETIF_F_TSO |
+				 NETIF_F_TSO6 |
+				 NETIF_F_HW_CSUM;
+	netdev->mpls_features |= IXGBE_GSO_PARTIAL_FEATURES;
 
 	/* set this bit last since it cannot be part of vlan_features */
 	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -10275,6 +10304,10 @@ skip_sriov:
 	if (err)
 		goto err_sw_init;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		u64_stats_init(&adapter->rx_ring[i]->syncp);
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		u64_stats_init(&adapter->tx_ring[i]->syncp);
 	for (i = 0; i < adapter->num_xdp_queues; i++)
 		u64_stats_init(&adapter->xdp_ring[i]->syncp);
 
@@ -10341,11 +10374,11 @@ skip_sriov:
 			   "hardware.\n");
 	}
 	strcpy(netdev->name, "eth%d");
+	pci_set_drvdata(pdev, adapter);
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
 
-	pci_set_drvdata(pdev, adapter);
 
 	/* power down the optics for 82599 SFP+ fiber */
 	if (hw->mac.ops.disable_tx_laser)
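
Note on the ixgbe_request_msix_irqs() hunk above: the change drops the "sizeof(...) - 1" slack and folds the "TxRx"/"rx"/"tx" suffix directly into the format string, relying on snprintf() to bound the write and always NUL-terminate. Below is a minimal userspace sketch of that pattern, not driver code; the 25-byte buffer and the "eth0" name are illustrative assumptions only.

/* Minimal userspace sketch of the IRQ-name formatting pattern used above.
 * The buffer size stands in for sizeof(q_vector->name); the netdev name
 * is hypothetical.
 */
#include <stdio.h>

#define NAME_LEN 25	/* assumption: stand-in for the q_vector name size */

int main(void)
{
	char name[NAME_LEN];
	unsigned int ri = 0;

	/* snprintf() writes at most NAME_LEN bytes including the NUL
	 * terminator, so passing the full buffer size is safe and the
	 * extra "- 1" defensive slack is unnecessary.
	 */
	snprintf(name, sizeof(name), "%s-TxRx-%u", "eth0", ri++);
	printf("%s\n", name);	/* prints "eth0-TxRx-0" */

	return 0;
}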