Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c      |   4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h  |  51
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c      |   2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_devids.h      |   2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h   |   4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c        | 156
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h        |  46
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h        |  87
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h    |   4
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h           |   3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c   |   8
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c      | 260
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  |   3
13 files changed, 425 insertions(+), 205 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 44f7ed7583dd..96385156b824 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -912,11 +912,11 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 40b0eafd0c71..eeb9864bc5b1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1639,6 +1639,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@@ -1647,6 +1651,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1654,7 +1659,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@@ -1677,6 +1683,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@@ -1687,7 +1695,22 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1709,7 +1732,20 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1789,9 +1825,18 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0X00
+#define I40E_AQ_25G_NOT_PRESENT 0X01
+#define I40E_AQ_25G_NVM_CRC_ERR 0X02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 external_power_ability;
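
The two new bits in the link status 'config' byte report whether KR (BASE-R/Firecode) or RS (Reed-Solomon) forward error correction is active on the link. A sketch of how a consumer might decode them, written as a hypothetical helper (only the bit definitions come from this patch):

    static bool i40e_link_fec_enabled(u8 config)
    {
            return config & (I40E_AQ_CONFIG_FEC_KR_ENA |
                             I40E_AQ_CONFIG_FEC_RS_ENA);
    }
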
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7953c13451b9..aa63b7fb993d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -53,6 +53,8 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_SFP_X722:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index 70235706915e..21dcaee1ad1d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -39,6 +39,8 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index d89d52109efa..ba6c6bda0e22 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -115,6 +115,10 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 75f2a2cdd738..df67ef37b7f3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -150,7 +150,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0;
}
-#define WB_STRIDE 0x3
+#define WB_STRIDE 4
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40evf_get_tx_pending(tx_ring, false);
if (budget &&
- ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
+ ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
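
A note on the rewritten check (explanatory comment, not part of the patch):

    /* Since integer division truncates, "(j / WB_STRIDE) == 0 && (j > 0)"
     * with WB_STRIDE == 4 is simply "1 <= j <= 3": arm a forced
     * write-back only when a handful of descriptors are stranded while
     * interrupts may be disabled.
     */
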
@@ -705,7 +705,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
* because each write-back erases this info.
*/
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
- rx_desc->read.hdr_addr = 0;
rx_desc++;
bi++;
@@ -1209,7 +1208,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
while (likely(total_rx_packets < budget)) {
union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
- u32 rx_status;
u16 vlan_tag;
u8 rx_ptype;
u64 qword;
@@ -1223,21 +1221,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
-
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
-
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
* which is always zero because packet split isn't used, if the
* hardware wrote DD then it will be non-zero
*/
- if (!rx_desc->wb.qword1.status_error_len)
+ if (!i40e_test_staterr(rx_desc,
+ BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
/* This memory barrier is needed to keep us from reading
@@ -1271,6 +1261,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+
/* populate checksum, VLAN, and protocol */
i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
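
For reference, the DD-bit test above goes through the i40e_test_staterr() helper already defined in i40e_txrx.h (its context is visible in the next file of this diff); a minimal sketch of its shape, assuming the upstream definition:

    static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
                                         const u64 stat_err_bits)
    {
            return !!(rx_desc->wb.qword1.status_error_len &
                      cpu_to_le64(stat_err_bits));
    }

Because status_error_len overlaps hdr_addr and is zeroed for unused descriptors, a single test of the DD bit through this helper replaces the old two-step read-qword-then-mask sequence.
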
@@ -1461,12 +1455,24 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
+ const cpumask_t *aff_mask = &q_vector->affinity_mask;
+ int cpu_id = smp_processor_id();
+
+ /* It is possible that the interrupt affinity has changed, but
+ * if the cpu is pegged at 100%, polling will never exit while
+ * traffic continues and the interrupt will be stuck on this
+ * cpu. We check to make sure affinity is correct before we
+ * continue to poll, otherwise we must stop polling so the
+ * interrupt can move to the correct cpu.
+ */
+ if (likely(cpumask_test_cpu(cpu_id, aff_mask))) {
tx_only:
- if (arm_wb) {
- q_vector->tx.ring[0].tx_stats.tx_force_wb++;
- i40e_enable_wb_on_itr(vsi, q_vector);
+ if (arm_wb) {
+ q_vector->tx.ring[0].tx_stats.tx_force_wb++;
+ i40e_enable_wb_on_itr(vsi, q_vector);
+ }
+ return budget;
}
- return budget;
}
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
@@ -1474,8 +1480,17 @@ tx_only:
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete_done(napi, work_done);
- i40e_update_enable_itr(vsi, q_vector);
- return 0;
+
+ /* If we're prematurely stopping polling to fix the interrupt
+ * affinity we want to make sure polling starts back up so we
+ * issue a call to i40evf_force_wb which triggers a SW interrupt.
+ */
+ if (!clean_complete)
+ i40evf_force_wb(vsi, q_vector);
+ else
+ i40e_update_enable_itr(vsi, q_vector);
+
+ return min(work_done, budget - 1);
}
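
Condensed, the poll exit logic after this patch looks as follows (a sketch using the names from the diff; the arm_wb handling is elided, so this is not a verbatim copy of the final function):

    if (!clean_complete) {
            if (cpumask_test_cpu(smp_processor_id(),
                                 &q_vector->affinity_mask))
                    return budget;          /* right cpu: keep polling */
            /* wrong cpu: fall through and stop polling */
    }
    napi_complete_done(napi, work_done);
    if (!clean_complete)
            i40evf_force_wb(vsi, q_vector); /* SW interrupt restarts NAPI */
    else
            i40e_update_enable_itr(vsi, q_vector);
    return min(work_done, budget - 1);

Returning a value strictly less than budget is what tells the NAPI core this poll instance is finished; returning budget would keep it scheduled on the wrong CPU indefinitely.
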
/**
@@ -1935,9 +1950,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0;
dma_addr_t dma;
u16 gso_segs;
- u16 desc_count = 0;
- bool tail_bump = true;
- bool do_rs = false;
+ u16 desc_count = 1;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -2020,8 +2033,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i];
}
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i++;
if (i == tx_ring->count)
@@ -2029,66 +2041,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ /* write last descriptor with EOP bit */
+ td_cmd |= I40E_TX_DESC_CMD_EOP;
+
+ /* We can OR these values together as they are both checked against
+ * 4 below and at this point desc_count will be used as a boolean value
+ * after this if/else block.
+ */
+ desc_count |= ++tx_ring->packet_stride;
+
/* Algorithm to optimize tail and RS bit setting:
- * if xmit_more is supported
- * if xmit_more is true
- * do not update tail and do not mark RS bit.
- * if xmit_more is false and last xmit_more was false
- * if every packet spanned less than 4 desc
- * then set RS bit on 4th packet and update tail
- * on every packet
- * else
- * update tail and set RS bit on every packet.
- * if xmit_more is false and last_xmit_more was true
- * update tail and set RS bit.
+ * if queue is stopped
+ * mark RS bit
+ * reset packet counter
+ * else if xmit_more is supported and is true
+ * advance packet counter to 4
+ * reset desc_count to 0
*
- * Optimization: wmb to be issued only in case of tail update.
- * Also optimize the Descriptor WB path for RS bit with the same
- * algorithm.
+ * if desc_count >= 4
+ * mark RS bit
+ * reset packet counter
+ * if desc_count > 0
+ * update tail
*
- * Note: If there are less than 4 packets
+ * Note: If there are fewer than 4 descriptors
* pending and interrupts were disabled the service task will
* trigger a force WB.
*/
- if (skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring))) {
- tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- tail_bump = false;
- } else if (!skb->xmit_more &&
- !netif_xmit_stopped(txring_txq(tx_ring)) &&
- (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
- (tx_ring->packet_stride < WB_STRIDE) &&
- (desc_count < WB_STRIDE)) {
- tx_ring->packet_stride++;
- } else {
+ if (netif_xmit_stopped(txring_txq(tx_ring))) {
+ goto do_rs;
+ } else if (skb->xmit_more) {
+ /* set stride to arm on next packet and reset desc_count */
+ tx_ring->packet_stride = WB_STRIDE;
+ desc_count = 0;
+ } else if (desc_count >= WB_STRIDE) {
+do_rs:
+ /* write last descriptor with RS bit set */
+ td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
- tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
- do_rs = true;
}
- if (do_rs)
- tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz =
- build_ctob(td_cmd, td_offset, size, td_tag) |
- cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
- I40E_TX_DESC_CMD_EOP) <<
- I40E_TXD_QW1_CMD_SHIFT);
+ build_ctob(td_cmd, td_offset, size, td_tag);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ *
+ * We also use this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (!tail_bump) {
- prefetchw(tx_desc + 1);
- } else {
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
+ if (desc_count) {
writel(i, tx_ring->tail);
+
+ /* we need this if more than one processor can write to our tail
+ * at a time; it synchronizes IO on IA64/Altix systems
+ */
+ mmiowb();
}
+
return;
dma_error:
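
A short walk-through of the new tail/RS decision, assuming WB_STRIDE == 4, a queue that is not stopped, and two single-descriptor packets (illustrative):

    /* pkt 1, xmit_more set:   packet_stride = WB_STRIDE (4), desc_count = 0
     *                         -> no RS bit, no tail bump (batching)
     * pkt 2, xmit_more clear: desc_count = 1 | ++packet_stride (= 5) = 5
     *                         -> 5 >= WB_STRIDE: RS bit set, stride reset,
     *                            a single tail write covers both packets
     */
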
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index abcdecabbc56..a5fc789f78eb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -173,26 +173,37 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
-/* This ugly bit of math is equivalent to DIV_ROUNDUP(size, X) where X is
- * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
- * that 12K is not a power of 2 and division is expensive. It is used to
- * approximate the number of descriptors used per linear buffer. Note
- * that this will overestimate in some cases as it doesn't account for the
- * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
- * the error should not impact things much as large buffers usually mean
- * we will use fewer descriptors then there are frags in an skb.
+/**
+ * i40e_txd_use_count - estimate the number of descriptors needed for Tx
+ * @size: transmit request size in bytes
+ *
+ * Due to hardware alignment restrictions (4K alignment), we need to
+ * assume that we can have no more than 12K of data per descriptor, even
+ * though each descriptor can take up to 16K - 1 bytes of aligned memory.
+ * Thus, we need to divide by 12K. But division is slow! Instead,
+ * we decompose the operation into shifts and one relatively cheap
+ * multiply operation.
+ *
+ * To divide by 12K, we first divide by 4K, then divide by 3:
+ * To divide by 4K, shift right by 12 bits
+ * To divide by 3, multiply by 85, then divide by 256
+ * (Divide by 256 is done by shifting right by 8 bits)
+ * Finally, we add one to round up. Because 256 isn't an exact multiple of
+ * 3, we'll underestimate near each multiple of 12K. This is actually more
+ * accurate as we have 4K - 1 of wiggle room that we can fit into the last
+ * segment. For our purposes this is accurate out to 1M which is orders of
+ * magnitude greater than our largest possible GSO size.
+ *
+ * This would then be implemented as:
+ * return (((size >> 12) * 85) >> 8) + 1;
+ *
+ * Since multiplication and division are commutative, we can reorder
+ * operations into:
+ * return ((size * 85) >> 20) + 1;
*/
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
- const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
- const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
- unsigned int adjust = ~(u32)0;
-
- /* if we rounded up on the reciprocal pull down the adjustment */
- if ((max * reciprocal) > adjust)
- adjust = ~(u32)(reciprocal - 1);
-
- return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+ return ((size * 85) >> 20) + 1;
}
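
The bounds claimed in the comment are easy to check in userspace; a small harness, assuming I40E_MAX_DATA_PER_TXD_ALIGNED == 12288 as stated above (not part of the patch):

    #include <assert.h>
    #include <stdio.h>

    static unsigned int txd_use_count(unsigned int size)
    {
            return ((size * 85) >> 20) + 1;
    }

    int main(void)
    {
            unsigned int size;

            for (size = 1; size <= (1u << 20); size++) {
                    /* DIV_ROUND_UP(size, 12K) */
                    unsigned int exact = (size + 12287) / 12288;

                    /* never overestimates; underestimates by at most one
                     * descriptor near multiples of 12K, which the 4K - 1
                     * of alignment slack absorbs
                     */
                    assert(txd_use_count(size) == exact ||
                           txd_use_count(size) + 1 == exact);
            }
            printf("bounds hold for all sizes up to 1M\n");
            return 0;
    }
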
/* Tx Descriptors needed, worst case */
@@ -309,7 +320,6 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
u8 packet_stride;
-#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 97f96e0d9c4c..c85e8a31c072 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -187,47 +187,59 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
- BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- enum i40e_aq_capabilities_phy_type phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+/* Defining the macro I40E_PHY_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
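
On the offset scheme above: AQ PHY type values from I40E_PHY_TYPE_25GBASE_KR (0x1F = 31) upward must skip the unused bit 31 when mapped into the 64-bit capability mask. A hypothetical helper showing the mapping (not part of this patch):

    static u64 i40e_phy_type_to_cap(enum i40e_aq_phy_type phy_type)
    {
            if (phy_type >= I40E_PHY_TYPE_25GBASE_KR)
                    return BIT_ULL(phy_type + I40E_PHY_TYPE_OFFSET);
            return BIT_ULL(phy_type);
    }
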
@@ -237,6 +249,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -348,6 +364,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index bd691ad86673..fc374f833aa9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -162,6 +162,10 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
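
I40E_VF_BASE_MODE_OFFLOADS names the minimal offload set (L2, VLAN, RSS-by-PF) a VF can rely on. A sketch of how a PF might advertise only base-mode capabilities (illustrative, assuming the structure's vf_offload_flags field):

    vfres->vf_offload_flags &= I40E_VF_BASE_MODE_OFFLOADS;
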
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index c5fd724313c7..fffe4cf2c20b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -107,7 +107,8 @@ struct i40e_q_vector {
int v_idx; /* vector index in list */
char name[IFNAMSIZ + 9];
bool arm_wb_state;
- cpumask_var_t affinity_mask;
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
};
/* Helper macros to switch between ints/sec and what the register uses.
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index a9940154eead..272d600c1ed0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -85,6 +85,14 @@ static int i40evf_get_settings(struct net_device *netdev,
case I40E_LINK_SPEED_40GB:
ethtool_cmd_speed_set(ecmd, SPEED_40000);
break;
+ case I40E_LINK_SPEED_25GB:
+#ifdef SPEED_25000
+ ethtool_cmd_speed_set(ecmd, SPEED_25000);
+#else
+ netdev_info(netdev,
+ "Speed is 25G, display not supported by this version of ethtool.\n");
+#endif
+ break;
case I40E_LINK_SPEED_20GB:
ethtool_cmd_speed_set(ecmd, SPEED_20000);
break;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 14372810fc27..c0fc53361800 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 16
+#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -207,6 +207,9 @@ static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
+ if (!adapter->msix_entries)
+ return;
+
wr32(hw, I40E_VFINT_DYN_CTL01, 0);
/* read flush */
@@ -496,6 +499,33 @@ static void i40evf_netpoll(struct net_device *netdev)
#endif
/**
+ * i40evf_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ **/
+static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(notify, struct i40e_q_vector, affinity_notify);
+
+ q_vector->affinity_mask = *mask;
+}
+
+/**
+ * i40evf_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ **/
+static void i40evf_irq_affinity_release(struct kref *ref) {}
+
+/**
* i40evf_request_traffic_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
*
@@ -507,6 +537,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
int vector, err, q_vectors;
int rx_int_idx = 0, tx_int_idx = 0;
+ int irq_num;
i40evf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -514,6 +545,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
+ irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
@@ -532,21 +564,23 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
/* skip this unused q_vector */
continue;
}
- err = request_irq(
- adapter->msix_entries[vector + NONQ_VECS].vector,
- i40evf_msix_clean_rings,
- 0,
- q_vector->name,
- q_vector);
+ err = request_irq(irq_num,
+ i40evf_msix_clean_rings,
+ 0,
+ q_vector->name,
+ q_vector);
if (err) {
dev_info(&adapter->pdev->dev,
"Request_irq failed, error: %d\n", err);
goto free_queue_irqs;
}
+ /* register for affinity change notifications */
+ q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
+ q_vector->affinity_notify.release =
+ i40evf_irq_affinity_release;
+ irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* assign the mask for this irq */
- irq_set_affinity_hint(
- adapter->msix_entries[vector + NONQ_VECS].vector,
- q_vector->affinity_mask);
+ irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
return 0;
@@ -554,11 +588,10 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
free_queue_irqs:
while (vector) {
vector--;
- irq_set_affinity_hint(
- adapter->msix_entries[vector + NONQ_VECS].vector,
- NULL);
- free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
- &adapter->q_vectors[vector]);
+ irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_set_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
}
@@ -599,16 +632,18 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
**/
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
- int i;
- int q_vectors;
+ int vector, irq_num, q_vectors;
+
+ if (!adapter->msix_entries)
+ return;
q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- for (i = 0; i < q_vectors; i++) {
- irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
- NULL);
- free_irq(adapter->msix_entries[i+1].vector,
- &adapter->q_vectors[i]);
+ for (vector = 0; vector < q_vectors; vector++) {
+ irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
+ irq_set_affinity_notifier(irq_num, NULL);
+ irq_set_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, &adapter->q_vectors[vector]);
}
}
@@ -622,6 +657,9 @@ static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ if (!adapter->msix_entries)
+ return;
+
free_irq(adapter->msix_entries[0].vector, netdev);
}
@@ -1396,6 +1434,9 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
int q_idx, num_q_vectors;
int napi_vectors;
+ if (!adapter->q_vectors)
+ return;
+
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
napi_vectors = adapter->num_active_queues;
@@ -1405,6 +1446,7 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
netif_napi_del(&q_vector->napi);
}
kfree(adapter->q_vectors);
+ adapter->q_vectors = NULL;
}
/**
@@ -1414,6 +1456,9 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
**/
void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
{
+ if (!adapter->msix_entries)
+ return;
+
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -1664,6 +1709,49 @@ restart_watchdog:
schedule_work(&adapter->adminq_task);
}
+static void i40evf_disable_vf(struct i40evf_adapter *adapter)
+{
+ struct i40evf_mac_filter *f, *ftmp;
+ struct i40evf_vlan_filter *fv, *fvtmp;
+
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+ if (netif_running(adapter->netdev)) {
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ netif_carrier_off(adapter->netdev);
+ netif_tx_disable(adapter->netdev);
+ adapter->link_up = false;
+ i40evf_napi_disable_all(adapter);
+ i40evf_irq_disable(adapter);
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ }
+
+ /* Delete all of the filters, both MAC and VLAN. */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
+ list_del(&fv->list);
+ kfree(fv);
+ }
+
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_queues(adapter);
+ i40evf_free_q_vectors(adapter);
+ kfree(adapter->vf_res);
+ i40evf_shutdown_adminq(&adapter->hw);
+ adapter->netdev->flags &= ~IFF_UP;
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ adapter->state = __I40EVF_DOWN;
+ dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
+}
+
#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
/**
@@ -1717,60 +1805,21 @@ static void i40evf_reset_task(struct work_struct *work)
/* wait until the reset is complete and the PF is responding to us */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ /* sleep first to make sure a minimum wait time is met */
+ msleep(I40EVF_RESET_WAIT_MS);
+
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == I40E_VFR_VFACTIVE)
break;
- msleep(I40EVF_RESET_WAIT_MS);
}
+
pci_set_master(adapter->pdev);
- /* extra wait to make sure minimum wait is met */
- msleep(I40EVF_RESET_WAIT_MS);
- if (i == I40EVF_RESET_WAIT_COUNT) {
- struct i40evf_mac_filter *ftmp;
- struct i40evf_vlan_filter *fv, *fvtmp;
- /* reset never finished */
+ if (i == I40EVF_RESET_WAIT_COUNT) {
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
- adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
-
- if (netif_running(adapter->netdev)) {
- set_bit(__I40E_DOWN, &adapter->vsi.state);
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- adapter->link_up = false;
- i40evf_napi_disable_all(adapter);
- i40evf_irq_disable(adapter);
- i40evf_free_traffic_irqs(adapter);
- i40evf_free_all_tx_resources(adapter);
- i40evf_free_all_rx_resources(adapter);
- }
-
- /* Delete all of the filters, both MAC and VLAN. */
- list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
- list) {
- list_del(&f->list);
- kfree(f);
- }
-
- list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
- list) {
- list_del(&fv->list);
- kfree(fv);
- }
-
- i40evf_free_misc_irq(adapter);
- i40evf_reset_interrupt_capability(adapter);
- i40evf_free_queues(adapter);
- i40evf_free_q_vectors(adapter);
- kfree(adapter->vf_res);
- i40evf_shutdown_adminq(hw);
- adapter->netdev->flags &= ~IFF_UP;
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
- adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
- adapter->state = __I40EVF_DOWN;
- dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
+ i40evf_disable_vf(adapter);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2133,10 +2182,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
-
- if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
- return -EINVAL;
netdev->mtu = new_mtu;
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
@@ -2145,6 +2190,64 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+/**
+ * i40evf_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @netdev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40evf_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ size_t len;
+
+ /* No point in doing any of this if neither checksum nor GSO are
+ * being requested for this frame. We can rule out both by just
+ * checking for CHECKSUM_PARTIAL
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ /* We cannot support GSO if the MSS is going to be less than
+ * 64 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ features &= ~NETIF_F_GSO_MASK;
+
+ /* MACLEN can support at most 63 words */
+ len = skb_network_header(skb) - skb->data;
+ if (len & ~(63 * 2))
+ goto out_err;
+
+ /* IPLEN and EIPLEN can support at most 127 dwords */
+ len = skb_transport_header(skb) - skb_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+
+ if (skb->encapsulation) {
+ /* L4TUNLEN can support 127 words */
+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
+ if (len & ~(127 * 2))
+ goto out_err;
+
+ /* IPLEN can support at most 127 dwords */
+ len = skb_inner_transport_header(skb) -
+ skb_inner_network_header(skb);
+ if (len & ~(127 * 4))
+ goto out_err;
+ }
+
+ /* No need to validate L4LEN as TCP is the only protocol with a
+ * flexible value and we support all possible values supported
+ * by TCP, which is at most 15 dwords
+ */
+
+ return features;
+out_err:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
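
The masks above encode hardware header-length field widths. A worked example for a typical VXLAN-in-IPv4 TCP frame (illustrative numbers, not part of the patch):

    /* MACLEN   = 14 bytes  -> 14 & ~(63 * 2)  == 0 (limit 126 bytes)
     * IPLEN    = 20 bytes  -> 20 & ~(127 * 4) == 0 (limit 508 bytes)
     * L4TUNLEN = 30 bytes (UDP 8 + VXLAN 8 + inner MAC 14)
     *                      -> 30 & ~(127 * 2) == 0 (limit 254 bytes)
     * inner IPLEN = 20     -> within the 508-byte limit
     *
     * All checks pass, so the features are returned unchanged; an
     * oversized header area (say 600 bytes) would give
     * 600 & ~508 != 0 and strip the CSUM/GSO features instead.
     */
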
#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
NETIF_F_HW_VLAN_CTAG_RX |\
NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -2179,6 +2282,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_tx_timeout = i40evf_tx_timeout,
.ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+ .ndo_features_check = i40evf_features_check,
.ndo_fix_features = i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
@@ -2424,6 +2528,10 @@ static void i40evf_init_task(struct work_struct *work)
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
+ /* MTU range: 68 - 9710 */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);
+
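
With min_mtu/max_mtu populated, range validation moves into the networking core, which is why the explicit check was dropped from i40evf_change_mtu() earlier in this patch. A paraphrase of the core-side check this relies on (from dev_set_mtu() on kernels with these fields; not part of this patch):

    if (new_mtu < dev->min_mtu || new_mtu > dev->max_mtu)
            return -EINVAL;
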
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
adapter->hw.mac.addr);
@@ -2764,12 +2872,10 @@ static void i40evf_remove(struct pci_dev *pdev)
msleep(50);
}
- if (adapter->msix_entries) {
- i40evf_misc_irq_disable(adapter);
- i40evf_free_misc_irq(adapter);
- i40evf_reset_interrupt_capability(adapter);
- i40evf_free_q_vectors(adapter);
- }
+ i40evf_misc_irq_disable(adapter);
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_q_vectors(adapter);
if (adapter->watchdog_timer.function)
del_timer_sync(&adapter->watchdog_timer);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index ddf478d6322b..2059a8e88908 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -836,6 +836,9 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
case I40E_LINK_SPEED_40GB:
speed = "40 G";
break;
+ case I40E_LINK_SPEED_25GB:
+ speed = "25 G";
+ break;
case I40E_LINK_SPEED_20GB:
speed = "20 G";
break;