Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h      |   2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h  | 108
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c        | 185
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h        |   2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h        |   8
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c      |  112
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c  |    6
7 files changed, 311 insertions, 112 deletions
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 6c31bf22c2c3..60f04e96a80e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -148,7 +148,7 @@ static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
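
[Note] The bumped I40E_ASQ_CMD_TIMEOUT above bounds the polling loop in the
shared admin-queue send path. A minimal sketch of that wait, assuming the
usleep_range()-based loop used by i40evf_asq_send_command() (paraphrased
here for reference, not part of this patch):

    /* Poll for descriptor completion, roughly 1 msec per iteration,
     * up to I40E_ASQ_CMD_TIMEOUT iterations -- now 250 ms, not 100 ms.
     */
    u32 total_delay = 0;

    do {
            if (i40evf_asq_done(hw))
                    break;          /* firmware consumed the command */
            usleep_range(1000, 2000);
            total_delay++;
    } while (total_delay < I40E_ASQ_CMD_TIMEOUT);

The larger budget gives slow firmware responses more headroom before the
driver declares the send queue stuck.
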
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index ff1b16370da9..e715bccfb5d2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -268,6 +268,8 @@ enum i40e_admin_queue_opc {
/* OEM commands */
i40e_aqc_opc_oem_parameter_change = 0xFE00,
i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
/* debug commands */
i40e_aqc_opc_debug_get_deviceid = 0xFF00,
@@ -276,7 +278,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_write_reg = 0xFF04,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
- i40e_aqc_opc_debug_modify_internals = 0xFF09,
};
/* command structures and indirect data structures */
@@ -410,6 +411,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_VSI 0x0017
#define I40E_AQ_CAP_ID_DCB 0x0018
#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_ISCSI 0x0022
#define I40E_AQ_CAP_ID_RSS 0x0040
#define I40E_AQ_CAP_ID_RXQ 0x0041
#define I40E_AQ_CAP_ID_TXQ 0x0042
@@ -454,8 +456,11 @@ struct i40e_aqc_arp_proxy_data {
__le32 pfpm_proxyfc;
__le32 ip_addr;
u8 mac_addr[6];
+ u8 reserved[2];
};
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
/* Set NS Proxy Table Entry Command (indirect 0x0105) */
struct i40e_aqc_ns_proxy_data {
__le16 table_idx_mac_addr_0;
@@ -481,6 +486,8 @@ struct i40e_aqc_ns_proxy_data {
u8 ipv6_addr_1[16];
};
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
/* Manage LAA Command (0x0106) - obsolete */
struct i40e_aqc_mng_laa {
__le16 command_flags;
@@ -491,6 +498,8 @@ struct i40e_aqc_mng_laa {
u8 reserved2[6];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
/* Manage MAC Address Read Command (indirect 0x0107) */
struct i40e_aqc_mac_address_read {
__le16 command_flags;
@@ -562,6 +571,8 @@ struct i40e_aqc_get_switch_config_header_resp {
u8 reserved[12];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
struct i40e_aqc_switch_config_element_resp {
u8 element_type;
#define I40E_AQ_SW_ELEM_TYPE_MAC 1
@@ -587,6 +598,8 @@ struct i40e_aqc_switch_config_element_resp {
__le16 element_info;
};
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
/* Get Switch Configuration (indirect 0x0200)
* an array of elements are returned in the response buffer
* the first in the array is the header, remainder are elements
@@ -596,6 +609,8 @@ struct i40e_aqc_get_switch_config_resp {
struct i40e_aqc_switch_config_element_resp element[1];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
+
/* Add Statistics (direct 0x0201)
* Remove Statistics (direct 0x0202)
*/
@@ -661,6 +676,8 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
u8 reserved2[6];
};
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
@@ -1092,6 +1109,8 @@ struct i40e_aqc_remove_tag {
u8 reserved[12];
};
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
/* Add multicast E-Tag (direct 0x0257)
* del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
* and no external data
@@ -1207,7 +1226,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
@@ -1240,7 +1259,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \
I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
u8 reserved2[14];
/* response section */
@@ -1359,6 +1378,8 @@ struct i40e_aqc_configure_vsi_ets_sla_bw_data {
u8 reserved1[28];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
* responds with i40e_aqc_qs_handles_resp
*/
@@ -1370,6 +1391,8 @@ struct i40e_aqc_configure_vsi_tc_bw_data {
__le16 qs_handles[8];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
/* Query vsi bw configuration (indirect 0x0408) */
struct i40e_aqc_query_vsi_bw_config_resp {
u8 tc_valid_bits;
@@ -1383,6 +1406,8 @@ struct i40e_aqc_query_vsi_bw_config_resp {
u8 reserved3[23];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
struct i40e_aqc_query_vsi_ets_sla_config_resp {
u8 tc_valid_bits;
@@ -1394,6 +1419,8 @@ struct i40e_aqc_query_vsi_ets_sla_config_resp {
__le16 tc_bw_max[2];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
struct i40e_aqc_configure_switching_comp_bw_limit {
__le16 seid;
@@ -1421,6 +1448,8 @@ struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved2[96];
};
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 tc_valid_bits;
@@ -1432,6 +1461,9 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 reserved1[28];
};
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
*/
@@ -1443,6 +1475,8 @@ struct i40e_aqc_configure_switching_comp_bw_config_data {
u8 reserved1[20];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
/* Query Switching Component Configuration (indirect 0x0418) */
struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 tc_valid_bits;
@@ -1453,6 +1487,8 @@ struct i40e_aqc_query_switching_comp_ets_config_resp {
u8 reserved2[23];
};
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
struct i40e_aqc_query_port_ets_config_resp {
u8 reserved[4];
@@ -1468,6 +1504,8 @@ struct i40e_aqc_query_port_ets_config_resp {
u8 reserved3[32];
};
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
/* Query Switching Component Bandwidth Allocation per Traffic Type
* (indirect 0x041A)
*/
@@ -1482,6 +1520,8 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
__le16 tc_bw_max[2];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
/* Suspend/resume port TX traffic
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
@@ -1495,6 +1535,8 @@ struct i40e_aqc_configure_partition_bw_data {
u8 max_bw[16]; /* bandwidth limit */
};
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1577,6 +1619,8 @@ struct i40e_aqc_module_desc {
u8 reserved2[8];
};
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
struct i40e_aq_get_phy_abilities_resp {
__le32 phy_type; /* bitmap using the above enum for offsets */
u8 link_speed; /* bitmap using the above enum bit patterns */
@@ -1605,6 +1649,8 @@ struct i40e_aq_get_phy_abilities_resp {
struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
};
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
/* Set PHY Config (direct 0x0601) */
struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 phy_type;
@@ -1788,12 +1834,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
/* NVM Config Read (indirect 0x0704) */
struct i40e_aqc_nvm_config_read {
__le16 cmd_flags;
-#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
-#define ANVM_READ_SINGLE_FEATURE 0
-#define ANVM_READ_MULTIPLE_FEATURES 1
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
- __le16 element_id; /* Feature/field ID */
- u8 reserved[2];
+ __le16 element_id; /* Feature/field ID */
+ __le16 element_id_msw; /* MSWord of field ID */
__le32 address_high;
__le32 address_low;
};
@@ -1811,21 +1857,32 @@ struct i40e_aqc_nvm_config_write {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
- __le16 instance_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10
__le16 feature_options;
__le16 feature_selection;
};
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
struct i40e_aqc_nvm_config_data_immediate_field {
-#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
- __le16 field_id;
- __le16 instance_id;
+ __le32 field_id;
+ __le32 field_value;
__le16 field_options;
- __le16 field_value;
+ __le16 reserved;
};
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -2082,7 +2139,8 @@ struct i40e_aqc_oem_param_change {
#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
#define I40E_AQ_OEM_PARAM_MAC 2
__le32 param_value1;
- u8 param_value2[8];
+ __le16 param_value2;
+ u8 reserved[6];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
@@ -2096,6 +2154,28 @@ struct i40e_aqc_oem_state_change {
I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocsd_memory_block_addr_high;
+ __le32 ocsd_memory_block_addr_low;
+ __le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+ u8 type_status;
+ u8 reserved1[3];
+ __le32 ocbb_memory_block_addr_high;
+ __le32 ocbb_memory_block_addr_low;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
/* debug commands */
/* get device id (0xFF00) uses the generic structure */
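
[Note] The I40E_CHECK_CMD_LENGTH()/I40E_CHECK_STRUCT_LEN() invocations added
throughout this header are compile-time size assertions: every direct AQ
command must be exactly 16 bytes on the wire, and indirect buffers must match
the firmware's layout. The macros are defined near the top of this header
along these lines (the divide-by-zero forces a build failure on a mismatch):

    #define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
            { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }

    /* AQ commands are 16 bytes each */
    #define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)

This is why, for example, adding reserved[2] to i40e_aqc_arp_proxy_data is
paired with a 0x14-byte (20-byte) check: the padding makes the struct size
match the firmware's expected layout.
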
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 04c7c1557a0c..708891571dae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
bool ret = false;
@@ -162,35 +184,21 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
- !(tx_pending < I40E_MIN_DESC_PENDING) ||
- !(tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
+#define WB_STRIDE 0x3
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
@@ -293,6 +301,14 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ if (budget &&
+ !((i & WB_STRIDE) == WB_STRIDE) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ else
+ tx_ring->arm_wb = false;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -344,6 +360,24 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
/**
+ * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+ /* allow 00 to be written to the index */
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
+ val);
+}
+
+/**
* i40e_set_new_dynamic_itr - Find new ITR level
* @rc: structure containing ring performance data
*
@@ -568,6 +602,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
goto err;
+ u64_stats_init(&rx_ring->syncp);
+
/* Round up to nearest 4K */
rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
@@ -1065,6 +1101,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
bool clean_complete = true;
+ bool arm_wb = false;
int budget_per_ring;
if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1075,8 +1112,10 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- i40e_for_each_ring(ring, q_vector->tx)
+ i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+ arm_wb |= ring->arm_wb;
+ }
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
@@ -1087,8 +1126,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ if (arm_wb)
+ i40e_force_wb(vsi, q_vector);
return budget;
+ }
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
@@ -1122,8 +1164,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
u32 tx_flags = 0;
/* if we have a HW VLAN tag being added, default to the HW one */
- if (vlan_tx_tag_present(skb)) {
- tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
@@ -1170,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1238,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1254,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
+
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -1344,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
+ /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
@@ -1618,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
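
[Note] Two remarks on the i40e_txrx.c changes. First, i40e_get_head() reads a
head value the hardware writes back just past the last descriptor; that only
works because the ring allocation reserves an extra u32 there. A sketch of
that setup, assuming the layout used by i40evf_setup_tx_descriptors():

    /* Descriptor area plus one u32 head write-back slot at desc[count] */
    tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
    tx_ring->size += sizeof(u32);
    tx_ring->size = ALIGN(tx_ring->size, 4096);
    tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
                                       &tx_ring->dma, GFP_KERNEL);

Second, i40e_chk_linearize() walks the frag list once per packet: for TSO it
slides a window of at most I40E_MAX_BUFFER_TXD - 1 frags per segment
(re-counting hdr_len at each window start) and, roughly speaking, flags the
skb for skb_linearize() if any window cannot cover a full gso_size segment.
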
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c7f29626eada..c950a038237c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
@@ -238,6 +239,7 @@ struct i40e_ring {
u8 atr_count;
bool ring_active; /* is ring online or not */
+ bool arm_wb; /* do something to arm write back */
/* stats structs */
struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 68aec11f6523..3d0fdaab5cc8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -211,6 +211,7 @@ struct i40e_hw_capabilities {
bool evb_802_1_qbh; /* Bridge Port Extension */
bool dcb;
bool fcoe;
+ bool iscsi; /* Indicates iSCSI enabled */
bool mfp_mode_1;
bool mgmt_cem;
bool ieee_1588;
@@ -425,7 +426,7 @@ struct i40e_hw {
u8 __iomem *hw_addr;
void *back;
- /* function pointer structs */
+ /* subsystem structs */
struct i40e_phy_info phy;
struct i40e_mac_info mac;
struct i40e_bus_info bus;
@@ -452,6 +453,11 @@ struct i40e_hw {
u8 pf_id;
u16 main_vsi_seid;
+ /* for multi-function MACs */
+ u16 partition_id;
+ u16 num_partitions;
+ u16 num_ports;
+
/* Closest numa node to the device */
u16 numa_node;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index cabaf599f562..8d8c201c63c1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver";
-#define DRV_VERSION "1.0.6"
+#define DRV_VERSION "1.2.0"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -313,10 +313,6 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_VFINT_DYN_CTL01, val);
- /* re-enable interrupt causes */
- wr32(hw, I40E_VFINT_ICR0_ENA1, ena_mask);
- wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK);
-
/* schedule work on the private workqueue */
schedule_work(&adapter->adminq_task);
@@ -947,30 +943,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
}
/**
- * i40evf_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_active_queues; i++)
- i40evf_clean_rx_ring(adapter->rx_rings[i]);
-}
-
-/**
- * i40evf_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
- **/
-static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_active_queues; i++)
- i40evf_clean_tx_ring(adapter->tx_rings[i]);
-}
-
-/**
* i40e_down - Shutdown the connection processing
* @adapter: board private structure
**/
@@ -982,6 +954,12 @@ void i40evf_down(struct i40evf_adapter *adapter)
if (adapter->state == __I40EVF_DOWN)
return;
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section))
+ usleep_range(500, 1000);
+
+ i40evf_irq_disable(adapter);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -992,25 +970,27 @@ void i40evf_down(struct i40evf_adapter *adapter)
}
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ /* cancel any current operation */
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+ adapter->aq_pending = 0;
+ /* Schedule operations to close down the HW. Don't wait
+ * here for this to complete. The watchdog is still running
+ * and it will take care of this.
+ */
+ adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
- /* disable receives */
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- msleep(20);
}
netif_tx_disable(netdev);
netif_tx_stop_all_queues(netdev);
- i40evf_irq_disable(adapter);
-
i40evf_napi_disable_all(adapter);
- netif_carrier_off(netdev);
+ msleep(20);
- i40evf_clean_all_tx_rings(adapter);
- i40evf_clean_all_rx_rings(adapter);
+ netif_carrier_off(netdev);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
/**
@@ -1356,8 +1336,13 @@ static void i40evf_watchdog_task(struct work_struct *work)
/* Process admin queue tasks. After init, everything gets done
* here so we don't race on the admin queue.
*/
- if (adapter->aq_pending)
+ if (adapter->aq_pending) {
+ if (!i40evf_asq_done(hw)) {
+ dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
+ i40evf_send_api_ver(adapter);
+ }
goto watchdog_done;
+ }
if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
i40evf_map_queues(adapter);
@@ -1401,11 +1386,14 @@ static void i40evf_watchdog_task(struct work_struct *work)
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
-
- i40evf_irq_enable(adapter, true);
- i40evf_fire_sw_int(adapter, 0xFF);
-
watchdog_done:
+ if (adapter->state == __I40EVF_RUNNING) {
+ i40evf_irq_enable_queues(adapter, ~0);
+ i40evf_fire_sw_int(adapter, 0xFF);
+ } else {
+ i40evf_fire_sw_int(adapter, 0x1);
+ }
+
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
if (adapter->state == __I40EVF_REMOVE)
@@ -1633,17 +1621,17 @@ static void i40evf_adminq_task(struct work_struct *work)
u16 pending;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
- return;
+ goto out;
event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
- return;
+ goto out;
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
- if (ret)
+ if (ret || !v_msg->v_opcode)
break; /* No event to process or error cleaning ARQ */
i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
@@ -1688,10 +1676,10 @@ static void i40evf_adminq_task(struct work_struct *work)
if (oldval != val)
wr32(hw, hw->aq.asq.len, val);
+ kfree(event.msg_buf);
+out:
/* re-enable Admin queue interrupt cause */
i40evf_misc_irq_enable(adapter);
-
- kfree(event.msg_buf);
}
/**
@@ -2053,12 +2041,8 @@ static void i40evf_init_task(struct work_struct *work)
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
- err);
- if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- dev_info(&pdev->dev, "Resending request\n");
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
err = i40evf_send_api_ver(adapter);
- }
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
@@ -2081,7 +2065,6 @@ static void i40evf_init_task(struct work_struct *work)
}
err = i40evf_get_vf_config(adapter);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
- dev_info(&pdev->dev, "Resending VF config request\n");
err = i40evf_send_vf_config_msg(adapter);
goto err;
}
@@ -2230,12 +2213,18 @@ err:
static void i40evf_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
if (netif_running(netdev))
i40evf_close(netdev);
+ /* Prevent the watchdog from running. */
+ adapter->state = __I40EVF_REMOVE;
+ adapter->aq_required = 0;
+ adapter->aq_pending = 0;
+
#ifdef CONFIG_PM
pci_save_state(pdev);
@@ -2448,7 +2437,18 @@ static void i40evf_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
adapter->netdev_registered = false;
}
+
+ /* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
+ adapter->aq_required = 0;
+ adapter->aq_pending = 0;
+ i40evf_request_reset(adapter);
+ msleep(20);
+ /* If the FW isn't responding, kick it once, but only once. */
+ if (!i40evf_asq_done(hw)) {
+ i40evf_request_reset(adapter);
+ msleep(20);
+ }
if (adapter->msix_entries) {
i40evf_misc_irq_disable(adapter);
@@ -2477,6 +2477,10 @@ static void i40evf_remove(struct pci_dev *pdev)
list_del(&f->list);
kfree(f);
}
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ list_del(&f->list);
+ kfree(f);
+ }
free_netdev(netdev);
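
[Note] The i40evf_down() rework above stops issuing admin-queue requests
inline; it only sets I40EVF_FLAG_AQ_* bits and lets the watchdog drain them,
one request per tick, serialized by aq_pending. A sketch of that dispatch
pattern in i40evf_watchdog_task(), using handlers that live in
i40evf_virtchnl.c (abbreviated; the real task checks several more flags):

    if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
            i40evf_disable_queues(adapter);
            goto watchdog_done;
    }

    if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
            i40evf_del_ether_addrs(adapter);
            goto watchdog_done;
    }

Each handler sends one virtchnl message and moves its flag from aq_required
to aq_pending; the completion path in i40evf_virtchnl.c clears the pending
bit, letting the next watchdog tick issue the next request.
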
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 5fde5a7f4591..3f0c85ecbca6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -715,14 +715,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
return;
}
- if (v_opcode != adapter->current_op)
- dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n",
- adapter->current_op, v_opcode);
if (v_retval) {
dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
__func__, v_retval, v_opcode);
}
switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ /* no action, but also not an error */
+ break;
case I40E_VIRTCHNL_OP_GET_STATS: {
struct i40e_eth_stats *stats =
(struct i40e_eth_stats *)msg;