Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile            |   2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h              |  60
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c       |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h       |   1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c       | 361
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c          |   8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c       |  27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c      |  97
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c      | 519
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c         |  35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.h         |   1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c      |   4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c         | 979
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c          | 133
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h    |  17
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c          |  26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h     |  50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c         | 608
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h         |  18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h         |  21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h     |  42
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  | 647
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h  |  17
23 files changed, 2564 insertions, 1113 deletions
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index c40581999121..b4729ba57c9c 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Driver
-# Copyright(c) 2013 - 2014 Intel Corporation.
+# Copyright(c) 2013 - 2015 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2b65cdcad6ba..33c35d3b7420 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
+#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
@@ -49,6 +50,7 @@
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
@@ -70,6 +72,7 @@
#define I40E_MAX_NUM_DESCRIPTORS 4096
#define I40E_MAX_REGISTER 0x800000
+#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
#define I40E_DEFAULT_NUM_DESCRIPTORS 512
#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
#define I40E_MIN_NUM_DESCRIPTORS 64
@@ -94,6 +97,9 @@
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9)
+/* Ethtool Private Flags */
+#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0)
+
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
#define I40E_NVM_VERSION_HI_SHIFT 12
@@ -140,6 +146,7 @@ enum i40e_state_t {
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_REQUESTED,
+ __I40E_EMP_RESET_INTR_RECEIVED,
__I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
__I40E_PTP_TX_IN_PROGRESS,
@@ -168,6 +175,9 @@ struct i40e_lump_tracking {
#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
#define I40E_FDIR_BUFFER_FULL_MARGIN 10
#define I40E_FDIR_BUFFER_HEAD_ROOM 32
+#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4)
+
+#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
@@ -232,17 +242,17 @@ struct i40e_pf {
bool fc_autoneg_status;
u16 eeprom_version;
- u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
+ u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
- u16 num_req_vfs; /* num vfs requested for this vf */
- u16 num_vf_qps; /* num queue pairs per vf */
+ u16 num_req_vfs; /* num VFs requested for this VF */
+ u16 num_vf_qps; /* num queue pairs per VF */
#ifdef I40E_FCOE
- u16 num_fcoe_qps; /* num fcoe queues this pf has set up */
+ u16 num_fcoe_qps; /* num fcoe queues this PF has set up */
u16 num_fcoe_msix; /* num queue vectors per fcoe pool */
#endif /* I40E_FCOE */
- u16 num_lan_qps; /* num lan queues this pf has set up */
- u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ u16 num_lan_qps; /* num lan queues this PF has set up */
+ u16 num_lan_msix; /* num queue vectors for the base PF vsi */
int queues_left; /* queues left unclaimed */
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
@@ -269,7 +279,7 @@ struct i40e_pf {
enum i40e_interrupt_policy int_policy;
u16 rx_itr_default;
u16 tx_itr_default;
- u16 msg_enable;
+ u32 msg_enable;
char int_name[I40E_INT_NAME_STR_LEN];
u16 adminq_work_limit; /* num of admin receive queue desc to process */
unsigned long service_timer_period;
@@ -383,6 +393,9 @@ struct i40e_pf {
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size;
+ /* These are only valid in NPAR modes */
+ u32 npar_max_bw;
+ u32 npar_min_bw;
};
struct i40e_mac_filter {
@@ -405,6 +418,7 @@ struct i40e_veb {
u16 uplink_seid;
u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
+ u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
u16 flags;
u16 bw_limit;
u8 bw_max_quanta;
@@ -461,6 +475,9 @@ struct i40e_vsi {
u16 rx_itr_setting;
u16 tx_itr_setting;
+ u16 rss_table_size;
+ u16 rss_size;
+
u16 max_frame;
u16 rx_hdr_len;
u16 rx_buf_len;
@@ -478,6 +495,7 @@ struct i40e_vsi {
u16 base_queue; /* vsi's first queue in hw array */
u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 req_queue_pairs; /* User requested queue pairs */
u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
@@ -504,6 +522,9 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
+
+ /* current rxnfc data */
+ struct ethtool_rxnfc rxnfc; /* current rss hash opts */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -544,14 +565,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
static char buf[32];
snprintf(buf, sizeof(buf),
- "f%d.%d a%d.%d n%02x.%02x e%08x",
- hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ "f%d.%d.%05d a%d.%d n%x.%02x e%x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
(hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
I40E_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
I40E_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack);
+ (hw->nvm.eetrack & 0xffffff));
return buf;
}
@@ -593,7 +614,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
/**
* i40e_get_fd_cnt_all - get the total FD filter space available
- * @pf: pointer to the pf struct
+ * @pf: pointer to the PF struct
**/
static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
{
@@ -607,6 +628,7 @@ extern const char i40e_driver_name[];
extern const char i40e_driver_version_str[];
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
@@ -618,9 +640,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
-int i40e_get_current_fd_count(struct i40e_pf *pf);
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
-int i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_current_fd_count(struct i40e_pf *pf);
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
+u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
@@ -680,6 +703,7 @@ int i40e_vlan_rx_add_vid(struct net_device *netdev,
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
+int i40e_open(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -690,7 +714,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
-int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
int i40e_setup_tc(struct net_device *netdev, u8 tc);
void i40e_netpoll(struct net_device *netdev);
@@ -712,6 +735,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg);
void i40e_dcbnl_set_all(struct i40e_vsi *vsi);
void i40e_dcbnl_setup(struct i40e_vsi *vsi);
@@ -727,4 +751,8 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
+i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
+i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
#endif /* _I40E_H_ */
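
[Editor's aside, not part of the patch: the i40e.h hunk above changes i40e_fw_version_str() to include the new fw_build field and to print only the low 24 bits of eetrack. A minimal standalone C sketch of the resulting format string follows; all numeric values are made-up samples, and the masks stand in for the I40E_NVM_VERSION_* macros.]

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* made-up sample values standing in for the hw->aq / hw->nvm fields */
	int fw_maj = 4, fw_min = 2, fw_build = 12345;
	int api_maj = 1, api_min = 2;
	unsigned int nvm_version = 0x4023;	/* high nibble 4, low byte 0x23 */
	unsigned int eetrack = 0x800012ab;

	snprintf(buf, sizeof(buf), "f%d.%d.%05d a%d.%d n%x.%02x e%x",
		 fw_maj, fw_min, fw_build, api_maj, api_min,
		 (nvm_version & 0xf000) >> 12,	/* NVM version high field */
		 nvm_version & 0xff,		/* NVM version low field */
		 eetrack & 0xffffff);		/* low 24 bits only */
	printf("%s\n", buf);	/* prints "f4.2.12345 a1.2 n4.23 e12ab" */
	return 0;
}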
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 77f6254a89ac..3e0d20037675 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
ret_code = i40e_aq_get_firmware_version(hw,
&hw->aq.fw_maj_ver,
&hw->aq.fw_min_ver,
+ &hw->aq.fw_build,
&hw->aq.api_maj_ver,
&hw->aq.api_min_ver,
NULL);
@@ -605,7 +606,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_free_arq;
/* get the NVM version info */
- i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+ &hw->nvm.version);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index de17b6fbcc4e..28e519a50de4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -93,6 +93,7 @@ struct i40e_adminq_info {
u16 asq_buf_size; /* send queue buffer size */
u16 fw_maj_ver; /* firmware major version */
u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_release_on_done;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffebf8d8..0bae22da014d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -51,6 +51,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_20G_KR2:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_VF:
@@ -85,46 +86,53 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = le16_to_cpu(aq_desc->datalen);
- u8 *aq_buffer = (u8 *)buffer;
- u32 data[4];
- u32 i = 0;
+ u8 *buf = (u8 *)buffer;
+ u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
- aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
- aq_desc->retval);
+ le16_to_cpu(aq_desc->opcode),
+ le16_to_cpu(aq_desc->flags),
+ le16_to_cpu(aq_desc->datalen),
+ le16_to_cpu(aq_desc->retval));
i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
- aq_desc->cookie_high, aq_desc->cookie_low);
+ le32_to_cpu(aq_desc->cookie_high),
+ le32_to_cpu(aq_desc->cookie_low));
i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
- aq_desc->params.internal.param0,
- aq_desc->params.internal.param1);
+ le32_to_cpu(aq_desc->params.internal.param0),
+ le32_to_cpu(aq_desc->params.internal.param1));
i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
- aq_desc->params.external.addr_high,
- aq_desc->params.external.addr_low);
+ le32_to_cpu(aq_desc->params.external.addr_high),
+ le32_to_cpu(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
- memset(data, 0, sizeof(data));
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
- for (i = 0; i < len; i++) {
- data[((i % 16) / 4)] |=
- ((u32)aq_buffer[i]) << (8 * (i % 4));
- if ((i % 16) == 15) {
- i40e_debug(hw, mask,
- "\t0x%04X %08X %08X %08X %08X\n",
- i - 15, data[0], data[1], data[2],
- data[3]);
- memset(data, 0, sizeof(data));
- }
+ /* write the full 16-byte chunks */
+ for (i = 0; i < (len - 16); i += 16)
+ i40e_debug(hw, mask,
+ "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ i, buf[i], buf[i + 1], buf[i + 2],
+ buf[i + 3], buf[i + 4], buf[i + 5],
+ buf[i + 6], buf[i + 7], buf[i + 8],
+ buf[i + 9], buf[i + 10], buf[i + 11],
+ buf[i + 12], buf[i + 13], buf[i + 14],
+ buf[i + 15]);
+ /* write whatever's left over without overrunning the buffer */
+ if (i < len) {
+ char d_buf[80];
+ int j = 0;
+
+ memset(d_buf, 0, sizeof(d_buf));
+ j += sprintf(d_buf, "\t0x%04X ", i);
+ while (i < len)
+ j += sprintf(&d_buf[j], " %02X", buf[i++]);
+ i40e_debug(hw, mask, "%s\n", d_buf);
}
- if ((i % 16) != 0)
- i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
- i - (i % 16), data[0], data[1], data[2],
- data[3]);
}
}
@@ -534,7 +542,6 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
I40E_PTT_UNUSED_ENTRY(255)
};
-
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@@ -685,7 +692,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
/**
* i40e_pre_tx_queue_cfg - pre tx queue configure
* @hw: pointer to the HW structure
- * @queue: target pf queue index
+ * @queue: target PF queue index
* @enable: state change request
*
* Handles hw requirement to indicate intention to enable
@@ -827,12 +834,15 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_CR1:
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ case I40E_PHY_TYPE_10GBASE_AOC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
+ case I40E_PHY_TYPE_20GBASE_KR2:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -849,7 +859,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
}
#define I40E_PF_RESET_WAIT_COUNT_A0 200
-#define I40E_PF_RESET_WAIT_COUNT 110
+#define I40E_PF_RESET_WAIT_COUNT 200
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -868,8 +878,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
- >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -946,7 +957,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
u32 val;
u32 eol = 0x7ff;
- /* get number of interrupts, queues, and vfs */
+ /* get number of interrupts, queues, and VFs */
val = rd32(hw, I40E_GLPCI_CNF2);
num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
@@ -1075,8 +1086,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
return gpio_val;
}
-#define I40E_LED0 22
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
/**
* i40e_led_get - return current on/off mode
@@ -1089,6 +1103,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
+ u32 current_mode = 0;
u32 mode = 0;
int i;
@@ -1101,6 +1116,20 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
@@ -1120,6 +1149,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
**/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
+ u32 current_mode = 0;
int i;
if (mode & 0xfffffff0)
@@ -1134,6 +1164,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
+ /* ignore gpio LED src mode entries related to the activity
+ * LEDs
+ */
+ current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+ switch (current_mode) {
+ case I40E_COMBINED_ACTIVITY:
+ case I40E_FILTER_ACTIVITY:
+ case I40E_MAC_ACTIVITY:
+ continue;
+ default:
+ break;
+ }
+
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -1297,14 +1341,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
}
/* Update the link info */
- status = i40e_update_link_info(hw, true);
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status) {
/* Wait a little bit (on 40G cards it sometimes takes a really
* long time for link to come back from the atomic reset)
* and try once more
*/
msleep(1000);
- status = i40e_update_link_info(hw, true);
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
}
if (status)
*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -1440,6 +1484,10 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->lse_enable = false;
+ if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+ hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
/* save link status information */
if (link)
*link = *hw_link_info;
@@ -1452,35 +1500,6 @@ aq_get_link_info_exit:
}
/**
- * i40e_update_link_info
- * @hw: pointer to the hw struct
- * @enable_lse: enable/disable LinkStatusEvent reporting
- *
- * Returns the link status of the adapter
- **/
-i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse)
-{
- struct i40e_aq_get_phy_abilities_resp abilities;
- i40e_status status;
-
- status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
- if (status)
- return status;
-
- status = i40e_aq_get_phy_capabilities(hw, false, false,
- &abilities, NULL);
- if (status)
- return status;
-
- if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
- hw->phy.link_info.an_enabled = true;
- else
- hw->phy.link_info.an_enabled = false;
-
- return status;
-}
-
-/**
* i40e_aq_set_phy_int_mask
* @hw: pointer to the hw struct
* @mask: interrupt mask to be set
@@ -1759,6 +1778,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
* @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
* @api_major_version: major queue version
* @api_minor_version: minor queue version
* @cmd_details: pointer to command details structure or NULL
@@ -1767,6 +1787,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
**/
i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -1780,13 +1801,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status) {
- if (fw_major_version != NULL)
+ if (fw_major_version)
*fw_major_version = le16_to_cpu(resp->fw_major);
- if (fw_minor_version != NULL)
+ if (fw_minor_version)
*fw_minor_version = le16_to_cpu(resp->fw_minor);
- if (api_major_version != NULL)
+ if (fw_build)
+ *fw_build = le32_to_cpu(resp->fw_build);
+ if (api_major_version)
*api_major_version = le16_to_cpu(resp->api_major);
- if (api_minor_version != NULL)
+ if (api_minor_version)
*api_minor_version = le16_to_cpu(resp->api_minor);
}
@@ -1816,7 +1839,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
- desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
cmd->driver_major_ver = dv->major_version;
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
@@ -1996,7 +2019,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
- buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+ buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
@@ -2038,7 +2061,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
- buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
@@ -2060,7 +2083,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
- * @vfid: vf id to send msg
+ * @vfid: VF id to send msg
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
* @msg: pointer to the msg buffer
@@ -2105,7 +2128,7 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
* Read the register using the admin queue commands
**/
i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
- u32 reg_addr, u64 *reg_val,
+ u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2116,17 +2139,15 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
if (reg_val == NULL)
return I40E_ERR_PARAM;
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_debug_read_reg);
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status) {
- *reg_val = ((u64)cmd_resp->value_high << 32) |
- (u64)cmd_resp->value_low;
- *reg_val = le64_to_cpu(*reg_val);
+ *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) |
+ (u64)le32_to_cpu(cmd_resp->value_low);
}
return status;
@@ -2376,6 +2397,7 @@ i40e_aq_erase_nvm_exit:
#define I40E_DEV_FUNC_CAP_LED 0x61
#define I40E_DEV_FUNC_CAP_SDP 0x62
#define I40E_DEV_FUNC_CAP_MDIO 0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64
/**
* i40e_parse_discover_capabilities
@@ -2520,11 +2542,18 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
+ case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+ p->wr_csr_prot = (u64)number;
+ p->wr_csr_prot |= (u64)logical_id << 32;
+ break;
default:
break;
}
}
+ if (p->fcoe)
+ i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
/* Software override ensuring FCoE is disabled if npar or mfp
* mode because it is not supported in these modes.
*/
@@ -2846,7 +2875,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
+ if (!status && filter_index)
*filter_index = resp->index;
return status;
@@ -3376,6 +3405,47 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
+ *
+ **/
+static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_alternate_write *cmd_resp =
+ (struct i40e_aqc_alternate_write *)&desc.params.raw;
+ i40e_status status;
+
+ if (!reg_val0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+ cmd_resp->address0 = cpu_to_le32(reg_addr0);
+ cmd_resp->address1 = cpu_to_le32(reg_addr1);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ if (!status) {
+ *reg_val0 = le32_to_cpu(cmd_resp->data0);
+
+ if (reg_val1)
+ *reg_val1 = le32_to_cpu(cmd_resp->data1);
+ }
+
+ return status;
+}
+
+/**
* i40e_aq_resume_port_tx
* @hw: pointer to the hardware structure
* @cmd_details: pointer to command details structure or NULL
@@ -3439,3 +3509,136 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
break;
}
}
+
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_debug_dump_internals *cmd =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ struct i40e_aqc_debug_dump_internals *resp =
+ (struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ i40e_status status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_debug_dump_internals);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ cmd->cluster_id = cluster_id;
+ cmd->table_id = table_id;
+ cmd->idx = cpu_to_le32(start_index);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (ret_buff_size)
+ *ret_buff_size = le16_to_cpu(desc.datalen);
+ if (ret_next_table)
+ *ret_next_table = resp->table_id;
+ if (ret_next_index)
+ *ret_next_index = le32_to_cpu(resp->idx);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is true if min_bw is a valid value
+ * @max_valid: pointer for bool that is true if max_bw is a valid value
+ *
+ * Read bw from the alternate ram for the given pf
+ **/
+i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
+{
+ i40e_status status;
+ u32 max_bw_addr, min_bw_addr;
+
+ /* Calculate the address of the min/max bw registers */
+ max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MAX_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+ min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+ I40E_ALT_STRUCT_MIN_BW_OFFSET +
+ (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+ /* Read the bandwidths from alt ram */
+ status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+ min_bw_addr, min_bw);
+
+ if (*min_bw & I40E_ALT_BW_VALID_MASK)
+ *min_valid = true;
+ else
+ *min_valid = false;
+
+ if (*max_bw & I40E_ALT_BW_VALID_MASK)
+ *max_valid = true;
+ else
+ *max_valid = false;
+
+ return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ u16 bwd_size = sizeof(*bw_data);
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_partition_bw);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+ if (bwd_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(bwd_size);
+
+ status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
+ cmd_details);
+
+ return status;
+}
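
[Editor's aside, not part of the patch: the i40e_common.c hunk above replaces the word-packed AQ buffer dump in i40e_debug_aq() with a byte-wise hex dump: full 16-byte rows first, then a shorter final row for the remainder. A simplified user-space sketch of that row format follows; it uses printf() in place of i40e_debug() and is only an illustration of the layout, not the kernel code.]

#include <stdio.h>

/* Print 'len' bytes as rows of up to 16 hex bytes, each row prefixed
 * with the offset of its first byte, mirroring the new dump layout.
 */
static void dump_aq_buffer(const unsigned char *buf, unsigned int len)
{
	unsigned int i, j;

	for (i = 0; i < len; i += 16) {
		printf("\t0x%04X ", i);
		for (j = i; j < len && j < i + 16; j++)
			printf(" %02X", buf[j]);
		printf("\n");
	}
}

int main(void)
{
	unsigned char sample[23];
	unsigned int i;

	for (i = 0; i < sizeof(sample); i++)
		sample[i] = (unsigned char)i;

	/* two rows: 16 bytes, then the remaining 7 */
	dump_aq_buffer(sample, sizeof(sample));
	return 0;
}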
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 3ce43588592d..2547aa21b2ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -419,7 +419,7 @@ static void i40e_cee_to_dcb_v1_config(
{
u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status);
u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
- u8 i, tc, err, sync, oper;
+ u8 i, tc, err;
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
@@ -456,10 +456,8 @@ static void i40e_cee_to_dcb_v1_config(
status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
I40E_AQC_CEE_APP_STATUS_SHIFT;
err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
- sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
- oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
- /* Add APPs if Error is False and Oper/Sync is True */
- if (!err && sync && oper) {
+ /* Add APPs if Error is False */
+ if (!err) {
/* CEE operating configuration supports FCoE/iSCSI/FIP only */
dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb63ce98..bd5079d5c1b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
I40E_PRTDCB_GENC_PFCLDA_SHIFT);
}
@@ -178,6 +178,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return;
+ /* MFP mode but not an iSCSI PF so return */
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ return;
+
dcbxcfg = &hw->local_dcbx_config;
/* Set up all the App TLVs if DCBx is negotiated */
@@ -223,7 +227,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
/**
* i40e_dcbnl_del_app - Delete APP on all VSIs
- * @pf: the corresponding pf
+ * @pf: the corresponding PF
* @app: APP to delete
*
* Delete given APP from all the VSIs for given PF
@@ -268,23 +272,26 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
/**
* i40e_dcbnl_flush_apps - Delete all removed APPs
- * @pf: the corresponding pf
+ * @pf: the corresponding PF
+ * @old_cfg: old DCBX configuration data
* @new_cfg: new DCBX configuration data
*
* Find and delete all APPs that are not present in the passed
* DCB configuration
**/
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg)
{
struct i40e_dcb_app_priority_table app;
- struct i40e_dcbx_config *dcbxcfg;
- struct i40e_hw *hw = &pf->hw;
int i;
- dcbxcfg = &hw->local_dcbx_config;
- for (i = 0; i < dcbxcfg->numapps; i++) {
- app = dcbxcfg->app[i];
+ /* MFP mode but not an iSCSI PF so return */
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ return;
+
+ for (i = 0; i < old_cfg->numapps; i++) {
+ app = old_cfg->app[i];
/* The APP is not available anymore delete it */
if (!i40e_dcbnl_find_app(new_cfg, &app))
i40e_dcbnl_del_app(pf, &app);
@@ -306,9 +313,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return;
- /* Do not setup DCB NL ops for MFP mode */
- if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- dev->dcbnl_ops = &dcbnl_ops;
+ dev->dcbnl_ops = &dcbnl_ops;
/* Set initial IEEE DCB settings */
i40e_dcbnl_set_all(vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f983971..34170eabca7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -35,7 +35,7 @@ static struct dentry *i40e_dbg_root;
/**
* i40e_dbg_find_vsi - searches for the vsi with the given seid
- * @pf - the pf structure to search for the vsi
+ * @pf - the PF structure to search for the vsi
* @seid - seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
@@ -54,7 +54,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
/**
* i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf - the pf structure to search for the veb
+ * @pf - the PF structure to search for the veb
* @seid - seid of the veb it is searching for
**/
static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
@@ -112,7 +112,7 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
/**
* i40e_dbg_prep_dump_buf
- * @pf: the pf we're working with
+ * @pf: the PF we're working with
* @buflen: the desired buffer length
*
* Return positive if success, 0 if failed
@@ -318,7 +318,7 @@ static const struct file_operations i40e_dbg_dump_fops = {
* setup, adding or removing filters, or other things. Many of
* these will be useful for some forms of unit testing.
**************************************************************/
-static char i40e_dbg_command_buf[256] = "hello world";
+static char i40e_dbg_command_buf[256] = "";
/**
* i40e_dbg_command_read - read for command datum
@@ -390,6 +390,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
vsi->netdev_registered,
vsi->current_netdev_flags, vsi->state, vsi->flags);
+ if (vsi == pf->vsi[pf->lan_vsi])
+ dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+ pf->hw.mac.addr,
+ pf->hw.mac.san_addr,
+ pf->hw.mac.port_addr);
list_for_each_entry(f, &vsi->mac_filter_list, list) {
dev_info(&pf->pdev->dev,
" mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
@@ -675,7 +680,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
if (vsi->back)
- dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back);
+ dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back);
dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
dev_info(&pf->pdev->dev,
" tc_config: numtc = %d, enabled_tc = 0x%x\n",
@@ -921,9 +926,10 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
return;
}
dev_info(&pf->pdev->dev,
- "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n",
+ "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
- veb->uplink_seid);
+ veb->uplink_seid,
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
i40e_dbg_dump_eth_stats(pf, &veb->stats);
}
@@ -945,7 +951,7 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
/**
* i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
- * @pf: the pf that would be altered
+ * @pf: the PF that would be altered
* @flag: flag that needs enabling or disabling
* @enable: Enable/disable FD SD/ATR
**/
@@ -957,7 +963,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
pf->flags &= ~flag;
pf->auto_disable_flags |= flag;
}
- dev_info(&pf->pdev->dev, "requesting a pf reset\n");
+ dev_info(&pf->pdev->dev, "requesting a PF reset\n");
i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
}
@@ -989,8 +995,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!cmd_buf)
return count;
bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
- if (bytes_not_copied < 0)
+ if (bytes_not_copied < 0) {
+ kfree(cmd_buf);
return bytes_not_copied;
+ }
if (bytes_not_copied > 0)
count -= bytes_not_copied;
cmd_buf[count] = '\0';
@@ -1380,6 +1388,50 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
r_cfg->app[i].selector,
r_cfg->app[i].protocolid);
}
+ } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
+ int cluster_id, table_id;
+ int index, ret;
+ u16 buff_len = 4096;
+ u32 next_index;
+ u8 next_table;
+ u8 *buff;
+ u16 rlen;
+
+ cnt = sscanf(&cmd_buf[18], "%i %i %i",
+ &cluster_id, &table_id, &index);
+ if (cnt != 3) {
+ dev_info(&pf->pdev->dev,
+ "dump debug fwdata <cluster_id> <table_id> <index>\n");
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev,
+ "AQ debug dump fwdata params %x %x %x %x\n",
+ cluster_id, table_id, index, buff_len);
+ buff = kzalloc(buff_len, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
+ index, buff_len, buff, &rlen,
+ &next_table, &next_index,
+ NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "debug dump fwdata AQ Failed %d 0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
+ rlen, next_table, next_index);
+ print_hex_dump(KERN_INFO, "AQ buffer WB: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buff, rlen, true);
+ kfree(buff);
+ buff = NULL;
} else {
dev_info(&pf->pdev->dev,
"dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
@@ -1485,11 +1537,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else {
dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
}
- } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
- i40e_pf_reset_stats(pf);
- dev_info(&pf->pdev->dev, "pf clear stats called\n");
+ } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
+ if (pf->hw.partition_id == 1) {
+ i40e_pf_reset_stats(pf);
+ dev_info(&pf->pdev->dev, "port stats cleared\n");
+ } else {
+ dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
+ }
} else {
- dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
struct i40e_aq_desc *desc;
@@ -1891,11 +1947,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
dev_info(&pf->pdev->dev, " dump desc aq\n");
dev_info(&pf->pdev->dev, " dump reset stats\n");
+ dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
dev_info(&pf->pdev->dev, " msg_enable [level]\n");
dev_info(&pf->pdev->dev, " read <reg>\n");
dev_info(&pf->pdev->dev, " write <reg> <value>\n");
dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
- dev_info(&pf->pdev->dev, " clear_stats pf\n");
+ dev_info(&pf->pdev->dev, " clear_stats port\n");
dev_info(&pf->pdev->dev, " pfr\n");
dev_info(&pf->pdev->dev, " corer\n");
dev_info(&pf->pdev->dev, " globr\n");
@@ -1933,7 +1990,7 @@ static const struct file_operations i40e_dbg_command_fops = {
* The netdev_ops entry in debugfs is for giving the driver commands
* to be executed from the netdev operations.
**************************************************************/
-static char i40e_dbg_netdev_ops_buf[256] = "hello world";
+static char i40e_dbg_netdev_ops_buf[256] = "";
/**
* i40e_dbg_netdev_ops - read for netdev_ops datum
@@ -2121,8 +2178,8 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
};
/**
- * i40e_dbg_pf_init - setup the debugfs directory for the pf
- * @pf: the pf that is starting up
+ * i40e_dbg_pf_init - setup the debugfs directory for the PF
+ * @pf: the PF that is starting up
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
@@ -2158,8 +2215,8 @@ create_failed:
}
/**
- * i40e_dbg_pf_exit - clear out the pf's debugfs entries
- * @pf: the pf that is stopping
+ * i40e_dbg_pf_exit - clear out the PF's debugfs entries
+ * @pf: the PF that is stopping
**/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b8230dc205ec..4cbaaeb902c4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
- I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
I40E_PF_STAT("crc_errors", stats.crc_errors),
I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
@@ -218,6 +217,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "NPAR",
+};
+
+#define I40E_PRIV_FLAGS_STR_LEN \
+ (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
+
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
* @pf: the PF struct
@@ -229,73 +235,20 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
}
/**
- * i40e_get_settings - Get Link Speed and Duplex settings
+ * i40e_get_settings_link_up - Get the Link settings for when link is up
+ * @hw: hw structure
+ * @ecmd: ethtool command to fill in
* @netdev: network interface device structure
- * @ecmd: ethtool command
*
- * Reports speed/duplex settings based on media_type
**/
-static int i40e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static void i40e_get_settings_link_up(struct i40e_hw *hw,
+ struct ethtool_cmd *ecmd,
+ struct net_device *netdev)
{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_pf *pf = np->vsi->back;
- struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
- bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
u32 link_speed = hw_link_info->link_speed;
- /* hardware is either in 40G mode or 10G mode
- * NOTE: this section initializes supported and advertising
- */
- if (!link_up) {
- /* link is down and the driver needs to fall back on
- * device ID to determine what kinds of info to display,
- * it's mostly a guess that may change when link is up
- */
- switch (hw->device_id) {
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- case I40E_DEV_ID_QSFP_C:
- /* pluggable QSFP */
- ecmd->supported = SUPPORTED_40000baseSR4_Full |
- SUPPORTED_40000baseCR4_Full |
- SUPPORTED_40000baseLR4_Full;
- ecmd->advertising = ADVERTISED_40000baseSR4_Full |
- ADVERTISED_40000baseCR4_Full |
- ADVERTISED_40000baseLR4_Full;
- break;
- case I40E_DEV_ID_KX_B:
- /* backplane 40G */
- ecmd->supported = SUPPORTED_40000baseKR4_Full;
- ecmd->advertising = ADVERTISED_40000baseKR4_Full;
- break;
- case I40E_DEV_ID_KX_C:
- /* backplane 10G */
- ecmd->supported = SUPPORTED_10000baseKR_Full;
- ecmd->advertising = ADVERTISED_10000baseKR_Full;
- break;
- case I40E_DEV_ID_10G_BASE_T:
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
- break;
- default:
- /* all the rest are 10G/1G */
- ecmd->supported = SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full;
- break;
- }
-
- /* skip phy_type use as it is zero when link is down */
- goto no_valid_phy_type;
- }
-
+ /* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
case I40E_PHY_TYPE_40GBASE_CR4:
case I40E_PHY_TYPE_40GBASE_CR4_CU:
@@ -304,6 +257,11 @@ static int i40e_get_settings(struct net_device *netdev,
ecmd->advertising = ADVERTISED_Autoneg |
ADVERTISED_40000baseCR4_Full;
break;
+ case I40E_PHY_TYPE_XLAUI:
+ case I40E_PHY_TYPE_XLPPI:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ ecmd->supported = SUPPORTED_40000baseCR4_Full;
+ break;
case I40E_PHY_TYPE_40GBASE_KR4:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_40000baseKR4_Full;
@@ -311,13 +269,17 @@ static int i40e_get_settings(struct net_device *netdev,
ADVERTISED_40000baseKR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_SR4:
- case I40E_PHY_TYPE_XLPPI:
- case I40E_PHY_TYPE_XLAUI:
ecmd->supported = SUPPORTED_40000baseSR4_Full;
break;
case I40E_PHY_TYPE_40GBASE_LR4:
ecmd->supported = SUPPORTED_40000baseLR4_Full;
break;
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_20000baseKR2_Full;
+ ecmd->advertising = ADVERTISED_Autoneg |
+ ADVERTISED_20000baseKR2_Full;
+ break;
case I40E_PHY_TYPE_10GBASE_KX4:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseKX4_Full;
@@ -334,55 +296,56 @@ static int i40e_get_settings(struct net_device *netdev,
case I40E_PHY_TYPE_10GBASE_LR:
case I40E_PHY_TYPE_1000BASE_SX:
case I40E_PHY_TYPE_1000BASE_LX:
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->supported |= SUPPORTED_1000baseT_Full;
+ ecmd->supported = SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_1000baseKX_Full;
+ ecmd->advertising = ADVERTISED_Autoneg |
+ ADVERTISED_1000baseKX_Full;
break;
- case I40E_PHY_TYPE_10GBASE_CR1_CU:
- case I40E_PHY_TYPE_10GBASE_CR1:
case I40E_PHY_TYPE_10GBASE_T:
+ case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full;
+ ecmd->advertising = ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ ecmd->supported = SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
+ ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_XAUI:
case I40E_PHY_TYPE_XFI:
case I40E_PHY_TYPE_SFI:
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ case I40E_PHY_TYPE_10GBASE_AOC:
ecmd->supported = SUPPORTED_10000baseT_Full;
break;
- case I40E_PHY_TYPE_1000BASE_KX:
- case I40E_PHY_TYPE_1000BASE_T:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
- break;
- case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
- break;
case I40E_PHY_TYPE_SGMII:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full;
- ecmd->advertising = ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_100baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
default:
/* if we got here and link is up something bad is afoot */
@@ -390,8 +353,125 @@ static int i40e_get_settings(struct net_device *netdev,
hw_link_info->phy_type);
}
-no_valid_phy_type:
- /* this is if autoneg is enabled or disabled */
+ /* Set speed and duplex */
+ switch (link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_40000);
+ break;
+ case I40E_LINK_SPEED_20GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_20000);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ case I40E_LINK_SPEED_1GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ break;
+ case I40E_LINK_SPEED_100MB:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
+}
+
+/**
+ * i40e_get_settings_link_down - Get the Link settings for when link is down
+ * @hw: hw structure
+ * @ecmd: ethtool command to fill in
+ *
+ * Reports link settings that can be determined when link is down
+ **/
+static void i40e_get_settings_link_down(struct i40e_hw *hw,
+ struct ethtool_cmd *ecmd)
+{
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+ /* link is down and the driver needs to fall back on
+ * device ID to determine what kinds of info to display,
+ * it's mostly a guess that may change when link is up
+ */
+ switch (hw->device_id) {
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ case I40E_DEV_ID_QSFP_C:
+ /* pluggable QSFP */
+ ecmd->supported = SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseLR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full |
+ ADVERTISED_40000baseCR4_Full |
+ ADVERTISED_40000baseLR4_Full;
+ break;
+ case I40E_DEV_ID_KX_B:
+ /* backplane 40G */
+ ecmd->supported = SUPPORTED_40000baseKR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+ break;
+ case I40E_DEV_ID_KX_C:
+ /* backplane 10G */
+ ecmd->supported = SUPPORTED_10000baseKR_Full;
+ ecmd->advertising = ADVERTISED_10000baseKR_Full;
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ ecmd->supported = SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
+ /* Figure out what has been requested */
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ break;
+ case I40E_DEV_ID_20G_KR2:
+ /* backplane 20G */
+ ecmd->supported = SUPPORTED_20000baseKR2_Full;
+ ecmd->advertising = ADVERTISED_20000baseKR2_Full;
+ break;
+ default:
+ /* all the rest are 10G/1G */
+ ecmd->supported = SUPPORTED_10000baseT_Full |
+ SUPPORTED_1000baseT_Full;
+ /* Figure out what has been requested */
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ }
+
+ /* With no link speed and duplex are unknown */
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+}
+
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+
+ if (link_up)
+ i40e_get_settings_link_up(hw, ecmd, netdev);
+ else
+ i40e_get_settings_link_down(hw, ecmd);
+
+ /* Now set the settings that don't rely on link being up/down */
+
+ /* Set autoneg settings */
ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -424,11 +504,13 @@ no_valid_phy_type:
break;
}
+ /* Set transceiver */
ecmd->transceiver = XCVR_EXTERNAL;
+ /* Set flow control settings */
ecmd->supported |= SUPPORTED_Pause;
- switch (hw->fc.current_mode) {
+ switch (hw->fc.requested_mode) {
case I40E_FC_FULL:
ecmd->advertising |= ADVERTISED_Pause;
break;
@@ -445,30 +527,6 @@ no_valid_phy_type:
break;
}
- if (link_up) {
- switch (link_speed) {
- case I40E_LINK_SPEED_40GB:
- /* need a SPEED_40000 in ethtool.h */
- ethtool_cmd_speed_set(ecmd, 40000);
- break;
- case I40E_LINK_SPEED_10GB:
- ethtool_cmd_speed_set(ecmd, SPEED_10000);
- break;
- case I40E_LINK_SPEED_1GB:
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
- break;
- case I40E_LINK_SPEED_100MB:
- ethtool_cmd_speed_set(ecmd, SPEED_100);
- break;
- default:
- break;
- }
- ecmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
- }
-
return 0;
}
@@ -588,6 +646,8 @@ static int i40e_set_settings(struct net_device *netdev,
advertise & ADVERTISED_10000baseKX4_Full ||
advertise & ADVERTISED_10000baseKR_Full)
config.link_speed |= I40E_LINK_SPEED_10GB;
+ if (advertise & ADVERTISED_20000baseKR2_Full)
+ config.link_speed |= I40E_LINK_SPEED_20GB;
if (advertise & ADVERTISED_40000baseKR4_Full ||
advertise & ADVERTISED_40000baseCR4_Full ||
advertise & ADVERTISED_40000baseSR4_Full ||
@@ -601,6 +661,8 @@ static int i40e_set_settings(struct net_device *netdev,
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ /* save the requested speeds */
+ hw->phy.link_info.requested_speeds = config.link_speed;
/* set link and auto negotiation so changes take effect */
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* If link is up put link down */
@@ -621,7 +683,7 @@ static int i40e_set_settings(struct net_device *netdev,
return -EAGAIN;
}
- status = i40e_update_link_info(hw, true);
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
netdev_info(netdev, "Updating link info failed with error %d\n",
status);
@@ -767,7 +829,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n",
+ netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
status, hw->aq.asq_last_status);
err = -EAGAIN;
}
@@ -870,7 +932,9 @@ static int i40e_get_eeprom(struct net_device *netdev,
cmd = (struct i40e_nvm_access *)eeprom;
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val)
+ if (ret_val &&
+ ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
+ (hw->debug_mask & I40E_DEBUG_NVM)))
dev_info(&pf->pdev->dev,
"NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -974,7 +1038,10 @@ static int i40e_set_eeprom(struct net_device *netdev,
cmd = (struct i40e_nvm_access *)eeprom;
ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY)
+ if (ret_val &&
+ ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
+ hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
+ (hw->debug_mask & I40E_DEBUG_NVM)))
dev_info(&pf->pdev->dev,
"NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -998,6 +1065,7 @@ static void i40e_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1176,7 +1244,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_TEST:
return I40E_TEST_LEN;
case ETH_SS_STATS:
- if (vsi == pf->vsi[pf->lan_vsi]) {
+ if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
int len = I40E_PF_STATS_LEN(netdev);
if (pf->lan_veb != I40E_NO_VEB)
@@ -1185,6 +1253,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
} else {
return I40E_VSI_STATS_LEN(netdev);
}
+ case ETH_SS_PRIV_FLAGS:
+ return I40E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1247,7 +1317,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
i += 2;
}
rcu_read_unlock();
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
if (pf->lan_veb != I40E_NO_VEB) {
@@ -1320,7 +1390,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
return;
if (pf->lan_veb != I40E_NO_VEB) {
@@ -1358,6 +1428,15 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ break;
}
}
@@ -1473,6 +1552,7 @@ static void i40e_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
struct i40e_pf *pf = np->vsi->back;
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
@@ -1480,6 +1560,12 @@ static void i40e_diag_test(struct net_device *netdev,
netif_info(pf, drv, netdev, "offline testing starting\n");
set_bit(__I40E_TESTING, &pf->state);
+ /* If the device is online then take it offline */
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
/* Link test performed before hardware reset
* so autoneg doesn't interfere with test result
@@ -1502,6 +1588,9 @@ static void i40e_diag_test(struct net_device *netdev,
clear_bit(__I40E_TESTING, &pf->state);
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+ if (if_running)
+ dev_open(netdev);
} else {
/* Online tests */
netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1599,6 +1688,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
case ETHTOOL_ID_INACTIVE:
i40e_led_set(hw, pf->led_status, false);
break;
+ default:
+ break;
}
return 0;
@@ -1703,6 +1794,11 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
{
cmd->data = 0;
+ if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) {
+ cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data;
+ cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type;
+ return 0;
+ }
/* Report default options for RSS on i40e */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
@@ -1817,6 +1913,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
else
fsp->ring_cookie = rule->q_index;
+ if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
+ struct i40e_vsi *vsi;
+
+ vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
+ if (vsi && vsi->type == I40E_VSI_SRIOV) {
+ fsp->h_ext.data[1] = htonl(vsi->vf_id);
+ fsp->m_ext.data[1] = htonl(0x1);
+ }
+ }
+
return 0;
}
@@ -1974,6 +2080,9 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
i40e_flush(hw);
+ /* Save setting for future output/update */
+ pf->vsi[pf->lan_vsi]->rxnfc = *nfc;
+
return 0;
}
@@ -2107,6 +2216,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input;
struct i40e_pf *pf;
int ret = -EINVAL;
+ u16 vf_id;
if (!vsi)
return -EINVAL;
@@ -2167,7 +2277,22 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ if (ntohl(fsp->m_ext.data[1])) {
+ if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
+ netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+ goto free_input;
+ }
+ vf_id = ntohl(fsp->h_ext.data[1]);
+ /* Find vsi id from vf id and override dest vsi */
+ input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
+ if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
+ netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+ goto free_input;
+ }
+ }
+
ret = i40e_add_del_fdir(vsi, input, true);
+free_input:
if (ret)
kfree(input);
else
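
The hunk above carries the target VF id in the ethtool user-defined extension words (h_ext/m_ext data[1]), which are big-endian, and validates both the VF id and the queue index before accepting the filter. A small userspace sketch of that byte-order handling and bounds checking, with made-up VF and queue counts:

/* Standalone sketch of the user-def VF-id handling: the VF id travels in a
 * 32-bit big-endian field, the mask word says whether it is present, and
 * both the VF id and the target queue are bounds-checked before the filter
 * is accepted.  Limits are illustrative.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ALLOC_VFS      4	/* assumed VF count */
#define VF_NUM_QUEUE_PAIRS 2	/* assumed queues per VF */

static int check_vf_dest(uint32_t h_ext_be, uint32_t m_ext_be, unsigned int q_index)
{
	unsigned int vf_id;

	if (!ntohl(m_ext_be))		/* mask clear: no VF redirection */
		return 0;

	if (ntohl(h_ext_be) >= NUM_ALLOC_VFS) {
		fprintf(stderr, "Invalid VF id\n");
		return -1;
	}
	vf_id = ntohl(h_ext_be);

	if (q_index >= VF_NUM_QUEUE_PAIRS) {
		fprintf(stderr, "Invalid queue id\n");
		return -1;
	}
	printf("filter steered to VF %u, queue %u\n", vf_id, q_index);
	return 0;
}

int main(void)
{
	uint32_t h = htonl(1), m = htonl(0x1);	/* VF 1 requested */

	check_vf_dest(h, m, 0);		/* accepted */
	check_vf_dest(h, m, 5);		/* rejected: queue out of range */
	return 0;
}
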
@@ -2281,10 +2406,6 @@ static int i40e_set_channels(struct net_device *dev,
/* update feature limits from largest to smallest supported values */
/* TODO: Flow director limit, DCB etc */
- /* cap RSS limit */
- if (count > pf->rss_size_max)
- count = pf->rss_size_max;
-
/* use rss_reconfig to rebuild with new queue count and update traffic
* class queue mapping
*/
@@ -2295,6 +2416,133 @@ static int i40e_set_channels(struct net_device *dev,
return -EINVAL;
}
+#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+/**
+ * i40e_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the hash key size.
+ **/
+static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
+{
+ return I40E_HKEY_ARRAY_SIZE;
+}
+
+/**
+ * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return I40E_HLUT_ARRAY_SIZE;
+}
+
+static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_val;
+ int i, j;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+ reg_val = rd32(hw, I40E_PFQF_HLUT(i));
+ indir[j++] = reg_val & 0xff;
+ indir[j++] = (reg_val >> 8) & 0xff;
+ indir[j++] = (reg_val >> 16) & 0xff;
+ indir[j++] = (reg_val >> 24) & 0xff;
+ }
+
+ if (key) {
+ for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+ reg_val = rd32(hw, I40E_PFQF_HKEY(i));
+ key[j++] = (u8)(reg_val & 0xff);
+ key[j++] = (u8)((reg_val >> 8) & 0xff);
+ key[j++] = (u8)((reg_val >> 16) & 0xff);
+ key[j++] = (u8)((reg_val >> 24) & 0xff);
+ }
+ }
+ return 0;
+}
+
+/**
+ * i40e_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function to use
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg_val;
+ int i, j;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+ reg_val = indir[j++];
+ reg_val |= indir[j++] << 8;
+ reg_val |= indir[j++] << 16;
+ reg_val |= indir[j++] << 24;
+ wr32(hw, I40E_PFQF_HLUT(i), reg_val);
+ }
+
+ if (key) {
+ for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) {
+ reg_val = key[j++];
+ reg_val |= key[j++] << 8;
+ reg_val |= key[j++] << 16;
+ reg_val |= key[j++] << 24;
+ wr32(hw, I40E_PFQF_HKEY(i), reg_val);
+ }
+ }
+ return 0;
+}
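
Both rxfh helpers above shuttle the flat byte arrays that ethtool uses to and from 32-bit HLUT/HKEY registers, four bytes per register with byte 0 in the low bits. A minimal round-trip sketch of that packing, with the register file simulated by an array:

/* Minimal sketch of the LUT/key packing: four consecutive bytes go
 * little-end-first into each 32-bit register on write, and come back out
 * the same way on read.  Register storage is simulated here.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_REGS 4			/* stands in for HLUT/HKEY_MAX_INDEX + 1 */

static void pack_bytes(const uint8_t *bytes, uint32_t *regs, int num_regs)
{
	for (int i = 0, j = 0; i < num_regs; i++) {
		uint32_t val = bytes[j++];

		val |= (uint32_t)bytes[j++] << 8;
		val |= (uint32_t)bytes[j++] << 16;
		val |= (uint32_t)bytes[j++] << 24;
		regs[i] = val;			/* the driver does wr32() here */
	}
}

static void unpack_bytes(const uint32_t *regs, uint8_t *bytes, int num_regs)
{
	for (int i = 0, j = 0; i < num_regs; i++) {
		uint32_t val = regs[i];		/* the driver does rd32() here */

		bytes[j++] = val & 0xff;
		bytes[j++] = (val >> 8) & 0xff;
		bytes[j++] = (val >> 16) & 0xff;
		bytes[j++] = (val >> 24) & 0xff;
	}
}

int main(void)
{
	uint8_t in[NUM_REGS * 4], out[NUM_REGS * 4];
	uint32_t regs[NUM_REGS];

	for (int i = 0; i < NUM_REGS * 4; i++)
		in[i] = (uint8_t)i;
	pack_bytes(in, regs, NUM_REGS);
	unpack_bytes(regs, out, NUM_REGS);
	for (int i = 0; i < NUM_REGS * 4; i++)
		if (in[i] != out[i])
			return 1;
	printf("round trip ok, reg0 = 0x%08x\n", (unsigned)regs[0]);	/* 0x03020100 */
	return 0;
}
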
+
+/**
+ * i40e_get_priv_flags - report device private flags
+ * @dev: network interface device structure
+ *
+ * The string set count reported for ETH_SS_PRIV_FLAGS must match the flags
+ * returned here, one string per flag. Add a new string to the
+ * i40e_priv_flags_strings array for each new flag.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40e_get_priv_flags(struct net_device *dev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 ret_flags = 0;
+
+ ret_flags |= pf->hw.func_caps.npar_enable ?
+ I40E_PRIV_FLAGS_NPAR_FLAG : 0;
+
+ return ret_flags;
+}
+
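
The private-flags interface relies on get_priv_flags, the ETH_SS_PRIV_FLAGS count and the string table staying in lockstep, one string per bit. A self-contained sketch of that contract, using a made-up capability struct and flag value:

/* Illustrative sketch of the private-flags contract: bit N of the value
 * returned by get_priv_flags is labelled by entry N of the ETH_SS_PRIV_FLAGS
 * string table.  The flag value and capability struct are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define PRIV_FLAGS_NPAR	(1u << 0)	/* assumed to mirror I40E_PRIV_FLAGS_NPAR_FLAG */

static const char * const priv_flags_strings[] = {
	"NPAR",				/* bit 0 */
};
#define PRIV_FLAGS_STR_LEN \
	(sizeof(priv_flags_strings) / sizeof(priv_flags_strings[0]))

struct caps {
	int npar_enable;
};

static uint32_t get_priv_flags(const struct caps *caps)
{
	uint32_t ret_flags = 0;

	ret_flags |= caps->npar_enable ? PRIV_FLAGS_NPAR : 0;
	return ret_flags;
}

int main(void)
{
	struct caps caps = { .npar_enable = 1 };
	uint32_t flags = get_priv_flags(&caps);

	for (uint32_t i = 0; i < PRIV_FLAGS_STR_LEN; i++)
		printf("%s: %s\n", priv_flags_strings[i],
		       (flags & (1u << i)) ? "on" : "off");
	return 0;
}
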
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.set_settings = i40e_set_settings,
@@ -2323,9 +2571,14 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ethtool_stats = i40e_get_ethtool_stats,
.get_coalesce = i40e_get_coalesce,
.set_coalesce = i40e_set_coalesce,
+ .get_rxfh_key_size = i40e_get_rxfh_key_size,
+ .get_rxfh_indir_size = i40e_get_rxfh_indir_size,
+ .get_rxfh = i40e_get_rxfh,
+ .set_rxfh = i40e_set_rxfh,
.get_channels = i40e_get_channels,
.set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
+ .get_priv_flags = i40e_get_priv_flags,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 27c206e62da7..1803afeef23e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -24,7 +24,6 @@
*
******************************************************************************/
-
#include <linux/if_ether.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -150,7 +149,7 @@ static inline bool i40e_fcoe_xid_is_valid(u16 xid)
/**
* i40e_fcoe_ddp_unmap - unmap the mapped sglist associated
- * @pf: pointer to pf
+ * @pf: pointer to PF
* @ddp: sw DDP context
*
* Unmap the scatter-gather list associated with the given SW DDP context
@@ -269,7 +268,7 @@ out:
/**
* i40e_fcoe_sw_init - sets up the HW for FCoE
- * @pf: pointer to pf
+ * @pf: pointer to PF
*
* Returns 0 if FCoE is supported otherwise the error code
**/
@@ -329,7 +328,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
/**
* i40e_get_fcoe_tc_map - Return TC map for FCoE APP
- * @pf: pointer to pf
+ * @pf: pointer to PF
*
**/
u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
@@ -381,12 +380,11 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
ctxt->pf_num = hw->pf_id;
ctxt->vf_num = 0;
ctxt->uplink_seid = vsi->uplink_seid;
- ctxt->connection_type = 0x1;
+ ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt->flags = I40E_AQ_VSI_TYPE_PF;
/* FCoE VSI would need the following sections */
- info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID |
- I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
/* FCoE VSI does not need these sections */
info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
@@ -395,7 +393,12 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
- info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ info->valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ info->switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
enabled_tc = i40e_get_fcoe_tc_map(pf);
i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
@@ -1303,8 +1306,7 @@ static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
/* MACLEN is ether header length in words not bytes */
td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
- return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset);
}
/**
@@ -1443,7 +1445,6 @@ static int i40e_fcoe_set_features(struct net_device *netdev,
return 0;
}
-
static const struct net_device_ops i40e_fcoe_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -1470,6 +1471,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = {
.ndo_set_features = i40e_fcoe_set_features,
};
+/* fcoe network device type */
+static struct device_type fcoe_netdev_type = {
+ .name = "fcoe",
+};
+
/**
* i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI
* @vsi: pointer to the associated VSI struct
@@ -1503,6 +1509,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
netdev->mtu = FCOE_MTU;
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type);
/* set different dev_port value 1 for FCoE netdev than the default
* zero dev_port value for PF netdev, this helps biosdevname user
* tool to differentiate them correctly while both attached to the
@@ -1523,7 +1530,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
/**
* i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
- * @pf: the pf that VSI is associated with
+ * @pf: the PF that VSI is associated with
*
**/
void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
@@ -1550,7 +1557,7 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0);
if (vsi) {
dev_dbg(&pf->pdev->dev,
- "Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n",
+ "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n",
vsi->seid, vsi->id, vsi->uplink_seid, seid);
} else {
dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
index 21e0f582031c..0d49e2d15d40 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -37,7 +37,6 @@
#define I40E_FILTER_CONTEXT_DESC(R, i) \
(&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
-
/* receive queue descriptor filter status for FCoE */
#define I40E_RX_DESC_FLTSTAT_FCMASK 0x3
#define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 4627588f4613..0079ad7bcd0e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -856,7 +856,7 @@ static void i40e_write_dword(u8 *hmc_bits,
if (ce_info->width < 32)
mask = ((u32)1 << ce_info->width) - 1;
else
- mask = 0xFFFFFFFF;
+ mask = ~(u32)0;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
@@ -908,7 +908,7 @@ static void i40e_write_qword(u8 *hmc_bits,
if (ce_info->width < 64)
mask = ((u64)1 << ce_info->width) - 1;
else
- mask = 0xFFFFFFFFFFFFFFFF;
+ mask = ~(u64)0;
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281be1c9f..24481cd7e59a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 6
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -75,6 +75,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
/* required last entry */
{0, }
};
@@ -249,6 +250,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
}
/**
+ * i40e_find_vsi_from_id - searches for the VSI with the given id
+ * @pf: the PF structure to search
+ * @id: id of the VSI it is searching for
+ **/
+struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->id == id))
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
* i40e_service_event_schedule - Schedule the service task to wake up
* @pf: board private structure
*
@@ -450,7 +467,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_pf_reset_stats - Reset all of the stats for the given pf
+ * i40e_pf_reset_stats - Reset all of the stats for the given PF
* @pf: the PF to be reset
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
@@ -896,7 +913,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_update_pf_stats - Update the pf statistics counters.
+ * i40e_update_pf_stats - Update the PF statistics counters.
* @pf: the PF to be updated
**/
static void i40e_update_pf_stats(struct i40e_pf *pf)
@@ -919,11 +936,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
pf->stat_offsets_loaded,
&osd->eth.rx_discards,
&nsd->eth.rx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_discards,
- &nsd->eth.tx_discards);
-
i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
I40E_GLPRT_UPRCL(hw->port),
pf->stat_offsets_loaded,
@@ -1133,7 +1145,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL
@@ -1161,7 +1173,7 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
* i40e_find_mac - Find a mac addr in the macvlan filters list
* @vsi: the VSI to be searched
* @macaddr: the MAC address we are searching for
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns the first filter with the provided MAC address or NULL if
@@ -1209,7 +1221,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
* i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
- * @is_vf: true if it is a vf
+ * @is_vf: true if it is a VF
* @is_netdev: true if it is a netdev
*
* Goes through all the macvlan filters and adds a
@@ -1270,7 +1282,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure its a vf filter, else doesn't matter
+ * @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
*
* Returns ptr to the filter object or NULL when no memory available.
@@ -1330,7 +1342,7 @@ add_filter_out:
* @vsi: the VSI to be searched
* @macaddr: the MAC address
* @vlan: the vlan
- * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
**/
void i40e_del_filter(struct i40e_vsi *vsi,
@@ -1357,7 +1369,7 @@ void i40e_del_filter(struct i40e_vsi *vsi,
f->counter--;
}
} else {
- /* make sure we don't remove a filter in use by vf or netdev */
+ /* make sure we don't remove a filter in use by VF or netdev */
int min_f = 0;
min_f += (f->is_vf ? 1 : 0);
min_f += (f->is_netdev ? 1 : 0);
@@ -1512,7 +1524,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = vsi->alloc_queue_pairs/numtc;
+	/* In the MFP case the count of available MSI-X vectors can be
+	 * much lower, so lower the used queue count accordingly.
+ */
+ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -1541,7 +1558,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.tc_info[i].qoffset = offset;
vsi->tc_config.tc_info[i].qcount = qcount;
- /* find the power-of-2 of the number of queue pairs */
+ /* find the next higher power-of-2 of num queue pairs */
num_qps = qcount;
pow = 0;
while (num_qps && ((1 << pow) < qcount)) {
@@ -1571,6 +1588,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
/* Set actual Tx/Rx queue pairs */
vsi->num_queue_pairs = offset;
+ if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
+ if (vsi->req_queue_pairs > 0)
+ vsi->num_queue_pairs = vsi->req_queue_pairs;
+ else
+ vsi->num_queue_pairs = pf->num_lan_msix;
+ }
/* Scheduler section valid can only be set for ADD VSI */
if (is_add) {
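
The queue-map hunk above rounds each TC's queue count up to a power of two because the hardware stores the count as an exponent. Condensed to just the visible loop condition, the rounding looks like this:

/* Small sketch of the "next higher power-of-2" rounding used in the queue
 * map: find the smallest pow with (1 << pow) >= qcount.
 */
#include <stdio.h>

static int next_pow2_exponent(int qcount)
{
	int pow = 0;

	while ((1 << pow) < qcount)
		pow++;
	return pow;
}

int main(void)
{
	int samples[] = { 1, 2, 3, 5, 8, 13, 64 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("qcount %2d -> pow %d (%d queues reserved)\n",
		       samples[i], next_pow2_exponent(samples[i]),
		       1 << next_pow2_exponent(samples[i]));
	return 0;
}
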
@@ -1962,7 +1985,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
@@ -1991,7 +2014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
@@ -2275,7 +2298,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (aq_ret) {
dev_info(&vsi->back->pdev->dev,
@@ -2393,20 +2416,20 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
struct i40e_vsi *vsi = ring->vsi;
cpumask_var_t mask;
- if (ring->q_vector && ring->netdev) {
- /* Single TC mode enable XPS */
- if (vsi->tc_config.numtc <= 1 &&
- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+ if (!ring->q_vector || !ring->netdev)
+ return;
+
+ /* Single TC mode enable XPS */
+ if (vsi->tc_config.numtc <= 1) {
+ if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
ring->queue_index);
- } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
- /* Disable XPS to allow selection based on TC */
- bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
- netif_set_xps_queue(ring->netdev, mask,
- ring->queue_index);
- free_cpumask_var(mask);
- }
+ } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ /* Disable XPS to allow selection based on TC */
+ bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
+ netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
+ free_cpumask_var(mask);
}
}
@@ -2591,7 +2614,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+ if (ring_is_ps_enabled(ring)) {
+ i40e_alloc_rx_headers(ring);
+ i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
+ } else {
+ i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
+ }
return 0;
}
@@ -2684,8 +2712,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
- return;
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
+ rx_ring->dcb_tc = 0;
+ tx_ring->dcb_tc = 0;
+ }
+ }
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3171,13 +3206,16 @@ static irqreturn_t i40e_intr(int irq, void *data)
pf->globr_count++;
} else if (val == I40E_RESET_EMPR) {
pf->empr_count++;
- set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
}
}
if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
+ rd32(hw, I40E_PFHMC_ERRORINFO),
+ rd32(hw, I40E_PFHMC_ERRORDATA));
}
if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
@@ -3813,6 +3851,8 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
pci_disable_msix(pf->pdev);
kfree(pf->msix_entries);
pf->msix_entries = NULL;
+ kfree(pf->irq_pile);
+ pf->irq_pile = NULL;
} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
pci_disable_msi(pf->pdev);
}
@@ -3830,6 +3870,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
@@ -4003,7 +4049,7 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
#endif
/**
* i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
- * @pf: pointer to pf
+ * @pf: pointer to PF
*
* Get TC map for ISCSI PF type that will include iSCSI TC
* and LAN TC.
@@ -4101,7 +4147,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
enabled_tc = i40e_get_iscsi_tc_map(pf);
else
- enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ return 1; /* Only TC0 */
/* At least have TC0 */
enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -4151,11 +4197,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
- /* MPF enabled and iSCSI PF type */
+ /* MFP enabled and iSCSI PF type */
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return pf->hw.func_caps.enabled_tcmap;
+ return i40e_pf_get_default_tc(pf);
}
/**
@@ -4178,7 +4224,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi bw config, err %d, aq_err %d\n",
+ "couldn't get PF vsi bw config, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4188,7 +4234,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
+ "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
return -EINVAL;
}
@@ -4365,7 +4411,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.pf_num = vsi->back->hw.pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.info = vsi->info;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
/* Update the VSI after updating the VSI queue-mapping information */
@@ -4545,6 +4591,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
struct i40e_hw *hw = &pf->hw;
int err = 0;
+ /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4))
+ goto out;
+
/* Get the initial DCB configuration */
err = i40e_init_dcb(hw);
if (!err) {
@@ -4608,6 +4659,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_40GB:
strlcpy(speed, "40 Gbps", SPEED_SIZE);
break;
+ case I40E_LINK_SPEED_20GB:
+		strlcpy(speed, "20 Gbps", SPEED_SIZE);
+ break;
case I40E_LINK_SPEED_10GB:
strlcpy(speed, "10 Gbps", SPEED_SIZE);
break;
@@ -4835,11 +4889,7 @@ exit:
*
* Returns 0 on success, negative value on failure
**/
-#ifdef I40E_FCOE
int i40e_open(struct net_device *netdev)
-#else
-static int i40e_open(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -4949,7 +4999,7 @@ err_setup_tx:
/**
* i40e_fdir_filter_exit - Cleans up the Flow Director accounting
- * @pf: Pointer to pf
+ * @pf: Pointer to PF
*
* This function destroys the hlist where all the Flow Director
* filters were saved.
@@ -5037,24 +5087,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
- } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
-
- /* Request a Firmware Reset
- *
- * Same as Global reset, plus restarting the
- * embedded firmware engine.
- */
- /* enable EMP Reset */
- val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
- val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
- wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
-
- /* force the reset */
- val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
- val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
- wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
- i40e_flush(&pf->hw);
-
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -5177,7 +5209,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
struct i40e_aqc_lldp_get_mib *mib =
(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
struct i40e_hw *hw = &pf->hw;
- struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
struct i40e_dcbx_config tmp_dcbx_cfg;
bool need_reconfig = false;
int ret = 0;
@@ -5208,10 +5239,11 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
goto exit;
}
- memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
/* Store the old configuration */
- tmp_dcbx_cfg = *dcbx_cfg;
+ tmp_dcbx_cfg = hw->local_dcbx_config;
+ /* Reset the old DCBx configuration data */
+ memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
@@ -5220,20 +5252,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
}
/* No change detected in DCBX configs */
- if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
+ sizeof(tmp_dcbx_cfg))) {
dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
goto exit;
}
- need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg);
+ need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
+ &hw->local_dcbx_config);
- i40e_dcbnl_flush_apps(pf, dcbx_cfg);
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
if (!need_reconfig)
goto exit;
/* Enable DCB tagging only when more than one TC */
- if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
else
pf->flags &= ~I40E_FLAG_DCB_ENABLED;
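
The LLDP handler above detects a configuration change by snapshotting the old DCBX config, refreshing the live copy from firmware, and comparing the two with memcmp. A reduced model of that pattern, with a placeholder config struct:

/* Minimal model of the change detection: snapshot the old config, reset and
 * refresh the live copy, and memcmp the two to decide whether to reconfigure.
 * The config struct and firmware stub are placeholders.
 */
#include <stdio.h>
#include <string.h>

struct dcbx_config {
	int num_tc;
	int prio_table[8];
};

static struct dcbx_config local_cfg = { .num_tc = 1 };

static void get_dcb_config(void)
{
	/* pretend the firmware now reports two traffic classes */
	local_cfg.num_tc = 2;
	local_cfg.prio_table[1] = 1;
}

int main(void)
{
	struct dcbx_config tmp_cfg = local_cfg;	/* store the old configuration */

	memset(&local_cfg, 0, sizeof(local_cfg));	/* reset before refresh */
	get_dcb_config();

	if (!memcmp(&tmp_cfg, &local_cfg, sizeof(tmp_cfg)))
		printf("No change detected in DCBX configuration.\n");
	else
		printf("DCBX changed: now %d TC(s), reconfigure\n",
		       local_cfg.num_tc);
	return 0;
}
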
@@ -5254,8 +5288,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Wait for the PF's Tx queues to be disabled */
ret = i40e_pf_wait_txq_disabled(pf);
- if (!ret)
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ } else {
i40e_pf_unquiesce_all_vsi(pf);
+ }
+
exit:
return ret;
}
@@ -5327,9 +5367,9 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
* i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
* @pf: board private structure
**/
-int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
- int val, fcnt_prog;
+ u32 val, fcnt_prog;
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
@@ -5337,12 +5377,13 @@ int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
}
/**
- * i40e_get_current_fd_count - Get the count of total FD filters programmed
+ * i40e_get_current_fd_count - Get total FD filters programmed for this PF
* @pf: board private structure
**/
-int i40e_get_current_fd_count(struct i40e_pf *pf)
+u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
- int val, fcnt_prog;
+ u32 val, fcnt_prog;
+
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
@@ -5351,6 +5392,21 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
}
/**
+ * i40e_get_global_fd_count - Get total FD filters programmed on device
+ * @pf: board private structure
+ **/
+u32 i40e_get_global_fd_count(struct i40e_pf *pf)
+{
+ u32 val, fcnt_prog;
+
+ val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
+ fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
+ ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
+ I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
+ return fcnt_prog;
+}
+
+/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
* @pf: board private structure
**/
@@ -5364,7 +5420,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
/* Check if, FD SB or ATR was auto disabled and if there is enough room
* to re-enable
*/
- fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+ fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) ||
@@ -5386,13 +5442,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
}
#define I40E_MIN_FD_FLUSH_INTERVAL 10
+#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
* i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
* @pf: board private structure
**/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
+ unsigned long min_flush_time;
int flush_wait_retry = 50;
+ bool disable_atr = false;
+ int fd_room;
int reg;
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
@@ -5400,9 +5460,20 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
- set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+		/* If the flush is happening too quickly and we have mostly
+		 * SB rules, we should not re-enable ATR for some time.
+ */
+ min_flush_time = pf->fd_flush_timestamp
+ + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+ fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+
+ if (!(time_after(jiffies, min_flush_time)) &&
+ (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ disable_atr = true;
+ }
+
pf->fd_flush_timestamp = jiffies;
- pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED;
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
@@ -5422,10 +5493,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ if (!disable_atr)
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
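
The flush path above throttles ATR re-enablement: if the previous flush was less than a minimum interval ago and sideband rules have consumed most of the filter space, ATR stays off. A userspace analogue using time() in place of jiffies, with illustrative thresholds:

/* Userspace analogue of the flush throttling: ATR is left disabled when the
 * previous flush was recent and the sideband rules have eaten most of the
 * filter space.  time() stands in for jiffies; thresholds are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MIN_FLUSH_SB_ATR_UNSTABLE 30	/* seconds, mirrors the 30 * HZ above */
#define FDIR_HEAD_ROOM_FOR_ATR    16	/* assumed headroom threshold */

static bool should_disable_atr(time_t now, time_t last_flush,
			       int filter_count, int active_filters)
{
	time_t min_flush_time = last_flush + MIN_FLUSH_SB_ATR_UNSTABLE;
	int fd_room = filter_count - active_filters;

	return (now <= min_flush_time) && (fd_room < FDIR_HEAD_ROOM_FOR_ATR);
}

int main(void)
{
	time_t now = time(NULL);

	printf("recent flush, little room : %d\n",
	       should_disable_atr(now, now - 5, 512, 500));
	printf("old flush, little room    : %d\n",
	       should_disable_atr(now, now - 120, 512, 500));
	printf("recent flush, lots of room: %d\n",
	       should_disable_atr(now, now - 5, 512, 10));
	return 0;
}
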
@@ -5436,7 +5505,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
* i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
* @pf: board private structure
**/
-int i40e_get_current_atr_cnt(struct i40e_pf *pf)
+u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
@@ -5462,9 +5531,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
return;
- if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) &&
- (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) &&
- (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count))
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
i40e_fdir_check_and_reenable(pf);
@@ -5587,7 +5654,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
int i, v;
/* If we're down or resetting, just bail */
- if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
/* for each VSI/netdev
@@ -5732,11 +5800,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
struct i40e_hw *hw = &pf->hw;
struct i40e_aqc_get_link_status *status =
(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
- struct i40e_link_status *hw_link_info = &hw->phy.link_info;
/* save off old link status information */
- memcpy(&pf->hw.phy.link_info_old, hw_link_info,
- sizeof(pf->hw.phy.link_info_old));
+ hw->phy.link_info_old = hw->phy.link_info;
/* Do a new status request to re-enable LSE reporting
* and load new status information into the hw struct
@@ -5850,6 +5916,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
case i40e_aqc_opc_send_msg_to_peer:
dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
break;
+ case i40e_aqc_opc_nvm_erase:
+ case i40e_aqc_opc_nvm_update:
+ i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
+ break;
default:
dev_info(&pf->pdev->dev,
"ARQ Error: Unknown event 0x%04x received\n",
@@ -5894,6 +5964,94 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
}
/**
+ * i40e_enable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * enable switch loop back or die - no point in a return value
+ **/
+static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get PF vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_disable_pf_switch_lb
+ * @pf: pointer to the PF structure
+ *
+ * disable switch loop back or die - no point in a return value
+ **/
+static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ int aq_ret;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s couldn't get PF vsi config, err %d, aq_err %d\n",
+ __func__, aq_ret, pf->hw.aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "%s: update vsi switch failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_config_bridge_mode - Configure the HW bridge mode
+ * @veb: pointer to the bridge instance
+ *
+ * Configure the loopback mode for the LAN VSI that is the downlink of the
+ * specified HW bridge instance. This function is expected to be called
+ * when a new HW bridge is instantiated.
+ **/
+static void i40e_config_bridge_mode(struct i40e_veb *veb)
+{
+ struct i40e_pf *pf = veb->pf;
+
+ dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+ veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+ i40e_disable_pf_switch_lb(pf);
+ else
+ i40e_enable_pf_switch_lb(pf);
+}
+
+/**
* i40e_reconstitute_veb - rebuild the VEB and anything connected to it
* @veb: pointer to the VEB instance
*
@@ -5939,8 +6097,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret)
goto end_reconstitute;
- /* Enable LB mode for the main VSI now that it is on a VEB */
- i40e_enable_pf_switch_lb(pf);
+ i40e_config_bridge_mode(veb);
/* create the remaining VSIs attached to this VEB */
for (v = 0; v < pf->num_alloc_vsi; v++) {
@@ -6112,7 +6269,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for pf Reset.
+ * Close up the VFs and other things in prep for PF Reset.
**/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
@@ -6197,10 +6354,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
/* re-verify the eeprom if we just had an EMP reset */
- if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
- clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
i40e_verify_eeprom(pf);
- }
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
@@ -6310,13 +6465,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
}
- msleep(75);
- ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
- if (ret) {
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ msleep(75);
+ ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (ret)
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
}
-
/* reinit the misc interrupt */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
ret = i40e_setup_misc_vector(pf);
@@ -6339,7 +6495,7 @@ clear_recovery:
}
/**
- * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
* @pf: board private structure
*
* Close up the VFs and other things in prep for a Core Reset,
@@ -6353,7 +6509,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
/**
* i40e_handle_mdd_event
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
*
 * Called from the MDD irq handler to identify possibly malicious VFs
**/
@@ -6382,7 +6538,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
I40E_GL_MDET_TX_QUEUE_SHIFT) -
pf->hw.func_caps.base_queue;
if (netif_msg_tx_err(pf))
- dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+ dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
event, queue, pf_num, vf_num);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
@@ -6468,7 +6624,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
- u8 filter_index;
__be16 port;
int i;
@@ -6481,22 +6636,20 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
if (pf->pending_vxlan_bitmap & (1 << i)) {
pf->pending_vxlan_bitmap &= ~(1 << i);
port = pf->vxlan_ports[i];
- ret = port ?
- i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ if (port)
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
I40E_AQC_TUNNEL_TYPE_VXLAN,
- &filter_index, NULL)
- : i40e_aq_del_udp_tunnel(hw, i, NULL);
+ NULL, NULL);
+ else
+ ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
- port ? "adding" : "deleting",
- ntohs(port), port ? i : i);
-
+ dev_info(&pf->pdev->dev,
+ "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+ port ? "add" : "delete",
+ ntohs(port), i, ret,
+ pf->hw.aq.asq_last_status);
pf->vxlan_ports[i] = 0;
- } else {
- dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
- port ? "Added" : "Deleted",
- ntohs(port), port ? i : filter_index);
}
}
}
@@ -6703,6 +6856,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->idx = vsi_idx;
vsi->rx_itr_setting = pf->rx_itr_default;
vsi->tx_itr_setting = pf->tx_itr_default;
+ vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
+ pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
@@ -6783,7 +6938,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
goto unlock_vsi;
}
- /* updates the pf for this cleared vsi */
+ /* updates the PF for this cleared vsi */
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
@@ -6896,15 +7051,14 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
*
* Work with the OS to set up the MSIX vectors needed.
*
- * Returns 0 on success, negative on failure
+ * Returns the number of vectors reserved or negative on failure
**/
static int i40e_init_msix(struct i40e_pf *pf)
{
- i40e_status err = 0;
struct i40e_hw *hw = &pf->hw;
- int other_vecs = 0;
+ int vectors_left;
int v_budget, i;
- int vec;
+ int v_actual;
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
return -ENODEV;
@@ -6926,24 +7080,62 @@ static int i40e_init_msix(struct i40e_pf *pf)
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
- pf->num_vmdq_msix = pf->num_vmdq_qps;
- other_vecs = 1;
- other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
- other_vecs++;
-
- /* Scale down if necessary, and the rings will share vectors */
- pf->num_lan_msix = min_t(int, pf->num_lan_msix,
- (hw->func_caps.num_msix_vectors - other_vecs));
- v_budget = pf->num_lan_msix + other_vecs;
+ vectors_left = hw->func_caps.num_msix_vectors;
+ v_budget = 0;
+
+ /* reserve one vector for miscellaneous handler */
+ if (vectors_left) {
+ v_budget++;
+ vectors_left--;
+ }
+
+ /* reserve vectors for the main PF traffic queues */
+ pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+ vectors_left -= pf->num_lan_msix;
+ v_budget += pf->num_lan_msix;
+
+ /* reserve one vector for sideband flow director */
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (vectors_left) {
+ v_budget++;
+ vectors_left--;
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ }
+ }
#ifdef I40E_FCOE
+ /* can we reserve enough for FCoE? */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
- pf->num_fcoe_msix = pf->num_fcoe_qps;
+ if (!vectors_left)
+ pf->num_fcoe_msix = 0;
+ else if (vectors_left >= pf->num_fcoe_qps)
+ pf->num_fcoe_msix = pf->num_fcoe_qps;
+ else
+ pf->num_fcoe_msix = 1;
v_budget += pf->num_fcoe_msix;
+ vectors_left -= pf->num_fcoe_msix;
}
+
#endif
+ /* any vectors left over go for VMDq support */
+ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
+ int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
+ int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);
+
+ /* if we're short on vectors for what's desired, we limit
+ * the queues per vmdq. If this is still more than are
+ * available, the user will need to change the number of
+ * queues/vectors used by the PF later with the ethtool
+ * channels command
+ */
+ if (vmdq_vecs < vmdq_vecs_wanted)
+ pf->num_vmdq_qps = 1;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
+
+ v_budget += vmdq_vecs;
+ vectors_left -= vmdq_vecs;
+ }
pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
GFP_KERNEL);
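
The rewritten vector accounting above hands out a fixed pool in priority order: one miscellaneous vector, LAN queues capped by the number of online CPUs, an optional flow-director sideband vector, and the remainder to VMDq. A condensed model of that arithmetic with made-up inputs (FCoE omitted):

/* Condensed model of the vector budgeting: one vector for the misc handler,
 * LAN queues capped by online CPUs, one optional sideband flow director
 * vector, and leftovers for VMDq.  Pool size and requests are made up.
 */
#include <stdio.h>

struct msix_budget {
	int misc, lan, fdsb, vmdq;
};

static int min_int(int a, int b) { return a < b ? a : b; }

static int budget_vectors(int vectors_left, int online_cpus,
			  int want_fdsb, int vmdq_wanted,
			  struct msix_budget *out)
{
	int v_budget = 0;

	out->misc = vectors_left ? 1 : 0;		/* miscellaneous handler */
	vectors_left -= out->misc;
	v_budget += out->misc;

	out->lan = min_int(online_cpus, vectors_left);	/* main PF traffic queues */
	vectors_left -= out->lan;
	v_budget += out->lan;

	out->fdsb = (want_fdsb && vectors_left) ? 1 : 0; /* sideband flow director */
	vectors_left -= out->fdsb;
	v_budget += out->fdsb;

	out->vmdq = min_int(vectors_left, vmdq_wanted);	/* leftovers for VMDq */
	vectors_left -= out->vmdq;
	v_budget += out->vmdq;

	return v_budget;
}

int main(void)
{
	struct msix_budget b;
	int total = budget_vectors(16, 8, 1, 12, &b);

	printf("misc %d lan %d fdsb %d vmdq %d -> budget %d\n",
	       b.misc, b.lan, b.fdsb, b.vmdq, total);
	return 0;
}
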
@@ -6952,9 +7144,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
- vec = i40e_reserve_msix_vectors(pf, v_budget);
+ v_actual = i40e_reserve_msix_vectors(pf, v_budget);
- if (vec != v_budget) {
+ if (v_actual != v_budget) {
/* If we have limited resources, we will start with no vectors
* for the special features and then allocate vectors to some
* of these features based on the policy and at the end disable
@@ -6967,26 +7159,30 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_vmdq_msix = 0;
}
- if (vec < I40E_MIN_MSIX) {
+ if (v_actual < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
pf->msix_entries = NULL;
return -ENODEV;
- } else if (vec == I40E_MIN_MSIX) {
+ } else if (v_actual == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
- } else if (vec != v_budget) {
+ } else if (v_actual != v_budget) {
+ int vec;
+
/* reserve the misc vector */
- vec--;
+ vec = v_actual - 1;
/* Scale vector usage down */
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
pf->num_vmdq_vsis = 1;
+ pf->num_vmdq_qps = 1;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
/* partition out the remaining vectors */
switch (vec) {
@@ -7012,10 +7208,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
vec--;
}
#endif
- pf->num_lan_msix = min_t(int, (vec / 2),
- pf->num_lan_qps);
- pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
- I40E_DEFAULT_NUM_VMDQ_VSI);
+ /* give the rest to the PF */
+ pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
break;
}
}
@@ -7032,7 +7226,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
}
#endif
- return err;
+ return v_actual;
}
/**
@@ -7107,13 +7301,14 @@ err_out:
* i40e_init_interrupt_scheme - Determine proper interrupt scheme
* @pf: board private structure to initialize
**/
-static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
- int err = 0;
+ int vectors = 0;
+ ssize_t size;
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- err = i40e_init_msix(pf);
- if (err) {
+ vectors = i40e_init_msix(pf);
+ if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
@@ -7133,18 +7328,32 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
- err = pci_enable_msi(pf->pdev);
- if (err) {
- dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
+ vectors = pci_enable_msi(pf->pdev);
+ if (vectors < 0) {
+ dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
+ vectors);
pf->flags &= ~I40E_FLAG_MSI_ENABLED;
}
+ vectors = 1; /* one MSI or Legacy vector */
}
if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
- /* track first vector for misc interrupts */
- err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+ /* set up vector assignment tracking */
+ size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
+ pf->irq_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->irq_pile) {
+ dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
+ return -ENOMEM;
+ }
+ pf->irq_pile->num_entries = vectors;
+ pf->irq_pile->search_hint = 0;
+
+ /* track first vector for misc interrupts, ignore return */
+ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
+
+ return 0;
}
/**
@@ -7194,6 +7403,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
static int i40e_config_rss(struct i40e_pf *pf)
{
u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
u32 lut = 0;
int i, j;
@@ -7211,15 +7421,14 @@ static int i40e_config_rss(struct i40e_pf *pf)
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
	/* Check capability and set table size and register per HW expectation */
reg_val = rd32(hw, I40E_PFQF_CTL_0);
- if (hw->func_caps.rss_table_size == 512) {
+ if (pf->rss_table_size == 512)
reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- pf->rss_table_size = 512;
- } else {
- pf->rss_table_size = 128;
+ else
reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
- }
wr32(hw, I40E_PFQF_CTL_0, reg_val);
/* Populate the LUT with max no. of queues in round robin fashion */
@@ -7232,7 +7441,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
* If LAN VSI is the only consumer for RSS then this requirement
* is not necessary.
*/
- if (j == pf->rss_size)
+ if (j == vsi->rss_size)
j = 0;
/* lut = 4-byte sliding window of 4 lut entries */
lut = (lut << 8) | (j &
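
The loop above fills the RSS LUT round-robin, wrapping the queue index at the VSI's RSS size and shifting four 8-bit entries into each 32-bit word before it is written out. A standalone sketch with illustrative sizes and the register write simulated:

/* Sketch of the round-robin LUT fill: queue indices wrap at the RSS size
 * while the table is rss_table_size entries long, and four entries are
 * shifted into each 32-bit word.  Sizes are illustrative; the register
 * write is simulated by an array store.
 */
#include <stdint.h>
#include <stdio.h>

#define RSS_TABLE_SIZE 16	/* assumed LUT length */
#define RSS_SIZE        6	/* assumed number of RSS queues */

int main(void)
{
	uint32_t hlut[RSS_TABLE_SIZE / 4];
	uint32_t lut = 0;
	int i, j;

	for (i = 0, j = 0; i < RSS_TABLE_SIZE; i++, j++) {
		if (j == RSS_SIZE)		/* wrap so every queue gets a share */
			j = 0;
		lut = (lut << 8) | (uint8_t)j;	/* 4-entry sliding window */
		if ((i & 3) == 3)		/* word full: "write the register" */
			hlut[i >> 2] = lut;
	}

	for (i = 0; i < RSS_TABLE_SIZE / 4; i++)
		printf("HLUT[%d] = 0x%08x\n", i, (unsigned)hlut[i]);
	return 0;
}
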
@@ -7256,15 +7465,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ int new_rss_size;
+
if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
return 0;
- queue_count = min_t(int, queue_count, pf->rss_size_max);
+ new_rss_size = min_t(int, queue_count, pf->rss_size_max);
- if (queue_count != pf->rss_size) {
+ if (queue_count != vsi->num_queue_pairs) {
+ vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
- pf->rss_size = queue_count;
+ pf->rss_size = new_rss_size;
i40e_reset_and_rebuild(pf, true);
i40e_config_rss(pf);
@@ -7274,6 +7487,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
}
/**
+ * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
+{
+ i40e_status status;
+ bool min_valid, max_valid;
+ u32 max_bw, min_bw;
+
+ status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+ &min_valid, &max_valid);
+
+ if (!status) {
+ if (min_valid)
+ pf->npar_min_bw = min_bw;
+ if (max_valid)
+ pf->npar_max_bw = max_bw;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_set_npar_bw_setting - Set BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
+{
+ struct i40e_aqc_configure_partition_bw_data bw_data;
+ i40e_status status;
+
+ /* Set the valid bit for this PF */
+ bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+ bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
+ bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
+
+ /* Set the new bandwidths */
+ status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
+
+ return status;
+}
+
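i40e_set_npar_bw_setting only marks this PF's bit in pf_valid_bits and writes its own masked min/max entries; the other array slots are left for firmware to ignore. A rough standalone sketch of that packing, using simplified stand-in types and an assumed 8-bit value mask rather than the real adminq structures:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ALT_BW_VALUE_MASK 0xFF  /* stand-in for I40E_ALT_BW_VALUE_MASK */

/* Simplified stand-in for struct i40e_aqc_configure_partition_bw_data */
struct partition_bw_data {
        uint16_t pf_valid_bits; /* bit N set => entry N is meaningful */
        uint8_t  min_bw[16];
        uint8_t  max_bw[16];
};

static void fill_partition_bw(struct partition_bw_data *bw, unsigned int pf_id,
                              uint32_t min_val, uint32_t max_val)
{
        memset(bw, 0, sizeof(*bw));
        bw->pf_valid_bits = (uint16_t)(1u << pf_id); /* only this PF is valid */
        bw->min_bw[pf_id] = min_val & ALT_BW_VALUE_MASK;
        bw->max_bw[pf_id] = max_val & ALT_BW_VALUE_MASK;
}

int main(void)
{
        struct partition_bw_data bw;

        fill_partition_bw(&bw, 2, 10, 50);
        printf("valid=0x%04x min[2]=%d max[2]=%d\n",
               (unsigned)bw.pf_valid_bits, bw.min_bw[2], bw.max_bw[2]);
        return 0;
}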
+/**
+ * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
+ * @pf: board private structure
+ **/
+i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
+{
+ /* Commit temporary BW setting to permanent NVM image */
+ enum i40e_admin_queue_err last_aq_status;
+ i40e_status ret;
+ u16 nvm_word;
+
+ if (pf->hw.partition_id != 1) {
+ dev_info(&pf->pdev->dev,
+ "Commit BW only works on partition 1! This is partition %d",
+ pf->hw.partition_id);
+ ret = I40E_NOT_SUPPORTED;
+ goto bw_commit_out;
+ }
+
+ /* Acquire NVM for read access */
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot acquire NVM for read access, err %d: aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+
+ /* Read word 0x10 of NVM - SW compatibility word 1 */
+ ret = i40e_aq_read_nvm(&pf->hw,
+ I40E_SR_NVM_CONTROL_WORD,
+ 0x10, sizeof(nvm_word), &nvm_word,
+ false, NULL);
+ /* Save off last admin queue command status before releasing
+ * the NVM
+ */
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+
+ /* Wait a bit for NVM release to complete */
+ msleep(50);
+
+ /* Acquire NVM for write access */
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
+ last_aq_status = pf->hw.aq.asq_last_status;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot acquire NVM for write access, err %d: aq_err %d\n",
+ ret, last_aq_status);
+ goto bw_commit_out;
+ }
+ /* Write it back out unchanged to initiate an NVM update,
+ * which will force a write of the shadow (alt) RAM to
+ * the NVM - thus storing the bandwidth values permanently.
+ */
+ ret = i40e_aq_update_nvm(&pf->hw,
+ I40E_SR_NVM_CONTROL_WORD,
+ 0x10, sizeof(nvm_word),
+ &nvm_word, true, NULL);
+ /* Save off last admin queue command status before releasing
+ * the NVM
+ */
+ last_aq_status = pf->hw.aq.asq_last_status;
+ i40e_release_nvm(&pf->hw);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "BW settings NOT SAVED, err %d aq_err %d\n",
+ ret, last_aq_status);
+bw_commit_out:
+
+ return ret;
+}
+
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -7299,8 +7634,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_1BUF_ENABLED;
+ I40E_FLAG_MSIX_ENABLED;
+
+ if (iommu_present(&pci_bus_type))
+ pf->flags |= I40E_FLAG_RX_PS_ENABLED;
+ else
+ pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
@@ -7311,6 +7650,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
*/
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
pf->rss_size = 1;
+ pf->rss_table_size = pf->hw.func_caps.rss_table_size;
pf->rss_size_max = min_t(int, pf->rss_size_max,
pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
@@ -7322,6 +7662,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
pf->flags |= I40E_FLAG_MFP_ENABLED;
dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ if (i40e_get_npar_bw_setting(pf))
+ dev_warn(&pf->pdev->dev,
+ "Could not get NPAR bw settings\n");
+ else
+ dev_info(&pf->pdev->dev,
+ "Min BW = %8.8x, Max BW = %8.8x\n",
+ pf->npar_min_bw, pf->npar_max_bw);
}
/* FW/NVM is not yet fixed in this regard */
@@ -7329,11 +7676,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- /* Setup a counter for fd_atr per pf */
+ /* Setup a counter for fd_atr per PF */
pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- /* Setup a counter for fd_sb per pf */
+ /* Setup a counter for fd_sb per PF */
pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
@@ -7381,22 +7728,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
pf->qp_pile->search_hint = 0;
- /* set up vector assignment tracking */
- size = sizeof(struct i40e_lump_tracking)
- + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
- pf->irq_pile = kzalloc(size, GFP_KERNEL);
- if (!pf->irq_pile) {
- kfree(pf->qp_pile);
- err = -ENOMEM;
- goto sw_init_done;
- }
- pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
- pf->irq_pile->search_hint = 0;
-
pf->tx_timeout_recovery_level = 1;
mutex_init(&pf->switch_mutex);
+ /* If NPAR is enabled, nudge the Tx scheduler */
+ if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
+ i40e_set_npar_bw_setting(pf);
+
sw_init_done:
return err;
}
@@ -7509,7 +7848,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ netdev_info(netdev, "vxlan port %d already offloaded\n",
+ ntohs(port));
return;
}
@@ -7517,7 +7857,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
next_idx = i40e_get_vxlan_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
ntohs(port));
return;
}
@@ -7525,8 +7865,9 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* New port: add it and mark its index in the bitmap */
pf->vxlan_ports[next_idx] = port;
pf->pending_vxlan_bitmap |= (1 << next_idx);
-
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
}
/**
@@ -7554,12 +7895,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
* and make it pending
*/
pf->vxlan_ports[idx] = 0;
-
pf->pending_vxlan_bitmap |= (1 << idx);
-
pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
+ ntohs(port));
} else {
- netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
ntohs(port));
}
}
@@ -7628,6 +7970,118 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
+#ifdef HAVE_BRIDGE_ATTRIBS
+/**
+ * i40e_ndo_bridge_setlink - Set the hardware bridge mode
+ * @dev: the netdev being configured
+ * @nlh: RTNL message
+ *
+ * Inserts a new hardware bridge if one has not already been
+ * created and enables the requested bridging mode (VEB or
+ * VEPA). If the hardware bridge already exists and the request
+ * is to change its mode, a PF reset is required so that the
+ * switch components can be rebuilt with the requested bridge
+ * mode enabled.
+ **/
+static int i40e_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ struct nlattr *attr, *br_spec;
+ int i, rem;
+
+ /* Only for PF VSI for now */
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ return -EOPNOTSUPP;
+
+ /* Find the HW bridge for PF VSI */
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if ((mode != BRIDGE_MODE_VEPA) &&
+ (mode != BRIDGE_MODE_VEB))
+ return -EINVAL;
+
+ /* Insert a new HW bridge */
+ if (!veb) {
+ veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ if (veb) {
+ veb->bridge_mode = mode;
+ i40e_config_bridge_mode(veb);
+ } else {
+ /* No Bridge HW offload available */
+ return -ENOENT;
+ }
+ break;
+ } else if (mode != veb->bridge_mode) {
+ /* Existing HW bridge but different mode needs reset */
+ veb->bridge_mode = mode;
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_ndo_bridge_getlink - Get the hardware bridge mode
+ * @skb: skb buff
+ * @pid: process id
+ * @seq: RTNL message seq #
+ * @dev: the netdev being configured
+ * @filter_mask: unused
+ *
+ * Return the mode in which the hardware bridge is operating,
+ * i.e. VEB or VEPA.
+ **/
+#ifdef HAVE_BRIDGE_FILTER
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 __always_unused filter_mask)
+#else
+static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev)
+#endif /* HAVE_BRIDGE_FILTER */
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_veb *veb = NULL;
+ int i;
+
+ /* Only for PF VSI for now */
+ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ return -EOPNOTSUPP;
+
+ /* Find the HW bridge for the PF VSI */
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+
+ if (!veb)
+ return 0;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+
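The setlink handler boils down to a small decision: reject anything other than VEB or VEPA, insert a bridge when none exists, trigger a PF reset when an existing bridge must change mode, and otherwise do nothing. A minimal sketch of just that decision, with hypothetical mode and action constants standing in for the uapi and driver values; from userspace this path is typically exercised with something like "bridge link set dev <pf-netdev> hwmode vepa":

#include <stdio.h>

enum bridge_mode { MODE_VEB, MODE_VEPA, MODE_UNDEF };

enum setlink_action { ACT_INVAL, ACT_CREATE, ACT_RESET, ACT_NOOP };

/* Decide what the setlink path has to do for a requested mode,
 * given whether a HW bridge already exists and in which mode.
 */
static enum setlink_action bridge_setlink_action(int have_veb,
                                                 enum bridge_mode cur,
                                                 enum bridge_mode req)
{
        if (req != MODE_VEB && req != MODE_VEPA)
                return ACT_INVAL;       /* -EINVAL in the driver */
        if (!have_veb)
                return ACT_CREATE;      /* insert a new HW bridge */
        if (req != cur)
                return ACT_RESET;       /* PF reset rebuilds in the new mode */
        return ACT_NOOP;
}

int main(void)
{
        printf("%d\n", bridge_setlink_action(0, MODE_UNDEF, MODE_VEPA)); /* create */
        printf("%d\n", bridge_setlink_action(1, MODE_VEB, MODE_VEPA));   /* reset */
        printf("%d\n", bridge_setlink_action(1, MODE_VEB, MODE_VEB));    /* no-op */
        return 0;
}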
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -7662,6 +8116,10 @@ static const struct net_device_ops i40e_netdev_ops = {
#endif
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
+#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
+ .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
+#endif /* HAVE_BRIDGE_ATTRIBS */
};
/**
@@ -7774,6 +8232,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
}
/**
+ * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
+ * @vsi: the VSI being queried
+ *
+ * Returns 1 if the HW bridge mode is VEB and 0 if it is VEPA
+ **/
+int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
+{
+ struct i40e_veb *veb;
+ struct i40e_pf *pf = vsi->back;
+
+ /* Uplink is not a bridge so default to VEB */
+ if (vsi->veb_idx == I40E_NO_VEB)
+ return 1;
+
+ veb = pf->veb[vsi->veb_idx];
+ /* Uplink is a bridge in VEPA mode */
+ if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
+ return 0;
+
+ /* Uplink is a bridge in VEB mode */
+ return 1;
+}
+
+/**
* i40e_add_vsi - Add a VSI to the switch
* @vsi: the VSI being configured
*
@@ -7805,11 +8287,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get pf vsi config, err %d, aq_err %d\n",
+ "couldn't get PF vsi config, err %d, aq_err %d\n",
ret, pf->hw.aq.asq_last_status);
return -ENOENT;
}
- memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info = ctxt.info;
vsi->info.valid_sections = 0;
vsi->seid = ctxt.seid;
@@ -7858,12 +8340,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- ctxt.info.valid_sections |=
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- ctxt.info.switch_id =
+ ctxt.info.switch_id =
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -7871,16 +8355,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
- ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-
/* This VSI is connected to VEB so the switch_id
* should be set to zero by default.
*/
- ctxt.info.switch_id = 0;
- ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
/* Setup the VSI tx/rx queue map for TC0 only for now */
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
@@ -7890,15 +8376,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.pf_num = hw->pf_id;
ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
ctxt.uplink_seid = vsi->uplink_seid;
- ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_VF;
- ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
-
/* This VSI is connected to VEB so the switch_id
* should be set to zero by default.
*/
- ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
@@ -7936,7 +8425,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = -ENOENT;
goto err;
}
- memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info = ctxt.info;
vsi->info.valid_sections = 0;
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
@@ -8256,7 +8745,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
__func__);
return NULL;
}
- i40e_enable_pf_switch_lb(pf);
+ i40e_config_bridge_mode(veb);
}
for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
@@ -8699,7 +9188,7 @@ err_alloc:
}
/**
- * i40e_setup_pf_switch_element - set pf vars based on switch type
+ * i40e_setup_pf_switch_element - set PF vars based on switch type
* @pf: board private structure
* @ele: element we are building info from
* @num_reported: total number of elements
@@ -8905,15 +9394,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_config_rss(pf);
/* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw, true);
- i40e_link_event(pf);
-
- /* Initialize user-specific link properties */
- pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
- I40E_AQ_AN_COMPLETED) ? true : false);
-
- /* fill in link information and enable LSE reporting */
- i40e_update_link_info(&pf->hw, true);
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
/* Initialize user-specific link properties */
@@ -8983,7 +9464,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
- pf->num_lan_qps = pf->rss_size_max;
+ pf->num_lan_qps = max_t(int, pf->rss_size_max,
+ num_online_cpus());
+ pf->num_lan_qps = min_t(int, pf->num_lan_qps,
+ pf->hw.func_caps.num_tx_qp);
+
queues_left -= pf->num_lan_qps;
}
@@ -9036,7 +9521,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
* i40e_setup_pf_filter_control - Setup PF static filter control
* @pf: PF to be setup
*
- * i40e_setup_pf_filter_control sets up a pf's initial filter control
+ * i40e_setup_pf_filter_control sets up a PF's initial filter control
* settings. If PE/FCoE are enabled then it will also set the per PF
* based filter sizes required for them. It also enables Flow director,
* ethertype and macvlan type filter settings for the pf.
@@ -9081,8 +9566,10 @@ static void i40e_print_features(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
#endif
- buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs);
+ buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
+ pf->hw.func_caps.num_vsis,
+ pf->vsi[pf->lan_vsi]->num_queue_pairs,
+ pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
if (pf->flags & I40E_FLAG_RSS_ENABLED)
buf += sprintf(buf, "RSS ");
@@ -9111,14 +9598,16 @@ static void i40e_print_features(struct i40e_pf *pf)
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
*
- * i40e_probe initializes a pf identified by a pci_dev structure.
- * The OS initialization, configuring of the pf private structure,
+ * i40e_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
* and a hardware reset occur.
*
* Returns 0 on success, negative on failure
**/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ unsigned long ioremap_len;
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
@@ -9170,8 +9659,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &pf->hw;
hw->back = pf;
- hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+
+ ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
+ I40E_MAX_CSR_SPACE);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
if (!hw->hw_addr) {
err = -EIO;
dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
@@ -9249,7 +9741,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
-
i40e_verify_eeprom(pf);
/* Rev 0 hardware was never productized */
@@ -9342,7 +9833,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
- i40e_init_interrupt_scheme(pf);
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ goto err_switch_setup;
/* The number of VSIs reported by the FW is the minimum guaranteed
* to us; HW supports far more and we share the remaining pool with
@@ -9384,13 +9877,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
- msleep(75);
- err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
- if (err) {
- dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4)) {
+ msleep(75);
+ err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
}
-
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
* ends up disabled forever.
@@ -9474,6 +9968,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
}
+ /* get the requested speeds from the fw */
+ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+ if (err)
+ dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
+ err);
+ pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+
/* print a string summarizing features */
i40e_print_features(pf);
@@ -9492,7 +9993,6 @@ err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
kfree(pf->qp_pile);
- kfree(pf->irq_pile);
err_sw_init:
err_adminq_setup:
(void)i40e_shutdown_adminq(hw);
@@ -9533,6 +10033,7 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -9559,12 +10060,6 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
- i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- synchronize_irq(pf->msix_entries[0].vector);
- free_irq(pf->msix_entries[0].vector, pf);
- }
-
/* shutdown and destroy the HMC */
if (pf->hw.hmc.hmc_obj) {
ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9597,7 +10092,6 @@ static void i40e_remove(struct pci_dev *pdev)
}
kfree(pf->qp_pile);
- kfree(pf->irq_pile);
kfree(pf->vsi);
iounmap(pf->hw.hw_addr);
@@ -9718,6 +10212,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_clear_interrupt_scheme(pf);
+
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
@@ -9738,6 +10234,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
+
rtnl_lock();
i40e_prep_for_reset(pf);
rtnl_unlock();
@@ -9822,6 +10320,7 @@ static int __init i40e_init_module(void)
pr_info("%s: %s - version %s\n", i40e_driver_name,
i40e_driver_string, i40e_driver_version_str);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+
i40e_dbg_init();
return pci_register_driver(&i40e_driver);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e45a47..554e49d02683 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
}
/**
- * i40e_read_nvm_word - Reads Shadow RAM
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
i40e_status ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
@@ -212,7 +212,21 @@ read_nvm_exit:
}
/**
- * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
@@ -222,8 +236,8 @@ read_nvm_exit:
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
i40e_status ret_code = 0;
u16 index, word;
@@ -231,7 +245,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
/* Loop thru the selected region */
for (word = 0; word < *words; word++) {
index = offset + word;
- ret_code = i40e_read_nvm_word(hw, index, &data[word]);
+ ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
if (ret_code)
break;
}
@@ -243,6 +257,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the
+ * i40e_read_nvm_buffer_srctl() method. The buffer read is preceded by the
+ * NVM ownership take and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
+/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
@@ -302,11 +333,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
u16 *checksum)
{
i40e_status ret_code = 0;
+ struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
- u16 word = 0;
- u32 i = 0;
+ u16 *data;
+ u16 i = 0;
+
+ ret_code = i40e_allocate_virt_mem(hw, &vmem,
+ I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+ if (ret_code)
+ goto i40e_calc_nvm_checksum_exit;
+ data = (u16 *)vmem.va;
/* read pointer to VPD area */
ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
@@ -317,7 +355,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
/* read pointer to PCIe Alt Auto-load module */
ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
- &pcie_alt_module);
+ &pcie_alt_module);
if (ret_code) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
@@ -327,33 +365,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
* except the VPD and PCIe ALT Auto-load modules
*/
for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Read SR page */
+ if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+ ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ }
+
/* Skip Checksum word */
if (i == I40E_SR_SW_CHECKSUM_WORD)
- i++;
+ continue;
/* Skip VPD module (convert byte size to word count) */
- if (i == (u32)vpd_module) {
- i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
- if (i >= hw->nvm.sr_size)
- break;
+ if ((i >= (u32)vpd_module) &&
+ (i < ((u32)vpd_module +
+ (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+ continue;
}
/* Skip PCIe ALT module (convert byte size to word count) */
- if (i == (u32)pcie_alt_module) {
- i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
- if (i >= hw->nvm.sr_size)
- break;
+ if ((i >= (u32)pcie_alt_module) &&
+ (i < ((u32)pcie_alt_module +
+ (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+ continue;
}
- ret_code = i40e_read_nvm_word(hw, (u16)i, &word);
- if (ret_code) {
- ret_code = I40E_ERR_NVM_CHECKSUM;
- goto i40e_calc_nvm_checksum_exit;
- }
- checksum_local += word;
+ checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
}
*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
i40e_calc_nvm_checksum_exit:
+ i40e_free_virt_mem(hw, &vmem);
return ret_code;
}
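The reworked loop reads the shadow RAM a sector at a time and skips the checksum word plus the full VPD and PCIe-ALT ranges, rather than jumping the index forward as before. A small standalone sketch of the same traversal over an in-memory array; the sizes, offsets and base value are illustrative, not the real I40E_SR_* values:

#include <stdio.h>
#include <stdint.h>

#define SR_SIZE      64 /* assumed shadow RAM size in words */
#define SECTOR_WORDS 16 /* assumed sector size in words */
#define CSUM_WORD    4  /* assumed checksum word offset */
#define CSUM_BASE    0xBABA

/* Sum all words except the checksum word and two skipped ranges,
 * reading the "shadow RAM" a sector at a time like the driver does.
 */
static uint16_t calc_checksum(const uint16_t *sr, unsigned int skip1_start,
                              unsigned int skip1_len, unsigned int skip2_start,
                              unsigned int skip2_len)
{
        uint16_t buf[SECTOR_WORDS];
        uint16_t sum = 0;
        unsigned int i, w;

        for (i = 0; i < SR_SIZE; i++) {
                if ((i % SECTOR_WORDS) == 0)    /* "read" the next sector */
                        for (w = 0; w < SECTOR_WORDS; w++)
                                buf[w] = sr[i + w];
                if (i == CSUM_WORD)
                        continue;
                if (i >= skip1_start && i < skip1_start + skip1_len)
                        continue;
                if (i >= skip2_start && i < skip2_start + skip2_len)
                        continue;
                sum += buf[i % SECTOR_WORDS];
        }
        return (uint16_t)(CSUM_BASE - sum);
}

int main(void)
{
        uint16_t sr[SR_SIZE];
        unsigned int i;

        for (i = 0; i < SR_SIZE; i++)
                sr[i] = (uint16_t)i;
        printf("checksum = 0x%04x\n", (unsigned)calc_checksum(sr, 20, 4, 40, 8));
        return 0;
}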
@@ -679,9 +724,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
{
i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +772,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
*errno = -ESRCH;
break;
}
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ i40e_status old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
return status;
}
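The EBUSY path above deliberately keeps the original error and admin-queue status so that, if the semaphore cannot be reacquired, the caller still sees the first failure rather than the reacquire failure. A minimal sketch of that retry-once pattern, with hypothetical helpers standing in for the NVM calls:

#include <stdio.h>

#define ERR_BUSY 1

static int attempts;

/* Hypothetical operation that fails with ERR_BUSY on its first try. */
static int do_write(void)
{
        return (attempts++ == 0) ? ERR_BUSY : 0;
}

/* Hypothetical semaphore reacquire; return nonzero to simulate failure. */
static int reacquire(void)
{
        return 0;
}

static int write_with_one_retry(void)
{
        int retried = 0;
        int status;

retry:
        status = do_write();
        if (status == ERR_BUSY && !retried) {
                int old_status = status;

                if (reacquire()) {
                        /* keep the original failure, not the reacquire error */
                        status = old_status;
                } else {
                        retried = 1;
                        goto retry;
                }
        }
        return status;
}

int main(void)
{
        printf("final status = %d\n", write_with_one_retry());
        return 0;
}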
@@ -741,13 +821,12 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
int *errno)
{
enum i40e_nvmupd_cmd upd_cmd;
- u8 transaction, module;
+ u8 transaction;
/* anything that doesn't match a recognized case is an error */
upd_cmd = I40E_NVMUPD_INVALID;
transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
/* limits on data size */
if ((cmd->data_size < 1) ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 68e852a96680..7b34f1e660ea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
@@ -97,7 +98,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
bool enable_lse, struct i40e_link_status *link,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse);
i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
u64 advt_reg,
struct i40e_asq_cmd_details *cmd_details);
@@ -247,6 +247,12 @@ void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid,
+ bool *max_valid);
+i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
@@ -260,8 +266,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
-i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
- u16 *data);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
@@ -299,4 +303,9 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
u16 vsi_seid, u16 queue, bool is_add,
struct i40e_control_filter_stats *stats,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index fabcfa1b45b2..a92b7725dec3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -57,7 +57,7 @@
* timespec. However, since the registers are 64 bits of nanoseconds, we must
* convert the result to a timespec before we can return.
**/
-static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts)
{
struct i40e_hw *hw = &pf->hw;
u32 hi, lo;
@@ -69,7 +69,7 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
ns = (((u64)hi) << 32) | lo;
- *ts = ns_to_timespec(ns);
+ *ts = ns_to_timespec64(ns);
}
/**
@@ -81,10 +81,10 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts)
* we receive a timespec from the stack, we must convert that timespec into
* nanoseconds before programming the registers.
**/
-static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts)
+static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts)
{
struct i40e_hw *hw = &pf->hw;
- u64 ns = timespec_to_ns(ts);
+ u64 ns = timespec64_to_ns(ts);
/* The timer will not update until the high register is written, so
* write the low register first.
@@ -159,14 +159,14 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec now, then = ns_to_timespec(delta);
+ struct timespec64 now, then = ns_to_timespec64(delta);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_read(pf, &now);
- now = timespec_add(now, then);
- i40e_ptp_write(pf, (const struct timespec *)&now);
+ now = timespec64_add(now, then);
+ i40e_ptp_write(pf, (const struct timespec64 *)&now);
spin_unlock_irqrestore(&pf->tmreg_lock, flags);
@@ -181,7 +181,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
* Read the device clock and return the correct value on ns, after converting it
* into a timespec struct.
**/
-static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
@@ -202,7 +202,7 @@ static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
* to ns happens in the write function.
**/
static int i40e_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
@@ -613,8 +613,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
pf->ptp_caps.pps = 0;
pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
- pf->ptp_caps.gettime = i40e_ptp_gettime;
- pf->ptp_caps.settime = i40e_ptp_settime;
+ pf->ptp_caps.gettime64 = i40e_ptp_gettime;
+ pf->ptp_caps.settime64 = i40e_ptp_settime;
pf->ptp_caps.enable = i40e_ptp_feature_enable;
/* Attempt to register the clock before enabling the hardware. */
@@ -673,7 +673,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
} else {
- struct timespec ts;
+ struct timespec64 ts;
u32 regval;
dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
@@ -695,7 +695,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
/* Set the clock value. */
- ts = ktime_to_timespec(ktime_get_real());
+ ts = ktime_to_timespec64(ktime_get_real());
i40e_ptp_settime(&pf->ptp_caps, &ts);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 65d3c8bb2d5b..522d6df51330 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -310,6 +310,10 @@
#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
@@ -421,6 +425,8 @@
#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
@@ -484,7 +490,9 @@
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
@@ -548,9 +556,6 @@
#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
@@ -1066,7 +1071,7 @@
#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
@@ -1171,7 +1176,7 @@
#define I40E_VFINT_ITRN_MAX_INDEX 2
#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
@@ -1803,9 +1808,6 @@
#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
@@ -1902,6 +1904,11 @@
#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
@@ -2374,20 +2381,20 @@
#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
@@ -2620,10 +2627,6 @@
#define I40E_GLPRT_TDOLD_MAX_INDEX 3
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDPC_MAX_INDEX 3
-#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_UPRCH_MAX_INDEX 3
#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
@@ -2990,9 +2993,6 @@
#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
@@ -3258,7 +3258,7 @@
#define I40E_VFINT_ITRN1_MAX_INDEX 2
#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d36f0f..4bd3a80aba82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -25,6 +25,7 @@
******************************************************************************/
#include <linux/prefetch.h>
+#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
@@ -44,7 +45,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
* i40e_program_fdir_filter - Program a Flow Director filter
* @fdir_data: Packet data that will be filter parameters
* @raw_packet: the pre-allocated packet buffer for FDir
- * @pf: The pf pointer
+ * @pf: The PF pointer
* @add: True for add/update, False for remove
**/
int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
@@ -227,7 +228,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
- } else {
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
"Filter OK for PCTYPE %d loc = %d\n",
@@ -302,7 +303,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
- } else {
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
fd_data->pctype, fd_data->fd_id);
@@ -375,7 +376,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
err = true;
- } else {
+ } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
dev_info(&pf->pdev->dev,
"Filter OK for PCTYPE %d loc = %d\n",
@@ -470,12 +471,27 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
rx_desc->wb.qword0.hi_dword.fd_id);
+ /* Check if the programming error is for ATR.
+ * If so, auto disable ATR and set a state bit to mark that a
+ * flush is in progress. On later calls, do nothing while the
+ * flush is in progress; once the flush completes, the state
+ * bit is cleared.
+ */
+ if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
+ return;
+
pf->fd_add_err++;
/* store the current atr filter count */
pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
+ if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
+ (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+ set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+ }
+
/* filter programming failed most likely due to table full */
- fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+ fcnt_prog = i40e_get_global_fd_count(pf);
fcnt_avail = pf->fdir_pf_filter_count;
/* If ATR is running fcnt_prog can quickly change,
* if we are very close to full, it makes sense to disable
@@ -586,6 +602,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -594,10 +624,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
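With head write-back, the pending count is just the distance from the written-back head to the software tail, with a single wrap case. A minimal sketch of the arithmetic over a ring of count descriptors:

#include <stdio.h>
#include <stdint.h>

/* Descriptors the hardware has not yet processed, given the
 * write-back head, the software tail and the ring size.
 */
static uint32_t tx_pending(uint32_t head, uint32_t tail, uint32_t count)
{
        if (head == tail)
                return 0;
        return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
        printf("%u\n", (unsigned)tx_pending(10, 25, 512));  /* 15 pending */
        printf("%u\n", (unsigned)tx_pending(500, 20, 512)); /* 32 pending (wrapped) */
        printf("%u\n", (unsigned)tx_pending(7, 7, 512));    /* 0 pending */
        return 0;
}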
/**
@@ -606,6 +642,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
struct i40e_pf *pf = tx_ring->vsi->back;
bool ret = false;
@@ -623,41 +661,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending < I40E_MIN_DESC_PENDING) &&
- (tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
tx_pending, tx_ring->queue_index);
pf->tx_sluggish_count++;
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -748,6 +770,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_desc = I40E_TX_DESC(tx_ring, 0);
}
+ prefetch(tx_desc);
+
/* update budget accounting */
budget--;
} while (likely(budget));
@@ -835,6 +859,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
/* allow 00 to be written to the index */
@@ -1025,6 +1050,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
+ if (ring_is_ps_enabled(rx_ring)) {
+ int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
+
+ rx_bi = &rx_ring->rx_bi[0];
+ if (rx_bi->hdr_buf) {
+ dma_free_coherent(dev,
+ bufsz,
+ rx_bi->hdr_buf,
+ rx_bi->dma);
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ rx_bi->dma = 0;
+ rx_bi->hdr_buf = NULL;
+ }
+ }
+ }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
rx_bi = &rx_ring->rx_bi[i];
@@ -1083,6 +1124,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
+ * i40e_alloc_rx_headers - allocate rx header buffers
+ * @rx_ring: ring to alloc buffers
+ *
+ * Allocate rx header buffers for the entire ring. As these are static,
+ * this is only called when setting up a new ring.
+ **/
+void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ dma_addr_t dma;
+ void *buffer;
+ int buf_size;
+ int i;
+
+ if (rx_ring->rx_bi[0].hdr_buf)
+ return;
+ /* Make sure the buffers don't cross cache line boundaries. */
+ buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
+ buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
+ &dma, GFP_KERNEL);
+ if (!buffer)
+ return;
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ rx_bi->dma = dma + (i * buf_size);
+ rx_bi->hdr_buf = buffer + (i * buf_size);
+ }
+}
+
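All packet-split header buffers come from a single coherent DMA allocation; each descriptor's header address is a fixed-stride offset into it, with the stride rounded up to 256 bytes as noted in the comment above. A minimal sketch of the offset math, using plain malloc in place of dma_alloc_coherent and illustrative sizes:

#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        const unsigned int count = 8;        /* illustrative ring size */
        const unsigned int rx_hdr_len = 96;  /* illustrative header length */
        unsigned int buf_size = ALIGN_UP(rx_hdr_len, 256);
        unsigned char *base = malloc((size_t)buf_size * count);
        unsigned int i;

        if (!base)
                return 1;
        /* Each buffer i lives at base + i * buf_size, one allocation total. */
        for (i = 0; i < count; i++)
                printf("hdr_buf[%u] at offset %u\n", i, i * buf_size);
        free(base);
        return 0;
}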
+/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1142,11 +1214,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
+ * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+
+ if (bi->skb) /* desc is in use */
+ goto no_buffers;
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ dma_sync_single_range_for_device(rx_ring->dev,
+ bi->dma,
+ 0,
+ rx_ring->rx_hdr_len,
+ DMA_FROM_DEVICE);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
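In packet-split mode each receive page is consumed half at a time: whenever a page is remapped for reuse, the offset is XOR-toggled between 0 and PAGE_SIZE/2. A minimal sketch of that toggle, assuming a 4 KiB page:

#include <stdio.h>

#define PAGE_SIZE 4096  /* assumed page size */

int main(void)
{
        unsigned int page_offset = 0;
        int i;

        /* Reusing the same page alternates between its two halves. */
        for (i = 0; i < 4; i++) {
                page_offset ^= PAGE_SIZE / 2;
                printf("use %d: offset %u, length %u\n",
                       i, page_offset, PAGE_SIZE / 2);
        }
        return 0;
}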
+/**
+ * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
**/
-void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
@@ -1186,40 +1323,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
}
}
- if (ring_is_ps_enabled(rx_ring)) {
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- }
-
- if (!bi->page_dma) {
- /* use a half page if we're re-using */
- bi->page_offset ^= PAGE_SIZE / 2;
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev,
- bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- bi->page_dma = 0;
- goto no_buffers;
- }
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- rx_desc->read.hdr_addr = 0;
- }
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
i++;
if (i == rx_ring->count)
i = 0;
@@ -1273,10 +1378,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct iphdr *iph;
__sum16 csum;
- ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+ ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
skb->ip_summed = CHECKSUM_NONE;
@@ -1404,13 +1509,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
}
/**
- * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
* @rx_ring: rx ring to clean
* @budget: how many cleans we're allowed
*
 * Returns the number of packets cleaned
**/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
@@ -1426,25 +1531,54 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (budget <= 0)
return 0;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
-
- while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
- union i40e_rx_desc *next_rxd;
+ do {
struct i40e_rx_buffer *rx_bi;
struct sk_buff *skb;
u16 vlan_tag;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ i = rx_ring->next_to_clean;
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * DD bit is set.
+ */
+ dma_rmb();
if (i40e_rx_is_programming_status(qword)) {
i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
- goto next_desc;
+ I40E_RX_INCREMENT(rx_ring, i);
+ continue;
}
rx_bi = &rx_ring->rx_bi[i];
skb = rx_bi->skb;
- prefetch(skb->data);
+ if (likely(!skb)) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_hdr_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ break;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->dma,
+ 0,
+ rx_ring->rx_hdr_len,
+ DMA_FROM_DEVICE);
+ }
rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
@@ -1459,40 +1593,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
+ prefetch(rx_bi->page);
rx_bi->skb = NULL;
-
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * STATUS_DD bit is set
- */
- rmb();
-
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- if (rx_bi->dma) {
- u16 len;
-
+ cleaned_count++;
+ if (rx_hbo || rx_sph) {
+ int len;
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
- else if (rx_sph)
- len = rx_header_len;
- else if (rx_packet_len)
- len = rx_packet_len; /* 1buf/no split found */
else
- len = rx_header_len; /* split always mode */
-
- skb_put(skb, len);
- dma_unmap_single(rx_ring->dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
+ len = rx_header_len;
+ memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
+ } else if (skb->len == 0) {
+ int len;
+
+ len = (rx_packet_len > skb_headlen(skb) ?
+ skb_headlen(skb) : rx_packet_len);
+ memcpy(__skb_put(skb, len),
+ rx_bi->page + rx_bi->page_offset,
+ len);
+ rx_bi->page_offset += len;
+ rx_packet_len -= len;
}
/* Get the rest of the data if this was a header split */
- if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
-
+ if (rx_packet_len) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
rx_bi->page,
rx_bi->page_offset,
@@ -1514,22 +1638,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
DMA_FROM_DEVICE);
rx_bi->page_dma = 0;
}
- I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+ I40E_RX_INCREMENT(rx_ring, i);
if (unlikely(
!(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
struct i40e_rx_buffer *next_buffer;
next_buffer = &rx_ring->rx_bi[i];
-
- if (ring_is_ps_enabled(rx_ring)) {
- rx_bi->skb = next_buffer->skb;
- rx_bi->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- }
+ next_buffer->skb = skb;
rx_ring->rx_stats.non_eop_descs++;
- goto next_desc;
+ continue;
}
/* ERR_MASK will only have valid bits if EOP set */
@@ -1538,7 +1656,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* TODO: shouldn't we increment a counter indicating the
* drop?
*/
- goto next_desc;
+ continue;
}
skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
@@ -1564,33 +1682,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
#ifdef I40E_FCOE
if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
dev_kfree_skb_any(skb);
- goto next_desc;
+ continue;
}
#endif
+ skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
i40e_receive_skb(rx_ring, skb, vlan_tag);
rx_ring->netdev->last_rx = jiffies;
- budget--;
-next_desc:
rx_desc->wb.qword1.status_error_len = 0;
- if (!budget)
- break;
- cleaned_count++;
+ } while (likely(total_rx_packets < budget));
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ return total_rx_packets;
+}
+
+/**
+ * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns number of packets cleaned
+ **/
+static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u16 rx_packet_len;
+ u8 rx_ptype;
+ u64 qword;
+ u16 i;
+
+ do {
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+ i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
cleaned_count = 0;
}
- /* use prefetched values */
- rx_desc = next_rxd;
+ i = rx_ring->next_to_clean;
+ rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
- }
+ I40E_RXD_QW1_STATUS_SHIFT;
+
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * DD bit is set.
+ */
+ dma_rmb();
+
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ I40E_RX_INCREMENT(rx_ring, i);
+ continue;
+ }
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_bi->skb = NULL;
+ cleaned_count++;
+
+ /* In single-buffer mode the whole packet was received directly
+ * into this skb's data buffer, so just set its length and
+ * release the DMA mapping.
+ */
+ skb_put(skb, rx_packet_len);
+ dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+
+ I40E_RX_INCREMENT(rx_ring, i);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ rx_ring->rx_stats.non_eop_descs++;
+ continue;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
+ continue;
+ }
+
+ skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+ i40e_ptype_to_hash(rx_ptype));
+ if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
+ i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
+ I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+#ifdef I40E_FCOE
+ if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+#endif
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ rx_desc->wb.qword1.status_error_len = 0;
+ } while (likely(total_rx_packets < budget));
- rx_ring->next_to_clean = i;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
@@ -1598,10 +1832,7 @@ next_desc:
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
- if (cleaned_count)
- i40e_alloc_rx_buffers(rx_ring, cleaned_count);
-
- return budget > 0;
+ return total_rx_packets;
}
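
The memory-barrier comment in both clean routines is worth a stand-alone sketch: the status word is read once, the DD bit is tested, and only after a read barrier are the other descriptor fields (or the packet data) consumed. The descriptor layout, bit position, and barrier below are illustrative stand-ins, not the real i40e definitions.

#include <stdbool.h>
#include <stdint.h>

struct demo_rx_desc {
	volatile uint64_t qword1;	/* status/error/len, written by HW */
	uint64_t pkt_addr;
};

#define DEMO_DD_BIT	(1ULL << 0)

/* stand-in for dma_rmb(); a plain compiler barrier is enough for the demo */
#define demo_dma_rmb()	__asm__ __volatile__("" ::: "memory")

static bool demo_desc_done(struct demo_rx_desc *desc, uint64_t *qword)
{
	uint64_t q = desc->qword1;

	if (!(q & DEMO_DD_BIT))
		return false;		/* descriptor not written back yet */

	/* don't let later descriptor/buffer reads be hoisted above the test */
	demo_dma_rmb();
	*qword = q;
	return true;
}
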
/**
@@ -1622,6 +1853,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
+ int cleaned;
if (test_bit(__I40E_DOWN, &vsi->state)) {
napi_complete(napi);
@@ -1641,8 +1873,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
*/
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
- i40e_for_each_ring(ring, q_vector->rx)
- clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
+ i40e_for_each_ring(ring, q_vector->rx) {
+ if (ring_is_ps_enabled(ring))
+ cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
+ else
+ cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ /* if we didn't clean as many as budgeted, we must be done */
+ clean_complete &= (budget_per_ring != cleaned);
+ }
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
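
As a rough sketch of the budget handling above (illustrative only; the demo_* names are invented): the NAPI budget is split evenly across the vector's ring pairs, and a ring that consumed its whole share is treated as still having work pending, which keeps clean_complete false.

#include <stdbool.h>

#define demo_max(a, b)	((a) > (b) ? (a) : (b))

struct demo_vector {
	int num_ringpairs;
	/* per-ring clean routine, returns the number of packets cleaned */
	int (*clean_ring)(int ring_idx, int budget);
};

static bool demo_poll_rx(struct demo_vector *v, int budget)
{
	int budget_per_ring = demo_max(budget / v->num_ringpairs, 1);
	bool clean_complete = true;
	int i;

	for (i = 0; i < v->num_ringpairs; i++) {
		int cleaned = v->clean_ring(i, budget_per_ring);

		/* a ring that used its whole share is assumed not done */
		clean_complete &= (cleaned != budget_per_ring);
	}
	return clean_complete;
}
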
@@ -1709,6 +1947,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
return;
+ if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ return;
+
/* if sampling is disabled do nothing */
if (!tx_ring->atr_sample_rate)
return;
@@ -1816,6 +2057,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
+ if (protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off by the user the
+ * stack sets the protocol to 8021q so that the driver
+ * can take any steps required to support the SW only
+ * VLAN handling. In our case the driver doesn't need
+ * to take any further steps so just set the protocol
+ * to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ goto out;
+ }
+
/* if we have a HW VLAN tag being added, default to the HW one */
if (skb_vlan_tag_present(skb)) {
tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
@@ -1832,6 +2086,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_SW_VLAN;
}
+ if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ goto out;
+
/* Insert 802.1p priority into VLAN header */
if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
(skb->priority != TC_PRIO_CONTROL)) {
@@ -1852,6 +2109,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
}
}
+
+out:
*flags = tx_flags;
return 0;
}
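
The SW-VLAN comment above refers to frames that carry the 802.1Q tag in-line. A byte-level illustration of what vlan_get_protocol() resolves in that case (user-space sketch, not the kernel helper; the sample frame contents are made up):

#include <stdint.h>
#include <stdio.h>

#define DEMO_ETH_P_8021Q 0x8100

/* read the ethertype, skipping one 802.1Q tag if present */
static uint16_t demo_get_protocol(const uint8_t *frame)
{
	uint16_t proto = (uint16_t)((frame[12] << 8) | frame[13]);

	if (proto == DEMO_ETH_P_8021Q)
		proto = (uint16_t)((frame[16] << 8) | frame[17]);
	return proto;
}

int main(void)
{
	/* two MACs, 0x8100 tag with TCI 0x0005, inner ethertype 0x0800 */
	uint8_t frame[18] = { [12] = 0x81, [13] = 0x00,
			      [14] = 0x00, [15] = 0x05,
			      [16] = 0x08, [17] = 0x00 };

	printf("encapsulated ethertype: 0x%04x\n", demo_get_protocol(frame));
	return 0;
}
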
@@ -1976,8 +2235,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
struct iphdr *this_ip_hdr;
u32 network_hdr_len;
u8 l4_hdr = 0;
+ u32 l4_tunnel = 0;
if (skb->encapsulation) {
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ break;
+ default:
+ return;
+ }
network_hdr_len = skb_inner_network_header_len(skb);
this_ip_hdr = inner_ip_hdr(skb);
this_ipv6_hdr = inner_ipv6_hdr(skb);
@@ -2000,8 +2267,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the ctx descriptor fields */
*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- I40E_TXD_CTX_UDP_TUNNELING |
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ l4_tunnel |
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
@@ -2140,6 +2407,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
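
A worked example of the quick check above may help: DIV_ROUND_UP(num_frags + gso_segs, gso_segs) is a rough upper bound on the fragments any one TSO segment might need, and anything above I40E_MAX_BUFFER_TXD (8) forces a linearize. The numbers below are made up for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define DEMO_MAX_BUFFER_TXD	8	/* stand-in for I40E_MAX_BUFFER_TXD */

int main(void)
{
	unsigned int num_frags = 17, gso_segs = 2;
	unsigned int worst = DIV_ROUND_UP(num_frags + gso_segs, gso_segs);

	/* 17 frags over 2 segments -> worst case 10 per segment -> linearize */
	printf("worst case %u frags per segment: %s\n", worst,
	       worst > DEMO_MAX_BUFFER_TXD ? "linearize" : "ok");
	return 0;
}
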
+
+/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
@@ -2396,6 +2724,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b00231d2f1..4b0b8102cdc3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -96,6 +96,14 @@ enum i40e_dyn_idx_t {
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_INCREMENT(r, i) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ r->next_to_clean = i; \
+ } while (0)
+
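
A small aside on the macro shape used above (illustrative, not driver code): wrapping the body in do { ... } while (0) makes the whole macro behave as one statement, so it can sit safely in an un-braced if/else, and next_to_clean is advanced on every step.

#include <stdio.h>

#define DEMO_RING_COUNT	4

/* same shape as I40E_RX_INCREMENT above, with next_to_clean passed in */
#define DEMO_RX_INCREMENT(count, i, next_to_clean)	\
	do {						\
		(i)++;					\
		if ((i) == (count))			\
			(i) = 0;			\
		(next_to_clean) = (i);			\
	} while (0)

int main(void)
{
	unsigned int i = 3, next_to_clean = 3;

	/* the do/while (0) lets the macro live in an un-braced if/else */
	if (i == 3)
		DEMO_RX_INCREMENT(DEMO_RING_COUNT, i, next_to_clean);
	else
		printf("not reached\n");

	printf("i=%u next_to_clean=%u\n", i, next_to_clean);	/* both 0 */
	return 0;
}
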
#define I40E_RX_NEXT_DESC(r, i, n) \
do { \
(i)++; \
@@ -112,6 +120,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
@@ -151,6 +160,7 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
+ void *hdr_buf;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
@@ -223,8 +233,8 @@ struct i40e_ring {
u16 rx_buf_len;
u8 dtype;
#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
-#define I40E_RX_DTYPE_HEADER_SPLIT 2
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
u8 hsplit;
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
@@ -280,7 +290,9 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+void i40e_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index e9901ef06a63..568e855da0f3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -44,6 +44,7 @@
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
#define I40E_DEV_ID_10G_BASE_T 0x1586
+#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
@@ -175,12 +176,12 @@ struct i40e_link_status {
u8 an_info;
u8 ext_info;
u8 loopback;
- bool an_enabled;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
u16 max_frame_size;
bool crc_enable;
u8 pacing;
+ u8 requested_speeds;
};
struct i40e_phy_info {
@@ -241,6 +242,7 @@ struct i40e_hw_capabilities {
u8 rx_buf_chain_len;
u32 enabled_tcmap;
u32 maxtc;
+ u64 wr_csr_prot;
};
struct i40e_mac_info {
@@ -1143,7 +1145,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
-#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18
#define I40E_SR_NVM_WAKE_ON_LAN 0x19
#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
#define I40E_SR_NVM_EETRACK_LO 0x2D
@@ -1401,6 +1403,19 @@ struct i40e_lldp_variables {
u16 crc8;
};
+/* Offsets into Alternate RAM */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */
+
+/* Alternate RAM Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK 0xFF
+#define I40E_ALT_BW_RELATIVE_MASK 0x40000000
+#define I40E_ALT_BW_VALID_MASK 0x80000000
+
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 61dd1b187624..2d20af290fbf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -59,31 +59,29 @@
* of the virtchnl_msg structure.
*/
enum i40e_virtchnl_ops {
-/* VF sends req. to pf for the following
- * ops.
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
*/
I40E_VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
- I40E_VIRTCHNL_OP_RESET_VF,
- I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
- I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
- I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- I40E_VIRTCHNL_OP_GET_STATS,
- I40E_VIRTCHNL_OP_FCOE,
- I40E_VIRTCHNL_OP_CONFIG_RSS,
-/* PF sends status change events to vfs using
- * the following op.
- */
- I40E_VIRTCHNL_OP_EVENT,
+ I40E_VIRTCHNL_OP_RESET_VF = 2,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+ I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+ I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ I40E_VIRTCHNL_OP_GET_STATS = 15,
+ I40E_VIRTCHNL_OP_FCOE = 16,
+ I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
};
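
The explicit values above pin the opcode numbers that actually cross the PF/VF channel, so inserting or reordering entries in the enum cannot silently renumber them. A trivial sketch of the same idea (demo names only, not the real virtchnl definitions):

#include <stdio.h>

enum demo_virtchnl_ops {
	DEMO_OP_UNKNOWN = 0,
	DEMO_OP_VERSION = 1,	/* must stay 1, like the real opcode */
	DEMO_OP_EVENT = 17,	/* value pinned regardless of position */
	DEMO_OP_CONFIG_RSS = 18,
};

int main(void)
{
	printf("DEMO_OP_EVENT goes on the wire as %d\n", DEMO_OP_EVENT);
	return 0;
}
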
/* Virtual channel message descriptor. This overlays the admin queue
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 40f042af4131..78d1c4ff565e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -26,12 +26,135 @@
#include "i40e.h"
+/*********************notification routines***********************/
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the PF structure
+ * @opcode: operation code
+ * @retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg,
+ u16 msglen)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ /* Not all VFs are enabled so skip the ones that are not */
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ }
+}
+
+/**
+ * i40e_vc_notify_vf_link_state
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ **/
+static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ if (vf->link_forced) {
+ pfe.event_data.link_event.link_status = vf->link_up;
+ pfe.event_data.link_event.link_speed =
+ (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+ } else {
+ pfe.event_data.link_event.link_status =
+ ls->link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed = ls->link_speed;
+ }
+ i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe, sizeof(pfe), NULL);
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the PF structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ i40e_vc_notify_vf_link_state(&pf->vf[i]);
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0,
+ (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the VF structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+ int abs_vf_id;
+
+ /* validate the request */
+ if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+ return;
+
+ /* verify if the VF is in either init or active before proceeding */
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+ !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ return;
+
+ abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
+ 0, (u8 *)&pfe,
+ sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
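
As a sketch of the broadcast loop added above (not driver code; the vf_base_id of 64 below is invented): each VF's absolute id is its PF-relative id plus the function's vf_base_id, and VFs that are neither initialized nor active are skipped.

#include <stdbool.h>
#include <stdio.h>

struct demo_vf {
	int vf_id;	/* id relative to this PF */
	bool init;
	bool active;
};

static void demo_broadcast(const struct demo_vf *vf, int num_vfs,
			   int vf_base_id)
{
	int i;

	for (i = 0; i < num_vfs; i++) {
		int abs_vf_id = vf[i].vf_id + vf_base_id;

		/* skip VFs that are neither initialized nor active */
		if (!vf[i].init && !vf[i].active)
			continue;

		printf("send event to absolute VF id %d\n", abs_vf_id);
	}
}

int main(void)
{
	struct demo_vf vfs[] = { { 0, true, true }, { 1, false, false } };

	demo_broadcast(vfs, 2, 64);	/* vf_base_id value is made up */
	return 0;
}
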
/***********************misc routines*****************************/
/**
* i40e_vc_disable_vf
- * @pf: pointer to the pf info
- * @vf: pointer to the vf info
+ * @pf: pointer to the PF info
+ * @vf: pointer to the VF info
*
* Disable the VF through a SW reset
**/
@@ -48,38 +171,40 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
/**
* i40e_vc_isvalid_vsi_id
- * @vf: pointer to the vf info
- * @vsi_id: vf relative vsi id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI id
*
- * check for the valid vsi id
+ * check for the valid VSI id
**/
-static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
- return pf->vsi[vsi_id]->vf_id == vf->vf_id;
+ return (vsi && (vsi->vf_id == vf->vf_id));
}
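
The validation change above stops treating the VF-supplied vsi_id as a trusted array index and instead resolves it through a lookup that can fail, then checks ownership on the result. A minimal stand-alone sketch of that pattern (the table contents and ids below are made up):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_vsi {
	int id;		/* id handed to the VF by the firmware */
	int vf_id;	/* owning VF, or -1 */
};

static struct demo_vsi *demo_find_vsi_from_id(struct demo_vsi *tbl, int n,
					      int vsi_id)
{
	int i;

	for (i = 0; i < n; i++)
		if (tbl[i].id == vsi_id)
			return &tbl[i];
	return NULL;	/* unknown id: reject instead of indexing blindly */
}

static bool demo_isvalid_vsi_id(struct demo_vsi *tbl, int n,
				int vf_id, int vsi_id)
{
	struct demo_vsi *vsi = demo_find_vsi_from_id(tbl, n, vsi_id);

	return vsi && vsi->vf_id == vf_id;
}

int main(void)
{
	struct demo_vsi tbl[] = { { 394, 0 }, { 395, 1 } };

	printf("VF 0 may use VSI 394: %d\n",
	       demo_isvalid_vsi_id(tbl, 2, 0, 394));	/* 1 */
	printf("VF 0 may use VSI 999: %d\n",
	       demo_isvalid_vsi_id(tbl, 2, 0, 999));	/* 0 */
	return 0;
}
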
/**
* i40e_vc_isvalid_queue_id
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @vsi_id: vsi id
* @qid: vsi relative queue id
*
* check for the valid queue id
**/
-static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
u8 qid)
{
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
- return qid < pf->vsi[vsi_id]->alloc_queue_pairs;
+ return (vsi && (qid < vsi->alloc_queue_pairs));
}
/**
* i40e_vc_isvalid_vector_id
- * @vf: pointer to the vf info
- * @vector_id: vf relative vector id
+ * @vf: pointer to the VF info
+ * @vector_id: VF relative vector id
*
* check for the valid vector id
**/
@@ -94,19 +219,22 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
/**
* i40e_vc_get_pf_queue_id
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
* @vsi_queue_id: vsi relative queue id
*
- * return pf relative queue id
+ * return PF relative queue id
**/
-static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
u8 vsi_queue_id)
{
struct i40e_pf *pf = vf->pf;
- struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+ struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+ if (!vsi)
+ return pf_queue_id;
+
if (le16_to_cpu(vsi->info.mapping_flags) &
I40E_AQ_VSI_QUE_MAP_NONCONTIG)
pf_queue_id =
@@ -120,13 +248,13 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
/**
* i40e_config_irq_link_list
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as given by the FW
* @vecmap: irq map info
*
* configure irq link list from the map
**/
-static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
struct i40e_virtchnl_vector_map *vecmap)
{
unsigned long linklistmap = 0, tempmap;
@@ -171,7 +299,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
I40E_VIRTCHNL_SUPPORTED_QTYPES));
vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
wr32(hw, reg_idx, reg);
@@ -198,7 +326,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
(I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
vsi_queue_id);
} else {
pf_queue_id = I40E_QUEUE_END_OF_LIST;
@@ -220,25 +348,27 @@ irq_list_done:
/**
* i40e_config_vsi_tx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
* @vsi_queue_id: vsi relative queue index
* @info: config. info
*
* configure tx queue
**/
-static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
struct i40e_virtchnl_txq_info *info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_hmc_obj_txq tx_ctx;
+ struct i40e_vsi *vsi;
u16 pf_queue_id;
u32 qtx_ctl;
int ret = 0;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+ vsi = i40e_find_vsi_from_id(pf, vsi_id);
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -246,7 +376,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
/* only set the required fields */
tx_ctx.base = info->dma_ring_addr / 128;
tx_ctx.qlen = info->ring_len;
- tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
tx_ctx.head_wb_ena = info->headwb_enabled;
tx_ctx.head_wb_addr = info->dma_headwb_addr;
@@ -287,14 +417,14 @@ error_context:
/**
* i40e_config_vsi_rx_queue
- * @vf: pointer to the vf info
- * @vsi_idx: index of VSI in PF struct
+ * @vf: pointer to the VF info
+ * @vsi_id: id of VSI as provided by the FW
* @vsi_queue_id: vsi relative queue index
* @info: config. info
*
* configure rx queue
**/
-static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
struct i40e_virtchnl_rxq_info *info)
{
@@ -304,7 +434,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
u16 pf_queue_id;
int ret = 0;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
@@ -378,10 +508,10 @@ error_param:
/**
* i40e_alloc_vsi_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @type: type of VSI to allocate
*
- * alloc vf vsi context & resources
+ * alloc VF vsi context & resources
**/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
{
@@ -394,18 +524,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (!vsi) {
dev_err(&pf->pdev->dev,
- "add vsi failed for vf %d, aq_err %d\n",
+ "add vsi failed for VF %d, aq_err %d\n",
vf->vf_id, pf->hw.aq.asq_last_status);
ret = -ENOENT;
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- vf->lan_vsi_index = vsi->idx;
+ vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
- dev_info(&pf->pdev->dev,
- "VF %d assigned LAN VSI index %d, VSI id %d\n",
- vf->vf_id, vsi->idx, vsi->id);
/* If the port VLAN has been configured and then the
* VF driver was removed then the VSI port VLAN
* configuration was destroyed. Check if there is
@@ -446,9 +573,9 @@ error_alloc_vsi_res:
/**
* i40e_enable_vf_mappings
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * enable vf mappings
+ * enable VF mappings
**/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
@@ -469,8 +596,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
/* map PF queues to VF queues */
- for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) {
- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+ for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
total_queue_pairs++;
@@ -478,13 +605,13 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
/* map PF queues to VSI */
for (j = 0; j < 7; j++) {
- if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) {
+ if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
reg = 0x07FF07FF; /* unused */
} else {
- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
j * 2);
reg = qid;
- qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
(j * 2) + 1);
reg |= qid << 16;
}
@@ -496,9 +623,9 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
/**
* i40e_disable_vf_mappings
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * disable vf mappings
+ * disable VF mappings
**/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
@@ -516,9 +643,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf)
/**
* i40e_free_vf_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * free vf resources
+ * free VF resources
**/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
@@ -528,9 +655,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
int i, msix_vf;
/* free vsi & disconnect it from the parent uplink */
- if (vf->lan_vsi_index) {
- i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
- vf->lan_vsi_index = 0;
+ if (vf->lan_vsi_idx) {
+ i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
+ vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0;
}
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -571,9 +698,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
/**
* i40e_alloc_vf_res
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * allocate vf resources
+ * allocate VF resources
**/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
@@ -585,15 +712,15 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
if (ret)
goto error_alloc;
- total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+ total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
/* store the total qps number for the runtime
- * vf req validation
+ * VF req validation
*/
vf->num_queue_pairs = total_queue_pairs;
- /* vf is now completely initialized */
+ /* VF is now completely initialized */
set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
error_alloc:
@@ -607,7 +734,7 @@ error_alloc:
#define VF_TRANS_PENDING_MASK 0x20
/**
* i40e_quiesce_vf_pci
- * @vf: pointer to the vf structure
+ * @vf: pointer to the VF structure
*
* Wait for VF PCI transactions to be cleared after reset. Returns -EIO
* if the transactions never clear.
@@ -634,10 +761,10 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
/**
* i40e_reset_vf
- * @vf: pointer to the vf structure
+ * @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * reset the vf
+ * reset the VF
**/
void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -657,7 +784,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset vf using VPGEN_VFRTRIG reg */
+ /* reset VF using VPGEN_VFRTRIG reg */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -685,6 +812,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
}
}
+ if (flr)
+ usleep_range(10000, 20000);
+
if (!rsd)
dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id);
@@ -695,12 +825,12 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
/* On initial reset, we won't have any queues */
- if (vf->lan_vsi_index == 0)
+ if (vf->lan_vsi_idx == 0)
goto complete_reset;
- i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false);
+ i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false);
complete_reset:
- /* reallocate vf resources to reset the VSI state */
+ /* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
i40e_alloc_vf_res(vf);
i40e_enable_vf_mappings(vf);
@@ -713,78 +843,10 @@ complete_reset:
}
/**
- * i40e_enable_pf_switch_lb
- * @pf: pointer to the pf structure
- *
- * enable switch loop back or die - no point in a return value
- **/
-void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
-{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- struct i40e_vsi_context ctxt;
- int aq_ret;
-
- ctxt.seid = pf->main_vsi_seid;
- ctxt.pf_num = pf->hw.pf_id;
- ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
- dev_info(&pf->pdev->dev,
- "%s couldn't get pf vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
- return;
- }
- ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
-
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
- dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
- }
-}
-
-/**
- * i40e_disable_pf_switch_lb
- * @pf: pointer to the pf structure
- *
- * disable switch loop back or die - no point in a return value
- **/
-static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
-{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- struct i40e_vsi_context ctxt;
- int aq_ret;
-
- ctxt.seid = pf->main_vsi_seid;
- ctxt.pf_num = pf->hw.pf_id;
- ctxt.vf_num = 0;
- aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
- if (aq_ret) {
- dev_info(&pf->pdev->dev,
- "%s couldn't get pf vsi config, err %d, aq_err %d\n",
- __func__, aq_ret, pf->hw.aq.asq_last_status);
- return;
- }
- ctxt.flags = I40E_AQ_VSI_TYPE_PF;
- ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
-
- aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
- if (aq_ret) {
- dev_info(&pf->pdev->dev,
- "%s: update vsi switch failed, aq_err=%d\n",
- __func__, vsi->back->hw.aq.asq_last_status);
- }
-}
-
-/**
* i40e_free_vfs
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
*
- * free vf resources
+ * free VF resources
**/
void i40e_free_vfs(struct i40e_pf *pf)
{
@@ -797,16 +859,23 @@ void i40e_free_vfs(struct i40e_pf *pf)
while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
usleep_range(1000, 2000);
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+ false);
+
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
*/
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
msleep(20); /* let any messages in transit get finished up */
- /* free up vf resources */
+ /* free up VF resources */
tmp = pf->num_alloc_vfs;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
@@ -832,10 +901,6 @@ void i40e_free_vfs(struct i40e_pf *pf)
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
}
- i40e_disable_pf_switch_lb(pf);
- } else {
- dev_warn(&pf->pdev->dev,
- "unable to disable SR-IOV because VFs are assigned.\n");
}
clear_bit(__I40E_VF_DISABLE, &pf->state);
}
@@ -843,10 +908,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
/**
* i40e_alloc_vfs
- * @pf: pointer to the pf structure
- * @num_alloc_vfs: number of vfs to allocate
+ * @pf: pointer to the PF structure
+ * @num_alloc_vfs: number of VFs to allocate
*
- * allocate vf resources
+ * allocate VF resources
**/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
@@ -883,15 +948,14 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
- /* vf resources get allocated during reset */
+ /* VF resources get allocated during reset */
i40e_reset_vf(&vfs[i], false);
- /* enable vf vplan_qtable mappings */
+ /* enable VF vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
}
pf->num_alloc_vfs = num_alloc_vfs;
- i40e_enable_pf_switch_lb(pf);
err_alloc:
if (ret)
i40e_free_vfs(pf);
@@ -905,7 +969,7 @@ err_iov:
/**
* i40e_pci_sriov_enable
* @pdev: pointer to a pci_dev structure
- * @num_vfs: number of vfs to allocate
+ * @num_vfs: number of VFs to allocate
*
* Enable or change the number of VFs
**/
@@ -945,7 +1009,7 @@ err_out:
/**
* i40e_pci_sriov_configure
* @pdev: pointer to a pci_dev structure
- * @num_vfs: number of vfs to allocate
+ * @num_vfs: number of VFs to allocate
*
* Enable or change the number of VFs. Called when the user updates the number
* of VFs in sysfs.
@@ -970,13 +1034,13 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
/**
* i40e_vc_send_msg_to_vf
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * send msg to vf
+ * send msg to VF
**/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
@@ -1025,11 +1089,11 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
/**
* i40e_vc_send_resp_to_vf
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @opcode: operation code
* @retval: return value
*
- * send resp msg to vf
+ * send resp msg to VF
**/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
enum i40e_virtchnl_ops opcode,
@@ -1040,9 +1104,9 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
/**
* i40e_vc_get_version_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
*
- * called from the vf to request the API version used by the PF
+ * called from the VF to request the API version used by the PF
**/
static int i40e_vc_get_version_msg(struct i40e_vf *vf)
{
@@ -1058,11 +1122,11 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
/**
* i40e_vc_get_vf_resources_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to request its resources
+ * called from the VF to request its resources
**/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
{
@@ -1090,18 +1154,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
}
vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
- if (vf->lan_vsi_index) {
- vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+ if (vf->lan_vsi_idx) {
+ vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
vfres->vsi_res[i].num_queue_pairs =
- pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs;
+ pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
memcpy(vfres->vsi_res[i].default_mac_addr,
vf->default_lan_addr.addr, ETH_ALEN);
i++;
@@ -1109,7 +1173,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
err:
- /* send the response back to the vf */
+ /* send the response back to the VF */
ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
aq_ret, (u8 *)vfres, len);
@@ -1119,13 +1183,13 @@ err:
/**
* i40e_vc_reset_vf_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to reset itself,
- * unlike other virtchnl messages, pf driver
- * doesn't send the response back to the vf
+ * called from the VF to reset itself,
+ * unlike other virtchnl messages, PF driver
+ * doesn't send the response back to the VF
**/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
@@ -1135,12 +1199,12 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
/**
* i40e_vc_config_promiscuous_mode_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to configure the promiscuous mode of
- * vf vsis
+ * called from the VF to configure the promiscuous mode of
+ * VF vsis
**/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
u8 *msg, u16 msglen)
@@ -1153,21 +1217,21 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
bool allmulti = false;
i40e_status aq_ret;
+ vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
- (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+ (vsi->type != I40E_VSI_FCOE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi = pf->vsi[info->vsi_id];
if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
allmulti, NULL);
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
aq_ret);
@@ -1175,11 +1239,11 @@ error_param:
/**
* i40e_vc_config_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to configure the rx/tx
+ * called from the VF to configure the rx/tx
* queues
**/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1221,22 +1285,22 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
}
- /* set vsi num_queue_pairs in use to num configured by vf */
- pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs;
+ /* set vsi num_queue_pairs in use to num configured by VF */
+ pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
aq_ret);
}
/**
* i40e_vc_config_irq_map_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to configure the irq to
+ * called from the VF to configure the irq to
* queue map
**/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1288,18 +1352,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_config_irq_link_list(vf, vsi_id, map);
}
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
aq_ret);
}
/**
* i40e_vc_enable_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to enable all or specific queue(s)
+ * called from the VF to enable all or specific queue(s)
**/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
@@ -1323,21 +1387,22 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vsi_id], true))
+
+ if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true))
aq_ret = I40E_ERR_TIMEOUT;
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
aq_ret);
}
/**
* i40e_vc_disable_queues_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to disable all or specific
+ * called from the VF to disable all or specific
* queue(s)
**/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
@@ -1345,7 +1410,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_virtchnl_queue_select *vqs =
(struct i40e_virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
@@ -1362,22 +1426,23 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- if (i40e_vsi_control_rings(pf->vsi[vsi_id], false))
+
+ if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false))
aq_ret = I40E_ERR_TIMEOUT;
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
aq_ret);
}
/**
* i40e_vc_get_stats_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
- * called from the vf to get vsi stats
+ * called from the VF to get vsi stats
**/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
@@ -1400,7 +1465,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- vsi = pf->vsi[vqs->vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1409,14 +1474,14 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
stats = vsi->eth_stats;
error_param:
- /* send the response back to the vf */
+ /* send the response back to the VF */
return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
(u8 *)&stats, sizeof(stats));
}
/**
* i40e_check_vf_permission
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @macaddr: pointer to the MAC Address being checked
*
* Check if the VF has permission to add or delete unicast MAC address
@@ -1450,7 +1515,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
/**
* i40e_vc_add_mac_addr_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1478,7 +1543,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (ret)
goto error_param;
}
- vsi = pf->vsi[vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
/* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) {
@@ -1507,14 +1572,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
ret);
}
/**
* i40e_vc_del_mac_addr_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1546,7 +1611,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
}
- vsi = pf->vsi[vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
/* delete addresses from the list */
for (i = 0; i < al->num_elements; i++)
@@ -1558,14 +1623,14 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
ret);
}
/**
* i40e_vc_add_vlan_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1596,7 +1661,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
}
- vsi = pf->vsi[vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1613,13 +1678,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
}
/**
* i40e_vc_remove_vlan_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
@@ -1649,7 +1714,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
}
- vsi = pf->vsi[vsi_id];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1664,13 +1729,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
}
error_param:
- /* send the response to the vf */
+ /* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
}
/**
* i40e_vc_validate_vf_msg
- * @vf: pointer to the vf info
+ * @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
* @msghndl: msg handle
@@ -1776,14 +1841,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
/**
* i40e_vc_process_vf_msg
- * @pf: pointer to the pf structure
- * @vf_id: source vf id
+ * @pf: pointer to the PF structure
+ * @vf_id: source VF id
* @msg: pointer to the msg buffer
* @msglen: msg length
* @msghndl: msg handle
*
* called from the common aeq/arq handler to
- * process request from vf
+ * process request from VF
**/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
@@ -1801,7 +1866,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
if (ret) {
- dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
+ dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
local_vf_id, v_opcode, msglen);
return ret;
}
@@ -1828,6 +1893,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+ i40e_vc_notify_vf_link_state(vf);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
@@ -1849,7 +1915,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
- dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
+ dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
I40E_ERR_NOT_IMPLEMENTED);
@@ -1861,10 +1927,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
/**
* i40e_vc_process_vflr_event
- * @pf: pointer to the pf structure
+ * @pf: pointer to the PF structure
*
 * called from the VFLR irq handler to
- * free up vf resources and state variables
+ * free up VF resources and state variables
**/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
@@ -1885,7 +1951,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- /* read GLGEN_VFLRSTAT register to find out the flr vfs */
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & (1 << bit_idx)) {
@@ -1901,124 +1967,12 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
}
/**
- * i40e_vc_vf_broadcast
- * @pf: pointer to the pf structure
- * @opcode: operation code
- * @retval: return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send a message to all VFs on a given PF
- **/
-static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
- enum i40e_virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg,
- u16 msglen)
-{
- struct i40e_hw *hw = &pf->hw;
- struct i40e_vf *vf = pf->vf;
- int i;
-
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- /* Not all vfs are enabled so skip the ones that are not */
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
- !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- continue;
-
- /* Ignore return value on purpose - a given VF may fail, but
- * we need to keep going and send to all of them
- */
- i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
- }
-}
-
-/**
- * i40e_vc_notify_link_state
- * @pf: pointer to the pf structure
- *
- * send a link status message to all VFs on a given PF
- **/
-void i40e_vc_notify_link_state(struct i40e_pf *pf)
-{
- struct i40e_virtchnl_pf_event pfe;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_vf *vf = pf->vf;
- struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int i;
-
- pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
- for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- if (vf->link_forced) {
- pfe.event_data.link_event.link_status = vf->link_up;
- pfe.event_data.link_event.link_speed =
- (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
- } else {
- pfe.event_data.link_event.link_status =
- ls->link_info & I40E_AQ_LINK_UP;
- pfe.event_data.link_event.link_speed = ls->link_speed;
- }
- i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
- 0, (u8 *)&pfe, sizeof(pfe),
- NULL);
- }
-}
-
-/**
- * i40e_vc_notify_reset
- * @pf: pointer to the pf structure
- *
- * indicate a pending reset to all VFs on a given PF
- **/
-void i40e_vc_notify_reset(struct i40e_pf *pf)
-{
- struct i40e_virtchnl_pf_event pfe;
-
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
- i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
- (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
-}
-
-/**
- * i40e_vc_notify_vf_reset
- * @vf: pointer to the vf structure
- *
- * indicate a pending reset to the given VF
- **/
-void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
-{
- struct i40e_virtchnl_pf_event pfe;
- int abs_vf_id;
-
- /* validate the request */
- if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
- return;
-
- /* verify if the VF is in either init or active before proceeding */
- if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
- !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
- return;
-
- abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
-
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
- i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
- I40E_SUCCESS, (u8 *)&pfe,
- sizeof(struct i40e_virtchnl_pf_event), NULL);
-}
-
-/**
* i40e_ndo_set_vf_mac
* @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
* @mac: mac address
*
- * program vf mac address
+ * program VF mac address
**/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
@@ -2038,7 +1992,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
vf = &(pf->vf[vf_id]);
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev,
"Uninitialized VF %d\n", vf_id);
@@ -2083,11 +2037,11 @@ error_param:
/**
* i40e_ndo_set_vf_port_vlan
* @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
 * @vlan_id: VLAN identifier
* @qos: priority setting
*
- * program vf vlan id and/or qos
+ * program VF vlan id and/or qos
**/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos)
@@ -2112,7 +2066,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
}
vf = &(pf->vf[vf_id]);
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
ret = -EINVAL;
@@ -2196,10 +2150,10 @@ error_pvid:
/**
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
- * @vf_id: vf identifier
- * @tx_rate: tx rate
+ * @vf_id: VF identifier
+ * @tx_rate: Tx rate
*
- * configure vf tx rate
+ * configure VF Tx rate
**/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate)
@@ -2219,13 +2173,13 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
if (min_tx_rate) {
- dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
+ dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
min_tx_rate, vf_id);
return -EINVAL;
}
vf = &(pf->vf[vf_id]);
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
ret = -EINVAL;
@@ -2247,7 +2201,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
}
if (max_tx_rate > speed) {
- dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
+ dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.",
max_tx_rate, vf->vf_id);
ret = -EINVAL;
goto error;
@@ -2276,10 +2230,10 @@ error:
/**
* i40e_ndo_get_vf_config
* @netdev: network interface device structure
- * @vf_id: vf identifier
- * @ivi: vf configuration structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
*
- * return vf configuration
+ * return VF configuration
**/
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi)
@@ -2299,7 +2253,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
vf = &(pf->vf[vf_id]);
/* first vsi is always the LAN vsi */
- vsi = pf->vsi[vf->lan_vsi_index];
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
ret = -EINVAL;
@@ -2331,7 +2285,7 @@ error_param:
/**
* i40e_ndo_set_vf_link_state
* @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
* @link: required link state
*
* Set the link state of a specified VF, regardless of physical link state
@@ -2394,7 +2348,7 @@ error_out:
/**
* i40e_ndo_set_vf_spoofchk
* @netdev: network interface device structure
- * @vf_id: vf identifier
+ * @vf_id: VF identifier
* @enable: flag to enable or disable feature
*
* Enable or disable VF spoof checking
@@ -2423,11 +2377,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
vf->spoofchk = enable;
memset(&ctxt, 0, sizeof(ctxt));
- ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+ ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
ctxt.pf_num = pf->hw.pf_id;
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
if (enable)
- ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
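
The spoof-check hunk directly above follows the driver's usual VSI-context update pattern: fill a struct i40e_vsi_context, mark only the security property section as valid, set the anti-spoof bits (both MAC and VLAN checking after this patch), and push the context to firmware with i40e_aq_update_vsi_params(). Below is a minimal sketch of that pattern using only names that appear in the diff; the standalone helper i40e_vsi_set_spoofchk() is illustrative and not part of the driver.

/* Illustrative helper sketching the VSI security-flag update done in
 * i40e_ndo_set_vf_spoofchk() above; assumes the driver's own headers.
 */
static int i40e_vsi_set_spoofchk(struct i40e_pf *pf, struct i40e_vf *vf,
				 bool enable)
{
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	memset(&ctxt, 0, sizeof(ctxt));
	/* address the VF's LAN VSI by its switch element ID (seid) */
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	/* only the security section of the VSI context is being updated */
	ctxt.info.valid_sections =
		cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		/* after this patch both MAC and VLAN anti-spoof are set */
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret)
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
	return ret;
}

The memset() and valid_sections pair matter here: firmware only applies the sections flagged as valid, so stale fields elsewhere in ctxt are ignored.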
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 9452f5247cff..09043c1aae54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -71,12 +71,12 @@ enum i40e_vf_capabilities {
struct i40e_vf {
struct i40e_pf *pf;
- /* vf id in the pf space */
+ /* VF id in the PF space */
u16 vf_id;
- /* all vf vsis connect to the same parent */
+ /* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
- /* vf Port Extender (PE) stag if used */
+ /* VF Port Extender (PE) stag if used */
u16 stag;
struct i40e_virtchnl_ether_addr default_lan_addr;
@@ -88,10 +88,10 @@ struct i40e_vf {
* When assigned, these will be non-zero, because VSI 0 is always
* the main LAN VSI for the PF.
*/
- u8 lan_vsi_index; /* index into PF struct */
+ u8 lan_vsi_idx; /* index into PF struct */
u8 lan_vsi_id; /* ID as used by firmware */
- u8 num_queue_pairs; /* num of qps assigned to vf vsis */
+ u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u64 num_mdd_events; /* num of mdd events detected */
u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */
u64 num_valid_msgs; /* num of valid msgs detected */
@@ -100,7 +100,7 @@ struct i40e_vf {
unsigned long vf_states; /* vf's runtime states */
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
- bool link_up; /* only valid if vf link is forced */
+ bool link_up; /* only valid if VF link is forced */
bool spoofchk;
};
@@ -113,7 +113,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
-/* vf configuration related iplink handlers */
+/* VF configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
@@ -126,6 +126,5 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
-void i40e_enable_pf_switch_lb(struct i40e_pf *pf);
#endif /* _I40E_VIRTCHNL_PF_H_ */
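
One rename recurs throughout both files above: the VF's LAN-VSI array index moves from lan_vsi_index to lan_vsi_idx, while lan_vsi_id stays as the firmware-facing VSI ID. Every lookup in the virtchnl paths that previously used pf->vsi[vsi_id] or pf->vsi[vf->lan_vsi_index] now dereferences the renamed field, as in this illustrative accessor (the helper name vf_lan_vsi() is hypothetical and not part of the driver):

/* Illustrative only: resolve a VF's LAN VSI from the PF's VSI array.
 * Mirrors the pf->vsi[vf->lan_vsi_idx] lookups used throughout this patch.
 */
static inline struct i40e_vsi *vf_lan_vsi(struct i40e_pf *pf,
					  struct i40e_vf *vf)
{
	/* software index into pf->vsi[], not the firmware VSI ID */
	return pf->vsi[vf->lan_vsi_idx];
}

Per the struct comments in i40e_virtchnl_pf.h, the rename keeps the software index (lan_vsi_idx, an index into the PF struct) visibly distinct from the firmware ID (lan_vsi_id); the two values are not interchangeable when indexing pf->vsi[].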