Diffstat (limited to 'drivers/net/ethernet/intel')
62 files changed, 6707 insertions, 3542 deletions
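Two cleanups repeat across the per-driver hunks below: open-coded short-frame padding is replaced by eth_skb_pad()/skb_put_padto(), which free the skb themselves on allocation failure, and hard-coded RSS seed tables are replaced by netdev_rss_key_fill(), which copies the kernel's boot-time random RSS key. A minimal sketch of both patterns in a hypothetical driver (the foo_* names, FOO_RSSRK register, and foo_write_reg() helper are illustrative, not from this patch):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* TX path: pad to the 60-byte Ethernet minimum. On failure the helper
 * has already freed the skb, so just report the frame as consumed.
 */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (eth_skb_pad(skb))		/* i.e. skb_put_padto(skb, ETH_ZLEN) */
		return NETDEV_TX_OK;
	/* ... hand the now >= ETH_ZLEN frame to the hardware ... */
	return NETDEV_TX_OK;
}

/* RSS setup: seed the hardware hash key from the system-wide random
 * key instead of a constant table baked into the driver.
 */
static void foo_setup_rss_key(struct foo_hw *hw)
{
	u32 rss_key[10];
	int i;

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (i = 0; i < 10; i++)
		foo_write_reg(hw, FOO_RSSRK(i), rss_key[i]);
}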
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 24f3986cfae2..83140cbb5f01 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3136,12 +3136,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, * packets may get corrupted during padding by HW. * To WA this issue, pad all small packets manually. */ - if (skb->len < ETH_ZLEN) { - if (skb_pad(skb, ETH_ZLEN - skb->len)) - return NETDEV_TX_OK; - skb->len = ETH_ZLEN; - skb_set_tail_pointer(skb, ETH_ZLEN); - } + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; mss = skb_shinfo(skb)->gso_size; /* The controller does a simple calculation to @@ -4104,7 +4100,7 @@ static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, unsigned int bufsz) { - struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz); + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); if (unlikely(!skb)) adapter->alloc_rx_buff_failed++; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 952ef7c434e8..e14fd85f64eb 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1016,7 +1016,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, */ if (length < copybreak) { struct sk_buff *new_skb = - netdev_alloc_skb_ip_align(netdev, length); + napi_alloc_skb(&adapter->napi, length); if (new_skb) { skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, @@ -3449,15 +3449,12 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; + u32 rss_key[10]; int i; - static const u32 rsskey[10] = { - 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0, - 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe - }; - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i < 10; i++) - ew32(RSSRK(i), rsskey[i]); + ew32(RSSRK(i), rss_key[i]); /* Direct all traffic to queue 0 */ for (i = 0; i < 32; i++) @@ -5557,12 +5554,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, /* The minimum packet size with TCTL.PSP set is 17 bytes so * pad skb in order to meet this minimum size requirement */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; mss = skb_shinfo(skb)->gso_size; if (mss) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2d04464e6aa3..651f53bc7376 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -916,11 +916,15 @@ static u32 fm10k_get_rssrk_size(struct net_device *netdev) return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG; } -static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key) +static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct fm10k_intfc *interface = netdev_priv(netdev); int i, err; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + err = fm10k_get_reta(netdev, indir); if (err || !key) return err; @@ -932,12 +936,16 @@ static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key) } static int fm10k_set_rssh(struct net_device *netdev, const u32 
*indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_hw *hw = &interface->hw; int i, err; + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + err = fm10k_set_reta(netdev, indir); if (err || !key) return err; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index e645af412e76..ee1ecb146df7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -83,7 +83,7 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring, return true; /* alloc new page for storage */ - page = alloc_page(GFP_ATOMIC | __GFP_COLD); + page = dev_alloc_page(); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; @@ -308,8 +308,8 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, #endif /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - FM10K_RX_HDR_LEN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + FM10K_RX_HDR_LEN); if (unlikely(!skb)) { rx_ring->rx_stats.alloc_failed++; return NULL; @@ -578,14 +578,9 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, if (skb_is_nonlinear(skb)) fm10k_pull_tail(rx_ring, rx_desc, skb); - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index a0cb74ab3dc6..4f5892cc32d7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1551,15 +1551,11 @@ void fm10k_down(struct fm10k_intfc *interface) static int fm10k_sw_init(struct fm10k_intfc *interface, const struct pci_device_id *ent) { - static const u32 seed[FM10K_RSSRK_SIZE] = { 0xda565a6d, 0xc20e5b25, - 0x3d256741, 0xb08fa343, - 0xcb2bcad0, 0xb4307bae, - 0xa32dcb77, 0x0cf23080, - 0x3bb7426a, 0xfa01acbe }; const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; struct fm10k_hw *hw = &interface->hw; struct pci_dev *pdev = interface->pdev; struct net_device *netdev = interface->netdev; + u32 rss_key[FM10K_RSSRK_SIZE]; unsigned int rss; int err; @@ -1673,8 +1669,8 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, /* initialize vxlan_port list */ INIT_LIST_HEAD(&interface->vxlan_port); - /* initialize RSS key */ - memcpy(interface->rssrk, seed, sizeof(seed)); + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + memcpy(interface->rssrk, rss_key, sizeof(rss_key)); /* Start off interface as being down */ set_bit(__FM10K_DOWN, &interface->state); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index f1e33f896439..fc50f6461b13 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -87,7 +87,7 @@ #define I40E_MINIMUM_FCOE 1 /* minimum number of QPs for FCoE */ #endif /* I40E_FCOE */ #define I40E_MAX_AQ_BUF_SIZE 4096 -#define I40E_AQ_LEN 32 +#define I40E_AQ_LEN 128 #define I40E_AQ_WORK_LIMIT 16 #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_MSG_ENABLE 4 @@ -146,6 +146,7 @@ enum i40e_state_t { __I40E_DOWN_REQUESTED, __I40E_FD_FLUSH_REQUESTED, 
__I40E_RESET_FAILED, + __I40E_PORT_TX_SUSPENDED, }; enum i40e_interrupt_policy { @@ -269,7 +270,8 @@ struct i40e_pf { u16 msg_enable; char misc_int_name[IFNAMSIZ + 9]; u16 adminq_work_limit; /* num of admin receive queue desc to process */ - int service_timer_period; + unsigned long service_timer_period; + unsigned long service_timer_previous; struct timer_list service_timer; struct work_struct service_task; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 72f5d25a222f..77f6254a89ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -51,7 +51,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) static void i40e_adminq_init_regs(struct i40e_hw *hw) { /* set head and tail registers in our local struct */ - if (hw->mac.type == I40E_MAC_VF) { + if (i40e_is_vf(hw)) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; hw->aq.asq.len = I40E_VF_ATQLEN1; @@ -617,7 +617,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) /* pre-emptive resource lock release */ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); - hw->aq.nvm_busy = false; + hw->aq.nvm_release_on_done = false; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; ret_code = i40e_aq_set_hmc_resource_profile(hw, I40E_HMC_PROFILE_DEFAULT, @@ -754,12 +755,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, goto asq_send_command_exit; } - if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) { - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n"); - status = I40E_ERR_NVM; - goto asq_send_command_exit; - } - details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use); if (cmd_details) { *details = *cmd_details; @@ -853,7 +848,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (!details->async && !details->postpone) { u32 total_delay = 0; - u32 delay_len = 10; do { /* AQ designers suggest use of head for better @@ -861,9 +855,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, */ if (i40e_asq_done(hw)) break; - /* ugh! 
delay while spin_lock */ - udelay(delay_len); - total_delay += delay_len; + usleep_range(1000, 2000); + total_delay++; } while (total_delay < hw->aq.asq_cmd_timeout); } @@ -903,9 +896,6 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; } - if (!status && i40e_is_nvm_update_op(desc)) - hw->aq.nvm_busy = true; - asq_send_command_error: mutex_unlock(&hw->aq.asq_mutex); asq_send_command_exit: @@ -958,9 +948,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQRX: Queue is empty.\n"); ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } @@ -982,10 +969,10 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); - e->msg_size = min(datalen, e->msg_size); - if (e->msg_buf != NULL && (e->msg_size != 0)) + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_size); + e->msg_len); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, @@ -1021,7 +1008,6 @@ clean_arq_element_out: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { - hw->aq.nvm_busy = false; if (hw->aq.nvm_release_on_done) { i40e_release_nvm(hw); hw->aq.nvm_release_on_done = false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index ba38a89c79d6..564d0b0192f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -28,6 +28,7 @@ #define _I40E_ADMINQ_H_ #include "i40e_osdep.h" +#include "i40e_status.h" #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ @@ -76,7 +77,8 @@ struct i40e_asq_cmd_details { /* ARQ event information */ struct i40e_arq_event_info { struct i40e_aq_desc desc; - u16 msg_size; + u16 msg_len; + u16 buf_len; u8 *msg_buf; }; @@ -93,7 +95,6 @@ struct i40e_adminq_info { u16 fw_min_ver; /* firmware minor version */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_busy; bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ @@ -108,7 +109,7 @@ struct i40e_adminq_info { * i40e_aq_rc_to_posix - convert errors to user-land codes * aq_rc: AdminQ error code to convert **/ -static inline int i40e_aq_rc_to_posix(u16 aq_rc) +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; return aq_to_posix[aq_rc]; } /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */ +#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 15f289f2917f..8835aeeff23e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -33,8 +33,8 @@ * This file needs 
to comply with the Linux Kernel coding style. */ -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 struct i40e_aq_desc { __le16 flags; @@ -66,216 +66,217 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed 
because of prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ }; /* Admin Queue command opcodes */ enum i40e_admin_queue_opc { /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, + i40e_aqc_opc_clear_pxe_mode = 0x0110, /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - 
i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 
0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07, /* Tunnel 
commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, + i40e_aqc_opc_event_lan_overflow = 0x1001, /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, - i40e_aqc_opc_debug_modify_internals = 0xFF09, + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, + i40e_aqc_opc_debug_modify_internals = 0xFF09, }; /* command structures and indirect data structures */ @@ -302,7 +303,7 @@ enum i40e_admin_queue_opc { /* This macro is used extensively to ensure that command structures are 16 * bytes in length as they have to map to the raw array of that size. */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) /* internal (0x00XX) commands */ @@ -320,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); /* Send driver version (indirect 0x0002) */ struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); /* Queue Shutdown (direct 0x0003) */ struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); @@ -351,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); @@ -373,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 +#define 
I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 reserved[2]; __le32 count; @@ -384,123 +385,123 @@ struct i40e_aqc_list_capabilites { I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; }; /* list of caps */ -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 
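An aside on the I40E_CHECK_CMD_LENGTH() macro used throughout this header: it must fail the build, not the boot, whenever a direct-command struct drifts from 16 bytes. The backing I40E_CHECK_STRUCT_LEN macro is not part of this hunk; the compile-time assertion idiom it stands for looks roughly like this sketch (the foo_* names and struct are illustrative):

struct foo_cmd {
	__le16 flags;
	__le16 opcode;
	u8 raw[12];		/* 2 + 2 + 12 = 16 bytes */
};

/* Division by zero in a constant expression makes the compiler reject
 * the translation unit whenever sizeof(struct X) != n.
 */
#define FOO_CHECK_STRUCT_LEN(n, X) \
	enum foo_static_assert_enum_##X \
		{ foo_static_assert_##X = (n) / (sizeof(struct X) == (n)) }

FOO_CHECK_STRUCT_LEN(16, foo_cmd);	/* build breaks if not 16 bytes */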
-#define I40E_AQ_ARP_DEL_IPV4 0x0080 - __le16 table_id; - __le32 pfpm_proxyfc; - __le32 ip_addr; - u8 mac_addr[6]; + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; }; /* Set NS Proxy Table Entry Command (indirect 0x0105) */ struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ - u8 ipv6_addr_1[16]; + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ + u8 ipv6_addr_1[16]; }; /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; }; /* Manage MAC Address Read Command (indirect 0x0107) */ struct i40e_aqc_mac_address_read { __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); @@ -516,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); @@ -544,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); * command */ struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); @@ -556,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; }; struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define 
I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; }; /* Get Switch Configuration (indirect 0x0200) @@ -591,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp { * the first in the array is the header, remainder are elements */ struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; }; /* Add Statistics (direct 0x0201) * Remove Statistics (direct 0x0202) */ struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); /* Get Switch Resource Allocation (indirect 0x0204) */ struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define 
I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; }; /* Add VSI (indirect 0x0210) @@ -671,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp { * uses the same completion and data structure as Add VSI */ struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); @@ -706,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); struct i40e_aqc_vsi_properties_data { /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; + __le16 switch_id; /* 12bit id combined with flags below */ +#define 
I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) 
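The ingress_table/egress_table words in this struct pack eight 3-bit user-priority entries into a single __le32, one SHIFT/MASK pair per entry (UPn_SHIFT = 3 * n, 3-bit mask). A small accessor sketch (the helper name is illustrative, not a driver function):

/* Extract the 3-bit mapping for user priority 'up' (0..7). */
static u8 foo_vsi_up_table_get(__le32 table, unsigned int up)
{
	return (le32_to_cpu(table) >> (up * 3)) & 0x7;
}

so, for example, foo_vsi_up_table_get(info->ingress_table, 2) isolates the I40E_AQ_VSI_UP_TABLE_UP2 field.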
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 - u8 queueing_opt_reserved[3]; + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; + u8 up_enable_bits; + u8 sched_reserved; /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress table */ - u8 cmd_reserved[8]; + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; + __le16 qs_handle[8]; #define 
I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; }; I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); @@ -830,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); * (IS_CTRL_PORT only works on add PV) */ struct i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); @@ -859,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); */ struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); /* Add VEB (direct 0x0230) */ struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 - u8 enable_tcs; - u8 reserved[9]; +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; + u8 reserved[6]; + __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - 
__le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); @@ -909,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); @@ -928,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); /* used for the command for most vlan commands */ struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); /* indirect data for command and response */ struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_macvlan_completion { @@ -978,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); */ struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define 
I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; }; /* Add VLAN (indirect 0x0252) @@ -998,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data { * use the generic i40e_aqc_macvlan for the command */ struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; + __le16 vlan_tag; + u8 vlan_flags; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_vlan_completion { - u8 reserved[4]; - __le16 vlans_used; - __le16 vlans_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; }; /* Set VSI Promiscuous Modes (direct 0x0254) */ struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; + __le16 promiscuous_flags; + __le16 valid_flags; /* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 
+ __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); @@ -1059,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; + __le16 tag; + __le16 queue_number; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); struct i40e_aqc_add_remove_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); @@ -1084,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; + __le16 tag; + u8 reserved[12]; }; /* Add multicast E-Tag (direct 0x0257) @@ -1097,22 +1097,22 @@ struct i40e_aqc_remove_tag { * and no external data */ struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 addr_low; + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; }; @@ -1120,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); @@ -1145,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); * and the generic direct completion structure */ struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; - __le16 flags; -#define 
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; + __le16 queue; + u8 reserved[2]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); @@ -1179,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); * and the generic indirect completion structure */ struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); struct i40e_aqc_add_remove_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; union { struct { u8 reserved[12]; @@ -1205,49 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { u8 data[16]; } v6; } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 /* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 /* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 /* 0x0007 reserved */ /* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define 
I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; }; struct i40e_aqc_remove_cloud_filters_completion { @@ -1269,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); struct i40e_aqc_add_delete_mirror_rule { __le16 seid; __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 __le16 num_entries; __le16 destination; /* VSI for add, rule id for delete */ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ @@ -1286,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); @@ -1302,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); * the command and response use 
the same descriptor structure
 */
 struct i40e_aqc_pfc_ignore {
- u8 tc_bitmap;
- u8 command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET 0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
- u8 reserved[14];
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
@@ -1321,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
 * this generic struct to pass the SEID in param0
 */
 struct i40e_aqc_tx_sched_ind {
- __le16 vsi_seid;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
@@ -1336,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp {
 /* Configure VSI BW limits (direct 0x0400) */
 struct i40e_aqc_configure_vsi_bw_limit {
- __le16 vsi_seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_credit; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
@@ -1350,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
 * responds with i40e_aqc_qs_handles_resp
 */
 struct i40e_aqc_configure_vsi_ets_sla_bw_data {
- u8 tc_valid_bits;
- u8 reserved[15];
- __le16 tc_bw_credits[8]; /* FW writesback QS handles here */
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
- u8 reserved1[28];
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
 };
 /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
 * responds with i40e_aqc_qs_handles_resp
 */
 struct i40e_aqc_configure_vsi_tc_bw_data {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 tc_bw_credits[8];
- u8 reserved1[4];
- __le16 qs_handles[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
 };
 /* Query vsi bw configuration (indirect 0x0408) */
 struct i40e_aqc_query_vsi_bw_config_resp {
- u8 tc_valid_bits;
- u8 tc_suspended_bits;
- u8 reserved[14];
- __le16 qs_handles[8];
- u8 reserved1[4];
- __le16 port_bw_limit;
- u8 reserved2[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved3[23];
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
 };
 /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
 struct i40e_aqc_query_vsi_ets_sla_config_resp {
- u8 tc_valid_bits;
- u8 reserved[3];
- u8 share_credits[8];
- __le16 credits[8];
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
 /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
- __le16 tc_bw_max[2];
+ __le16 tc_bw_max[2];
 };
 /* Configure Switching Component Bandwidth Limit (direct 0x0410) */
 struct i40e_aqc_configure_switching_comp_bw_limit {
- __le16 seid;
- u8 reserved[2];
- __le16 credit;
- u8 reserved1[2];
- u8 max_bw; /* 0-3, limit = 2^max */
- u8 reserved2[7];
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
@@ -1411,75 +1411,75 @@
I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); * Disable Physical Port ETS (indirect 0x0415) */ struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; }; /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure Switching Component Bandwidth Allocation per Tc * (indirect 0x0417) */ struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; }; /* Query Switching Component Configuration (indirect 0x0418) */ struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; }; /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; + __le16 tc_bw_max[2]; + u8 reserved3[32]; }; /* Query Switching Component Bandwidth Allocation per Traffic Type * (indirect 0x041A) */ struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Suspend/resume port TX traffic @@ -1490,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp { * (indirect 0x041D) */ struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ }; /* Get and set the active HMC resource profile and status. 
* (direct 0x0500) and (direct 0x0501) */ struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); enum i40e_aq_hmc_profile { /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, }; -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 enum i40e_aq_phy_type { I40E_PHY_TYPE_SGMII = 0x0, @@ -1578,147 +1578,147 @@ struct i40e_aqc_module_desc { }; struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 reserved[3]; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; }; /* Set PHY Config (direct 0x0601) */ struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; + __le32 phy_type; + u8 link_speed; + u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ #define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 reserved[3]; + __le16 eee_capability; + 
__le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); /* Set MAC Config command data structure (direct 0x0603) */ struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); /* Restart Auto-Negotiation (direct 0x605) */ struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define 
I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); /* Set event mask command (direct 0x613) */ struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); @@ -1728,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); * Get Link Partner AN advt register (direct 0x0616) */ struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); /* Set Loopback mode (0x0618) */ struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) 
*/ struct i40e_aqc_set_phy_debug { - u8 command_flags; + u8 command_flags; #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ @@ -1757,15 +1757,15 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; /* NVM Read command (indirect 0x0701) @@ -1773,40 +1773,40 @@ enum i40e_aq_phy_reg_type { * NVM Update commands (indirect 0x0703) */ struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { - __le16 cmd_flags; + __le16 cmd_flags; #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 #define ANVM_READ_SINGLE_FEATURE 0 #define ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - u8 reserved[2]; - __le32 address_high; - __le32 address_low; + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + u8 reserved[2]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); /* NVM Config Write (indirect 0x0705) */ struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); @@ -1831,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field { * Send to Peer PF command (indirect 0x0803) */ struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); @@ -1870,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); * uses i40e_aq_desc */ struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; + __le16 cmd_flags; #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { - __le32 mode; + __le32 mode; #define I40E_AQ_ALTERNATE_MODE_NONE 0 #define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); @@ -1896,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); /* Lan Queue Overflow Event (direct, 0x1001) */ struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; + __le32 prtdcb_rupto; + 
__le32 otx_ctl; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); /* Get LLDP MIB (indirect 0x0A00) */ struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); @@ -1931,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); * also used for the event (with type in the command field) */ struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); @@ -1945,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); * Delete LLDP TLV (indirect 0x0A04) */ struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); /* Update LLDP TLV (indirect 0x0A03) */ struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); @@ -1981,57 +1981,97 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); /* Start LLDP (direct 0x0A06) */ struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); -/* Apply 
MIB changes (0x0A07)
- * uses the generic struc as it contains no data
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
 */
+#define I40E_AQC_CEE_APP_FCOE_SHIFT 0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK (0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT 0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK (0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT 0x8
+#define I40E_AQC_CEE_APP_FIP_MASK (0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+#define I40E_AQC_CEE_PG_STATUS_SHIFT 0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK (0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT 0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+ u8 reserved1;
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 reserved2;
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ u8 reserved3;
+ __le16 oper_app_prio;
+ u8 reserved4;
+ __le16 tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+ __le32 tlv_status;
+ u8 reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
 /* Add Udp Tunnel command and completion (direct 0x0B00) */
 struct i40e_aqc_add_udp_tunnel {
- __le16 udp_port;
- u8 reserved0[3];
- u8 protocol_type;
+ __le16 udp_port;
+ u8 reserved0[3];
+ u8 protocol_type;
 #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01
 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
- u8 reserved1[10];
+ u8 reserved1[10];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
 struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
- u8 filter_entry_index;
- u8 multiple_pfs;
-#define I40E_AQC_SINGLE_PF 0x0
-#define I40E_AQC_MULTIPLE_PFS 0x1
- u8 total_filters;
- u8 reserved[11];
+ __le16 udp_port;
+ u8 filter_entry_index;
+ u8 multiple_pfs;
+#define I40E_AQC_SINGLE_PF 0x0
+#define I40E_AQC_MULTIPLE_PFS 0x1
+ u8 total_filters;
+ u8 reserved[11];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
 /* remove UDP Tunnel command (0x0B01) */
 struct i40e_aqc_remove_udp_tunnel {
- u8 reserved[2];
- u8 index; /* 0 to 15 */
- u8 reserved2[13];
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 reserved2[13];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
 struct i40e_aqc_del_udp_tunnel_completion {
- __le16 udp_port;
- u8 index; /* 0 to 15 */
- u8 multiple_pfs;
- u8 total_filters_used;
- u8 reserved1[11];
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_pfs;
+ u8 total_filters_used;
+ u8 reserved1[11];
 };
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
@@ -2044,11 +2084,11 @@ struct i40e_aqc_tunnel_key_structure {
 u8 key1_len; /* 0 to 15 */
 u8 key2_len; /* 0 to 15 */
 u8 flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
 /* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
 u8 network_key_index;
#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0
#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1
@@
-2061,21 +2101,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - u8 param_value2[8]; + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + u8 param_value2[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); @@ -2087,18 +2127,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); /* set test more (0xFF01, internal) */ struct i40e_acq_set_test_mode { - u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); @@ -2151,21 +2191,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); #define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 30056b25d94e..3d741ee99a2c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -50,6 +50,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: @@ -549,7 +550,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { i40e_status i40e_init_shared_code(struct i40e_hw *hw) { i40e_status status = 0; - u32 reg; + u32 port, ari, func_rid; i40e_set_mac_type(hw); @@ -562,18 +563,17 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) hw->phy.get_link_info = true; - /* Determine port number */ - reg = rd32(hw, I40E_PFGEN_PORTNUM); - reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >> - I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT); - hw->port = (u8)reg; - - /* Determine the PF number based on the PCI fn */ - reg = rd32(hw, I40E_GLPCI_CAPSUP); - if (reg & I40E_GLPCI_CAPSUP_ARI_EN_MASK) - hw->pf_id = (u8)((hw->bus.device << 3) | hw->bus.func); + /* 
Determine port number and PF number*/ + port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) + >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT; + hw->port = (u8)port; + ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >> + I40E_GLPCI_CAPSUP_ARI_EN_SHIFT; + func_rid = rd32(hw, I40E_PF_FUNC_RID); + if (ari) + hw->pf_id = (u8)(func_rid & 0xff); else - hw->pf_id = (u8)hw->bus.func; + hw->pf_id = (u8)(func_rid & 0x7); status = i40e_init_nvm(hw); return status; @@ -790,7 +790,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) } #define I40E_PF_RESET_WAIT_COUNT_A0 200 -#define I40E_PF_RESET_WAIT_COUNT 100 +#define I40E_PF_RESET_WAIT_COUNT 110 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure @@ -1420,6 +1420,33 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) } /** + * i40e_aq_set_phy_int_mask + * @hw: pointer to the hw struct + * @mask: interrupt mask to be set + * @cmd_details: pointer to command details structure or NULL + * + * Set link interrupt mask. + **/ +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, + u16 mask, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_int_mask *cmd = + (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_int_mask); + + cmd->event_mask = cpu_to_le16(mask); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_add_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a vsi context struct @@ -2632,6 +2659,34 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, } /** + * i40e_aq_get_cee_dcb_config + * @hw: pointer to the hw struct + * @buff: response buffer that stores CEE operational configuration + * @buff_size: size of the buffer passed + * @cmd_details: pointer to command details structure or NULL + * + * Get CEE DCBX mode operational configuration from firmware + **/ +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + if (buff_size == 0 || !buff) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); + + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, + cmd_details); + + return status; +} + +/** * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add @@ -3189,6 +3244,26 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_aq_resume_port_tx + * @hw: pointer to the hardware structure + * @cmd_details: pointer to command details structure or NULL + * + * Resume port's Tx traffic + **/ +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_set_pci_config_data - store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status word from PCI config space diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 036570d76176..3ce43588592d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -59,7 +59,7 @@ i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status) static void i40e_parse_ieee_etscfg_tlv(struct i40e_lldp_org_tlv *tlv, struct i40e_dcbx_config *dcbcfg) { - struct i40e_ieee_ets_config *etscfg; + struct i40e_dcb_ets_config *etscfg; u8 *buf = tlv->tlvinfo; u16 offset = 0; u8 priority; @@ -407,6 +407,166 @@ free_mem: } /** + * i40e_cee_to_dcb_v1_config + * @cee_cfg: pointer to CEE v1 response configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE v1 configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_v1_config( + struct i40e_aqc_get_cee_dcb_cfg_v1_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u16 status, tlv_status = le16_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add APPs if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** + * i40e_cee_to_dcb_config + * @cee_cfg: pointer to CEE configuration struct + * @dcbcfg: DCB configuration struct + * + * Convert CEE configuration from firmware to DCB configuration + **/ +static void i40e_cee_to_dcb_config( + struct i40e_aqc_get_cee_dcb_cfg_resp *cee_cfg, + struct i40e_dcbx_config *dcbcfg) +{ + u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status); + u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio); + u8 i, tc, err, sync, oper; + + /* CEE PG data to ETS config */ + dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; + + for (i = 0; i < 4; i++) { + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_1_MASK) >> + I40E_CEE_PGID_PRIO_1_SHIFT); + dcbcfg->etscfg.prioritytable[i*2] = tc; + tc = (u8)((cee_cfg->oper_prio_tc[i] & + I40E_CEE_PGID_PRIO_0_MASK) >> + I40E_CEE_PGID_PRIO_0_SHIFT); + dcbcfg->etscfg.prioritytable[i*2 + 1] = tc; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (dcbcfg->etscfg.prioritytable[i] == I40E_CEE_PGID_STRICT) { + /* Map it to next empty TC */ + dcbcfg->etscfg.prioritytable[i] = + cee_cfg->oper_num_tc - 1; + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_STRICT; + } else { + dcbcfg->etscfg.tsatable[i] = I40E_IEEE_TSA_ETS; + } + } + + /* CEE PFC data to ETS config */ + dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en; + dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + + status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >> + I40E_AQC_CEE_APP_STATUS_SHIFT; + err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0; + sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; + oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; + /* Add APPs if Error is False and Oper/Sync is True */ + if (!err && sync && oper) { + /* CEE operating configuration supports FCoE/iSCSI/FIP only */ + dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; + + /* FCoE APP */ + dcbcfg->app[0].priority = + (app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >> + I40E_AQC_CEE_APP_FCOE_SHIFT; + dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE; + + /* iSCSI APP */ + dcbcfg->app[1].priority = + (app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >> + I40E_AQC_CEE_APP_ISCSI_SHIFT; + dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP; + dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI; + + /* FIP APP */ + dcbcfg->app[2].priority = + (app_prio & I40E_AQC_CEE_APP_FIP_MASK) >> + I40E_AQC_CEE_APP_FIP_SHIFT; + dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE; + dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP; + } +} + +/** * i40e_get_dcb_config * @hw: pointer to the hw struct * @@ -415,7 +575,44 @@ free_mem: i40e_status i40e_get_dcb_config(struct i40e_hw *hw) { i40e_status ret = 0; + struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; + struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; + + /* If Firmware version < v4.33 IEEE only */ + if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4)) + goto ieee; + + /* If Firmware version == v4.33 use old CEE struct */ + if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, + sizeof(cee_v1_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_v1_config(&cee_v1_cfg, + &hw->local_dcbx_config); + } + } else { + ret = i40e_aq_get_cee_dcb_config(hw, &cee_cfg, + sizeof(cee_cfg), NULL); + if (!ret) { + /* CEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE; + i40e_cee_to_dcb_config(&cee_cfg, + &hw->local_dcbx_config); + } + } + + /* CEE mode not enabled try querying IEEE data */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + goto ieee; + else + goto out; +ieee: + /* IEEE mode */ + hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE; /* Get Local DCB Config */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->local_dcbx_config); @@ -426,6 +623,10 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, &hw->remote_dcbx_config); + /* Don't treat ENOENT as an error for Remote MIBs */ + if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) + ret = 0; + out: return ret; } @@ -439,10 +640,27 @@ out: i40e_status i40e_init_dcb(struct i40e_hw *hw) { i40e_status ret = 0; + struct i40e_lldp_variables lldp_cfg; + u8 adminstatus = 0; if (!hw->func_caps.dcb) return ret; + /* Read LLDP NVM area */ + ret = i40e_read_lldp_cfg(hw, &lldp_cfg); + if (ret) + return ret; + + /* Get the LLDP AdminStatus for the current port */ + adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); + adminstatus &= 0xF; + + /* LLDP agent disabled */ + if (!adminstatus) { + hw->dcbx_status = I40E_DCBX_STATUS_DISABLED; + return ret; + } + /* Get DCBX status */ ret = i40e_get_dcbx_status(hw, &hw->dcbx_status); if (ret) @@ -454,6 +672,8 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) case I40E_DCBX_STATUS_IN_PROGRESS: /* Get current DCBX configuration */ ret = i40e_get_dcb_config(hw); + if (ret) + return ret; break; case I40E_DCBX_STATUS_DISABLED: return ret; @@ -470,3 +690,33 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) return ret; } + +/** + * 
i40e_read_lldp_cfg - read LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * + * Reads the LLDP configuration data from NVM + **/ +i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg) +{ + i40e_status ret = 0; + u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + + if (!lldp_cfg) + return I40E_ERR_PARAM; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, + sizeof(struct i40e_lldp_variables), + (u8 *)lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 34cf1c30c7ff..e137e3fac8ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -65,6 +65,11 @@ #define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT) #define I40E_IEEE_ETS_PRIO_1_SHIFT 4 #define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT) +#define I40E_CEE_PGID_PRIO_0_SHIFT 0 +#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT) +#define I40E_CEE_PGID_PRIO_1_SHIFT 4 +#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT) +#define I40E_CEE_PGID_STRICT 15 /* Defines for IEEE TSA types */ #define I40E_IEEE_TSA_STRICT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 00bc0cdb3a03..183dcb63ce98 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -207,7 +207,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) * VSI **/ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { struct net_device *dev = vsi->netdev; struct dcb_app sapp; @@ -229,7 +229,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, * Delete given APP from all the VSIs for given PF **/ static void i40e_dcbnl_del_app(struct i40e_pf *pf, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { int v, err; for (v = 0; v < pf->num_alloc_vsi; v++) { @@ -252,7 +252,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf, * Find given APP in the DCB configuration **/ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, - struct i40e_ieee_app_priority_table *app) + struct i40e_dcb_app_priority_table *app) { int i; @@ -277,7 +277,7 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, void i40e_dcbnl_flush_apps(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) { - struct i40e_ieee_app_priority_table app; + struct i40e_dcb_app_priority_table app; struct i40e_dcbx_config *dcbxcfg; struct i40e_hw *hw = &pf->hw; int i; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 7067f4b9159c..433a55886ad2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -773,7 +773,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, { struct i40e_tx_desc *txd; union i40e_rx_desc *rxd; - struct i40e_ring ring; + struct i40e_ring *ring; struct i40e_vsi *vsi; int i; @@ -792,29 +792,32 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, vsi_seid); return; } - if (is_rx_ring) - ring = 
*vsi->rx_rings[ring_id]; - else - ring = *vsi->tx_rings[ring_id]; + + ring = kmemdup(is_rx_ring + ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id], + sizeof(*ring), GFP_KERNEL); + if (!ring) + return; + if (cnt == 2) { dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); - for (i = 0; i < ring.count; i++) { + for (i = 0; i < ring->count; i++) { if (!is_rx_ring) { - txd = I40E_TX_DESC(&ring, i); + txd = I40E_TX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(&ring, i); + rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { - rxd = I40E_RX_DESC(&ring, i); + rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, @@ -823,26 +826,26 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, } } } else if (cnt == 3) { - if (desc_n >= ring.count || desc_n < 0) { + if (desc_n >= ring->count || desc_n < 0) { dev_info(&pf->pdev->dev, "descriptor %d not found\n", desc_n); return; } if (!is_rx_ring) { - txd = I40E_TX_DESC(&ring, desc_n); + txd = I40E_TX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(&ring, desc_n); + rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { - rxd = I40E_RX_DESC(&ring, desc_n); + rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, @@ -852,6 +855,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, } else { dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); } + kfree(ring); } /** @@ -895,90 +899,6 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, } /** - * i40e_dbg_dump_stats - handles dump stats write into command datum - * @pf: the i40e_pf created in command write - * @stats: the stats structure to be dumped - **/ -static void i40e_dbg_dump_stats(struct i40e_pf *pf, - struct i40e_hw_port_stats *stats) -{ - int i; - - dev_info(&pf->pdev->dev, " stats:\n"); - dev_info(&pf->pdev->dev, - " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n", - stats->crc_errors, stats->illegal_bytes, stats->error_bytes); - dev_info(&pf->pdev->dev, - " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n", - stats->mac_local_faults, stats->mac_remote_faults, - stats->rx_length_errors); - dev_info(&pf->pdev->dev, - " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n", - stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx); - dev_info(&pf->pdev->dev, - " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n", - stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127); - dev_info(&pf->pdev->dev, - " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n", - stats->rx_size_255, stats->rx_size_511, 
stats->rx_size_1023); - dev_info(&pf->pdev->dev, - " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n", - stats->rx_size_big, stats->rx_undersize, stats->rx_jabber); - dev_info(&pf->pdev->dev, - " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n", - stats->rx_fragments, stats->rx_oversize, stats->tx_size_64); - dev_info(&pf->pdev->dev, - " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n", - stats->tx_size_127, stats->tx_size_255, stats->tx_size_511); - dev_info(&pf->pdev->dev, - " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n", - stats->tx_size_1023, stats->tx_size_big, - stats->mac_short_packet_dropped); - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_rx[i], - i+1, stats->priority_xon_rx[i+1], - i+2, stats->priority_xon_rx[i+2], - i+3, stats->priority_xon_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xoff_rx[i], - i+1, stats->priority_xoff_rx[i+1], - i+2, stats->priority_xoff_rx[i+2], - i+3, stats->priority_xoff_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_tx[i], - i+1, stats->priority_xon_tx[i+1], - i+2, stats->priority_xon_tx[i+2], - i+3, stats->priority_xon_rx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xoff_tx[i], - i+1, stats->priority_xoff_tx[i+1], - i+2, stats->priority_xoff_tx[i+2], - i+3, stats->priority_xoff_tx[i+3]); - } - for (i = 0; i < 8; i += 4) { - dev_info(&pf->pdev->dev, - " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", - i, stats->priority_xon_2_xoff[i], - i+1, stats->priority_xon_2_xoff[i+1], - i+2, stats->priority_xon_2_xoff[i+2], - i+3, stats->priority_xon_2_xoff[i+3]); - } - - i40e_dbg_dump_eth_stats(pf, &stats->eth); -} - -/** * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb * @pf: the i40e_pf created in command write * @seid: the seid the user put in @@ -1342,11 +1262,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, "dump desc aq\n"); } - } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) { - dev_info(&pf->pdev->dev, "pf stats:\n"); - i40e_dbg_dump_stats(pf, &pf->stats); - dev_info(&pf->pdev->dev, "pf stats_offsets:\n"); - i40e_dbg_dump_stats(pf, &pf->stats_offsets); } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) { dev_info(&pf->pdev->dev, "core reset count: %d\n", pf->corer_count); @@ -1402,6 +1317,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, bw_data = NULL; dev_info(&pf->pdev->dev, + "port dcbx_mode=%d\n", cfg->dcbx_mode); + dev_info(&pf->pdev->dev, "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", cfg->etscfg.willing, cfg->etscfg.cbs, cfg->etscfg.maxtcs); @@ -1464,8 +1381,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else { dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n"); - dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n"); - dev_info(&pf->pdev->dev, "dump 
stats\n"); + dev_info(&pf->pdev->dev, "dump switch\n"); + dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); dev_info(&pf->pdev->dev, "dump reset stats\n"); dev_info(&pf->pdev->dev, "dump port\n"); dev_info(&pf->pdev->dev, @@ -1580,7 +1497,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[11], - "%hx %hx %hx %hx %x %x %x %x %x %x", + "%hi %hi %hi %hi %i %i %i %i %i %i", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, @@ -1628,7 +1545,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[20], - "%hx %hx %hx %hx %x %x %x %x %x %x %hd", + "%hi %hi %hi %hi %i %i %i %i %i %i %hi", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 1dda467ae1ac..951e8767fc50 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -40,8 +40,9 @@ struct i40e_stats { .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } + #define I40E_NETDEV_STAT(_net_stat) \ - I40E_STAT(struct net_device_stats, #_net_stat, _net_stat) + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) #define I40E_PF_STAT(_name, _stat) \ I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ @@ -264,6 +265,14 @@ static int i40e_get_settings(struct net_device *netdev, ecmd->supported = SUPPORTED_10000baseKR_Full; ecmd->advertising = ADVERTISED_10000baseKR_Full; break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; + break; default: /* all the rest are 10G/1G */ ecmd->supported = SUPPORTED_10000baseT_Full | @@ -322,9 +331,13 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_10GBASE_T: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full; + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full; + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: @@ -335,14 +348,22 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_1000BASE_T: ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_SGMII: @@ -426,6 +447,9 @@ no_valid_phy_type: case I40E_LINK_SPEED_1GB: ethtool_cmd_speed_set(ecmd, SPEED_1000); break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; default: break; } @@ -528,7 +552,7 @@ 
static int i40e_set_settings(struct net_device *netdev, } /* If autoneg is currently enabled */ if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { - config.abilities = abilities.abilities | + config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; change = true; } @@ -621,11 +645,19 @@ static void i40e_get_pauseparam(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; pause->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); + /* PFC enabled so report LFC as off */ + if (dcbx_cfg->pfc.pfcenable) { + pause->rx_pause = 0; + pause->tx_pause = 0; + return; + } + if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { pause->rx_pause = 1; } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { @@ -649,6 +681,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; i40e_status status; u8 aq_failures; @@ -670,8 +703,9 @@ static int i40e_set_pauseparam(struct net_device *netdev, netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); } - if (hw->fc.current_mode == I40E_FC_PFC) { - netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n"); + if (dcbx_cfg->pfc.pfcenable) { + netdev_info(netdev, + "Priority flow control enabled. Cannot set link flow control.\n"); return -EOPNOTSUPP; } @@ -788,7 +822,7 @@ static int i40e_get_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; - int ret_val = 0, len; + int ret_val = 0, len, offset; u8 *eeprom_buff; u16 i, sectors; bool last; @@ -801,19 +835,21 @@ static int i40e_get_eeprom(struct net_device *netdev, /* check for NVMUpdate access method */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic && eeprom->magic != magic) { + struct i40e_nvm_access *cmd; int errno; /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) return -EINVAL; - ret_val = i40e_nvmupd_command(hw, - (struct i40e_nvm_access *)eeprom, - bytes, &errno); + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); if (ret_val) dev_info(&pf->pdev->dev, - "NVMUpdate read failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); + "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); return errno; } @@ -842,20 +878,29 @@ static int i40e_get_eeprom(struct net_device *netdev, len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); last = true; } - ret_val = i40e_aq_read_nvm(hw, 0x0, - eeprom->offset + (I40E_NVM_SECTOR_SIZE * i), - len, + offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i); + ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len, (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i), last, NULL); - if (ret_val) { + if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { + dev_info(&pf->pdev->dev, + "read NVM failed, invalid offset 0x%x\n", + offset); + break; + } else if (ret_val && + hw->aq.asq_last_status ==
I40E_AQ_RC_EACCES) { dev_info(&pf->pdev->dev, - "read NVM failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); - goto release_nvm; + "read NVM failed, access, offset 0x%x\n", + offset); + break; + } else if (ret_val) { + dev_info(&pf->pdev->dev, + "read NVM failed offset %d err=%d status=0x%x\n", + offset, ret_val, hw->aq.asq_last_status); + break; } } -release_nvm: i40e_release_nvm(hw); memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); free_buff: @@ -883,6 +928,7 @@ static int i40e_set_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; + struct i40e_nvm_access *cmd; int ret_val = 0; int errno; u32 magic; @@ -900,12 +946,14 @@ static int i40e_set_eeprom(struct net_device *netdev, test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) return -EBUSY; - ret_val = i40e_nvmupd_command(hw, (struct i40e_nvm_access *)eeprom, - bytes, &errno); - if (ret_val) + cmd = (struct i40e_nvm_access *)eeprom; + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); + if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) dev_info(&pf->pdev->dev, - "NVMUpdate write failed err=%d status=0x%x\n", - ret_val, hw->aq.asq_last_status); + "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", + ret_val, hw->aq.asq_last_status, errno, + (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), + cmd->offset, cmd->data_size); return errno; } @@ -1292,6 +1340,10 @@ static int i40e_get_ts_info(struct net_device *dev, { struct i40e_pf *pf = i40e_netdev_to_pf(dev); + /* only report HW timestamping if PTP is enabled */ + if (!(pf->flags & I40E_FLAG_PTP)) + return ethtool_op_get_ts_info(dev, info); + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | @@ -1355,6 +1407,9 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data) netif_info(pf, hw, netdev, "eeprom test\n"); *data = i40e_diag_eeprom_test(&pf->hw); + /* forcibly clear the NVM Update state machine */ + pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; + return *data; } @@ -1367,7 +1422,10 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data) netif_info(pf, hw, netdev, "interrupt test\n"); wr32(&pf->hw, I40E_PFINT_DYN_CTL0, (I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); usleep_range(1000, 2000); *data = (swc_old == pf->sw_int_count); @@ -1551,13 +1609,10 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->rx_itr_setting = ec->rx_coalesce_usecs; } else if (ec->rx_coalesce_usecs == 0) { vsi->rx_itr_setting = ec->rx_coalesce_usecs; - i40e_irq_dynamic_disable(vsi, vector); if (ec->use_adaptive_rx_coalesce) - netif_info(pf, drv, netdev, - "Rx-secs=0, need to disable adaptive-Rx for a complete disable\n"); + netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); } else { - netif_info(pf, drv, netdev, - "Invalid value, Rx-usecs range is 0, 8-8160\n"); + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); return -EINVAL; } @@ -1566,13 +1621,11 @@ static int i40e_set_coalesce(struct net_device *netdev, vsi->tx_itr_setting = ec->tx_coalesce_usecs; } else if (ec->tx_coalesce_usecs == 0) { vsi->tx_itr_setting = ec->tx_coalesce_usecs; - i40e_irq_dynamic_disable(vsi, vector); if
(ec->use_adaptive_tx_coalesce) - netif_info(pf, drv, netdev, - "Tx-secs=0, need to disable adaptive-Tx for a complete disable\n"); + netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); } else { netif_info(pf, drv, netdev, - "Invalid value, Tx-usecs range is 0, 8-8160\n"); + "Invalid value, tx-usecs range is 0-8160\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 5d01db1d789b..a8b8bd95108d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -343,7 +343,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) **/ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) { - struct i40e_ieee_app_priority_table app; + struct i40e_dcb_app_priority_table app; struct i40e_hw *hw = &pf->hw; u8 enabled_tc = 0; u8 tc, i; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c3a7f4a4b775..0a7ea4c5f9d3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -38,8 +38,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 0 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_MINOR 2 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -74,6 +74,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, /* required last entry */ {0, } }; @@ -812,7 +813,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; + struct i40e_ring *p; u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@ -836,10 +840,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_buf = 0; rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { - struct i40e_ring *p; - u64 bytes, packets; - unsigned int start; - /* locate Tx ring */ p = ACCESS_ONCE(vsi->tx_rings[q]); @@ -2382,6 +2382,35 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) } /** + * i40e_config_xps_tx_ring - Configure XPS for a Tx ring + * @ring: The Tx ring to configure + * + * This enables/disables XPS for a given Tx descriptor ring + * based on the TCs enabled for the VSI that ring belongs to. 
+ **/ +static void i40e_config_xps_tx_ring(struct i40e_ring *ring) +{ + struct i40e_vsi *vsi = ring->vsi; + cpumask_var_t mask; + + if (ring->q_vector && ring->netdev) { + /* Single TC mode enable XPS */ + if (vsi->tc_config.numtc <= 1 && + !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + netif_set_xps_queue(ring->netdev, + &ring->q_vector->affinity_mask, + ring->queue_index); + } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + /* Disable XPS to allow selection based on TC */ + bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); + netif_set_xps_queue(ring->netdev, mask, + ring->queue_index); + free_cpumask_var(mask); + } + } +} + +/** * i40e_configure_tx_ring - Configure a transmit ring context and rest * @ring: The Tx ring to configure * @@ -2404,13 +2433,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) ring->atr_sample_rate = 0; } - /* initialize XPS */ - if (ring->q_vector && ring->netdev && - vsi->tc_config.numtc <= 1 && - !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) - netif_set_xps_queue(ring->netdev, - &ring->q_vector->affinity_mask, - ring->queue_index); + /* configure XPS */ + i40e_config_xps_tx_ring(ring); /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(tx_ctx)); @@ -2462,10 +2486,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } /* Now associate this queue with this PCI function */ - if (vsi->type == I40E_VSI_VMDQ2) + if (vsi->type == I40E_VSI_VMDQ2) { qtx_ctl = I40E_QTX_CTL_VM_QUEUE; - else + qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & + I40E_QTX_CTL_VFVM_INDX_MASK; + } else { qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + } + qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); @@ -3440,7 +3468,7 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; - udelay(10); + usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; @@ -3466,7 +3494,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) /* warn the TX unit of coming changes */ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); if (!enable) - udelay(10); + usleep_range(10, 20); for (j = 0; j < 50; j++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); @@ -3488,6 +3516,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) } wr32(hw, I40E_QTX_ENA(pf_q), tx_reg); + /* No waiting for the Tx queue to disable */ + if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) + continue; /* wait for the change to finish */ ret = i40e_pf_txq_wait(pf, pf_q, enable); @@ -3526,7 +3557,7 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; - udelay(10); + usleep_range(10, 20); } if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT) return -ETIMEDOUT; @@ -3855,6 +3886,15 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi) if (test_bit(__I40E_DOWN, &vsi->state)) return; + /* No need to disable FCoE VSI when Tx suspended */ + if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) && + vsi->type == I40E_VSI_FCOE) { + dev_dbg(&vsi->back->pdev->dev, + "%s: VSI seid %d skipping FCoE VSI disable\n", + __func__, vsi->seid); + return; + } + set_bit(__I40E_NEEDS_RESTART, &vsi->state); if (vsi->netdev && netif_running(vsi->netdev)) { vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); @@ -3907,6 +3947,57 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) } } +#ifdef CONFIG_I40E_DCB +/** + * 
i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled + * @vsi: the VSI being configured + * + * This function waits for the given VSI's Tx queues to be disabled. + **/ +static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int i, pf_q, ret; + + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + /* Check and wait for the disable status of the queue */ + ret = i40e_pf_txq_wait(pf, pf_q, false); + if (ret) { + dev_info(&pf->pdev->dev, + "%s: VSI seid %d Tx ring %d disable timeout\n", + __func__, vsi->seid, pf_q); + return ret; + } + } + + return 0; +} + +/** + * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled + * @pf: the PF + * + * This function waits for the Tx queues to be in disabled state for all the + * VSIs that are managed by this PF. + **/ +static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) +{ + int v, ret = 0; + + for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { + /* No need to wait for FCoE VSI queues */ + if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { + ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); + if (ret) + break; + } + } + + return ret; +} + +#endif /** * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config * @dcbcfg: the corresponding DCBx configuration structure @@ -4378,6 +4469,31 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) } /** + * i40e_resume_port_tx - Resume port Tx + * @pf: PF struct + * + * Resume a port's Tx and issue a PF reset in case of failure to + * resume. + **/ +static int i40e_resume_port_tx(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + int ret; + + ret = i40e_aq_resume_port_tx(hw, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "AQ command Resume Port Tx failed = %d\n", + pf->hw.aq.asq_last_status); + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } + + return ret; +} + +/** * i40e_init_pf_dcb - Initialize DCB configuration * @pf: PF being configured * @@ -4413,6 +4529,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) /* Enable DCB tagging only when more than one TC */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; + dev_dbg(&pf->pdev->dev, + "DCBX offload is supported for this PF.\n"); } } else { dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n", @@ -4449,6 +4567,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) case I40E_LINK_SPEED_1GB: strlcpy(speed, "1000 Mbps", SPEED_SIZE); break; + case I40E_LINK_SPEED_100MB: + strncpy(speed, "100 Mbps", SPEED_SIZE); + break; default: break; } @@ -4479,12 +4600,8 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) static int i40e_up_complete(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - u8 set_fc_aq_fail = 0; int err; - /* force flow control off */ - i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_vsi_configure_msix(vsi); else @@ -4753,9 +4870,11 @@ int i40e_vsi_open(struct i40e_vsi *vsi) goto err_set_queues; } else if (vsi->type == I40E_VSI_FDIR) { - snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", - dev_driver_string(&pf->pdev->dev)); + snprintf(int_name, sizeof(int_name) - 1, "%s-%s-fdir", + dev_driver_string(&pf->pdev->dev), + dev_name(&pf->pdev->dev)); err = i40e_vsi_request_irq(vsi, int_name); + } else { err = -EINVAL; goto err_setup_rx; @@ -4995,6 +5114,8 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf, 
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); } + dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, + need_reconfig); return need_reconfig; } @@ -5022,11 +5143,16 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Ignore if event is not for Nearest Bridge */ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & I40E_AQ_LLDP_BRIDGE_TYPE_MASK); + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib bridge type 0x%x\n", __func__, type); if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE) return ret; /* Check MIB Type and return if event for Remote MIB update */ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK; + dev_dbg(&pf->pdev->dev, + "%s: LLDP event mib type %s\n", __func__, + type ? "remote" : "local"); if (type == I40E_AQ_LLDP_MIB_REMOTE) { /* Update the remote cached instance and return */ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE, @@ -5035,12 +5161,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, goto exit; } - /* Convert/store the DCBX data from LLDPDU temporarily */ memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); - ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg); + /* Store the old configuration */ + tmp_dcbx_cfg = *dcbx_cfg; + + /* Get updated DCBX data from firmware */ + ret = i40e_get_dcb_config(&pf->hw); if (ret) { - /* Error in LLDPDU parsing return */ - dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n"); + dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); goto exit; } @@ -5050,12 +5178,9 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, goto exit; } - need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg); + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg); - i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg); - - /* Overwrite the new configuration */ - *dcbx_cfg = tmp_dcbx_cfg; + i40e_dcbnl_flush_apps(pf, dcbx_cfg); if (!need_reconfig) goto exit; @@ -5066,13 +5191,24 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, else pf->flags &= ~I40E_FLAG_DCB_ENABLED; + set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); /* Reconfiguration needed quiesce all VSIs */ i40e_pf_quiesce_all_vsi(pf); /* Changes in configuration update VEB/VSI */ i40e_dcb_reconfigure(pf); - i40e_pf_unquiesce_all_vsi(pf); + ret = i40e_resume_port_tx(pf); + + clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); + /* In case of error no point in resuming VSIs */ + if (ret) + goto exit; + + /* Wait for the PF's Tx queues to be disabled */ + ret = i40e_pf_wait_txq_disabled(pf); + if (!ret) + i40e_pf_unquiesce_all_vsi(pf); exit: return ret; } @@ -5212,6 +5348,9 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) int flush_wait_retry = 50; int reg; + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + if (time_after(jiffies, pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); @@ -5273,6 +5412,9 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) if (test_bit(__I40E_DOWN, &pf->state)) return; + if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) + return; + if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) && (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) && (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count)) @@ -5310,8 +5452,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) break; case I40E_VSI_SRIOV: - break; - case I40E_VSI_VMDQ2: case I40E_VSI_CTRL: case I40E_VSI_MIRROR: @@ -5353,14 +5493,21 @@ static void 
i40e_veb_link_event(struct i40e_veb *veb, bool link_up) static void i40e_link_event(struct i40e_pf *pf) { bool new_link, old_link; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + + /* set this to force the get_link_status call to refresh state */ + pf->hw.phy.get_link_info = true; - new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP); old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); + new_link = i40e_get_link_status(&pf->hw); - if (new_link == old_link) + if (new_link == old_link && + (test_bit(__I40E_DOWN, &vsi->state) || + new_link == netif_carrier_ok(vsi->netdev))) return; - if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) - i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link); + + if (!test_bit(__I40E_DOWN, &vsi->state)) + i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. @@ -5368,7 +5515,7 @@ static void i40e_link_event(struct i40e_pf *pf) if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); else - i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link); + i40e_vsi_link_event(vsi, new_link); if (pf->vf) i40e_vc_notify_link_state(pf); @@ -5418,11 +5565,17 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, (I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK)); + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); } else { u16 vec = vsi->base_vector - 1; u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK); for (i = 0; i < vsi->num_q_vectors; i++, vec++) wr32(&vsi->back->hw, I40E_PFINT_DYN_CTLN(vec), val); @@ -5433,7 +5586,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) } /** - * i40e_watchdog_subtask - Check and bring link up + * i40e_watchdog_subtask - periodic checks not using event driven response * @pf: board private structure **/ static void i40e_watchdog_subtask(struct i40e_pf *pf) @@ -5445,6 +5598,15 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) test_bit(__I40E_CONFIG_BUSY, &pf->state)) return; + /* make sure we don't do these things too often */ + if (time_before(jiffies, (pf->service_timer_previous + + pf->service_timer_period))) + return; + pf->service_timer_previous = jiffies; + + i40e_check_hang_subtask(pf); + i40e_link_event(pf); + /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ @@ -5525,33 +5687,20 @@ static void i40e_handle_link_event(struct i40e_pf *pf, memcpy(&pf->hw.phy.link_info_old, hw_link_info, sizeof(pf->hw.phy.link_info_old)); + /* Do a new status request to re-enable LSE reporting + * and load new status information into the hw struct + * This completely ignores any state information + * in the ARQ event info, instead choosing to always + * issue the AQ update link status command. 
+ */ + i40e_link_event(pf); + /* check for unqualified module, if link is down */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP))) dev_err(&pf->pdev->dev, "The driver failed to link because an unqualified module was detected.\n"); - - /* update link status */ - hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; - hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; - hw_link_info->link_info = status->link_info; - hw_link_info->an_info = status->an_info; - hw_link_info->ext_info = status->ext_info; - hw_link_info->lse_enable = - le16_to_cpu(status->command_flags) & - I40E_AQ_LSE_ENABLE; - - /* process the event */ - i40e_link_event(pf); - - /* Do a new status request to re-enable LSE reporting - * and load new status information into the hw struct, - * then see if the status changed while processing the - * initial event. - */ - i40e_update_link_info(&pf->hw, true); - i40e_link_event(pf); } /** @@ -5607,13 +5756,12 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) if (oldval != val) wr32(&pf->hw, pf->hw.aq.asq.len, val); - event.msg_size = I40E_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40E_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; do { - event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */ ret = i40e_clean_arq_element(hw, &event, &pending); if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) break; @@ -5634,7 +5782,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) le32_to_cpu(event.desc.cookie_high), le32_to_cpu(event.desc.cookie_low), event.msg_buf, - event.msg_size); + event.msg_len); break; case i40e_aqc_opc_lldp_update_mib: dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); @@ -5740,6 +5888,9 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; + /* Enable LB mode for the main VSI now that it is on a VEB */ + i40e_enable_pf_switch_lb(pf); + /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) @@ -5967,6 +6118,7 @@ static void i40e_send_version(struct i40e_pf *pf) static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) { struct i40e_hw *hw = &pf->hw; + u8 set_fc_aq_fail = 0; i40e_status ret; u32 v; @@ -6038,6 +6190,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (ret) goto end_core_reset; + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + ret = i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (ret) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); + + /* make sure our flow control settings are restored */ + ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); + if (ret) + dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); + /* Rebuild the VSIs and VEBs that existed before reset. * They are still in our local switch element arrays, so only * need to rebuild the switch model in the HW. 
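The i40e_main.c hunks on either side of this point rework the link bring-up that follows a core reset: the driver first narrows the PHY event mask to link up/down and module-qualification reports, then restores the user's flow-control settings, and finally, after a short settle delay, restarts autonegotiation. A condensed sketch of that sequence, using only helpers that appear in this patch (the wrapper function and its name are illustrative, not part of the driver):

/* Illustrative only: mirrors the post-reset link re-init order added in
 * the surrounding hunks; error handling is reduced to dev_info() as in
 * the driver itself.
 */
static void i40e_rebuild_link_sketch(struct i40e_pf *pf)
{
	u8 set_fc_aq_fail = 0;
	i40e_status ret;

	/* subscribe only to link up/down and unqualified-module events */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);

	/* re-apply the flow-control settings lost across the reset */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);

	/* give firmware a moment to settle, then restart autoneg */
	msleep(75);
	ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
			 pf->hw.aq.asq_last_status);
}

Narrowing the event mask matches the comment in the hunk above: firmware stops reporting PHY events the driver would ignore, so only link transitions and unqualified-module reports reach the admin queue.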
@@ -6092,6 +6258,13 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) { + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); @@ -6149,12 +6322,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; - u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> + u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> I40E_GL_MDET_TX_VF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; - u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> - I40E_GL_MDET_TX_QUEUE_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >> + I40E_GL_MDET_TX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", event, queue, pf_num, vf_num); @@ -6167,8 +6341,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; - u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> - I40E_GL_MDET_RX_QUEUE_SHIFT; + u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >> + I40E_GL_MDET_RX_QUEUE_SHIFT) - + pf->hw.func_caps.base_queue; if (netif_msg_rx_err(pf)) dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", event, queue, func); @@ -6298,7 +6473,6 @@ static void i40e_service_task(struct work_struct *work) i40e_vc_process_vflr_event(pf); i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); - i40e_check_hang_subtask(pf); i40e_sync_filters_subtask(pf); #ifdef CONFIG_I40E_VXLAN i40e_sync_vxlan_filters_subtask(pf); @@ -6676,6 +6850,7 @@ static int i40e_init_msix(struct i40e_pf *pf) { i40e_status err = 0; struct i40e_hw *hw = &pf->hw; + int other_vecs = 0; int v_budget, i; int vec; @@ -6701,10 +6876,10 @@ static int i40e_init_msix(struct i40e_pf *pf) */ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); pf->num_vmdq_msix = pf->num_vmdq_qps; - v_budget = 1 + pf->num_lan_msix; - v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix); + other_vecs = 1; + other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); if (pf->flags & I40E_FLAG_FD_SB_ENABLED) - v_budget++; + other_vecs++; #ifdef I40E_FCOE if (pf->flags & I40E_FLAG_FCOE_ENABLED) { @@ -6714,7 +6889,9 @@ static int i40e_init_msix(struct i40e_pf *pf) #endif /* Scale down if necessary, and the rings will share vectors */ - v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors); + pf->num_lan_msix = min_t(int, pf->num_lan_msix, + (hw->func_caps.num_msix_vectors - other_vecs)); + v_budget = pf->num_lan_msix + other_vecs; pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -6964,20 +7141,16 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) **/ static int i40e_config_rss(struct i40e_pf *pf) { - /* Set of random keys generated using kernel random number generator */ - static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687, - 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, - 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, - 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be}; + u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; struct 
i40e_hw *hw = &pf->hw; u32 lut = 0; int i, j; u64 hena; u32 reg_val; - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), seed[i]); + wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]); /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | @@ -7341,7 +7514,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev, #endif static int i40e_get_phys_port_id(struct net_device *netdev, - struct netdev_phys_port_id *ppid) + struct netdev_phys_item_id *ppid) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -7356,18 +7529,18 @@ static int i40e_get_phys_port_id(struct net_device *netdev, return 0; } -#ifdef HAVE_FDB_OPS -#ifdef USE_CONST_DEV_UC_CHAR +/** + * i40e_ndo_fdb_add - add an entry to the hardware database + * @ndm: the input from the stack + * @tb: pointer to array of nladdr (unused) + * @dev: the net device pointer + * @addr: the MAC address entry being added + * @flags: instructions from stack about fdb operation + */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, - u16 flags) -#else -static int i40e_ndo_fdb_add(struct ndmsg *ndm, - struct net_device *dev, - unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) -#endif { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_pf *pf = np->vsi->back; @@ -7398,55 +7571,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, return err; } -#ifndef USE_DEFAULT_FDB_DEL_DUMP -#ifdef USE_CONST_DEV_UC_CHAR -static int i40e_ndo_fdb_del(struct ndmsg *ndm, - struct net_device *dev, - const unsigned char *addr) -#else -static int i40e_ndo_fdb_del(struct ndmsg *ndm, - struct net_device *dev, - unsigned char *addr) -#endif -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_pf *pf = np->vsi->back; - int err = -EOPNOTSUPP; - - if (ndm->ndm_state & NUD_PERMANENT) { - netdev_info(dev, "FDB only supports static addresses\n"); - return -EINVAL; - } - - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { - if (is_unicast_ether_addr(addr)) - err = dev_uc_del(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_del(dev, addr); - else - err = -EINVAL; - } - - return err; -} - -static int i40e_ndo_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, - int idx) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - struct i40e_pf *pf = np->vsi->back; - - if (pf->flags & I40E_FLAG_SRIOV_ENABLED) - idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx); - - return idx; -} - -#endif /* USE_DEFAULT_FDB_DEL_DUMP */ -#endif /* HAVE_FDB_OPS */ static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -7480,13 +7604,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_del_vxlan_port = i40e_del_vxlan_port, #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, -#ifdef HAVE_FDB_OPS .ndo_fdb_add = i40e_ndo_fdb_add, -#ifndef USE_DEFAULT_FDB_DEL_DUMP - .ndo_fdb_del = i40e_ndo_fdb_del, - .ndo_fdb_dump = i40e_ndo_fdb_dump, -#endif -#endif }; /** @@ -7682,6 +7800,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = 0x1; /* regular data port */ ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + 
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -7931,8 +8053,8 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) vsi->num_q_vectors, vsi->idx); if (vsi->base_vector < 0) { dev_info(&pf->pdev->dev, - "failed to get queue tracking for VSI %d, err=%d\n", - vsi->seid, vsi->base_vector); + "failed to get tracking for %d vectors for VSI %d, err=%d\n", + vsi->num_q_vectors, vsi->seid, vsi->base_vector); i40e_vsi_free_q_vectors(vsi); ret = -ENOENT; goto vector_setup_out; @@ -7968,8 +8090,9 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { - dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", - vsi->seid, ret); + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; @@ -8066,7 +8189,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); - + if (veb) { + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { + dev_info(&vsi->back->pdev->dev, + "%s: New VSI creation error, uplink seid of LAN VSI expected.\n", + __func__); + return NULL; + } + i40e_enable_pf_switch_lb(pf); + } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) veb = pf->veb[i]; @@ -8098,8 +8229,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); if (ret < 0) { - dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n", - vsi->seid, ret); + dev_info(&pf->pdev->dev, + "failed to get tracking for %d queues for VSI %d err=%d\n", + vsi->alloc_queue_pairs, vsi->seid, ret); goto err_vsi; } vsi->base_queue = ret; @@ -8206,6 +8338,7 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb) veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); veb->bw_max_quanta = ets_data.tc_bw_max; veb->is_abs_credits = bw_data.absolute_credits_enable; + veb->enabled_tc = ets_data.tc_valid_bits; tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) | (le16_to_cpu(bw_data.tc_bw_max[1]) << 16); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -8719,6 +8852,14 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? true : false); + /* fill in link information and enable LSE reporting */ + i40e_update_link_info(&pf->hw, true); + i40e_link_event(pf); + + /* Initialize user-specific link properties */ + pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & + I40E_AQ_AN_COMPLETED) ? 
true : false); + i40e_ptp_init(pf); return ret; @@ -8987,6 +9128,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.func = PCI_FUNC(pdev->devfn); pf->instance = pfs_found; + if (debug != -1) { + pf->msg_enable = pf->hw.debug_mask; + pf->msg_enable = debug; + } + /* do a special CORER for clearing PXE mode once at init */ if (hw->revision_id == 0 && (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) { @@ -9012,9 +9158,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; + snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1, - "%s-pf%d:misc", - dev_driver_string(&pf->pdev->dev), pf->hw.pf_id); + "%s-%s:misc", + dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); err = i40e_init_shared_code(hw); if (err) { @@ -9158,6 +9305,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } + /* driver is only interested in link up/down and module qualification + * reports from firmware + */ + err = i40e_aq_set_phy_int_mask(&pf->hw, + I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + if (err) + dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); + + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) { + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 25c4f9a3011f..3e70f2e45a47 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -61,7 +61,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw) } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; ret_code = I40E_ERR_NVM_BLANK_MODE; - hw_dbg(hw, "NVM init error: unsupported blank mode.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n"); } return ret_code; @@ -80,46 +80,45 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, { i40e_status ret_code = 0; u64 gtime, timeout; - u64 time = 0; + u64 time_left = 0; if (hw->nvm.blank_nvm_mode) goto i40e_i40e_acquire_nvm_exit; ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, - 0, &time, NULL); + 0, &time_left, NULL); /* Reading the Global Device Timer */ gtime = rd32(hw, I40E_GLVFGEN_TIMER); /* Store the timeout */ - hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime; + hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime; - if (ret_code) { - /* Set the polling timeout */ - if (time > I40E_MAX_NVM_TIMEOUT) - timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) - + gtime; - else - timeout = hw->nvm.hw_semaphore_timeout; + if (ret_code) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n", + access, time_left, ret_code, hw->aq.asq_last_status); + + if (ret_code && time_left) { /* Poll until the current NVM owner timeouts */ - while (gtime < timeout) { + timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime; + while ((gtime < timeout) && time_left) { usleep_range(10000, 20000); + gtime = rd32(hw, I40E_GLVFGEN_TIMER); ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, - access, 0, &time, + 
access, 0, &time_left, NULL); if (!ret_code) { hw->nvm.hw_semaphore_timeout = - I40E_MS_TO_GTIME(time) + gtime; + I40E_MS_TO_GTIME(time_left) + gtime; break; } - gtime = rd32(hw, I40E_GLVFGEN_TIMER); } if (ret_code) { hw->nvm.hw_semaphore_timeout = 0; - hw->nvm.hw_semaphore_wait = - I40E_MS_TO_GTIME(time) + gtime; - hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n", - time); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n", + time_left, ret_code, hw->aq.asq_last_status); } } @@ -160,7 +159,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) udelay(5); } if (ret_code == I40E_ERR_TIMEOUT) - hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n"); + i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set"); return ret_code; } @@ -179,7 +178,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u32 sr_reg; if (offset >= hw->nvm.sr_size) { - hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: offset %d beyond Shadow RAM limit %d\n", + offset, hw->nvm.sr_size); ret_code = I40E_ERR_PARAM; goto read_nvm_exit; } @@ -202,8 +203,9 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, } } if (ret_code) - hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", - offset); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM read error: Couldn't access Shadow RAM address: 0x%x\n", + offset); read_nvm_exit: return ret_code; @@ -263,14 +265,20 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, * Firmware will check the module-based model. */ if ((offset + words) > hw->nvm.sr_size) - hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) /* We can write only up to 4KB (one sector), in one AQ write */ - hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write fail error: tried to write %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) /* A single write cannot spread over two sectors */ - hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n"); + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + offset, words); else ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ @@ -438,6 +446,22 @@ static inline u8 i40e_nvmupd_get_transaction(u32 val) return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } +static char *i40e_nvm_update_state_str[] = { + "I40E_NVMUPD_INVALID", + "I40E_NVMUPD_READ_CON", + "I40E_NVMUPD_READ_SNT", + "I40E_NVMUPD_READ_LCB", + "I40E_NVMUPD_READ_SA", + "I40E_NVMUPD_WRITE_ERA", + "I40E_NVMUPD_WRITE_CON", + "I40E_NVMUPD_WRITE_SNT", + "I40E_NVMUPD_WRITE_LCB", + "I40E_NVMUPD_WRITE_SA", + "I40E_NVMUPD_CSUM_CON", + "I40E_NVMUPD_CSUM_SA", + "I40E_NVMUPD_CSUM_LCB", +}; + /** * i40e_nvmupd_command - Process an NVM update command * @hw: pointer to hardware structure @@ -471,6 +495,8 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, default: /* invalid state, should never happen */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: no such state %d\n", 
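
The reworked acquire path above boils down to: request the NVM semaphore from firmware, and if it is busy, keep retrying until either the acquire succeeds or the hold time firmware reported for the current owner (time_left) runs out. A minimal userspace model of that loop, not driver code; request_resource(), now_ms(), sleep_ms() and MAX_NVM_TIMEOUT_MS are stand-ins for the AQ request, the GTIME register read, usleep_range() and the driver's timeout constant:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_NVM_TIMEOUT_MS 18000ULL	/* stand-in, not the driver's value */

static uint64_t now_ms(void)		/* models the GTIME register read */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static void sleep_ms(unsigned int ms)	/* models usleep_range(10000, 20000) */
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

/* Fake firmware: the "other owner" releases the semaphore 50 ms in. */
static uint64_t owner_free_at;

static int request_resource(uint64_t *time_left_ms)
{
	uint64_t now = now_ms();

	if (now >= owner_free_at) {
		*time_left_ms = 0;
		return 0;			/* acquired */
	}
	*time_left_ms = owner_free_at - now;	/* owner's remaining hold time */
	return -1;				/* busy */
}

static int acquire_with_retry(void)
{
	uint64_t time_left = 0, gtime, timeout;
	int ret = request_resource(&time_left);

	gtime = now_ms();
	if (ret && time_left) {
		/* Poll until the current owner's reported hold time expires */
		timeout = gtime + MAX_NVM_TIMEOUT_MS;
		while (gtime < timeout && time_left) {
			sleep_ms(10);
			gtime = now_ms();
			ret = request_resource(&time_left);
			if (!ret)
				break;
		}
	}
	return ret;
}

int main(void)
{
	owner_free_at = now_ms() + 50;
	printf("acquire_with_retry() = %d\n", acquire_with_retry());
	return 0;
}

Note that when firmware reports time_left as zero there is no point in polling at all, which is exactly the extra condition the new while loop carries.
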
hw->nvmupd_state); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -501,7 +527,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_READ_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); i40e_release_nvm(hw); @@ -511,17 +538,22 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_READ_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno); - hw->nvmupd_state = I40E_NVMUPD_STATE_READING; + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_READING; } break; case I40E_NVMUPD_WRITE_ERA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_erase(hw, cmd, errno); if (status) @@ -534,7 +566,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); if (status) @@ -547,22 +580,28 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_SNT: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + if (status) + i40e_release_nvm(hw); + else + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; } break; case I40E_NVMUPD_CSUM_SA: status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { *errno = hw->aq.asq_last_status ? 
- i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; i40e_release_nvm(hw); } else { @@ -572,6 +611,9 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in init state\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_ERR_NVM; *errno = -ESRCH; break; @@ -611,6 +653,9 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw, break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in reading state.\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -644,33 +689,38 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, case I40E_NVMUPD_WRITE_LCB: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); - if (!status) { + if (!status) hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - } + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; case I40E_NVMUPD_CSUM_CON: status = i40e_update_nvm_checksum(hw); - if (status) + if (status) { *errno = hw->aq.asq_last_status ? - i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + } break; case I40E_NVMUPD_CSUM_LCB: status = i40e_update_nvm_checksum(hw); - if (status) { + if (status) *errno = hw->aq.asq_last_status ? - i40e_aq_rc_to_posix(hw->aq.asq_last_status) : + i40e_aq_rc_to_posix(status, + hw->aq.asq_last_status) : -EIO; - } else { + else hw->aq.nvm_release_on_done = true; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - } + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; break; default: + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: bad cmd %s in writing state.\n", + i40e_nvm_update_state_str[upd_cmd]); status = I40E_NOT_SUPPORTED; *errno = -ESRCH; break; @@ -702,8 +752,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, /* limits on data size */ if ((cmd->data_size < 1) || (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { - hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n", - cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command data_size %d\n", + cmd->data_size); *errno = -EFAULT; return I40E_NVMUPD_INVALID; } @@ -755,12 +806,16 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, } break; } + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->aq.nvm_release_on_done); if (upd_cmd == I40E_NVMUPD_INVALID) { *errno = -EFAULT; - hw_dbg(hw, - "i40e_nvmupd_validate_command returns %d errno: %d\n", - upd_cmd, *errno); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *errno); } return upd_cmd; } @@ -785,14 +840,18 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); - hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", + module, 
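
Taken together, the state handlers above implement a small three-state protocol: standalone (SA) commands acquire and release the semaphore around a single operation, SNT commands open a multi-part session and keep holding it, CON commands continue the session, and LCB commands finish it and drop back to the init state. The fixes in these hunks also make a failed SNT release the semaphore instead of entering the session state while still holding it, and make LCB return to init whether or not the last operation succeeded. A compact userspace model of the read side; all names here are illustrative, not the driver's:

#include <stdio.h>

enum state { ST_INIT, ST_READING };
enum cmd   { READ_SA, READ_SNT, READ_CON, READ_LCB };

static enum state st = ST_INIT;
static int locked;

static int acquire(void)      { locked = 1; return 0; }
static void release_nvm(void) { locked = 0; }
static int do_read(int fail)  { return fail ? -1 : 0; }

static int nvmupd(enum cmd c, int fail)
{
	int err = -1;

	switch (st) {
	case ST_INIT:
		if (c == READ_SA) {		/* one-shot: acquire, read, release */
			if (!acquire()) {
				err = do_read(fail);
				release_nvm();
			}
		} else if (c == READ_SNT) {	/* open a session */
			if (!acquire()) {
				err = do_read(fail);
				if (err)
					release_nvm();	/* failed: don't hold the lock */
				else
					st = ST_READING;
			}
		}
		break;
	case ST_READING:
		if (c == READ_CON) {		/* continue, lock still held */
			err = do_read(fail);
		} else if (c == READ_LCB) {	/* last chunk: finish and unlock */
			err = do_read(fail);
			release_nvm();
			st = ST_INIT;		/* back to init either way */
		}
		break;
	}
	return err;
}

int main(void)
{
	nvmupd(READ_SNT, 1);	/* fails; the semaphore must not leak */
	printf("after failed SNT: state=%d locked=%d\n", st, locked);
	nvmupd(READ_SNT, 0);
	nvmupd(READ_CON, 0);
	nvmupd(READ_LCB, 0);
	printf("after full session: state=%d locked=%d\n", st, locked);
	return 0;
}
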
cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_read status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } @@ -816,13 +875,17 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); - hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_erase status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } @@ -847,13 +910,18 @@ static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw, transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); - hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); + status = i40e_aq_update_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, NULL); - hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status); - if (status) - *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", + module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "i40e_nvmupd_nvm_write status %d aq %d\n", + status, hw->aq.asq_last_status); + *errno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 0988b5c1fe87..2fb4306597e8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -84,6 +84,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset); +i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, @@ -173,6 +175,9 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, + void *buff, u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, u16 udp_port, u8 protocol_index, u8 *filter_index, @@ -228,6 +233,10 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, u16 seid, struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, + struct i40e_asq_cmd_details *cmd_details); +i40e_status 
i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg); /* i40e_common */ i40e_status i40e_init_shared_code(struct i40e_hw *hw); i40e_status i40e_pf_reset(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 537b6216971d..6d1ec926aa37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -382,11 +382,17 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) incval = I40E_PTP_1GB_INCVAL; break; case I40E_LINK_SPEED_100MB: - dev_warn(&pf->pdev->dev, - "%s: 1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n", - __func__); + { + static int warn_once; + + if (!warn_once) { + dev_warn(&pf->pdev->dev, + "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); + warn_once++; + } incval = 0; break; + } case I40E_LINK_SPEED_40GB: default: incval = I40E_PTP_40GB_INCVAL; @@ -418,6 +424,9 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) { struct hwtstamp_config *config = &pf->tstamp_config; + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT : 0; } @@ -438,22 +447,12 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, struct hwtstamp_config *config) { struct i40e_hw *hw = &pf->hw; - u32 pf_id, tsyntype, regval; + u32 tsyntype, regval; /* Reserved for future extensions. */ if (config->flags) return -EINVAL; - /* Confirm that 1588 is supported on this PF. */ - pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> - I40E_PRTTSYN_CTL0_PF_ID_SHIFT; - if (hw->pf_id != pf_id) { - dev_err(&pf->pdev->dev, - "PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n", - hw->pf_id, hw->port, pf_id); - return -EPERM; - } - switch (config->tx_type) { case HWTSTAMP_TX_OFF: pf->ptp_tx = false; @@ -556,6 +555,9 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) struct hwtstamp_config config; int err; + if (!(pf->flags & I40E_FLAG_PTP)) + return -EOPNOTSUPP; + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; @@ -625,8 +627,22 @@ void i40e_ptp_init(struct i40e_pf *pf) { struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; struct i40e_hw *hw = &pf->hw; + u32 pf_id; long err; + /* Only one PF is assigned to control 1588 logic per port. 
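
The 100 Mbps hunk above replaces an unconditional dev_warn(), which would otherwise fire on every link transition at that speed, with a function-local static flag so the message is logged only once per module load. The pattern in isolation, as a plain C model rather than the driver function:

#include <stdio.h>

static void warn_100mb_once(void)
{
	static int warn_once;	/* persists across calls, shared module-wide */

	if (!warn_once) {
		fprintf(stderr,
			"1588 functionality is not supported at 100 Mbps\n");
		warn_once++;
	}
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		warn_100mb_once();	/* the warning prints exactly once */
	return 0;
}
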
Do not + * enable any support for PFs not assigned via PRTTSYN_CTL0.PF_ID + */ + pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >> + I40E_PRTTSYN_CTL0_PF_ID_SHIFT; + if (hw->pf_id != pf_id) { + pf->flags &= ~I40E_FLAG_PTP; + dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", + __func__, + netdev->name); + return; + } + /* we have to initialize the lock first, since we can't control * when the user will enter the PHC device entry points */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 3195d82e4942..04b441460bbd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2399,12 +2399,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* hardware can't handle really short frames, hardware padding works * beyond this point */ - if (unlikely(skb->len < I40E_MIN_TX_LEN)) { - if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len)) - return NETDEV_TX_OK; - skb->len = I40E_MIN_TX_LEN; - skb_set_tail_pointer(skb, I40E_MIN_TX_LEN); - } + if (skb_put_padto(skb, I40E_MIN_TX_LEN)) + return NETDEV_TX_OK; return i40e_xmit_frame_ring(skb, tx_ring); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index d7a625a6a14f..e60d3accb2e2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -30,10 +30,7 @@ /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ -#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ -#define I40E_MAX_IRATE 0x03F -#define I40E_MIN_IRATE 0x001 -#define I40E_IRATE_USEC_RESOLUTION 4 +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 #define I40E_ITR_20K 0x0019 #define I40E_ITR_8K 0x003E diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index ce04d9093db6..c1f2eb963357 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -43,6 +43,7 @@ #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -260,8 +261,7 @@ enum i40e_aq_resource_access_type { }; struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */ - u64 hw_semaphore_wait; /* - || - */ + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ @@ -380,9 +380,18 @@ struct i40e_fc_info { #define I40E_MAX_USER_PRIORITY 8 #define I40E_DCBX_MAX_APPS 32 #define I40E_LLDPDU_SIZE 1500 - -/* IEEE 802.1Qaz ETS Configuration data */ -struct i40e_ieee_ets_config { +#define I40E_TLV_STATUS_OPER 0x1 +#define I40E_TLV_STATUS_SYNC 0x2 +#define I40E_TLV_STATUS_ERR 0x4 +#define I40E_CEE_OPER_MAX_APPS 3 +#define I40E_APP_PROTOID_FCOE 0x8906 +#define I40E_APP_PROTOID_ISCSI 0x0cbc +#define I40E_APP_PROTOID_FIP 0x8914 +#define I40E_APP_SEL_ETHTYPE 0x1 +#define I40E_APP_SEL_TCPIP 0x2 + +/* CEE or IEEE 802.1Qaz ETS Configuration data */ +struct i40e_dcb_ets_config { u8 willing; u8 cbs; u8 maxtcs; @@ -391,34 +400,30 @@ struct i40e_ieee_ets_config { u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; }; -/* IEEE 802.1Qaz ETS Recommendation data */ -struct 
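
Several transmit paths in this series drop the open-coded pad / set-length / move-tail sequence in favor of skb_put_padto(), which zero-pads the frame up to the minimum length and, on failure, frees the skb itself, which is why the callers can simply return NETDEV_TX_OK. A rough userspace model of the helper's contract; fake_skb and put_padto() are illustrative stand-ins, not the kernel structures:

#include <stdio.h>
#include <string.h>

#define MIN_TX_LEN 17	/* the i40e minimum from the hunk above */

struct fake_skb {
	unsigned char data[64];
	size_t len;
	size_t capacity;
};

/* models skb_put_padto(): 0 on success, nonzero when padding failed */
static int put_padto(struct fake_skb *skb, size_t min_len)
{
	if (skb->len >= min_len)
		return 0;
	if (min_len > skb->capacity)
		return -1;	/* the real helper frees the skb here */
	memset(skb->data + skb->len, 0, min_len - skb->len);
	skb->len = min_len;
	return 0;
}

int main(void)
{
	struct fake_skb skb = { .len = 10, .capacity = 64 };

	if (put_padto(&skb, MIN_TX_LEN))
		return 1;	/* maps to the "return NETDEV_TX_OK" path */
	printf("padded len = %zu\n", skb.len);
	return 0;
}
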
i40e_ieee_ets_recommend { - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz PFC Configuration data */ -struct i40e_ieee_pfc_config { +/* CEE or IEEE 802.1Qaz PFC Configuration data */ +struct i40e_dcb_pfc_config { u8 willing; u8 mbc; u8 pfccap; u8 pfcenable; }; -/* IEEE 802.1Qaz Application Priority data */ -struct i40e_ieee_app_priority_table { +/* CEE or IEEE 802.1Qaz Application Priority data */ +struct i40e_dcb_app_priority_table { u8 priority; u8 selector; u16 protocolid; }; struct i40e_dcbx_config { + u8 dcbx_mode; +#define I40E_DCBX_MODE_CEE 0x1 +#define I40E_DCBX_MODE_IEEE 0x2 u32 numapps; - struct i40e_ieee_ets_config etscfg; - struct i40e_ieee_ets_recommend etsrec; - struct i40e_ieee_pfc_config pfc; - struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; + struct i40e_dcb_ets_config etscfg; + struct i40e_dcb_ets_config etsrec; + struct i40e_dcb_pfc_config pfc; + struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS]; }; /* Port hardware description */ @@ -476,6 +481,11 @@ struct i40e_hw { u32 debug_mask; }; +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + struct i40e_driver_version { u8 major_version; u8 minor_version; @@ -1371,6 +1381,18 @@ enum i40e_reset_type { I40E_RESET_EMPR = 3, }; +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0xD +struct i40e_lldp_variables { + u16 length; + u16 adminstatus; + u16 msgfasttx; + u16 msgtxinterval; + u16 txparams; + u16 timers; + u16 crc8; +}; + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 70951d2edcad..61dd1b187624 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -79,6 +79,7 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_VIRTCHNL_OP_GET_STATS, I40E_VIRTCHNL_OP_FCOE, + I40E_VIRTCHNL_OP_CONFIG_RSS, /* PF sends status change events to vfs using * the following op. 
*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4eeed267e4b7..5bae89550657 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -674,7 +674,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) * that the requested op was completed * successfully */ - udelay(10); + usleep_range(10, 20); reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) { rsd = true; @@ -707,7 +707,6 @@ complete_reset: wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); i40e_flush(hw); } -#ifdef CONFIG_PCI_IOV /** * i40e_enable_pf_switch_lb @@ -715,7 +714,7 @@ complete_reset: * * enable switch loop back or die - no point in a return value **/ -static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; @@ -742,7 +741,6 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) __func__, vsi->back->hw.aq.asq_last_status); } } -#endif /** * i40e_disable_pf_switch_lb @@ -1869,6 +1867,12 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) return 0; + /* re-enable vflr interrupt cause */ + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, reg); + i40e_flush(hw); + clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; @@ -1885,12 +1889,6 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) } } - /* re-enable vflr interrupt cause */ - reg = rd32(hw, I40E_PFINT_ICR0_ENA); - reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; - wr32(hw, I40E_PFINT_ICR0_ENA, reg); - i40e_flush(hw); - return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 0adc61e1052d..9452f5247cff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -126,5 +126,6 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); +void i40e_enable_pf_switch_lb(struct i40e_pf *pf); #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f206be917842..c1d25f8c1abc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -49,7 +49,7 @@ static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) static void i40e_adminq_init_regs(struct i40e_hw *hw) { /* set head and tail registers in our local struct */ - if (hw->mac.type == I40E_MAC_VF) { + if (i40e_is_vf(hw)) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; hw->aq.asq.len = I40E_VF_ATQLEN1; @@ -801,7 +801,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, */ if (!details->async && !details->postpone) { u32 total_delay = 0; - u32 delay_len = 10; do { /* AQ designers suggest use of head for better @@ -809,9 +808,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, */ if (i40evf_asq_done(hw)) break; - /* ugh! 
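
The admin-queue send path in the hunk that follows stops busy-waiting for command completion: the old loop spun in 10 us udelay() steps, while the new one sleeps 1 to 2 ms per pass and counts passes against a timeout now expressed in milliseconds, which is why I40E_ASQ_CMD_TIMEOUT later drops from 100000 usecs to 100 msecs. The shape of the new wait loop, modeled in plain C with asq_done() faking the descriptor writeback:

#include <stdio.h>
#include <time.h>

#define ASQ_CMD_TIMEOUT_MS 100	/* matches the new msec-based define */

/* fake completion source: "done" after a few polls */
static int asq_done(int *countdown)
{
	return --*countdown <= 0;
}

static void sleep_about_ms(long ms)	/* stands in for usleep_range() */
{
	struct timespec ts = { 0, ms * 1000000L };

	nanosleep(&ts, NULL);
}

int main(void)
{
	int hw_countdown = 7;
	unsigned int total_delay = 0;

	do {
		if (asq_done(&hw_countdown))
			break;
		sleep_about_ms(1);	/* usleep_range(1000, 2000) stand-in */
		total_delay++;		/* each pass is roughly one ms */
	} while (total_delay < ASQ_CMD_TIMEOUT_MS);

	printf("waited ~%u ms for completion\n", total_delay);
	return 0;
}
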
delay while spin_lock */ - udelay(delay_len); - total_delay += delay_len; + usleep_range(1000, 2000); + total_delay++; } while (total_delay < hw->aq.asq_cmd_timeout); } @@ -838,9 +836,6 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval; } - if (i40e_is_nvm_update_op(desc)) - hw->aq.nvm_busy = true; - i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, @@ -907,9 +902,6 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQRX: Queue is empty.\n"); ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } @@ -931,13 +923,10 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, e->desc = *desc; datalen = le16_to_cpu(desc->datalen); - e->msg_size = min(datalen, e->msg_size); - if (e->msg_buf != NULL && (e->msg_size != 0)) + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_size); - - if (i40e_is_nvm_update_op(&e->desc)) - hw->aq.nvm_busy = false; + e->msg_len); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 91a5c5bd80f3..6c31bf22c2c3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -28,6 +28,7 @@ #define _I40E_ADMINQ_H_ #include "i40e_osdep.h" +#include "i40e_status.h" #include "i40e_adminq_cmd.h" #define I40E_ADMINQ_DESC(R, i) \ @@ -76,7 +77,8 @@ struct i40e_asq_cmd_details { /* ARQ event information */ struct i40e_arq_event_info { struct i40e_aq_desc desc; - u16 msg_size; + u16 msg_len; + u16 buf_len; u8 *msg_buf; }; @@ -93,7 +95,6 @@ struct i40e_adminq_info { u16 fw_min_ver; /* firmware minor version */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_busy; bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ @@ -108,7 +109,7 @@ struct i40e_adminq_info { * i40e_aq_rc_to_posix - convert errors to user-land codes * aq_rc: AdminQ error code to convert **/ -static inline int i40e_aq_rc_to_posix(u16 aq_rc) +static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc) { int aq_to_posix[] = { 0, /* I40E_AQ_RC_OK */ @@ -136,12 +137,18 @@ static inline int i40e_aq_rc_to_posix(u16 aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; + /* aq_rc is invalid if AQ timed out */ + if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT) + return -EAGAIN; + + if (aq_rc >= ARRAY_SIZE(aq_to_posix)) + return -ERANGE; return aq_to_posix[aq_rc]; } /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */ +#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index e656ea7a7920..ff1b16370da9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -33,8 +33,8 @@ * This file needs to comply with the Linux Kernel coding style. 
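
Further down, i40e_aq_rc_to_posix() gains the AQ return status as a first argument for one specific reason: when the send itself timed out, firmware never wrote a return code, so asq_last_status is stale and must not be used to index the translation table; the new bounds check likewise guards against out-of-range codes. The logic in isolation, with the table truncated and ERR_ADMIN_QUEUE_TIMEOUT given a stand-in value rather than the driver's:

#include <errno.h>
#include <stdio.h>

enum { ERR_ADMIN_QUEUE_TIMEOUT = -54 };	/* stand-in, not the driver's value */

static int aq_rc_to_posix(int aq_ret, unsigned int aq_rc)
{
	static const int aq_to_posix[] = {
		0,	/* AQ_RC_OK */
		-EPERM,	/* AQ_RC_EPERM */
		-ENOENT,	/* AQ_RC_ENOENT */
		-ESRCH,	/* AQ_RC_ESRCH */
		/* ... the remaining codes follow the table above ... */
	};

	if (aq_ret == ERR_ADMIN_QUEUE_TIMEOUT)
		return -EAGAIN;	/* aq_rc was never written; don't trust it */
	if (aq_rc >= sizeof(aq_to_posix) / sizeof(aq_to_posix[0]))
		return -ERANGE;	/* defend against a bogus code */
	return aq_to_posix[aq_rc];
}

int main(void)
{
	printf("%d %d %d\n",
	       aq_rc_to_posix(0, 1),			/* -EPERM */
	       aq_rc_to_posix(ERR_ADMIN_QUEUE_TIMEOUT, 1),	/* -EAGAIN */
	       aq_rc_to_posix(0, 99));			/* -ERANGE */
	return 0;
}
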
*/ -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 #define I40E_FW_API_VERSION_A0_MINOR 0x0000 struct i40e_aq_desc { @@ -67,216 +67,216 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev 
cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ }; /* Admin Queue command opcodes */ enum i40e_admin_queue_opc { /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, + i40e_aqc_opc_clear_pxe_mode = 0x0110, /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - 
i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 
0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel 
= 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, + i40e_aqc_opc_event_lan_overflow = 0x1001, /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, - i40e_aqc_opc_debug_modify_internals = 0xFF09, + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, + i40e_aqc_opc_debug_modify_internals = 0xFF09, }; /* command structures and indirect data structures */ @@ -303,7 +303,7 @@ enum i40e_admin_queue_opc { /* This macro is used extensively to ensure that command structures are 16 * bytes in length as they have to map to the raw array of that size. */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) /* internal (0x00XX) commands */ @@ -321,22 +321,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); /* Send driver version (indirect 0x0002) */ struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); /* Queue Shutdown (direct 0x0003) */ struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); @@ -352,19 +352,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); @@ -374,7 +374,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 
reserved[2]; __le32 count; @@ -385,123 +385,123 @@ struct i40e_aqc_list_capabilites { I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; + __le16 id; + u8 major_rev; + u8 minor_rev; + __le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; }; /* list of caps */ -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 - __le16 
table_id; - __le32 pfpm_proxyfc; - __le32 ip_addr; - u8 mac_addr[6]; + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; }; /* Set NS Proxy Table Entry Command (indirect 0x0105) */ struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ - u8 ipv6_addr_1[16]; + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! 
spec specifies BE byte order */ + u8 ipv6_addr_1[16]; }; /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; }; /* Manage MAC Address Read Command (indirect 0x0107) */ struct i40e_aqc_mac_address_read { __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); @@ -517,14 +517,14 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); @@ -545,10 +545,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); * command */ struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); @@ -557,34 +557,34 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; }; struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define 
I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; }; /* Get Switch Configuration (indirect 0x0200) @@ -592,73 +592,73 @@ struct i40e_aqc_switch_config_element_resp { * the first in the array is the header, remainder are elements */ struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; }; /* Add Statistics (direct 0x0201) * Remove Statistics (direct 0x0202) */ struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); /* Get Switch Resource Allocation (indirect 0x0204) */ struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define 
I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; }; /* Add VSI (indirect 0x0210) @@ -672,24 +672,24 @@ struct i40e_aqc_switch_resource_alloc_element_resp { * uses the same completion and data structure as Add VSI */ struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); @@ -707,121 +707,121 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); struct i40e_aqc_vsi_properties_data { /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; + __le16 switch_id; /* 12bit id combined with flags below */ +#define 
I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) 
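[Editor's aside on the ingress_table encoding above — the UP2 through UP7 shift/mask pairs continue in the same pattern just below: each user priority owns a 3-bit slot, UPn at bit offset 3*n, so one 32-bit word carries the whole UP-to-TC mapping. A minimal self-contained sketch of packing such a table; build_up_table is an illustrative helper, not a driver function, and the real field is stored little-endian (__le32), a conversion this host-order sketch omits.]

#include <stdint.h>
#include <stdio.h>

/* Pack one 3-bit traffic class per user priority (UP0..UP7),
 * following the I40E_AQ_VSI_UP_TABLE_UPn_SHIFT/MASK layout:
 * UPn lives at bit offset 3*n, width 3.
 */
static uint32_t build_up_table(const uint8_t tc_for_up[8])
{
	uint32_t table = 0;

	for (int up = 0; up < 8; up++)
		table |= (uint32_t)(tc_for_up[up] & 0x7) << (3 * up);
	return table;
}

int main(void)
{
	/* Example mapping: UP0-UP3 -> TC0, UP4-UP7 -> TC1. */
	const uint8_t map[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

	printf("ingress_table = 0x%08x\n", build_up_table(map)); /* 0x00249000 */
	return 0;
}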
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 - u8 queueing_opt_reserved[3]; + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; + u8 up_enable_bits; + u8 sched_reserved; /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress table */ - u8 cmd_reserved[8]; + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; + __le16 qs_handle[8]; #define 
I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; }; I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); @@ -831,26 +831,26 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); * (IS_CTRL_PORT only works on add PV) */ struct i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); @@ -860,48 +860,48 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); */ struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); /* Add VEB (direct 0x0230) */ struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 - u8 enable_tcs; - u8 reserved[9]; +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; + u8 reserved[6]; + __le16 switch_seid; /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - 
__le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); @@ -910,13 +910,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); * uses i40e_aqc_switch_seid for the descriptor */ struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); @@ -929,37 +929,37 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); /* used for the command for most vlan commands */ struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); /* indirect data for command and response */ struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_macvlan_completion { @@ -979,19 +979,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); */ struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define 
I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; }; /* Add VLAN (indirect 0x0252) @@ -999,59 +999,58 @@ struct i40e_aqc_remove_macvlan_element_data { * use the generic i40e_aqc_macvlan for the command */ struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; + __le16 vlan_tag; + u8 vlan_flags; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; /* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF /* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; }; struct i40e_aqc_add_remove_vlan_completion { - u8 reserved[4]; - __le16 vlans_used; - __le16 vlans_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 vlans_used; + __le16 vlans_free; + __le32 addr_high; + __le32 addr_low; }; /* Set VSI Promiscuous Modes (direct 0x0254) */ struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; + __le16 promiscuous_flags; + __le16 valid_flags; /* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 
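[Editor's aside: the five flag values above do double duty — promiscuous_flags carries the requested state and valid_flags selects which of those bits the firmware should apply; the seid and vlan_tag fields completing the descriptor follow below. A hedged sketch of filling such a 16-byte payload with local stand-in types; the struct and the fill_promisc_cmd helper are illustrative, and a little-endian host is assumed in place of proper cpu_to_le16 conversion.]

#include <stdint.h>
#include <string.h>

#define PROMISC_UNICAST   0x01  /* I40E_AQC_SET_VSI_PROMISC_UNICAST */
#define PROMISC_MULTICAST 0x02  /* I40E_AQC_SET_VSI_PROMISC_MULTICAST */
#define PROM_SEID_MASK    0x3FF /* I40E_AQC_VSI_PROM_CMD_SEID_MASK */

struct set_vsi_promisc_cmd {    /* mirrors i40e_aqc_set_vsi_promiscuous_modes */
	uint16_t promiscuous_flags;
	uint16_t valid_flags;
	uint16_t seid;
	uint16_t vlan_tag;
	uint8_t  reserved[8];
};

/* Turn unicast promiscuous mode on for one VSI while leaving the
 * multicast setting untouched by not marking that bit valid.
 */
static void fill_promisc_cmd(struct set_vsi_promisc_cmd *cmd, uint16_t seid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->promiscuous_flags = PROMISC_UNICAST;
	cmd->valid_flags       = PROMISC_UNICAST;
	cmd->seid              = seid & PROM_SEID_MASK;
}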
+ __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); @@ -1060,23 +1059,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; + __le16 tag; + __le16 queue_number; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); struct i40e_aqc_add_remove_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); @@ -1085,12 +1084,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); * Uses generic i40e_aqc_add_remove_tag_completion for completion */ struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; + __le16 tag; + u8 reserved[12]; }; /* Add multicast E-Tag (direct 0x0257) @@ -1098,22 +1097,22 @@ struct i40e_aqc_remove_tag { * and no external data */ struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 addr_low; + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; }; @@ -1121,21 +1120,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); @@ -1146,30 +1145,30 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); * and the generic direct completion structure */ struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; - __le16 flags; -#define 
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; + __le16 queue; + u8 reserved[2]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); @@ -1180,23 +1179,23 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); * and the generic indirect completion structure */ struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); struct i40e_aqc_add_remove_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; union { struct { u8 reserved[12]; @@ -1206,52 +1205,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { u8 data[16]; } v6; } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ I40E_AQC_ADD_CLOUD_FILTER_SHIFT) -#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007 /* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 /* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 /* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 /* 0x0007 reserved */ /* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define 
I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; }; struct i40e_aqc_remove_cloud_filters_completion { @@ -1273,14 +1269,14 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); struct i40e_aqc_add_delete_mirror_rule { __le16 seid; __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 __le16 num_entries; __le16 destination; /* VSI for add, rule id for delete */ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ @@ -1290,12 +1286,12 @@ struct i40e_aqc_add_delete_mirror_rule { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 
addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); @@ -1306,11 +1302,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); * the command and response use the same descriptor structure */ struct i40e_aqc_pfc_ignore { - u8 tc_bitmap; - u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 - u8 reserved[14]; + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); @@ -1325,10 +1321,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); * this generic struct to pass the SEID in param0 */ struct i40e_aqc_tx_sched_ind { - __le16 vsi_seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); @@ -1340,12 +1336,12 @@ struct i40e_aqc_qs_handles_resp { /* Configure VSI BW limits (direct 0x0400) */ struct i40e_aqc_configure_vsi_bw_limit { - __le16 vsi_seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_credit; /* 0-3, limit = 2^max */ - u8 reserved2[7]; + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); @@ -1354,58 +1350,58 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_ets_sla_bw_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) * responds with i40e_aqc_qs_handles_resp */ struct i40e_aqc_configure_vsi_tc_bw_data { - u8 tc_valid_bits; - u8 reserved[3]; - u8 tc_bw_credits[8]; - u8 reserved1[4]; - __le16 qs_handles[8]; + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; }; /* Query vsi bw configuration (indirect 0x0408) */ struct i40e_aqc_query_vsi_bw_config_resp { - u8 tc_valid_bits; - u8 tc_suspended_bits; - u8 reserved[14]; - __le16 qs_handles[8]; - u8 reserved1[4]; - __le16 port_bw_limit; - u8 reserved2[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved3[23]; + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; }; /* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ struct i40e_aqc_query_vsi_ets_sla_config_resp { - u8 tc_valid_bits; - u8 reserved[3]; - u8 share_credits[8]; - __le16 credits[8]; + u8 tc_valid_bits; + u8 reserved[3]; + u8 share_credits[8]; + __le16 credits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Configure Switching Component Bandwidth Limit (direct 0x0410) */ struct i40e_aqc_configure_switching_comp_bw_limit { - __le16 seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved2[7]; + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 
reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); @@ -1415,75 +1411,75 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); * Disable Physical Port ETS (indirect 0x0415) */ struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; }; /* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; + __le16 tc_bw_max[2]; + u8 reserved1[28]; }; /* Configure Switching Component Bandwidth Allocation per Tc * (indirect 0x0417) */ struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; }; /* Query Switching Component Configuration (indirect 0x0418) */ struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; }; /* Query PhysicalPort ETS Configuration (indirect 0x0419) */ struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; + __le16 tc_bw_max[2]; + u8 reserved3[32]; }; /* Query Switching Component Bandwidth Allocation per Traffic Type * (indirect 0x041A) */ struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; + __le16 tc_bw_max[2]; }; /* Suspend/resume port TX traffic @@ -1494,37 +1490,37 @@ struct i40e_aqc_query_switching_comp_bw_config_resp { * (indirect 0x041D) */ struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ }; /* Get and set the active HMC resource profile and status. 
* (direct 0x0500) and (direct 0x0501) */ struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); enum i40e_aq_hmc_profile { /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, }; -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 enum i40e_aq_phy_type { I40E_PHY_TYPE_SGMII = 0x0, @@ -1582,147 +1578,147 @@ struct i40e_aqc_module_desc { }; struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 reserved[3]; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; }; /* Set PHY Config (direct 0x0601) */ struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; + __le32 phy_type; + u8 link_speed; + u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ #define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 reserved[3]; + __le16 eee_capability; + 
__le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); /* Set MAC Config command data structure (direct 0x0603) */ struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); /* Restart Auto-Negotiation (direct 0x605) */ struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define 
I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); /* Set event mask command (direct 0x613) */ struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); @@ -1732,27 +1728,27 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); * Get Link Partner AN advt register (direct 0x0616) */ struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); /* Set Loopback mode (0x0618) */ struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) 
*/ struct i40e_aqc_set_phy_debug { - u8 command_flags; + u8 command_flags; #define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ @@ -1761,15 +1757,15 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; /* NVM Read command (indirect 0x0701) @@ -1777,40 +1773,40 @@ enum i40e_aq_phy_reg_type { * NVM Update commands (indirect 0x0703) */ struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { - __le16 cmd_flags; + __le16 cmd_flags; #define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 #define ANVM_READ_SINGLE_FEATURE 0 #define ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - u8 reserved[2]; - __le32 address_high; - __le32 address_low; + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + u8 reserved[2]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); /* NVM Config Write (indirect 0x0705) */ struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); @@ -1835,10 +1831,10 @@ struct i40e_aqc_nvm_config_data_immediate_field { * Send to Peer PF command (indirect 0x0803) */ struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); @@ -1874,22 +1870,22 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); * uses i40e_aq_desc */ struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; + __le16 cmd_flags; #define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 #define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 #define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 #define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; + u8 reserved[14]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { - __le32 mode; + __le32 mode; #define I40E_AQ_ALTERNATE_MODE_NONE 0 #define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); @@ -1900,33 +1896,33 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); /* Lan Queue Overflow Event (direct, 0x1001) */ struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; + __le32 prtdcb_rupto; + 
__le32 otx_ctl; + u8 reserved[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); /* Get LLDP MIB (indirect 0x0A00) */ struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); @@ -1935,12 +1931,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); * also used for the event (with type in the command field) */ struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); @@ -1949,35 +1945,35 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); * Delete LLDP TLV (indirect 0x0A04) */ struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); /* Update LLDP TLV (indirect 0x0A03) */ struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); @@ -1985,9 +1981,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); /* Start LLDP (direct 0x0A06) */ struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); @@ -1998,13 
+1994,13 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); /* Add Udp Tunnel command and completion (direct 0x0B00) */ struct i40e_aqc_add_udp_tunnel { - __le16 udp_port; - u8 reserved0[3]; - u8 protocol_type; + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 - u8 reserved1[10]; + u8 reserved1[10]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); @@ -2013,8 +2009,8 @@ struct i40e_aqc_add_udp_tunnel_completion { __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 u8 total_filters; u8 reserved[11]; }; @@ -2023,23 +2019,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); /* remove UDP Tunnel command (0x0B01) */ struct i40e_aqc_remove_udp_tunnel { - u8 reserved[2]; - u8 index; /* 0 to 15 */ - u8 pf_filters; - u8 total_filters; - u8 reserved2[11]; + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); struct i40e_aqc_del_udp_tunnel_completion { - __le16 udp_port; - u8 index; /* 0 to 15 */ - u8 multiple_pfs; - u8 total_filters_used; - u8 reserved; - u8 tunnels_free; - u8 reserved1[9]; + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); @@ -2068,11 +2060,11 @@ struct i40e_aqc_tunnel_key_structure { u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 /* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; #define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 #define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 @@ -2085,21 +2077,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - u8 param_value2[8]; + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + u8 param_value2[8]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); @@ -2111,18 +2103,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); /* set test more (0xFF01, internal) */ struct i40e_acq_set_test_mode { - u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 
+#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); @@ -2175,21 +2167,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); #define I40E_AQ_CLUSTER_ID_ALTRAM 11 struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 952560551964..28c40c57d4f5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -50,6 +50,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index f6dcf9dd9290..c7f29626eada 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -30,10 +30,7 @@ /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ -#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */ -#define I40E_MAX_IRATE 0x03F -#define I40E_MIN_IRATE 0x001 -#define I40E_IRATE_USEC_RESOLUTION 4 +#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */ #define I40E_ITR_100K 0x0005 #define I40E_ITR_20K 0x0019 #define I40E_ITR_8K 0x003E diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 15376436cead..68aec11f6523 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -43,6 +43,7 @@ #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 +#define I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -259,8 +260,7 @@ enum i40e_aq_resource_access_type { }; struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */ - u64 hw_semaphore_wait; /* - || - */ + u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ u32 timeout; /* [ms] */ u16 sr_size; /* Shadow RAM size in words */ bool blank_nvm_mode; /* is NVM empty (no FW present)*/ @@ -475,6 +475,11 @@ struct i40e_hw { u32 debug_mask; }; +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return hw->mac.type == I40E_MAC_VF; +} + struct i40e_driver_version { u8 major_version; u8 minor_version; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index cd18d5689006..e0c8208138f4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -79,6 +79,7 @@ enum i40e_virtchnl_ops { 
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_VIRTCHNL_OP_GET_STATS, I40E_VIRTCHNL_OP_FCOE, + I40E_VIRTCHNL_OP_CONFIG_RSS, /* PF sends status change events to vfs using * the following op. */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 30ef519d4b91..981224743c73 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -191,6 +191,7 @@ struct i40evf_adapter { struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; struct list_head vlan_filter_list; char misc_vector_name[IFNAMSIZ + 9]; + int num_active_queues; /* TX */ struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; @@ -243,7 +244,7 @@ struct i40evf_adapter { struct i40e_hw hw; /* defined in i40e_type.h */ enum i40evf_state_t state; - volatile unsigned long crit_section; + unsigned long crit_section; struct work_struct watchdog_task; bool netdev_registered; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index efee6b290c0f..69b97bac182c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -58,8 +58,8 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) #define I40EVF_QUEUE_STATS_LEN(_dev) \ - (((struct i40evf_adapter *) \ - netdev_priv(_dev))->vsi_res->num_queue_pairs \ + (((struct i40evf_adapter *)\ + netdev_priv(_dev))->num_active_queues \ * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) @@ -121,11 +121,11 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; data[i] = *(u64 *)p; } - for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) { + for (j = 0; j < adapter->num_active_queues; j++) { data[i++] = adapter->tx_rings[j]->stats.packets; data[i++] = adapter->tx_rings[j]->stats.bytes; } - for (j = 0; j < adapter->vsi_res->num_queue_pairs; j++) { + for (j = 0; j < adapter->num_active_queues; j++) { data[i++] = adapter->rx_rings[j]->stats.packets; data[i++] = adapter->rx_rings[j]->stats.bytes; } @@ -151,13 +151,13 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); p += ETH_GSTRING_LEN; } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); p += ETH_GSTRING_LEN; snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); @@ -175,6 +175,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) static u32 i40evf_get_msglevel(struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } @@ -189,6 +190,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev) static void i40evf_set_msglevel(struct net_device *netdev, u32 data) { struct i40evf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -219,7 +221,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, * but the number of rings is not reported. 
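/* The ethtool stats code below sizes its output from the new
 * num_active_queues field rather than the PF-granted num_queue_pairs,
 * since the VF may run fewer queues than it was allocated. A standalone
 * restatement of the I40EVF_QUEUE_STATS_LEN arithmetic (illustrative
 * names) shows why the strings and values arrays stay in sync: one tx
 * and one rx ring per active queue, two u64 counters per ring.
 */
#include <stdint.h>
#include <stdio.h>

struct queue_stats {		/* mirrors struct i40e_queue_stats */
	uint64_t packets;
	uint64_t bytes;
};

static unsigned int queue_stats_len(unsigned int num_active_queues)
{
	return num_active_queues * 2 *
	       (sizeof(struct queue_stats) / sizeof(uint64_t));
}

int main(void)
{
	printf("%u\n", queue_stats_len(4));	/* 4 queues -> 16 counters */
	return 0;
}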
**/ static void i40evf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -280,7 +282,7 @@ static int i40evf_set_ringparam(struct net_device *netdev, * this functionality. **/ static int i40evf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_vsi *vsi = &adapter->vsi; @@ -308,7 +310,7 @@ static int i40evf_get_coalesce(struct net_device *netdev, * Change current coalescing settings. **/ static int i40evf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; @@ -430,7 +432,7 @@ static int i40evf_get_rxnfc(struct net_device *netdev, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = adapter->vsi_res->num_queue_pairs; + cmd->data = adapter->num_active_queues; ret = 0; break; case ETHTOOL_GRXFH: @@ -598,12 +600,12 @@ static void i40evf_get_channels(struct net_device *netdev, struct i40evf_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ - ch->max_combined = adapter->vsi_res->num_queue_pairs; + ch->max_combined = adapter->num_active_queues; ch->max_other = NONQ_VECS; ch->other_count = NONQ_VECS; - ch->combined_count = adapter->vsi_res->num_queue_pairs; + ch->combined_count = adapter->num_active_queues; } /** @@ -621,17 +623,23 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) * i40evf_get_rxfh - get the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table - * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) + * @key: hash key * * Reads the indirection table directly from the hardware. Always returns 0. **/ -static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 hlut_val; int i, j; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); indir[j++] = hlut_val & 0xff; @@ -646,19 +654,26 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) * i40evf_set_rxfh - set the rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table - * @key: hash key (will be %NULL until get_rxfh_key_size is implemented) + * @key: hash key * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table.
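/* The rxfh ethtool hooks in this hunk pick up the new hfunc argument:
 * get_rxfh reports the hash function (and must tolerate NULL for any
 * output the core did not ask for), while set_rxfh rejects whatever the
 * hardware cannot change. A minimal sketch of the contract, with
 * illustrative function names; the error code and ETH_RSS_HASH_* values
 * are the ones visible in the hunk itself.
 */
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */
	if (indir) {
		/* copy the hardware LUT into indir[] here */
	}
	return 0;
}

static int example_set_rxfh(struct net_device *netdev, const u32 *indir,
			    const u8 *key, const u8 hfunc)
{
	/* key is device-managed and only Toeplitz is supported */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!indir)
		return 0;
	/* program the hardware LUT from indir[] here */
	return 0;
}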
**/ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; u32 hlut_val; int i, j; + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { hlut_val = indir[j++]; hlut_val |= indir[j++] << 8; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c51bc7a33bc5..cabaf599f562 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.0.5" +#define DRV_VERSION "1.0.6" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -185,6 +185,7 @@ static void i40evf_tx_timeout(struct net_device *netdev) static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; + wr32(hw, I40E_VFINT_DYN_CTL01, 0); /* read flush */ @@ -200,6 +201,7 @@ static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter) static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; + wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK); @@ -226,7 +228,6 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter) } /* read flush */ rd32(hw, I40E_VFGEN_RSTAT); - } /** @@ -253,8 +254,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) * @adapter: board private structure * @mask: bitmap of vectors to trigger **/ -static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, - u32 mask) +static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) { struct i40e_hw *hw = &adapter->hw; int i; @@ -397,8 +397,8 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) int q_vectors; int v_start = 0; int rxr_idx = 0, txr_idx = 0; - int rxr_remaining = adapter->vsi_res->num_queue_pairs; - int txr_remaining = adapter->vsi_res->num_queue_pairs; + int rxr_remaining = adapter->num_active_queues; + int txr_remaining = adapter->num_active_queues; int i, j; int rqpv, tqpv; int err = 0; @@ -551,6 +551,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) { int i; int q_vectors; + q_vectors = adapter->num_msix_vectors - NONQ_VECS; for (i = 0; i < q_vectors; i++) { @@ -584,7 +585,8 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + + for (i = 0; i < adapter->num_active_queues; i++) adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); } @@ -629,7 +631,7 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) rx_buf_len = ALIGN(max_frame, 1024); } - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i]->rx_buf_len = rx_buf_len; } @@ -667,9 +669,9 @@ i40evf_vlan_filter 
*i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) struct i40evf_vlan_filter *f; f = i40evf_find_vlan(adapter, vlan); - if (NULL == f) { + if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) + if (!f) return NULL; f->vlan = vlan; @@ -705,7 +707,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) * @vid: VLAN tag **/ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -720,7 +722,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, * @vid: VLAN tag **/ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, - __always_unused __be16 proto, u16 vid) + __always_unused __be16 proto, u16 vid) { struct i40evf_adapter *adapter = netdev_priv(netdev); @@ -772,9 +774,9 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, udelay(1); f = i40evf_find_filter(adapter, macaddr); - if (NULL == f) { + if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) { + if (!f) { clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); return NULL; @@ -881,6 +883,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; + q_vector = adapter->q_vector[q_idx]; napi = &q_vector->napi; napi_enable(napi); @@ -918,8 +921,9 @@ static void i40evf_configure(struct i40evf_adapter *adapter) i40evf_configure_rx(adapter); adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = adapter->rx_rings[i]; + i40evf_alloc_rx_buffers(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); @@ -950,7 +954,7 @@ static void i40evf_clean_all_rx_rings(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) i40evf_clean_rx_ring(adapter->rx_rings[i]); } @@ -962,7 +966,7 @@ static void i40evf_clean_all_tx_rings(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) i40evf_clean_tx_ring(adapter->tx_rings[i]); } @@ -1064,7 +1068,7 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter) if (!adapter->vsi_res) return; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { if (adapter->tx_rings[i]) kfree_rcu(adapter->tx_rings[i], rcu); adapter->tx_rings[i] = NULL; @@ -1084,11 +1088,11 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *tx_ring; struct i40e_ring *rx_ring; - tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); + tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL); if (!tx_ring) goto err_out; @@ -1130,7 +1134,7 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) err = -EIO; goto out; } - pairs = adapter->vsi_res->num_queue_pairs; + pairs = adapter->num_active_queues; /* It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors @@ -1172,14 +1176,14 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) num_q_vectors = 
adapter->num_msix_vectors - NONQ_VECS; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { - q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL); + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); if (!q_vector) goto err_out; q_vector->adapter = adapter; q_vector->vsi = &adapter->vsi; q_vector->v_idx = q_idx; netif_napi_add(adapter->netdev, &q_vector->napi, - i40evf_napi_poll, NAPI_POLL_WEIGHT); + i40evf_napi_poll, NAPI_POLL_WEIGHT); adapter->q_vector[q_idx] = q_vector; } @@ -1210,7 +1214,7 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter) int napi_vectors; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; - napi_vectors = adapter->vsi_res->num_queue_pairs; + napi_vectors = adapter->num_active_queues; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { struct i40e_q_vector *q_vector = adapter->q_vector[q_idx]; @@ -1265,8 +1269,8 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter) } dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", - (adapter->vsi_res->num_queue_pairs > 1) ? "Enabled" : - "Disabled", adapter->vsi_res->num_queue_pairs); + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); return 0; err_alloc_queues: @@ -1284,6 +1288,7 @@ err_set_interrupt: static void i40evf_watchdog_timer(unsigned long data) { struct i40evf_adapter *adapter = (struct i40evf_adapter *)data; + schedule_work(&adapter->watchdog_task); /* timer will be rescheduled in watchdog task */ } @@ -1295,8 +1300,8 @@ static void i40evf_watchdog_timer(unsigned long data) static void i40evf_watchdog_task(struct work_struct *work) { struct i40evf_adapter *adapter = container_of(work, - struct i40evf_adapter, - watchdog_task); + struct i40evf_adapter, + watchdog_task); struct i40e_hw *hw = &adapter->hw; uint32_t rstat_val; @@ -1334,7 +1339,7 @@ static void i40evf_watchdog_task(struct work_struct *work) /* check for reset */ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; + I40E_VFGEN_RSTAT_VFR_STATE_MASK; if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && (rstat_val != I40E_VFR_VFACTIVE) && (rstat_val != I40E_VFR_COMPLETED)) { @@ -1425,7 +1430,7 @@ static int next_queue(struct i40evf_adapter *adapter, int j) { j += 1; - return j >= adapter->vsi_res->num_queue_pairs ? 0 : j; + return j >= adapter->num_active_queues ? 0 : j; } /** @@ -1434,23 +1439,23 @@ static int next_queue(struct i40evf_adapter *adapter, int j) **/ static void i40evf_configure_rss(struct i40evf_adapter *adapter) { + u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; struct i40e_hw *hw = &adapter->hw; u32 lut = 0; int i, j; u64 hena; - /* Set of random keys generated using kernel random number generator */ - static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = { - 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127, - 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0, - 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e, - 0x4954b126 }; + /* No RSS for single queue. 
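/* The hunk below replaces a hard-coded RSS seed with
 * netdev_rss_key_fill(), which copies a random key generated once on
 * first use and shared by all callers, so the hash is no longer
 * predictable from the driver source. A minimal sketch of the
 * fill-then-program pattern, assuming the i40evf register names and
 * wr32() helper used in this hunk:
 */
static void example_seed_rss_key(struct i40e_hw *hw)
{
	u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
	int i;

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
		wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
}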
*/ + if (adapter->num_active_queues == 1) { + wr32(hw, I40E_VFQF_HENA(0), 0); + wr32(hw, I40E_VFQF_HENA(1), 0); + return; + } /* Hash type is configured by the PF - we just supply the key */ - - /* Fill out hash function seed */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed[i]); + wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ hena = I40E_DEFAULT_RSS_HENA; @@ -1458,7 +1463,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. of queues in round robin fashion */ - j = adapter->vsi_res->num_queue_pairs; + j = adapter->num_active_queues; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { j = next_queue(adapter, j); lut = j; @@ -1494,7 +1499,7 @@ static void i40evf_reset_task(struct work_struct *work) while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) - udelay(500); + usleep_range(500, 1000); if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); @@ -1508,8 +1513,7 @@ static void i40evf_reset_task(struct work_struct *work) if ((rstat_val != I40E_VFR_VFACTIVE) && (rstat_val != I40E_VFR_COMPLETED)) break; - else - msleep(I40EVF_RESET_WAIT_MS); + msleep(I40EVF_RESET_WAIT_MS); } if (i == I40EVF_RESET_WAIT_COUNT) { adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; @@ -1523,8 +1527,7 @@ static void i40evf_reset_task(struct work_struct *work) if ((rstat_val == I40E_VFR_VFACTIVE) || (rstat_val == I40E_VFR_COMPLETED)) break; - else - msleep(I40EVF_RESET_WAIT_MS); + msleep(I40EVF_RESET_WAIT_MS); } if (i == I40EVF_RESET_WAIT_COUNT) { struct i40evf_mac_filter *f, *ftmp; @@ -1575,12 +1578,12 @@ continue_reset: /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) dev_warn(&adapter->pdev->dev, - "%s: Failed to destroy the Admin Queue resources\n", - __func__); + "%s: Failed to destroy the Admin Queue resources\n", + __func__); err = i40evf_init_adminq(hw); if (err) dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", - __func__, err); + __func__, err); adapter->aq_pending = 0; adapter->aq_required = 0; @@ -1632,8 +1635,8 @@ static void i40evf_adminq_task(struct work_struct *work) if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) return; - event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) return; @@ -1645,13 +1648,9 @@ static void i40evf_adminq_task(struct work_struct *work) i40evf_virtchnl_completion(adapter, v_msg->v_opcode, v_msg->v_retval, event.msg_buf, - event.msg_size); - if (pending != 0) { - dev_info(&adapter->pdev->dev, - "%s: ARQ: Pending events %d\n", - __func__, pending); + event.msg_len); + if (pending != 0) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); - } } while (pending); /* check for error indications */ @@ -1705,10 +1704,9 @@ static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) if (adapter->tx_rings[i]->desc) i40evf_free_tx_resources(adapter->tx_rings[i]); - } /** @@ -1725,7 +1723,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter) { int i, err = 0; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < 
adapter->num_active_queues; i++) { adapter->tx_rings[i]->count = adapter->tx_desc_count; err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]); if (!err) @@ -1753,7 +1751,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) { int i, err = 0; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) { + for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i]->count = adapter->rx_desc_count; err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]); if (!err) @@ -1776,7 +1774,7 @@ static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) { int i; - for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) + for (i = 0; i < adapter->num_active_queues; i++) if (adapter->rx_rings[i]->desc) i40evf_free_rx_resources(adapter->rx_rings[i]); } @@ -1980,7 +1978,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) if ((rstat == I40E_VFR_VFACTIVE) || (rstat == I40E_VFR_COMPLETED)) return 0; - udelay(10); + usleep_range(10, 20); } return -EBUSY; } @@ -2022,7 +2020,7 @@ static void i40evf_init_task(struct work_struct *work) err = i40evf_check_reset_complete(hw); if (err) { dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", - err); + err); goto err; } hw->aq.num_arq_entries = I40EVF_AQ_LEN; @@ -2047,6 +2045,8 @@ static void i40evf_init_task(struct work_struct *work) case __I40EVF_INIT_VERSION_CHECK: if (!i40evf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); + i40evf_shutdown_adminq(hw); + adapter->state = __I40EVF_STARTUP; goto err; } @@ -2054,7 +2054,7 @@ static void i40evf_init_task(struct work_struct *work) err = i40evf_verify_api_ver(adapter); if (err) { dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n", - err); + err); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { dev_info(&pdev->dev, "Resending request\n"); err = i40evf_send_api_ver(adapter); @@ -2080,8 +2080,11 @@ static void i40evf_init_task(struct work_struct *work) goto err; } err = i40evf_get_vf_config(adapter); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto restart; + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { + dev_info(&pdev->dev, "Resending VF config request\n"); + err = i40evf_send_vf_config_msg(adapter); + goto err; + } if (err) { dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err); @@ -2138,7 +2141,7 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (NULL == f) + if (!f) goto err_sw_init; ether_addr_copy(f->macaddr, adapter->hw.mac.addr); @@ -2152,6 +2155,9 @@ static void i40evf_init_task(struct work_struct *work) adapter->watchdog_timer.data = (unsigned long)adapter; mod_timer(&adapter->watchdog_timer, jiffies + 1); + adapter->num_active_queues = min_t(int, + adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); adapter->tx_desc_count = I40EVF_DEFAULT_TXD; adapter->rx_desc_count = I40EVF_DEFAULT_RXD; err = i40evf_init_interrupt_scheme(adapter); @@ -2500,8 +2506,9 @@ static struct pci_driver i40evf_driver = { static int __init i40evf_init_module(void) { int ret; + pr_info("i40evf: %s - version %s\n", i40evf_driver_string, - i40evf_driver_version); + i40evf_driver_version); pr_info("%s\n", i40evf_copyright); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 66d12f5b4ca8..5fde5a7f4591 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -89,27 
+89,37 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter) struct i40e_virtchnl_version_info *pf_vvi; struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; + enum i40e_virtchnl_ops op; i40e_status err; - event.msg_size = I40EVF_MAX_AQ_BUF_SIZE; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = I40EVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; goto out; } - err = i40evf_clean_arq_element(hw, &event, NULL); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto out_alloc; + while (1) { + err = i40evf_clean_arq_element(hw, &event, NULL); + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. + */ + if (err) + goto out_alloc; + op = + (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == I40E_VIRTCHNL_OP_VERSION) + break; + } + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); if (err) goto out_alloc; - if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) != - I40E_VIRTCHNL_OP_VERSION) { + if (op != I40E_VIRTCHNL_OP_VERSION) { dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n", - le32_to_cpu(event.desc.cookie_high)); + op); err = -EIO; goto out_alloc; } @@ -153,42 +163,34 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - u16 len; + enum i40e_virtchnl_ops op; i40e_status err; + u16 len; len = sizeof(struct i40e_virtchnl_vf_resource) + I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); - event.msg_size = len; - event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL); + event.buf_len = len; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) { err = -ENOMEM; goto out; } - err = i40evf_clean_arq_element(hw, &event, NULL); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - goto out_alloc; - - err = (i40e_status)le32_to_cpu(event.desc.cookie_low); - if (err) { - dev_err(&adapter->pdev->dev, - "%s: Error returned from PF, %d, %d\n", __func__, - le32_to_cpu(event.desc.cookie_high), - le32_to_cpu(event.desc.cookie_low)); - err = -EIO; - goto out_alloc; + while (1) { + /* When the AQ is empty, i40evf_clean_arq_element will return + * nonzero and this loop will terminate. 
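/* Both i40evf_verify_api_ver() and i40evf_get_vf_config() now drain the
 * admin receive queue until the opcode they are waiting for shows up,
 * instead of failing when some unrelated event (e.g. a link-state
 * notification) arrives first. A sketch of the pattern with an
 * illustrative helper name; the types and the cookie_high/cookie_low
 * conventions are the ones used in this file.
 */
static i40e_status example_wait_for_op(struct i40e_hw *hw,
				       struct i40e_arq_event_info *event,
				       enum i40e_virtchnl_ops want)
{
	enum i40e_virtchnl_ops op;
	i40e_status err;

	do {
		/* returns nonzero once the queue is empty */
		err = i40evf_clean_arq_element(hw, event, NULL);
		if (err)
			return err;
		op = (enum i40e_virtchnl_ops)
			le32_to_cpu(event->desc.cookie_high);
	} while (op != want);

	/* the PF's return code travels in cookie_low */
	return (i40e_status)le32_to_cpu(event->desc.cookie_low);
}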
+ */ + err = i40evf_clean_arq_element(hw, &event, NULL); + if (err) + goto out_alloc; + op = + (enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + if (op == I40E_VIRTCHNL_OP_GET_VF_RESOURCES) + break; } - if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) != - I40E_VIRTCHNL_OP_GET_VF_RESOURCES) { - dev_err(&adapter->pdev->dev, - "%s: Invalid response from PF, %d, %d\n", __func__, - le32_to_cpu(event.desc.cookie_high), - le32_to_cpu(event.desc.cookie_low)); - err = -EIO; - goto out_alloc; - } - memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len)); + err = (i40e_status)le32_to_cpu(event.desc.cookie_low); + memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); i40e_vf_parse_hw_config(hw, adapter->vf_res); out_alloc: @@ -207,7 +209,7 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) { struct i40e_virtchnl_vsi_queue_config_info *vqci; struct i40e_virtchnl_queue_pair_info *vqpi; - int pairs = adapter->vsi_res->num_queue_pairs; + int pairs = adapter->num_active_queues; int i, len; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { @@ -273,7 +275,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; + vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES; @@ -299,7 +301,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter) } adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; vqs.vsi_id = adapter->vsi_res->vsi_id; - vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1; + vqs.tx_queues = (1 << adapter->num_active_queues) - 1; vqs.rx_queues = vqs.tx_queues; adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES; adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES; @@ -393,7 +395,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -454,7 +456,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) (count * sizeof(struct i40e_virtchnl_ether_addr)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_ether_addr_list)) / sizeof(struct i40e_virtchnl_ether_addr); @@ -516,7 +518,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -576,7 +578,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) (count * sizeof(u16)); if (len > I40EVF_MAX_AQ_BUF_SIZE) { dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n", - __func__); + __func__); count = (I40EVF_MAX_AQ_BUF_SIZE - sizeof(struct i40e_virtchnl_vlan_filter_list)) / sizeof(u16); @@ -635,6 +637,7 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) void 
i40evf_request_stats(struct i40evf_adapter *adapter) { struct i40e_virtchnl_queue_select vqs; + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* no error message, this isn't crucial */ return; @@ -709,19 +712,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, "%s: Unknown event %d from pf\n", __func__, vpe->event); break; - } return; } - if (v_opcode != adapter->current_op) { - dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n", - __func__, adapter->current_op, v_opcode); - /* We're probably completely screwed at this point, but clear - * the current op and try to carry on.... - */ - adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; - return; - } + if (v_opcode != adapter->current_op) + dev_info(&adapter->pdev->dev, "Pending op is %d, received %d\n", + adapter->current_op, v_opcode); if (v_retval) { dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n", __func__, v_retval, v_opcode); @@ -773,8 +769,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS); break; default: - dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n", - __func__, v_opcode); + dev_info(&adapter->pdev->dev, "Received unexpected message %d from PF\n", + v_opcode); break; } /* switch v_opcode */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 02cfd3b14762..d5673eb90c54 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2842,11 +2842,16 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev) return IGB_RETA_SIZE; } -static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) { struct igb_adapter *adapter = netdev_priv(netdev); int i; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; for (i = 0; i < IGB_RETA_SIZE; i++) indir[i] = adapter->rss_indir_tbl[i]; @@ -2889,13 +2894,20 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter) } static int igb_set_rxfh(struct net_device *netdev, const u32 *indir, - const u8 *key) + const u8 *key, const u8 hfunc) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; int i; u32 num_queues; + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + num_queues = adapter->rss_queues; switch (hw->mac.type) { diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b85880a6e4c4..2e526d4904a6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3373,14 +3373,11 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; u32 j, num_rx_queues; - static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741, - 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, - 0xA32DCB77, 0x0CF23080, 0x3BB7426A, - 0xFA01ACBE }; + u32 rss_key[10]; - /* Fill out hash function seeds */ + netdev_rss_key_fill(rss_key, sizeof(rss_key)); for (j = 0; j < 10; j++) - wr32(E1000_RSSRK(j), rsskey[j]); + wr32(E1000_RSSRK(j), rss_key[j]); num_rx_queues = adapter->rss_queues; @@ -5092,12 +5089,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, /* The minimum packet 
size with TCTL.PSP set is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); } @@ -6649,8 +6642,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, #endif /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IGB_RX_HDR_LEN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN); if (unlikely(!skb)) { rx_ring->rx_stats.alloc_failed++; return NULL; @@ -6851,14 +6843,9 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring, if (skb_is_nonlinear(skb)) igb_pull_tail(rx_ring, rx_desc, skb); - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } @@ -6993,7 +6980,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, return true; /* alloc new page for storage */ - page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL); + page = dev_alloc_page(); if (unlikely(!page)) { rx_ring->rx_stats.alloc_failed++; return false; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 055961b0f24b..aa87605b144a 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1963,7 +1963,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter, * this should improve performance for small packets with large amounts * of reassembly being done in the stack */ -static void ixgb_check_copybreak(struct net_device *netdev, +static void ixgb_check_copybreak(struct napi_struct *napi, struct ixgb_buffer *buffer_info, u32 length, struct sk_buff **skb) { @@ -1972,7 +1972,7 @@ static void ixgb_check_copybreak(struct net_device *netdev, if (length > copybreak) return; - new_skb = netdev_alloc_skb_ip_align(netdev, length); + new_skb = napi_alloc_skb(napi, length); if (!new_skb) return; @@ -2064,7 +2064,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) goto rxdesc_done; } - ixgb_check_copybreak(netdev, buffer_info, length, &skb); + ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb); /* Good Receive */ skb_put(skb, length); diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index be2989e60009..35e6fa643c7e 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5032a602d5c9..b6137be43920 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -300,16 +300,17 @@ enum ixgbe_ring_f_enum { RING_F_ARRAY_SIZE /* must be last in enum set */ }; -#define IXGBE_MAX_RSS_INDICES 
16 -#define IXGBE_MAX_VMDQ_INDICES 64 -#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ -#define IXGBE_MAX_FCOE_INDICES 8 -#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define IXGBE_MAX_L2A_QUEUES 4 -#define IXGBE_BAD_L2A_QUEUE 3 -#define IXGBE_MAX_MACVLANS 31 -#define IXGBE_MAX_DCBMACVLANS 8 +#define IXGBE_MAX_RSS_INDICES 16 +#define IXGBE_MAX_RSS_INDICES_X550 64 +#define IXGBE_MAX_VMDQ_INDICES 64 +#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +#define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_L2A_QUEUES 4 +#define IXGBE_BAD_L2A_QUEUE 3 +#define IXGBE_MAX_MACVLANS 31 +#define IXGBE_MAX_DCBMACVLANS 8 struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ @@ -553,11 +554,6 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; } -static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value) -{ - writel(value, ring->tail); -} - #define IXGBE_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) #define IXGBE_TX_DESC(R, i) \ @@ -769,6 +765,21 @@ struct ixgbe_adapter { unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ }; +static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) +{ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + return IXGBE_MAX_RSS_INDICES; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + return IXGBE_MAX_RSS_INDICES_X550; + default: + return 0; + } +} + struct ixgbe_fdir_filter { struct hlist_node fdir_node; union ixgbe_atr_input filter; @@ -804,11 +815,15 @@ enum ixgbe_boards { board_82598, board_82599, board_X540, + board_X550, + board_X550EM_x, }; extern struct ixgbe_info ixgbe_82598_info; extern struct ixgbe_info ixgbe_82599_info; extern struct ixgbe_info ixgbe_X540_info; +extern struct ixgbe_info ixgbe_X550_info; +extern struct ixgbe_info ixgbe_X550EM_x_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index b5f484bf3fda..9c66babd4edd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1625,7 +1625,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure **/ -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -1636,7 +1636,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (hw->eeprom.ops.read(hw, i, &word) != 0) { + if (hw->eeprom.ops.read(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); break; } @@ -1645,24 +1645,35 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) /* Include all data from pointers except for the fw pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - hw->eeprom.ops.read(hw, i, &pointer); + if (hw->eeprom.ops.read(hw, i, &pointer)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } + + /* If the pointer seems invalid */ + if (pointer == 0xFFFF || pointer == 0) + continue; + + if 
(hw->eeprom.ops.read(hw, pointer, &length)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; + } - /* Make sure the pointer seems valid */ - if (pointer != 0xFFFF && pointer != 0) { - hw->eeprom.ops.read(hw, pointer, &length); + if (length == 0xFFFF || length == 0) + continue; - if (length != 0xFFFF && length != 0) { - for (j = pointer+1; j <= pointer+length; j++) { - hw->eeprom.ops.read(hw, j, &word); - checksum += word; - } + for (j = pointer + 1; j <= pointer + length; j++) { + if (hw->eeprom.ops.read(hw, j, &word)) { + hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; } + checksum += word; } } checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return checksum; + return (s32)checksum; } /** @@ -1686,26 +1697,33 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } - if (status == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; - hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + checksum = (u16)(status & 0xffff); - /* - * Verify read checksum from EEPROM is the same as - * calculated checksum - */ - if (read_checksum != checksum) - status = IXGBE_ERR_EEPROM_CHECKSUM; - - /* If the user cares, return the calculated checksum */ - if (checksum_val) - *checksum_val = checksum; - } else { + status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) { hw_dbg(hw, "EEPROM read failed\n"); + return status; } + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + return status; } @@ -1724,15 +1742,19 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); - - if (status == 0) { - checksum = hw->eeprom.ops.calc_checksum(hw); - status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, - checksum); - } else { + if (status) { hw_dbg(hw, "EEPROM read failed\n"); + return status; } + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); + return status; } @@ -2469,7 +2491,7 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * Acquires the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr = 0; u32 swmask = mask; @@ -2514,7 +2536,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) * Releases the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr; u32 swmask = mask; @@ -2799,6 +2821,8 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; @@ -3192,17 +3216,27 @@ s32 
ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, *link_up = false; } - if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_10G_82599) - *speed = IXGBE_LINK_SPEED_10GB_FULL; - else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_1G_82599) + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; - else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_100_82599) - *speed = IXGBE_LINK_SPEED_100_FULL; - else + break; + case IXGBE_LINKS_SPEED_100_82599: + if ((hw->mac.type >= ixgbe_mac_X550) && + (links_reg & IXGBE_LINKS_SPEED_NON_STD)) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + else + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + default: *speed = IXGBE_LINK_SPEED_UNKNOWN; + } return 0; } @@ -3434,23 +3468,34 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. * * Communicates with the manageability block. On success return 0 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. **/ -static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length) +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, + bool return_data) { - u32 hicr, i, bi; + u32 hicr, i, bi, fwsts; u32 hdr_size = sizeof(struct ixgbe_hic_hdr); - u8 buf_len, dword_len; + u16 buf_len, dword_len; - if (length == 0 || length & 0x3 || - length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { - hw_dbg(hw, "Buffer length failure.\n"); + if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Set bit 9 of FWSTS clearing FW reset indication */ + fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); + IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); + /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if ((hicr & IXGBE_HICR_EN) == 0) { @@ -3458,7 +3503,12 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, return IXGBE_ERR_HOST_INTERFACE_COMMAND; } - /* Calculate length in DWORDs */ + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + hw_dbg(hw, "Buffer length failure, not aligned to dword"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + dword_len = length >> 2; /* @@ -3472,7 +3522,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* Setting this bit tells the ARC that a new command is pending. 
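/* ixgbe_host_interface_command() loses its static linkage here and
 * grows a caller-supplied timeout plus a return_data flag: firmware
 * replies are big endian and command specific, so callers that know
 * their reply layout can read the buffer back themselves. A
 * hypothetical caller, sketched under the assumption that only hdr.cmd
 * and hdr.buf_len need filling (checksum handling omitted); the opcode
 * and command struct are invented for illustration.
 */
struct example_hic_cmd {
	struct ixgbe_hic_hdr hdr;
	__be32 param;			/* 8 bytes total: dword aligned */
};

static s32 example_send_hic(struct ixgbe_hw *hw)
{
	struct example_hic_cmd cmd = { };

	cmd.hdr.cmd = 0x42;		/* hypothetical opcode */
	cmd.hdr.buf_len = sizeof(cmd) - sizeof(cmd.hdr);
	/* return_data=false: caller decodes any reply buffer itself */
	return ixgbe_host_interface_command(hw, (u32 *)&cmd, sizeof(cmd),
					    IXGBE_HI_COMMAND_TIMEOUT, false);
}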
*/ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); - for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) { + for (i = 0; i < timeout; i++) { hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_C)) break; @@ -3480,12 +3530,15 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, } /* Check command successful completion. */ - if (i == IXGBE_HI_COMMAND_TIMEOUT || + if ((timeout != 0 && i == timeout) || (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { hw_dbg(hw, "Command has failed with no status valid.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + if (!return_data) + return 0; + /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; @@ -3556,7 +3609,9 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, - sizeof(fw_cmd)); + sizeof(fw_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); if (ret_val != 0) continue; @@ -3583,7 +3638,8 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, **/ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) { - u32 gcr_ext, hlreg0; + u32 gcr_ext, hlreg0, i, poll; + u16 value; /* * If double reset is not requested then all transactions should @@ -3600,6 +3656,23 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + /* wait for a last completion before clearing buffers */ + IXGBE_WRITE_FLUSH(hw); + usleep_range(3000, 6000); + + /* Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { + usleep_range(100, 200); + value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); + if (ixgbe_removed(hw->hw_addr)) + break; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + break; + } + /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2ae5d4b8fc93..8cfadcb2676e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -64,7 +64,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); @@ -84,8 +84,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); -void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); @@ -110,6 +110,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 
ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, + u32 length, u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 48f35fc963f8..a507a6fe3624 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -286,6 +286,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, bwgid, ptype); case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, bwgid, ptype, prio_tc); default: @@ -302,6 +304,8 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) return ixgbe_dcb_config_pfc_82598(hw, pfc_en); case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); default: break; @@ -357,6 +361,8 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, @@ -385,6 +391,8 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_dcb_read_rtrup2tc_82599(hw, map); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 58a7f5312a96..2707bda37418 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -180,6 +180,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 0ae038b9af90..e5be0dd508de 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -511,6 +511,8 @@ static void ixgbe_get_regs(struct net_device *netdev, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; @@ -622,6 +624,8 @@ static void ixgbe_get_regs(struct net_device *netdev, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); for (i = 0; i < 8; i++) @@ -1406,6 +1410,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1644,6 +1650,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) switch (hw->mac.type) { case ixgbe_mac_82599EB: case 
ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); @@ -1680,6 +1688,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1733,12 +1743,16 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); - /* X540 needs to set the MACC.FLU bit to force link up */ - if (adapter->hw.mac.type == ixgbe_mac_X540) { + /* X540 and X550 need to set the MACC.FLU bit to force link up */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); - } else { + break; + default: if (hw->mac.orig_autoc) { reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); @@ -2776,7 +2790,14 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, /* if we changed something we need to update flags */ if (flags2 != adapter->flags2) { struct ixgbe_hw *hw = &adapter->hw; - u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + u32 mrqc; + unsigned int pf_pool = adapter->num_vfs; + + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); + else + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); if ((flags2 & UDP_RSS_FLAGS) && !(adapter->flags2 & UDP_RSS_FLAGS)) @@ -2799,7 +2820,11 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); + else + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } return 0; @@ -2833,6 +2858,8 @@ static int ixgbe_get_ts_info(struct net_device *dev, struct ixgbe_adapter *adapter = netdev_priv(dev); switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -2900,7 +2927,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) max_combined = IXGBE_MAX_FDIR_INDICES; } else { /* support up to 16 queues with RSS */ - max_combined = IXGBE_MAX_RSS_INDICES; + max_combined = ixgbe_max_rss_indices(adapter); } return max_combined; @@ -2948,6 +2975,7 @@ static int ixgbe_set_channels(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); unsigned int count = ch->combined_count; + u8 max_rss_indices = ixgbe_max_rss_indices(adapter); /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) @@ -2964,9 +2992,9 @@ static int ixgbe_set_channels(struct net_device *dev, /* update feature limits from largest to smallest supported values */ adapter->ring_feature[RING_F_FDIR].limit = count; - /* cap RSS limit at 16 */ - if (count > IXGBE_MAX_RSS_INDICES) - count = IXGBE_MAX_RSS_INDICES; + /* cap RSS limit */ + if (count > max_rss_indices) + count = max_rss_indices; 
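+ /* Editorial sketch, not part of the original patch: the body of + * ixgbe_max_rss_indices() is not shown in this hunk; it is assumed + * to read roughly as + * + *	if (adapter->hw.mac.type >= ixgbe_mac_X550) + *		return 63; + *	return IXGBE_MAX_RSS_INDICES; + * + * i.e. the old 16-queue cap is kept on pre-X550 MACs and only the + * X550 family, with its larger indirection table, goes higher (the + * value 63 is an assumption here). + */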
adapter->ring_feature[RING_F_RSS].limit = count; #ifdef IXGBE_FCOE diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ce40c77381e9..68e1e757ecef 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -126,6 +126,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cc51554c9e99..798b05556e1b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -42,6 +42,7 @@ #include <linux/slab.h> #include <net/checksum.h> #include <net/ip6_checksum.h> +#include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> @@ -50,6 +51,15 @@ #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> +#ifdef CONFIG_OF +#include <linux/of_net.h> +#endif + +#ifdef CONFIG_SPARC +#include <asm/idprom.h> +#include <asm/prom.h> +#endif + #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" @@ -65,15 +75,17 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "3.19.1-k" +#define DRV_VERSION "4.0.1-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2014 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { - [board_82598] = &ixgbe_82598_info, - [board_82599] = &ixgbe_82599_info, - [board_X540] = &ixgbe_X540_info, + [board_82598] = &ixgbe_82598_info, + [board_82599] = &ixgbe_82599_info, + [board_X540] = &ixgbe_X540_info, + [board_X550] = &ixgbe_X550_info, + [board_X550EM_x] = &ixgbe_X550EM_x_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -115,6 +127,9 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, /* required last entry */ {0, } }; @@ -835,6 +850,8 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -871,6 +888,8 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -1412,41 +1431,21 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } -static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) -{ - rx_ring->next_to_use = val; - - /* update next to alloc since we have filled the ring */ - rx_ring->next_to_alloc = val; - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. 
(Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - ixgbe_write_tail(rx_ring, val); -} - static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *bi) { struct page *page = bi->page; - dma_addr_t dma = bi->dma; + dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ - if (likely(dma)) + if (likely(page)) return true; /* alloc new page for storage */ - if (likely(!page)) { - page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, - bi->skb, ixgbe_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_rx_page_failed++; - return false; - } - bi->page = page; + page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; } /* map page for use */ @@ -1459,13 +1458,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, */ if (dma_mapping_error(rx_ring->dev, dma)) { __free_pages(page, ixgbe_rx_pg_order(rx_ring)); - bi->page = NULL; rx_ring->rx_stats.alloc_rx_page_failed++; return false; } bi->dma = dma; + bi->page = page; bi->page_offset = 0; return true; @@ -1509,16 +1508,28 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) i -= rx_ring->count; } - /* clear the hdr_addr for the next_to_use descriptor */ - rx_desc->read.hdr_addr = 0; + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.upper.status_error = 0; cleaned_count--; } while (cleaned_count); i += rx_ring->count; - if (rx_ring->next_to_use != i) - ixgbe_release_rx_desc(rx_ring, i); + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, rx_ring->tail); + } } static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, @@ -1763,14 +1774,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, return false; #endif - /* if skb_pad returns an error the skb was freed */ - if (unlikely(skb->len < 60)) { - int pad_len = 60 - skb->len; - - if (skb_pad(skb, pad_len)) - return true; - __skb_put(skb, pad_len); - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; return false; } @@ -1795,9 +1801,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; /* transfer page from old buffer to new buffer */ - new_buff->page = old_buff->page; - new_buff->dma = old_buff->dma; - new_buff->page_offset = old_buff->page_offset; + *new_buff = *old_buff; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, @@ -1806,6 +1810,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, DMA_FROM_DEVICE); } +static inline bool ixgbe_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + /** * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on @@ -1841,12 +1850,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - /* we can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(page) == numa_node_id())) + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!ixgbe_page_is_reserved(page))) return true; /* this page cannot be reused so discard it */ - put_page(page); + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); return false; } @@ -1854,7 +1863,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, rx_buffer->page_offset, size, truesize); /* avoid re-using remote pages */ - if (unlikely(page_to_nid(page) != numa_node_id())) + if (unlikely(ixgbe_page_is_reserved(page))) return false; #if (PAGE_SIZE < 8192) @@ -1864,22 +1873,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, /* flip page offset to other buffer */ rx_buffer->page_offset ^= truesize; - - /* Even if we own the page, we are not allowed to use atomic_set() - * This would break get_page_unless_zero() users. - */ - atomic_inc(&page->_count); #else /* move offset up to the next cache line */ rx_buffer->page_offset += truesize; if (rx_buffer->page_offset > last_offset) return false; - - /* bump ref count on page before it is given to the stack */ - get_page(page); #endif + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
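+ * (Editorial note: this atomic_inc() is what hands the stack its + * own reference; afterwards the driver and the network stack each + * hold one count on the page, so either side can drop its + * reference independently of the other.)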
+ */ + atomic_inc(&page->_count); + return true; } @@ -1907,8 +1913,8 @@ static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, #endif /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IXGBE_RX_HDR_SIZE); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, + IXGBE_RX_HDR_SIZE); if (unlikely(!skb)) { rx_ring->rx_stats.alloc_rx_buff_failed++; return NULL; @@ -1942,6 +1948,8 @@ dma_sync: rx_buffer->page_offset, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); + + rx_buffer->skb = NULL; } /* pull page into skb */ @@ -1959,8 +1967,6 @@ dma_sync: } /* clear contents of buffer_info */ - rx_buffer->skb = NULL; - rx_buffer->dma = 0; rx_buffer->page = NULL; return skb; @@ -2155,6 +2161,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2264,6 +2272,8 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2467,6 +2477,8 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -2493,6 +2505,8 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); @@ -2525,6 +2539,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, mask |= IXGBE_EIMS_GPI_SDP0; break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask |= IXGBE_EIMS_TS; break; default: @@ -2536,7 +2552,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_GPI_SDP1; mask |= IXGBE_EIMS_GPI_SDP2; + /* fall through */ case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_MAILBOX; break; @@ -2544,9 +2563,6 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, break; } - if (adapter->hw.mac.type == ixgbe_mac_X540) - mask |= IXGBE_EIMS_TIMESYNC; - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) mask |= IXGBE_EIMS_FLOW_DIR; @@ -2592,6 +2608,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -2811,6 +2829,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_sfp_event(adapter, eicr); /* Fall through */ case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -2905,6 +2925,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case 
ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -3190,16 +3212,14 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } -static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed) { struct ixgbe_hw *hw = &adapter->hw; - static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, - 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, - 0x6A3E67EA, 0x14364D17, 0x3BED200D}; - u32 mrqc = 0, reta = 0; - u32 rxcsum; + u32 reta = 0; int i, j; + int reta_entries = 128; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + int indices_multi; /* * Program table for at least 2 queues w/ SR-IOV so that VFs can @@ -3213,16 +3233,69 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + /* Fill out the redirection table as follows: + * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices + * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index + * X550: 512 (8 bit wide) entries containing 6 bit RSS index + */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + indices_multi = 0x11; + else + indices_multi = 0x1; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + reta_entries = 512; + default: + break; + } + /* Fill out redirection table */ - for (i = 0, j = 0; i < 128; i++, j++) { + for (i = 0, j = 0; i < reta_entries; i++, j++) { if (j == rss_i) j = 0; - /* reta = 4-byte sliding window of - * 0x00..(indices-1)(indices-1)00..etc. 
*/ - reta = (reta << 8) | (j * 0x11); + reta = (reta << 8) | (j * indices_multi); + if ((i & 3) == 3) { + if (i < 128) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + else + IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), + reta); + } + } +} + +static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vfreta = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + unsigned int pf_pool = adapter->num_vfs; + int i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]); + + /* Fill out the redirection table */ + for (i = 0, j = 0; i < 64; i++, j++) { + if (j == rss_i) + j = 0; + vfreta = (vfreta << 8) | j; if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + vfreta); } +} + +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 mrqc = 0, rss_field = 0, vfmrqc = 0; + u32 rss_key[10]; + u32 rxcsum; /* Disable indicating checksum in descriptor, enables RSS hash */ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); @@ -3255,17 +3328,35 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) } /* Perform hash on these packet types */ - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | - IXGBE_MRQC_RSS_FIELD_IPV4_TCP | - IXGBE_MRQC_RSS_FIELD_IPV6 | - IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 | + IXGBE_MRQC_RSS_FIELD_IPV4_TCP | + IXGBE_MRQC_RSS_FIELD_IPV6 | + IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + if ((hw->mac.type >= ixgbe_mac_X550) && + (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { + unsigned int pf_pool = adapter->num_vfs; + + /* Enable VF RSS mode */ + mrqc |= IXGBE_MRQC_MULTIPLE_RSS; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* Setup RSS through the VF registers */ + ixgbe_setup_vfreta(adapter, rss_key); + vfmrqc = IXGBE_MRQC_RSSEN; + vfmrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); + } else { + ixgbe_setup_reta(adapter, rss_key); + mrqc |= rss_field; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } } /** @@ -3534,6 +3625,8 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_82598EB: /* * For VMDq support of different descriptor types or @@ -3657,6 +3750,8 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -3691,6 +3786,8 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4112,6 +4209,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: + 
case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -4170,6 +4269,8 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -4308,29 +4409,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer; + struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; - rx_buffer = &rx_ring->rx_buffer_info[i]; if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; - if (IXGBE_CB(skb)->page_released) { + if (IXGBE_CB(skb)->page_released) dma_unmap_page(dev, IXGBE_CB(skb)->dma, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; - } dev_kfree_skb(skb); rx_buffer->skb = NULL; } - if (rx_buffer->dma) - dma_unmap_page(dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - rx_buffer->dma = 0; - if (rx_buffer->page) - __free_pages(rx_buffer->page, - ixgbe_rx_pg_order(rx_ring)); + + if (!rx_buffer->page) + continue; + + dma_unmap_page(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring)); + rx_buffer->page = NULL; } @@ -4606,6 +4704,8 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -4948,10 +5048,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); } - /* Disable the Tx DMA engine on 82599 and X540 */ + /* Disable the Tx DMA engine on 82599 and later MACs */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -5016,7 +5118,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) hw->subsystem_device_id = pdev->subsystem_device; /* Set common capability flags and settings */ - rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; @@ -5071,6 +5173,12 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550: +#ifdef CONFIG_IXGBE_DCA + adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; +#endif + break; default: break; } @@ -5086,6 +5194,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCB switch (hw->mac.type) { case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; break; @@ -5675,6 +5785,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -5806,6 +5918,8 @@ void ixgbe_update_stats(struct 
ixgbe_adapter *adapter) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -5819,7 +5933,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540)) { + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -5842,7 +5958,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); break; case ixgbe_mac_X540: - /* OS2BMC stats are X540 only*/ + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + /* OS2BMC stats are X540 and later */ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); @@ -6110,6 +6228,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) } break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: case ixgbe_mac_82599EB: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); @@ -6221,6 +6341,10 @@ static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) if (!adapter->num_vfs) return false; + /* resetting the PF is only needed for MACs before X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + return false; + for (i = 0; i < adapter->num_vfs; i++) { for (j = 0; j < q_per_pool; j++) { u32 h, t; @@ -6256,6 +6380,66 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) } } +#ifdef CONFIG_PCI_IOV +static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, + struct pci_dev *vfdev) +{ + if (!pci_wait_for_pending_transaction(vfdev)) + e_dev_warn("Issuing VFLR with pending transactions\n"); + + e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); + pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + + msleep(100); +} + +static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + u32 gpc; + int pos; + unsigned short vf_id; + + if (!(netif_carrier_ok(adapter->netdev))) + return; + + gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); + if (gpc) /* If incrementing then no need for the check below */ + return; + /* Check to see if a bad DMA write target from an errant or + * malicious VF has caused a PCIe error. If so then we can + * issue a VFLR to the offending VF(s) and then resume without + * requesting a full slot reset. 
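+ * + * (Editorial note: IXGBE_TXDGPC counts good transmitted packets; a + * non-zero read here means Tx DMA has made progress since the last + * poll, so there is no hang and the per-VF scan is skipped.)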
+ */ + + if (!pdev) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return; + + /* get the device ID for the VF */ + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + + /* check status reg for all VFs owned by this PF */ + vfdev = pci_get_device(pdev->vendor, vf_id, NULL); + while (vfdev) { + if (vfdev->is_virtfn && (vfdev->physfn == pdev)) { + u16 status_reg; + + pci_read_config_word(vfdev, PCI_STATUS, &status_reg); + if (status_reg & PCI_STATUS_REC_MASTER_ABORT) + /* issue VFLR */ + ixgbe_issue_vf_flr(adapter, vfdev); + } + + vfdev = pci_get_device(pdev->vendor, vf_id, vfdev); + } +} + static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) { u32 ssvpc; @@ -6276,6 +6460,17 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) e_warn(drv, "%u Spoofed packets detected\n", ssvpc); } +#else +static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) +{ +} + +static void +ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) +{ +} +#endif /* CONFIG_PCI_IOV */ + /** * ixgbe_watchdog_subtask - check and bring link up @@ -6296,6 +6491,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) else ixgbe_watchdog_link_is_down(adapter); + ixgbe_check_for_bad_vf(adapter); ixgbe_spoof_check(adapter); ixgbe_update_stats(adapter); @@ -6407,51 +6603,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); } -#ifdef CONFIG_PCI_IOV -static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) -{ - int vf; - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - u32 gpc; - u32 ciaa, ciad; - - gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); - if (gpc) /* If incrementing then no need for the check below */ - return; - /* - * Check to see if a bad DMA write target from an errant or - * malicious VF has caused a PCIe error. If so then we can - * issue a VFLR to the offending VF(s) and then resume without - * requesting a full slot reset. 
- */ - - for (vf = 0; vf < adapter->num_vfs; vf++) { - ciaa = (vf << 16) | 0x80000000; - /* 32 bit read so align, we really want status at offset 6 */ - ciaa |= PCI_COMMAND; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599); - ciaa &= 0x7FFFFFFF; - /* disable debug mode asap after reading data */ - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - /* Get the upper 16 bits which will be the PCI status reg */ - ciad >>= 16; - if (ciad & PCI_STATUS_REC_MASTER_ABORT) { - netdev_err(netdev, "VF %d Hung DMA\n", vf); - /* Issue VFLR */ - ciaa = (vf << 16) | 0x80000000; - ciaa |= 0xA8; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - ciad = 0x00008000; /* VFLR */ - IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad); - ciaa &= 0x7FFFFFFF; - IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); - } - } -} - -#endif /** * ixgbe_service_timer - Timer Call-back * @data: pointer to adapter cast into an unsigned long @@ -6460,7 +6611,6 @@ static void ixgbe_service_timer(unsigned long data) { struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; unsigned long next_event_offset; - bool ready = true; /* poll faster when waiting for link */ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) @@ -6468,32 +6618,10 @@ static void ixgbe_service_timer(unsigned long data) else next_event_offset = HZ * 2; -#ifdef CONFIG_PCI_IOV - /* - * don't bother with SR-IOV VF DMA hang check if there are - * no VFs or the link is down - */ - if (!adapter->num_vfs || - (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) - goto normal_timer_service; - - /* If we have VFs allocated then we must check for DMA hangs */ - ixgbe_check_for_bad_vf(adapter); - next_event_offset = HZ / 50; - adapter->timer_event_accumulator++; - - if (adapter->timer_event_accumulator >= 100) - adapter->timer_event_accumulator = 0; - else - ready = false; - -normal_timer_service: -#endif /* Reset the timer */ mod_timer(&adapter->service_timer, next_event_offset + jiffies); - if (ready) - ixgbe_service_event_schedule(adapter); + ixgbe_service_event_schedule(adapter); } static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) @@ -6898,8 +7026,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { - /* notify HW of packet */ - ixgbe_write_tail(tx_ring, i); + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); } return; @@ -7197,12 +7329,8 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, * The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) - return NETDEV_TX_OK; - skb->len = 17; - skb_set_tail_pointer(skb, 17); - } + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; tx_ring = ring ? 
ring : adapter->tx_ring[skb->queue_mapping]; @@ -7646,7 +7774,7 @@ static int ixgbe_set_features(struct net_device *netdev, static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, + const unsigned char *addr, u16 vid, u16 flags) { /* guarantee we can provide a unique filter for the unicast address */ @@ -7655,7 +7783,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return -ENOMEM; } - return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } static int ixgbe_ndo_bridge_setlink(struct net_device *dev, @@ -7716,7 +7844,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, else mode = BRIDGE_MODE_VEPA; - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) @@ -7965,6 +8093,29 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, } /** + * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM + * @adapter: Pointer to adapter struct + */ +static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_OF + struct device_node *dp = pci_device_to_OF_node(adapter->pdev); + struct ixgbe_hw *hw = &adapter->hw; + const unsigned char *addr; + + addr = of_get_mac_address(dp); + if (addr) { + ether_addr_copy(hw->mac.perm_addr, addr); + return; + } +#endif /* CONFIG_OF */ + +#ifdef CONFIG_SPARC + ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr); +#endif /* CONFIG_SPARC */ +} + +/** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbe_pci_tbl @@ -8046,7 +8197,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); adapter = netdev_priv(netdev); - pci_set_drvdata(pdev, adapter); adapter->netdev = netdev; adapter->pdev = pdev; @@ -8104,6 +8254,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: @@ -8167,6 +8319,8 @@ skip_sriov: switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: netdev->features |= NETIF_F_SCTP_CSUM; netdev->hw_features |= NETIF_F_SCTP_CSUM | NETIF_F_NTUPLE; @@ -8229,6 +8383,8 @@ skip_sriov: goto err_sw_init; } + ixgbe_get_platform_mac_addr(adapter); + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) { @@ -8320,6 +8476,8 @@ skip_sriov: if (err) goto err_register; + pci_set_drvdata(pdev, adapter); + /* power down the optics for 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); @@ -8399,9 +8557,14 @@ err_dma: static void ixgbe_remove(struct pci_dev *pdev) { struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; + struct net_device *netdev; bool disable_dev; + /* if !adapter then we already cleaned up in probe */ + if (!adapter) + return; + + netdev = adapter->netdev; ixgbe_dbg_adapter_exit(adapter); set_bit(__IXGBE_REMOVING, &adapter->state); @@ -8523,6 +8686,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_X540: 
device_id = IXGBE_X540_VF_DEVICE_ID; break; + case ixgbe_mac_X550: + device_id = IXGBE_DEV_ID_X550_VF; + break; + case ixgbe_mac_X550EM_x: + device_id = IXGBE_DEV_ID_X550EM_X_VF; + break; default: device_id = 0; break; @@ -8542,8 +8711,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, * VFLR. Just clean up the AER in that case. */ if (vfdev) { - e_dev_err("Issuing VFLR to VF %d\n", vf); - pci_write_config_dword(vfdev, 0xA8, 0x00008000); + ixgbe_issue_vf_flr(adapter, vfdev); /* Free device reference count */ pci_dev_put(vfdev); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index cc8f0128286c..9993a471d668 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -305,6 +305,8 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); break; case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: @@ -426,6 +428,8 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) struct ixgbe_mbx_info *mbx = &hw->mbx; if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_X540) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 28b81ae09b5a..8a2be444113d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -43,13 +43,195 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); -static bool ixgbe_get_i2c_data(u32 *i2cctl); +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); /** + * ixgbe_out_i2c_byte_ack - Send I2C byte with ack + * @hw: pointer to the hardware structure + * @byte: byte to send + * + * Returns an error code on error. + **/ +static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) +{ + s32 status; + + status = ixgbe_clock_out_i2c_byte(hw, byte); + if (status) + return status; + return ixgbe_get_i2c_ack(hw); +} + +/** + * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack + * @hw: pointer to the hardware structure + * @byte: pointer to a u8 to receive the byte + * + * Returns an error code on error. + **/ +static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) +{ + s32 status; + + status = ixgbe_clock_in_i2c_byte(hw, byte); + if (status) + return status; + /* ACK */ + return ixgbe_clock_out_i2c_bit(hw, false); +} + +/** + * ixgbe_ones_comp_byte_add - Perform one's complement addition + * @add1: addend 1 + * @add2: addend 2 + * + * Returns one's complement 8-bit sum. 
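+ * + * Worked trace (editorial): add1 = 0xFF and add2 = 0x02 give + * sum = 0x101; folding the carry back in yields 0x01 + 0x01 = 0x02, + * the one's complement sum.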
+ **/ +static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) +{ + u16 sum = add1 + add2; + + sum = (sum & 0xFF) + (sum >> 8); + return sum & 0xFF; +} + +/** + * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to read from + * @reg: I2C device register to read from + * @val: pointer to location to receive read value + * + * Returns an error code on error. + **/ +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + int max_retry = 10; + int retry = 0; + u8 csum_byte; + u8 high_bits; + u8 low_bits; + u8 reg_high; + u8 csum; + + reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ~csum; + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) + return IXGBE_ERR_SWFW_SYNC; + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + /* Re-start condition */ + ixgbe_i2c_start(hw); + /* Device Address and read indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) + goto fail; + /* Get upper bits */ + if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) + goto fail; + /* Get low bits */ + if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) + goto fail; + /* Get csum */ + if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) + goto fail; + /* NACK */ + if (ixgbe_clock_out_i2c_bit(hw, false)) + goto fail; + ixgbe_i2c_stop(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + *val = (high_bits << 8) | low_bits; + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte read combined error - Retry.\n"); + else + hw_dbg(hw, "I2C byte read combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** + * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation + * @hw: pointer to the hardware structure + * @addr: I2C bus address to write to + * @reg: I2C device register to write to + * @val: value to write + * + * Returns an error code on error. 
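+ * + * Worked checksum trace (editorial, borrowing the CS4227 constants + * defined later in this series): addr = IXGBE_CS4227, reg = 0x12B0, + * val = 0x0004 gives reg_high = (0x12B0 >> 7) & 0xFE = 0x24; csum + * then accumulates 0x24 + 0xB0 = 0xD4, + 0x00 = 0xD4, + 0x04 = 0xD8, + * and the byte sent on the wire is ~0xD8 = 0x27.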
+ **/ +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, + u8 addr, u16 reg, u16 val) +{ + int max_retry = 1; + int retry = 0; + u8 reg_high; + u8 csum; + + reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ + csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); + csum = ixgbe_ones_comp_byte_add(csum, val >> 8); + csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); + csum = ~csum; + do { + ixgbe_i2c_start(hw); + /* Device Address and write indication */ + if (ixgbe_out_i2c_byte_ack(hw, addr)) + goto fail; + /* Write bits 14:8 */ + if (ixgbe_out_i2c_byte_ack(hw, reg_high)) + goto fail; + /* Write bits 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) + goto fail; + /* Write data 15:8 */ + if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) + goto fail; + /* Write data 7:0 */ + if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) + goto fail; + /* Write csum */ + if (ixgbe_out_i2c_byte_ack(hw, csum)) + goto fail; + ixgbe_i2c_stop(hw); + return 0; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + hw_dbg(hw, "I2C byte write combined error - Retry.\n"); + else + hw_dbg(hw, "I2C byte write combined error.\n"); + } while (retry < max_retry); + + return IXGBE_ERR_I2C; +} + +/** * ixgbe_identify_phy_generic - Get physical layer module * @hw: pointer to hardware structure * @@ -60,6 +242,15 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) u32 phy_addr; u16 ext_ability = 0; + if (!hw->phy.phy_semaphore_mask) { + hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) & + IXGBE_STATUS_LAN_ID_1; + if (hw->phy.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + } + if (hw->phy.type == ixgbe_phy_unknown) { for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { hw->phy.mdio.prtad = phy_addr; @@ -315,12 +506,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { s32 status; - u16 gssr; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - gssr = IXGBE_GSSR_PHY1_SM; - else - gssr = IXGBE_GSSR_PHY0_SM; + u32 gssr = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, @@ -418,7 +604,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { s32 status; - u16 gssr; + u32 gssr; if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) gssr = IXGBE_GSSR_PHY1_SM; else gssr = IXGBE_GSSR_PHY0_SM; @@ -576,6 +762,10 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, *speed |= IXGBE_LINK_SPEED_100_FULL; } + /* Internal PHY does not support 100 Mbps */ + if (hw->mac.type == ixgbe_mac_X550EM_x) + *speed &= ~IXGBE_LINK_SPEED_100_FULL; + return status; } @@ -632,6 +822,9 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * @hw: pointer to hardware structure * * Restart autonegotiation and PHY and waits for completion. + * This function always returns success; this is necessary since + * it is called via a function pointer that could call other + * functions that could return an error. 
**/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { @@ -1462,15 +1655,10 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, s32 status; u32 max_retry = 10; u32 retry = 0; - u16 swfw_mask = 0; + u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = true; *data = 0; - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; - do { if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; @@ -1548,12 +1736,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, s32 status; u32 max_retry = 1; u32 retry = 0; - u16 swfw_mask = 0; - - if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) - swfw_mask = IXGBE_GSSR_PHY1_SM; - else - swfw_mask = IXGBE_GSSR_PHY0_SM; + u32 swfw_mask = hw->phy.phy_semaphore_mask; if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; @@ -1610,7 +1793,7 @@ fail: **/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -1639,7 +1822,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) **/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); @@ -1697,9 +1880,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) } /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - i2cctl |= IXGBE_I2C_DATA_OUT; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; @@ -1715,7 +1898,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { s32 status = 0; u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 timeout = 10; bool ack = true; @@ -1728,8 +1911,8 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) /* Poll for ACK. 
Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - ack = ixgbe_get_i2c_data(&i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + ack = ixgbe_get_i2c_data(hw, &i2cctl); udelay(1); if (ack == 0) @@ -1758,15 +1941,15 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) **/ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - *data = ixgbe_get_i2c_data(&i2cctl); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); @@ -1786,7 +1969,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); status = ixgbe_set_i2c_data(hw, &i2cctl, data); if (status == 0) { @@ -1822,14 +2005,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) u32 i2cctl_r = 0; for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT; - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ udelay(IXGBE_I2C_T_RISE); - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (i2cctl_r & IXGBE_I2C_CLK_IN) + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) break; } } @@ -1844,9 +2027,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - *i2cctl &= ~IXGBE_I2C_CLK_OUT; + *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ @@ -1864,19 +2047,19 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT; + *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); else - *i2cctl &= ~IXGBE_I2C_DATA_OUT; + *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); - if (data != ixgbe_get_i2c_data(i2cctl)) { + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + if (data != ixgbe_get_i2c_data(hw, i2cctl)) { hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); return IXGBE_ERR_I2C; } @@ -1891,9 +2074,9 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) * * Returns the I2C data bit value **/ -static bool ixgbe_get_i2c_data(u32 *i2cctl) +static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { - if (*i2cctl & IXGBE_I2C_DATA_IN) + if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) return true; return false; } @@ -1907,7 +2090,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl) **/ static void 
ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 i; ixgbe_i2c_start(hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 54071ed17e3b..434643881287 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -77,6 +77,11 @@ #define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 +#define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */ +#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 +#define IXGBE_CS4227_EDC_MODE_SR 0x0004 + /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 #define IXGBE_TAF_ASM_PAUSE 0x800 @@ -110,7 +115,6 @@ /* SFP+ SFF-8472 Compliance code */ #define IXGBE_SFF_SFF_8472_UNSUP 0x00 -s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, @@ -157,4 +161,8 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data); s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 *val); +s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, + u16 reg, u16 val); #endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 97c85b859536..c76ba90ecc6e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -221,7 +221,8 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); + rss = min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); } else { rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); } @@ -618,6 +619,27 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) return 0; } +static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, + u32 qde) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + int i; + + for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { + u32 reg; + + /* flush previous write */ + IXGBE_WRITE_FLUSH(hw); + + /* indicate to hardware that we want to set drop enable */ + reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; + reg |= i << IXGBE_QDE_IDX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); + } +} + static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; @@ -647,15 +669,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); /* force drop enable for all VF Rx queues */ - for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { - /* flush previous write */ - IXGBE_WRITE_FLUSH(hw); - - /* indicate to hardware that we want to set drop enable */ - reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; - reg |= i << IXGBE_QDE_IDX_SHIFT; - 
IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); - } + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); @@ -1079,52 +1093,86 @@ int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return ixgbe_set_vf_mac(adapter, vf, mac); } +static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err = 0; + + if (adapter->vfinfo[vf].pf_vlan) + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, + vf); + if (err) + goto out; + ixgbe_set_vmvir(adapter, vlan, qos, vf); + ixgbe_set_vmolr(hw, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + adapter->vfinfo[vf].vlan_count++; + + /* enable hide vlan on X550 */ + if (hw->mac.type >= ixgbe_mac_X550) + ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | + IXGBE_QDE_HIDE_VLAN); + + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + +out: + return err; +} + +static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + + err = ixgbe_set_vf_vlan(adapter, false, + adapter->vfinfo[vf].pf_vlan, vf); + ixgbe_clear_vmvir(adapter, vf); + ixgbe_set_vmolr(hw, vf, true); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); + if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + + return err; +} + int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) return -EINVAL; if (vlan || qos) { + /* Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
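+ * + * For example: + *	ip link set dev <pf> vf 0 vlan 100 qos 3 + *	ip link set dev <pf> vf 0 vlan 200 + * must now replace the first port VLAN with the second, with no + * intervening "vlan 0" needed to clear it.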
+ */ if (adapter->vfinfo[vf].pf_vlan) - err = ixgbe_set_vf_vlan(adapter, false, - adapter->vfinfo[vf].pf_vlan, - vf); - if (err) - goto out; - err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); + err = ixgbe_disable_port_vlan(adapter, vf); if (err) goto out; - ixgbe_set_vmvir(adapter, vlan, qos, vf); - ixgbe_set_vmolr(hw, vf, false); - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - adapter->vfinfo[vf].vlan_count++; - adapter->vfinfo[vf].pf_vlan = vlan; - adapter->vfinfo[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IXGBE_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set," - " but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before" - " attempting to use the VF device.\n"); - } + err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); } else { - err = ixgbe_set_vf_vlan(adapter, false, - adapter->vfinfo[vf].pf_vlan, vf); - ixgbe_clear_vmvir(adapter, vf); - ixgbe_set_vmolr(hw, vf, true); - hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); - if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - adapter->vfinfo[vf].pf_vlan = 0; - adapter->vfinfo[vf].pf_qos = 0; + err = ixgbe_disable_port_vlan(adapter, vf); } + out: return err; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index dfd55d83bc03..d101b25dc4b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -74,9 +74,22 @@ #define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 #define IXGBE_DEV_ID_X540T1 0x1560 +#define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA +#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB +#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC +#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD +#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 /* General Registers */ #define IXGBE_CTRL 0x00000 @@ -84,7 +97,8 @@ #define IXGBE_CTRL_EXT 0x00018 #define IXGBE_ESDP 0x00020 #define IXGBE_EODSDP 0x00028 -#define IXGBE_I2CCTL 0x00028 +#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? \ + 0x15F5C : 0x00028)) #define IXGBE_LEDCTL 0x00200 #define IXGBE_FRTIMER 0x00048 #define IXGBE_TCPTIMER 0x0004C @@ -112,10 +126,14 @@ #define IXGBE_VPDDIAG1 0x10208 /* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN 0x00000001 -#define IXGBE_I2C_CLK_OUT 0x00000002 -#define IXGBE_I2C_DATA_IN 0x00000004 -#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00004000 : 0x00000001) +#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00000200 : 0x00000002) +#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ + 0x00001000 : 0x00000004) +#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? 
\ + 0x00000400 : 0x00000008) #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 @@ -290,8 +308,17 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_IMIRVP 0x05AC0 #define IXGBE_VMD_CTL 0x0581C #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ +/* Registers for setting up RSS on X550 with SRIOV + * _p - pool number (0..63) + * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) + */ +#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) +#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) +#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + /* Flow Director registers */ #define IXGBE_FDIRCTRL 0x0EE00 #define IXGBE_FDIRHKEY 0x0EE68 @@ -725,6 +752,24 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_LDPCECL 0x0E820 #define IXGBE_LDPCECH 0x0E821 +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register */ +#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ + +#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ + +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s F Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ + /* Management */ #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -798,6 +843,12 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_PBACLR_82599 0x11068 #define IXGBE_CIAA_82599 0x11088 #define IXGBE_CIAD_82599 0x1108C +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \ + IXGBE_CIAA_X550 : IXGBE_CIAA_82599)) +#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? 
\ + IXGBE_CIAD_X550 : IXGBE_CIAD_82599)) #define IXGBE_PICAUSE 0x110B0 #define IXGBE_PIENA 0x110B8 #define IXGBE_CDQ_MBR_82599 0x110B4 @@ -1120,6 +1171,13 @@ struct ixgbe_thermal_sensor_data { /* MDIO definitions */ +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ @@ -1129,9 +1187,23 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ -#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ + +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ +#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ +#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ +#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ + +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */ +#define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */ +#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */ /* MII clause 22/28 definitions */ #define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ @@ -1632,6 +1704,7 @@ enum { #define IXGBE_LINKS_TL_FAULT 0x00001000 #define IXGBE_LINKS_SIGNAL 0x00000F00 +#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 #define IXGBE_LINKS_SPEED_82599 0x30000000 #define IXGBE_LINKS_SPEED_10G_82599 0x30000000 #define IXGBE_LINKS_SPEED_1G_82599 0x20000000 @@ -1674,12 +1747,14 @@ enum { #define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ /* SW_FW_SYNC/GSSR definitions */ -#define IXGBE_GSSR_EEP_SM 0x0001 -#define IXGBE_GSSR_PHY0_SM 0x0002 -#define IXGBE_GSSR_PHY1_SM 0x0004 -#define IXGBE_GSSR_MAC_CSR_SM 0x0008 -#define IXGBE_GSSR_FLASH_SM 0x0010 -#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ +#define IXGBE_GSSR_I2C_MASK 0x1800 /* FW Status register bitmask */ #define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ @@ -1713,27 +1788,32 @@ enum { #define IXGBE_PBANUM_LENGTH 11 /* Checksum and EEPROM pointers */ -#define IXGBE_PBANUM_PTR_GUARD 0xFAFA -#define IXGBE_EEPROM_CHECKSUM 0x3F -#define IXGBE_EEPROM_SUM 0xBABA -#define IXGBE_PCIE_ANALOG_PTR 0x03 -#define 
IXGBE_ATLAS0_CONFIG_PTR 0x04 -#define IXGBE_PHY_PTR 0x04 -#define IXGBE_ATLAS1_CONFIG_PTR 0x05 -#define IXGBE_OPTION_ROM_PTR 0x05 -#define IXGBE_PCIE_GENERAL_PTR 0x06 -#define IXGBE_PCIE_CONFIG0_PTR 0x07 -#define IXGBE_PCIE_CONFIG1_PTR 0x08 -#define IXGBE_CORE0_PTR 0x09 -#define IXGBE_CORE1_PTR 0x0A -#define IXGBE_MAC0_PTR 0x0B -#define IXGBE_MAC1_PTR 0x0C -#define IXGBE_CSR0_CONFIG_PTR 0x0D -#define IXGBE_CSR1_CONFIG_PTR 0x0E -#define IXGBE_FW_PTR 0x0F -#define IXGBE_PBANUM0_PTR 0x15 -#define IXGBE_PBANUM1_PTR 0x16 -#define IXGBE_FREE_SPACE_PTR 0X3E +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 +#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 +#define IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 +#define IXGBE_PCIE_CONFIG_SIZE 0x08 +#define IXGBE_EEPROM_LAST_WORD 0x41 +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_FREE_SPACE_PTR 0X3E /* External Thermal Sensor Config */ #define IXGBE_ETS_CFG 0x26 @@ -1994,12 +2074,14 @@ enum { #define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 #define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 #define IXGBE_MRQC_L3L4TXSWEN 0x00008000 #define IXGBE_FWSM_TS_ENABLED 0x1 /* Queue Drop Enable */ #define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_HIDE_VLAN 0x00000002 #define IXGBE_QDE_IDX_MASK 0x00007F00 #define IXGBE_QDE_IDX_SHIFT 8 #define IXGBE_QDE_WRITE 0x00010000 @@ -2289,18 +2371,32 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIR_DROP_QUEUE 127 /* Manageablility Host Interface defines */ -#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ -#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ -#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ +#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ +#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ +#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ /* CEM Support */ -#define FW_CEM_HDR_LEN 0x4 -#define FW_CEM_CMD_DRIVER_INFO 0xDD -#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 -#define FW_CEM_CMD_RESERVED 0x0 -#define FW_CEM_UNUSED_VER 0x0 -#define FW_CEM_MAX_RETRIES 3 -#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0x0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_READ_SHADOW_RAM_CMD 0x31 +#define FW_READ_SHADOW_RAM_LEN 0x6 +#define FW_WRITE_SHADOW_RAM_CMD 0x33 +#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ +#define FW_SHADOW_RAM_DUMP_CMD 0x36 +#define 
FW_SHADOW_RAM_DUMP_LEN 0 +#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ +#define FW_NVM_DATA_OFFSET 3 +#define FW_MAX_READ_BUFFER_SIZE 1024 +#define FW_DISABLE_RXEN_CMD 0xDE +#define FW_DISABLE_RXEN_LEN 0x1 /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2313,6 +2409,25 @@ struct ixgbe_hic_hdr { u8 checksum; }; +struct ixgbe_hic_hdr2_req { + u8 cmd; + u8 buf_lenh; + u8 buf_lenl; + u8 checksum; +}; + +struct ixgbe_hic_hdr2_rsp { + u8 cmd; + u8 buf_lenl; + u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ + u8 checksum; +}; + +union ixgbe_hic_hdr2 { + struct ixgbe_hic_hdr2_req req; + struct ixgbe_hic_hdr2_rsp rsp; +}; + struct ixgbe_hic_drv_info { struct ixgbe_hic_hdr hdr; u8 port_num; @@ -2324,6 +2439,32 @@ struct ixgbe_hic_drv_info { u16 pad2; /* end spacing to ensure length is mult. of dword2 */ }; +/* These need to be dword aligned */ +struct ixgbe_hic_read_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_write_shadow_ram { + union ixgbe_hic_hdr2 hdr; + u32 address; + u16 length; + u16 pad2; + u16 data; + u16 pad3; +}; + +struct ixgbe_hic_disable_rxen { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 pad2; + u16 pad3; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { @@ -2437,10 +2578,12 @@ struct ixgbe_adv_tx_context_desc { typedef u32 ixgbe_autoneg_advertised; /* Link speed */ typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_UNKNOWN 0 -#define IXGBE_LINK_SPEED_100_FULL 0x0008 -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 +#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ IXGBE_LINK_SPEED_10GB_FULL) #define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ @@ -2588,6 +2731,8 @@ enum ixgbe_mac_type { ixgbe_mac_82598EB, ixgbe_mac_82599EB, ixgbe_mac_X540, + ixgbe_mac_X550, + ixgbe_mac_X550EM_x, ixgbe_num_macs }; @@ -2596,6 +2741,9 @@ enum ixgbe_phy_type { ixgbe_phy_none, ixgbe_phy_tn, ixgbe_phy_aq, + ixgbe_phy_x550em_kr, + ixgbe_phy_x550em_kx4, + ixgbe_phy_x550em_ext_t, ixgbe_phy_cu_unknown, ixgbe_phy_qt, ixgbe_phy_xaui, @@ -2839,7 +2987,7 @@ struct ixgbe_eeprom_operations { s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); s32 (*update_checksum)(struct ixgbe_hw *); - u16 (*calc_checksum)(struct ixgbe_hw *); + s32 (*calc_checksum)(struct ixgbe_hw *); }; struct ixgbe_mac_operations { @@ -2861,8 +3009,8 @@ struct ixgbe_mac_operations { s32 (*disable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); - s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); - void (*release_swfw_sync)(struct ixgbe_hw *, u16); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + void (*release_swfw_sync)(struct ixgbe_hw *, u32); s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); @@ -2908,6 +3056,11 @@ struct ixgbe_mac_operations { s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + + /* DMA Coalescing */ + s32 (*dmac_config)(struct ixgbe_hw 
*hw); + s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); + s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); }; struct ixgbe_phy_operations { @@ -2920,6 +3073,7 @@ struct ixgbe_phy_operations { s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_internal_link)(struct ixgbe_hw *); s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); @@ -2928,6 +3082,8 @@ struct ixgbe_phy_operations { s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); s32 (*check_overtemp)(struct ixgbe_hw *); }; @@ -2980,6 +3136,8 @@ struct ixgbe_phy_info { bool sfp_setup_needed; u32 revision; enum ixgbe_media_type media_type; + u8 lan_id; + u32 phy_semaphore_mask; bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; enum ixgbe_smart_speed smart_speed; @@ -3086,4 +3244,71 @@ struct ixgbe_info { #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010)) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C)) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634)) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638)) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00)) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00)) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520)) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? 
(0x5A00) : (0x9A00)) + +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) + +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) + +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) + +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) + +#define IXGBE_KX4_LINK_CNTL_1 0x4C +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31) + +#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 +#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 + +#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 +#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 +#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ + (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 +#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ + (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 +#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 +#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 +#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 +#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 +#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 + #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index e88305d5d18d..ba54ff07b438 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -32,6 +32,7 @@ #include "ixgbe.h" #include "ixgbe_phy.h" +#include "ixgbe_x540.h" #define IXGBE_X540_MAX_TX_QUEUES 128 #define IXGBE_X540_MAX_RX_QUEUES 128 @@ -42,17 +43,15 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); -static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); -static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); -static enum ixgbe_media_type 
ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) { return ixgbe_media_type_copper; } -static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -76,9 +75,8 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed **/ -static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); @@ -92,7 +90,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ -static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { s32 status; u32 ctrl, i; @@ -179,7 +177,7 @@ mac_reset_top: * and the generation start_hw function. * Then performs revision-specific operations, if any. **/ -static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) { s32 ret_val; @@ -197,7 +195,7 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ -static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; @@ -316,7 +314,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, * * @hw: pointer to hardware structure **/ -static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -324,6 +322,8 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) u16 length = 0; u16 pointer = 0; u16 word = 0; + u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM; + u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; /* * Do not use hw->eeprom.ops.read because we do not want to take @@ -332,10 +332,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) */ /* Include 0x0-0x3F in the checksum */ - for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { + for (i = 0; i < checksum_last_word; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -344,11 +344,11 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) * Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
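The u16-to-s32 return type change is the heart of this hunk: a failed EEPROM read used to be folded silently into a bogus checksum, whereas it can now surface as a negative error code while a valid checksum rides in the low 16 bits. A minimal sketch of the caller convention this establishes (mirrored by the validate and update paths below):

	s32 status;
	u16 checksum;

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;		/* e.g. IXGBE_ERR_EEPROM */
	checksum = (u16)(status & 0xffff);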
*/ - for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + for (i = ptr_start; i < IXGBE_FW_PTR; i++) { if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) continue; - if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { + if (ixgbe_read_eerd_generic(hw, i, &pointer)) { hw_dbg(hw, "EEPROM read failed\n"); break; } @@ -358,8 +358,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) pointer >= hw->eeprom.word_size) continue; - if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { + if (ixgbe_read_eerd_generic(hw, pointer, &length)) { hw_dbg(hw, "EEPROM read failed\n"); + return IXGBE_ERR_EEPROM; break; } @@ -368,10 +369,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) (pointer + length) >= hw->eeprom.word_size) continue; - for (j = pointer+1; j <= pointer+length; j++) { - if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { + for (j = pointer + 1; j <= pointer + length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word)) { hw_dbg(hw, "EEPROM read failed\n"); - break; + return IXGBE_ERR_EEPROM; } checksum += word; } @@ -379,7 +380,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return checksum; + return (s32)checksum; } /** @@ -410,23 +411,34 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores twice here. */ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + if (status) + goto out; - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + hw_dbg(hw, "Invalid EEPROM checksum"); + status = IXGBE_ERR_EEPROM_CHECKSUM; + } /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; - /* Verify read and calculated checksums are the same */ - if (read_checksum != checksum) - return IXGBE_ERR_EEPROM_CHECKSUM; +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -457,15 +469,22 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; - checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + goto out; + + checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.write because we do not want to * take the synchronization semaphores twice here. 
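Both the validate and update paths now follow the same take-once locking discipline: acquire the EEP_SM semaphore up front, route all EEPROM traffic through the raw eerd/eewr accessors, and funnel every exit through a single release. Schematically (condensed from the hunks around this point):

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	/* only ixgbe_read_eerd_generic()/ixgbe_write_eewr_generic() in
	 * here, never hw->eeprom.ops.read/write, to avoid re-taking the
	 * semaphore
	 */
	status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;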
*/ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); - if (!status) - status = ixgbe_update_flash_X540(hw); + if (status) + goto out; + + status = ixgbe_update_flash_X540(hw); +out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } @@ -544,7 +563,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) * Acquires the SWFW semaphore thought the SW_FW_SYNC register for * the specified function (CSR, PHY0, PHY1, NVM, Flash) **/ -static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swfw_sync; u32 swmask = mask; @@ -612,7 +631,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) * Releases the SWFW semaphore through the SW_FW_SYNC register * for the specified function (CSR, PHY0, PHY1, EVM, Flash) **/ -static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swfw_sync; u32 swmask = mask; @@ -699,7 +718,7 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) * Devices that implement the version 2 interface: * X540 **/ -static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; @@ -735,7 +754,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) * Devices that implement the version 2 interface: * X540 **/ -static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h new file mode 100644 index 000000000000..a1468b1f4d8a --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -0,0 +1,39 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include "ixgbe_type.h" + +s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c new file mode 100644 index 000000000000..ffdd1231f419 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -0,0 +1,1432 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux driver + * Copyright(c) 1999 - 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +/** ixgbe_identify_phy_x550em - Get PHY type based on device id + * @hw: pointer to hardware structure + * + * Returns error code + */ +static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_SFP: + /* set up for CS4227 usage */ + hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + + return ixgbe_identify_module_generic(hw); + case IXGBE_DEV_ID_X550EM_X_KX4: + hw->phy.type = ixgbe_phy_x550em_kx4; + break; + case IXGBE_DEV_ID_X550EM_X_KR: + hw->phy.type = ixgbe_phy_x550em_kr; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + return ixgbe_identify_phy_generic(hw); + default: + break; + } + return 0; +} + +static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + return IXGBE_NOT_IMPLEMENTED; +} + +static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + return IXGBE_NOT_IMPLEMENTED; +} + +/** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return 0; +} + +/** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the + * IOSF device + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @phy_data: Pointer to read data from the register + **/ +s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 *data) +{ + u32 i, command, error; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Check every 10 usec to see if the address cycle completed. 
+ * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usleep_range(10, 20); + + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + } + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to read, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "Read timed out\n"); + return IXGBE_ERR_PHY; + } + + *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); + + return 0; +} + +/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface + * command assuming that the semaphore is already obtained. + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status; + struct ixgbe_hic_read_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32(offset * 2); + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + if (status) + return status; + + *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, + FW_NVM_DATA_OFFSET); + + return 0; +} + +/** ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the hostif. + **/ +s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + struct ixgbe_hic_read_shadow_ram buffer; + u32 current_word = 0; + u16 words_to_read; + s32 status; + u32 i; + + /* Take semaphore for the entire operation. 
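A detail worth calling out in the single-word read above: the shadow RAM address travels in bytes while callers think in 16-bit words, and both address and length are big-endian on the wire, hence the cpu_to_be32()/cpu_to_be16() conversions. Reduced to the field assignments:

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
	buffer.address = cpu_to_be32(offset * 2);	/* words -> bytes */
	buffer.length = cpu_to_be16(sizeof(u16));	/* one 16-bit word */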
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM read buffer - semaphore failed\n"); + return status; + } + + while (words) { + if (words > FW_MAX_READ_BUFFER_SIZE / 2) + words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; + else + words_to_read = words; + + buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* convert offset from words to bytes */ + buffer.address = cpu_to_be32((offset + current_word) * 2); + buffer.length = cpu_to_be16(words_to_read * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, + false); + if (status) { + hw_dbg(hw, "Host interface command failed\n"); + goto out; + } + + for (i = 0; i < words_to_read; i++) { + u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + + 2 * i; + u32 value = IXGBE_READ_REG(hw, reg); + + data[current_word] = (u16)(value & 0xffff); + current_word++; + i++; + if (i < words_to_read) { + value >>= 16; + data[current_word] = (u16)(value & 0xffff); + current_word++; + } + } + words -= words_to_read; + } + +out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** ixgbe_checksum_ptr_x550 - Checksum one pointer region + * @hw: pointer to hardware structure + * @ptr: pointer offset in eeprom + * @size: size of section pointed by ptr, if 0 first word will be used as size + * @csum: address of checksum to update + * + * Returns error status for any failure + **/ +static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, + u16 size, u16 *csum, u16 *buffer, + u32 buffer_size) +{ + u16 buf[256]; + s32 status; + u16 length, bufsz, i, start; + u16 *local_buffer; + + bufsz = sizeof(buf) / sizeof(buf[0]); + + /* Read a chunk at the pointer location */ + if (!buffer) { + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = buf; + } else { + if (buffer_size < ptr) + return IXGBE_ERR_PARAM; + local_buffer = &buffer[ptr]; + } + + if (size) { + start = 0; + length = size; + } else { + start = 1; + length = local_buffer[0]; + + /* Skip pointer section if length is invalid. 
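Back in the buffered read above, the unpacking loop is easy to misread: every 32-bit read of the FLEX_MNG area yields two EEPROM words, so the loop index is advanced a second time inside the body. The essential step, with value, i and current_word as in that loop:

	u32 value = IXGBE_READ_REG(hw, reg);

	data[current_word++] = (u16)(value & 0xffff);		/* low half */
	if (++i < words_to_read)
		data[current_word++] = (u16)(value >> 16);	/* high half */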
*/ + if (length == 0xFFFF || length == 0 || + (ptr + length) >= hw->eeprom.word_size) + return 0; + } + + if (buffer && ((u32)start + (u32)length > buffer_size)) + return IXGBE_ERR_PARAM; + + for (i = start; length; i++, length--) { + if (i == bufsz && !buffer) { + ptr += bufsz; + i = 0; + if (length < bufsz) + bufsz = length; + + /* Read a chunk at the pointer location */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, + bufsz, buf); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + } + *csum += local_buffer[i]; + } + return 0; +} + +/** ixgbe_calc_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * @buffer: pointer to buffer containing calculated checksum + * @buffer_size: size of buffer + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) +{ + u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 *local_buffer; + s32 status; + u16 checksum = 0; + u16 pointer, i, size; + + hw->eeprom.ops.init_params(hw); + + if (!buffer) { + /* Read pointer area */ + status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, + IXGBE_EEPROM_LAST_WORD + 1, + eeprom_ptrs); + if (status) { + hw_dbg(hw, "Failed to read EEPROM image\n"); + return status; + } + local_buffer = eeprom_ptrs; + } else { + if (buffer_size < IXGBE_EEPROM_LAST_WORD) + return IXGBE_ERR_PARAM; + local_buffer = buffer; + } + + /* For X550 hardware include 0x0-0x41 in the checksum, skip the + * checksum word itself + */ + for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) + if (i != IXGBE_EEPROM_CHECKSUM) + checksum += local_buffer[i]; + + /* Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ + for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + pointer = local_buffer[i]; + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + switch (i) { + case IXGBE_PCIE_GENERAL_PTR: + size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; + break; + case IXGBE_PCIE_CONFIG0_PTR: + case IXGBE_PCIE_CONFIG1_PTR: + size = IXGBE_PCIE_CONFIG_SIZE; + break; + default: + size = 0; + break; + } + + status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, + buffer, buffer_size); + if (status) + return status; + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return (s32)checksum; +} + +/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum + * @hw: pointer to hardware structure + * + * Returns a negative error code on error, or the 16-bit checksum + **/ +s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + return ixgbe_calc_checksum_X550(hw, NULL, 0); +} + +/** ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the hostif. 
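Note how the X550 checksum span above differs from the X540 code earlier in this patch: X540 sums words 0x0 through 0x3E (everything below the checksum word), whereas X550 sums 0x0 through IXGBE_EEPROM_LAST_WORD (0x41) and skips the checksum word sitting in the middle of that range:

	for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
		if (i != IXGBE_EEPROM_CHECKSUM)
			checksum += local_buffer[i];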
+ **/ +s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + status = ixgbe_read_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = hw->eeprom.ops.calc_checksum(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + if (status) + return status; + + /* Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) { + status = IXGBE_ERR_EEPROM_CHECKSUM; + hw_dbg(hw, "Invalid EEPROM checksum"); + } + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + + return status; +} + +/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status; + struct ixgbe_hic_write_shadow_ram buffer; + + buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; + buffer.hdr.req.buf_lenh = 0; + buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; + buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; + + /* one word */ + buffer.length = cpu_to_be16(sizeof(u16)); + buffer.data = data; + buffer.address = cpu_to_be32(offset * 2); + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + return status; +} + +/** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status = 0; + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + } else { + hw_dbg(hw, "write ee hostif failed to get semaphore"); + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. 
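Taken together, the helpers around this point give the X550 a three-step persist path: write the word into shadow RAM, recompute and store the checksum, then have firmware dump shadow RAM to flash. A sketch of a caller stringing them together (offset and data are placeholder values):

	status = ixgbe_write_ee_hostif_X550(hw, offset, data);
	if (status)
		return status;

	/* recomputes and writes the checksum, then calls
	 * ixgbe_update_flash_X550()
	 */
	return ixgbe_update_eeprom_checksum_X550(hw);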
+ **/ +s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +{ + s32 status = 0; + union ixgbe_hic_hdr2 buffer; + + buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; + buffer.req.buf_lenh = 0; + buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; + buffer.req.checksum = FW_DEFAULT_CHECKSUM; + + status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + sizeof(buffer), + IXGBE_HI_COMMAND_TIMEOUT, false); + return status; +} + +/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum = 0; + + /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); + if (status) { + hw_dbg(hw, "EEPROM read failed\n"); + return status; + } + + status = ixgbe_calc_eeprom_checksum_X550(hw); + if (status < 0) + return status; + + checksum = (u16)(status & 0xffff); + + status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + if (status) + return status; + + status = ixgbe_update_flash_X550(hw); + + return status; +} + +/** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * + * Write a 16 bit word(s) to the EEPROM using the hostif. + **/ +s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = 0; + u32 i = 0; + + /* Take semaphore for the entire operation. 
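Unlike the buffered read, which batches up to FW_MAX_READ_BUFFER_SIZE / 2 words per host interface command, the buffered write below issues one command per word (FW_WRITE_SHADOW_RAM_LEN is sized for the header plus a single data word); only the semaphore acquisition is amortized across the loop:

	/* semaphore held once around the whole loop */
	for (i = 0; i < words; i++) {
		status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
							 data[i]);
		if (status)
			break;
	}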
*/ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + if (status) { + hw_dbg(hw, "EEPROM write buffer - semaphore failed\n"); + return status; + } + + for (i = 0; i < words; i++) { + status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, + data[i]); + if (status) { + hw_dbg(hw, "Eeprom buffered write failed\n"); + break; + } + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + **/ +void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } +} + +/** ixgbe_setup_sfp_modules_X550em - Setup SFP module + * @hw: pointer to hardware structure + */ +s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + bool setup_linear; + u16 reg_slice, edc_mode; + s32 ret_val; + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_unknown: + return 0; + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + setup_linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + setup_linear = false; + break; + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + /* The CS4227 slice address is the base address + the port-pair reg + * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0. + */ + reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12); + + if (setup_linear) + edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + + /* Configure CS4227 for connection type. 
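The slice arithmetic above folds the port into the register address (lan_id 0 lands on slice 0x12B0, lan_id 1 on 0x22B0), and the EDC mode is shifted up one bit with bit 0 acting as an enable. A compact restatement of those two assignments:

	/* slice 0 = 0x12B0, slice 1 = 0x22B0 */
	reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12);
	edc_mode = ((setup_linear ? IXGBE_CS4227_EDC_MODE_CX1
				  : IXGBE_CS4227_EDC_MODE_SR) << 1) | 0x1;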
*/ + ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + edc_mode); + + if (ret_val) + ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice, + edc_mode); + + return ret_val; +} + +/** ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + **/ +s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; + + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return 0; + } + + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + return 0; +} + +/** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the + * IOSF device + * + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u32 data) +{ + u32 i, command, error; + + command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | + (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); + + /* Write IOSF control register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); + + /* Write IOSF data register */ + IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); + + /* Check every 10 usec to see if the address cycle completed. + * The SB IOSF BUSY bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usleep_range(10, 20); + + command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); + if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) + break; + } + + if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { + error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> + IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; + hw_dbg(hw, "Failed to write, error %x\n", error); + return IXGBE_ERR_PHY; + } + + if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { + hw_dbg(hw, "Write timed out\n"); + return IXGBE_ERR_PHY; + } + + return 0; +} + +/** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. + * @hw: pointer to hardware structure + * @speed: the link speed to force + * + * Configures the integrated KR PHY to use iXFI mode. Used to connect an + * internal and external PHY at a specific speed, without autonegotiation. + **/ +static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +{ + s32 status; + u32 reg_val; + + /* Disable AN and force speed to 10G Serial. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + + /* Select forced link speed for internal PHY. 
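Every one of the KRM register updates that follows repeats the same indirect read-modify-write over the IOSF sideband; schematically, with reg, clear and set standing in for the per-register specifics (hypothetical locals, not names from the patch):

	status = ixgbe_read_iosf_sb_reg_x550(hw, reg,
					     IXGBE_SB_IOSF_TARGET_KR_PHY,
					     &reg_val);
	if (status)
		return status;

	reg_val &= ~clear;
	reg_val |= set;

	status = ixgbe_write_iosf_sb_reg_x550(hw, reg,
					      IXGBE_SB_IOSF_TARGET_KR_PHY,
					      reg_val);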
*/ + switch (*speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + break; + default: + /* Other link speeds are not supported by internal KR PHY. */ + return IXGBE_ERR_LINK_SETUP; + } + + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable training protocol FSM. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Disable Flex from training TXFFE. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; + reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Enable override for coefficients. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; + reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + if (status) + return status; + + /* Toggle port SW reset by AN reset. */ + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KX4 PHY. + **/ +s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, &reg_val); + if (status) + return status; + + reg_val &= ~(IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 | + IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX); + + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE; + + /* Advertise 10G support. 
*/ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, reg_val); + + return status; +} + +/** ixgbe_setup_kr_x550em - Configure the KR PHY. + * @hw: pointer to hardware structure + * + * Configures the integrated KR PHY. + **/ +s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ; + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + + /* Advertise 10G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + + /* Advertise 1G support. */ + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation. */ + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + +/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY + * @hw: point to hardware structure + * + * Configures the integrated KR PHY to talk to the external PHY. The base + * driver will call this function when it gets notification via interrupt from + * the external PHY. This function forces the internal PHY into iXFI mode at + * the correct speed. + * + * A return of a non-zero value indicates an error, and the base driver should + * not report link up. 
+ **/ +s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 lasi, autoneg_status, speed; + ixgbe_link_speed force_speed; + + /* Verify that the external link status has changed */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi); + if (status) + return status; + + /* If there was no change in link status, we can just exit */ + if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM)) + return 0; + + /* we read this twice back to back to indicate current status */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + /* If link is not up return an error indicating treat link as down */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + return IXGBE_ERR_INVALID_LINK_SETTINGS; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &speed); + + /* clear everything but the speed and duplex bits */ + speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; + + switch (speed) { + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + /* Internal PHY does not support anything else */ + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + return ixgbe_setup_ixfi_x550em(hw, &force_speed); +} + +/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
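Note that the setup_internal_link op wired up just below has no caller inside this patch; per the comment on ixgbe_setup_internal_phy_x550em() above, the intended call site is the base driver's external-PHY interrupt path, presumably guarded along these lines (a sketch, not code from this patch):

	if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
	    hw->phy.ops.setup_internal_link)
		hw->phy.ops.setup_internal_link(hw);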
+ **/ +s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + u32 esdp; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + } + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_X550em(hw); + if (phy->sfp_type != ixgbe_sfp_type_unknown) + phy->ops.reset = NULL; + + /* Set function pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_x550em_kx4: + phy->ops.setup_link = ixgbe_setup_kx4_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_kr: + phy->ops.setup_link = ixgbe_setup_kr_x550em; + phy->ops.read_reg = ixgbe_read_phy_reg_x550em; + phy->ops.write_reg = ixgbe_write_phy_reg_x550em; + break; + case ixgbe_phy_x550em_ext_t: + phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em; + break; + default: + break; + } + return ret_val; +} + +/** ixgbe_get_media_type_X550em - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + * + **/ +enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + /* Detect if there is a copper PHY attached. */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_KX4: + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_X550EM_X_SFP: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_X550EM_X_1G_T: + case IXGBE_DEV_ID_X550EM_X_10G_T: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } + return media_type; +} + +/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
+ ** @hw: pointer to hardware structure + **/ +s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32 status; + u16 reg; + u32 retries = 2; + + do { + /* decrement retries counter and exit if we hit 0 */ + if (retries < 1) { + hw_dbg(hw, "External PHY not yet finished resetting."); + return IXGBE_ERR_PHY; + } + retries--; + + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &reg); + if (status) + return status; + + /* Verify PHY FW reset has completed */ + } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1); + + /* Set port to low power mode */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + /* Enable the transmitter */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &reg); + if (status) + return status; + + reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + reg); + if (status) + return status; + + /* Un-stall the PHY FW */ + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + reg &= ~IXGBE_MDIO_POWER_UP_STALL; + + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} + +/** ixgbe_reset_hw_X550em - Perform hardware reset + ** @hw: pointer to hardware structure + ** + ** Resets the hardware by resetting the transmit and receive units, masks + ** and clears all interrupts, performs a PHY reset, and performs a link + ** (MAC) reset. + **/ +s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl = 0; + u32 i; + bool link_up = false; + + /* Call adapter stop to disable Tx/Rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status) + return status; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + /* start the external PHY */ + if (hw->phy.type == ixgbe_phy_x550em_ext_t) { + status = ixgbe_init_ext_t_x550em(hw); + if (status) + return status; + } + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = false; + } + + /* Reset PHY */ + if (!hw->phy.reset_disable && hw->phy.ops.reset) + hw->phy.ops.reset(hw); + +mac_reset_top: + /* Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset.
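+ *
+ * (Illustrative note, not from the patch: IXGBE_CTRL_RST requests a full
+ * software reset while IXGBE_CTRL_LNK_RST restarts only the link, so the
+ * SW reset is chosen while link is up to avoid a link reset pulling the
+ * PHY out from under active manageability traffic.)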
+ */ + ctrl = IXGBE_CTRL_LNK_RST; + + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + msleep(50); + + /* Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to + * allow time for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + return status; +} + +#define X550_COMMON_MAC \ + .init_hw = &ixgbe_init_hw_generic, \ + .start_hw = &ixgbe_start_hw_X540, \ + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \ + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \ + .get_mac_addr = &ixgbe_get_mac_addr_generic, \ + .get_device_caps = &ixgbe_get_device_caps_generic, \ + .stop_adapter = &ixgbe_stop_adapter_generic, \ + .get_bus_info = &ixgbe_get_bus_info_generic, \ + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \ + .read_analog_reg8 = NULL, \ + .write_analog_reg8 = NULL, \ + .set_rxpba = &ixgbe_set_rxpba_generic, \ + .check_link = &ixgbe_check_mac_link_generic, \ + .led_on = &ixgbe_led_on_generic, \ + .led_off = &ixgbe_led_off_generic, \ + .blink_led_start = &ixgbe_blink_led_start_X540, \ + .blink_led_stop = &ixgbe_blink_led_stop_X540, \ + .set_rar = &ixgbe_set_rar_generic, \ + .clear_rar = &ixgbe_clear_rar_generic, \ + .set_vmdq = &ixgbe_set_vmdq_generic, \ + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \ + .clear_vmdq = &ixgbe_clear_vmdq_generic, \ + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \ + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \ + .enable_mc = &ixgbe_enable_mc_generic, \ + .disable_mc = &ixgbe_disable_mc_generic, \ + .clear_vfta = &ixgbe_clear_vfta_generic, \ + .set_vfta = &ixgbe_set_vfta_generic, \ + .fc_enable = &ixgbe_fc_enable_generic, \ + .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, \ + .init_uta_tables = &ixgbe_init_uta_tables_generic, \ + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, \ + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \ + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ + .get_thermal_sensor_data = NULL, \ + .init_thermal_sensor_thresh = NULL, \ + .prot_autoc_read = &prot_autoc_read_generic, \ + .prot_autoc_write = &prot_autoc_write_generic, \ + +static struct ixgbe_mac_operations mac_ops_X550 = { + X550_COMMON_MAC + .reset_hw = &ixgbe_reset_hw_X540, + .get_media_type = &ixgbe_get_media_type_X540, + .get_san_mac_addr = 
&ixgbe_get_san_mac_addr_generic, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .setup_link = &ixgbe_setup_mac_link_X540, + .set_rxpba = &ixgbe_set_rxpba_generic, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .setup_sfp = NULL, +}; + +static struct ixgbe_mac_operations mac_ops_X550EM_x = { + X550_COMMON_MAC + .reset_hw = &ixgbe_reset_hw_X550em, + .get_media_type = &ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + +}; + +#define X550_COMMON_EEP \ + .read = &ixgbe_read_ee_hostif_X550, \ + .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \ + .write = &ixgbe_write_ee_hostif_X550, \ + .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \ + .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \ + .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ + .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ + +static struct ixgbe_eeprom_operations eeprom_ops_X550 = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X550, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { + X550_COMMON_EEP + .init_params = &ixgbe_init_eeprom_params_X540, +}; + +#define X550_COMMON_PHY \ + .identify_sfp = &ixgbe_identify_module_generic, \ + .reset = NULL, \ + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \ + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \ + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \ + .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .check_overtemp = &ixgbe_tn_check_overtemp, \ + .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, + +static struct ixgbe_phy_operations phy_ops_X550 = { + X550_COMMON_PHY + .init = NULL, + .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .read_i2c_combined = &ixgbe_read_i2c_combined_generic, + .write_i2c_combined = &ixgbe_write_i2c_combined_generic, +}; + +static struct ixgbe_phy_operations phy_ops_X550EM_x = { + X550_COMMON_PHY + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = NULL, /* defined later */ + .write_reg = NULL, /* defined later */ + .setup_link = NULL, /* defined later */ +}; + +struct ixgbe_info ixgbe_X550_info = { + .mac = ixgbe_mac_X550, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550, + .eeprom_ops = &eeprom_ops_X550, + .phy_ops = &phy_ops_X550, + .mbx_ops = &mbx_ops_generic, +}; + +struct ixgbe_info ixgbe_X550EM_x_info = { + .mac = ixgbe_mac_X550EM_x, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X550EM_x, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_X550EM_x, + .mbx_ops = &mbx_ops_generic, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 05e4f32d84f7..7412d378b77b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -31,6 +31,8 @@ /* Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED #define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X550_VF 0x1565 +#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 8 diff --git 
a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index ba96cb5b886d..8c44ab25f3fa 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -58,8 +58,9 @@ struct ixgbevf_tx_buffer { }; struct ixgbevf_rx_buffer { - struct sk_buff *skb; dma_addr_t dma; + struct page *page; + unsigned int page_offset; }; struct ixgbevf_stats { @@ -79,7 +80,6 @@ struct ixgbevf_tx_queue_stats { }; struct ixgbevf_rx_queue_stats { - u64 non_eop_descs; u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; u64 csum_err; @@ -92,9 +92,10 @@ struct ixgbevf_ring { void *desc; /* descriptor ring memory */ dma_addr_t dma; /* phys. address of descriptor ring */ unsigned int size; /* length in bytes */ - unsigned int count; /* amount of descriptors */ - unsigned int next_to_use; - unsigned int next_to_clean; + u16 count; /* amount of descriptors */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; union { struct ixgbevf_tx_buffer *tx_buffer_info; @@ -110,12 +111,11 @@ struct ixgbevf_ring { u64 hw_csum_rx_error; u8 __iomem *tail; + struct sk_buff *skb; u16 reg_idx; /* holds the special value that gets the hardware register * offset associated with this ring, which is different * for DCB and RSS modes */ - - u16 rx_buf_len; int queue_index; /* needed for multiqueue queue management */ }; @@ -134,12 +134,10 @@ struct ixgbevf_ring { /* Supported Rx Buffer Sizes */ #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2K 2048 -#define IXGBEVF_RXBUFFER_4K 4096 -#define IXGBEVF_RXBUFFER_8K 8192 -#define IXGBEVF_RXBUFFER_10K 10240 +#define IXGBEVF_RXBUFFER_2048 2048 #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -307,6 +305,13 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) ((_eitr) ? 
(1000000000 / ((_eitr) * 256)) : 8) #define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG +/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring) { u16 ntc = ring->next_to_clean; @@ -339,8 +344,10 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) /* board specific private data structure */ struct ixgbevf_adapter { - struct timer_list watchdog_timer; + /* this field must be first, see ixgbevf_process_skb_fields */ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + + struct timer_list watchdog_timer; struct work_struct reset_task; struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; @@ -363,7 +370,6 @@ struct ixgbevf_adapter { struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */ u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; - u64 non_eop_descs; int num_msix_vectors; u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; @@ -373,7 +379,7 @@ struct ixgbevf_adapter { */ u32 flags; #define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1) + #define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) struct msix_entry *msix_entries; @@ -423,18 +429,17 @@ enum ixbgevf_state_t { __IXGBEVF_WORK_INIT, }; -struct ixgbevf_cb { - struct sk_buff *prev; -}; -#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb) - enum ixgbevf_boards { board_82599_vf, board_X540_vf, + board_X550_vf, + board_X550EM_x_vf, }; extern const struct ixgbevf_info ixgbevf_82599_vf_info; extern const struct ixgbevf_info ixgbevf_X540_vf_info; +extern const struct ixgbevf_info ixgbevf_X550_vf_info; +extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; /* needed by ethtool.c */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 030a219c85e3..62a0d8e0f17d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -66,6 +66,8 @@ static char ixgbevf_copyright[] = static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, [board_X540_vf] = &ixgbevf_X540_vf_info, + [board_X550_vf] = &ixgbevf_X550_vf_info, + [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -79,6 +81,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, /* required last entry */ {0, } }; @@ -143,21 +147,6 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) return value; } -static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, - u32 val) -{ - rx_ring->next_to_use = val; - - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
- */ - wmb(); - ixgbevf_write_tail(rx_ring, val); -} - /** * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors * @adapter: pointer to adapter struct @@ -343,39 +332,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, } /** - * ixgbevf_receive_skb - Send a completed packet up the stack - * @q_vector: structure containing interrupt and ring information - * @skb: packet to send up - * @status: hardware indication of status of receive - * @rx_desc: rx descriptor - **/ -static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, - struct sk_buff *skb, u8 status, - union ixgbe_adv_rx_desc *rx_desc) -{ - struct ixgbevf_adapter *adapter = q_vector->adapter; - bool is_vlan = (status & IXGBE_RXD_STAT_VP); - u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - - if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); - - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(&q_vector->napi, skb); - else - netif_rx(skb); -} - -/** * ixgbevf_rx_skb - Helper function to determine proper Rx method * @q_vector: structure containing interrupt and ring information * @skb: packet to send up - * @status: hardware indication of status of receive - * @rx_desc: rx descriptor **/ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, - struct sk_buff *skb, u8 status, - union ixgbe_adv_rx_desc *rx_desc) + struct sk_buff *skb) { #ifdef CONFIG_NET_RX_BUSY_POLL skb_mark_napi_id(skb, &q_vector->napi); @@ -387,17 +349,17 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, } #endif /* CONFIG_NET_RX_BUSY_POLL */ - ixgbevf_receive_skb(q_vector, skb, status, rx_desc); + napi_gro_receive(&q_vector->napi, skb); } -/** - * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum - * @ring: pointer to Rx descriptor ring structure - * @status_err: hardware indication of status of receive +/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified - **/ + */ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, - u32 status_err, struct sk_buff *skb) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { skb_checksum_none_assert(skb); @@ -406,16 +368,16 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, return; /* if IP and error */ - if ((status_err & IXGBE_RXD_STAT_IPCS) && - (status_err & IXGBE_RXDADV_ERR_IPE)) { + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && + ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { ring->rx_stats.csum_err++; return; } - if (!(status_err & IXGBE_RXD_STAT_L4CS)) + if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) return; - if (status_err & IXGBE_RXDADV_ERR_TCPE) { + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { ring->rx_stats.csum_err++; return; } @@ -424,52 +386,408 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } +/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the checksum, VLAN, protocol, and other fields within + * the skb.
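+ *
+ * For example (illustrative only), VLAN presence is derived from the
+ * endian-safe descriptor test introduced earlier in this patch:
+ *
+ *	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP))
+ *		vid = le16_to_cpu(rx_desc->wb.upper.vlan);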
+ */ +static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + ixgbevf_rx_checksum(rx_ring, rx_desc, skb); + + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + unsigned long *active_vlans = netdev_priv(rx_ring->netdev); + + if (test_bit(vid & VLAN_VID_MASK, active_vlans)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +/** + * ixgbevf_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it advances next to + * clean to the next descriptor of the frame and returns true, indicating + * that this is in fact a non-EOP buffer. + **/ +static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IXGBEVF_RX_DESC(rx_ring, ntc)); + + if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + + return true; +} + +static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma = bi->dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_page(page); + + rx_ring->rx_stats.alloc_rx_buff_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + return true; +} + /** * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on + * @cleaned_count: number of buffers to replace **/ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, - int cleaned_count) + u16 cleaned_count) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbevf_rx_buffer *bi; unsigned int i = rx_ring->next_to_use; - while (cleaned_count--) { - rx_desc = IXGBEVF_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_buffer_info[i]; + /* nothing to do or no valid netdev defined */ + if (!cleaned_count || !rx_ring->netdev) + return; - if (!bi->skb) { - struct sk_buff *skb; + rx_desc = IXGBEVF_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); - if (!skb) - goto no_buffers; + do { + if (!ixgbevf_alloc_mapped_page(rx_ring, bi)) + break; - bi->skb = skb; + /* Refresh the desc even if pkt_addr didn't change + * because each write-back erases this info.
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); - bi->dma = dma_map_single(rx_ring->dev, skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - dev_kfree_skb(skb); - bi->skb = NULL; - dev_err(rx_ring->dev, "Rx DMA map failed\n"); - break; - } + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBEVF_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; } - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - i++; - if (i == rx_ring->count) - i = 0; + /* clear the hdr_addr for the next_to_use descriptor */ + rx_desc->read.hdr_addr = 0; + + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) { + /* record the next descriptor to use */ + rx_ring->next_to_use = i; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = i; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + ixgbevf_write_tail(rx_ring, i); } +} + +/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbevf specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; -no_buffers: - rx_ring->rx_stats.alloc_rx_buff_failed++; - if (rx_ring->next_to_use != i) - ixgbevf_release_rx_desc(rx_ring, i); + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/* ixgbevf_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ */ +static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbevf_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { + struct net_device *netdev = rx_ring->netdev; + + if (!(netdev->features & NETIF_F_RXALL)) { + dev_kfree_skb_any(skb); + return true; + } + } + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbevf_pull_tail(rx_ring, skb); + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + */ +static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *old_buff) +{ + struct ixgbevf_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->dma = old_buff->dma; + new_buff->page_offset = old_buff->page_offset; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, + new_buff->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); +} + +static inline bool ixgbevf_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; +} + +/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
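+ *
+ * Illustration (simplified to the 4K-page case): the reuse decision
+ * reduces to "are we the sole owner of the page?", and reuse itself just
+ * flips between the two 2K halves:
+ *
+ *	if (page_count(page) != 1)
+ *		return false;
+ *	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;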
+ */ +static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, + struct ixgbevf_rx_buffer *rx_buffer, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = IXGBEVF_RX_BUFSZ; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +#endif + + if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as is */ + if (likely(!ixgbevf_page_is_reserved(page))) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(ixgbevf_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ; + +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ)) + return false; + +#endif + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + atomic_inc(&page->_count); + + return true; +} + +static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbevf_rx_buffer *rx_buffer; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBEVF_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + IXGBEVF_RX_BUFSZ, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbevf_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; } static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, @@ -484,78 +802,51 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *rx_ring, int budget) { - union ixgbe_adv_rx_desc *rx_desc, *next_rxd; - struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; - struct sk_buff *skb; - unsigned int i; - u32 len, staterr; - int 
cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = ixgbevf_desc_unused(rx_ring); + struct sk_buff *skb = rx_ring->skb; - i = rx_ring->next_to_clean; - rx_desc = IXGBEVF_RX_DESC(rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - rx_buffer_info = &rx_ring->rx_buffer_info[i]; + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; - while (staterr & IXGBE_RXD_STAT_DD) { - if (!budget) - break; - budget--; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { + ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } - rmb(); /* read descriptor and rx_buffer_info after status DD */ - len = le16_to_cpu(rx_desc->wb.upper.length); - skb = rx_buffer_info->skb; - prefetch(skb->data - NET_IP_ALIGN); - rx_buffer_info->skb = NULL; + rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); - if (rx_buffer_info->dma) { - dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - skb_put(skb, len); - } + if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) + break; - i++; - if (i == rx_ring->count) - i = 0; + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); - next_rxd = IXGBEVF_RX_DESC(rx_ring, i); - prefetch(next_rxd); - cleaned_count++; + /* retrieve a buffer from the ring */ + skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb); - next_buffer = &rx_ring->rx_buffer_info[i]; + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; - if (!(staterr & IXGBE_RXD_STAT_EOP)) { - skb->next = next_buffer->skb; - IXGBE_CB(skb->next)->prev = skb; - rx_ring->rx_stats.non_eop_descs++; - goto next_desc; - } + cleaned_count++; - /* we should not be chaining buffers, if we did drop the skb */ - if (IXGBE_CB(skb)->prev) { - do { - struct sk_buff *this = skb; - skb = IXGBE_CB(skb)->prev; - dev_kfree_skb(this); - } while (skb); - goto next_desc; - } + /* fetch next buffer in frame if non-eop */ + if (ixgbevf_is_non_eop(rx_ring, rx_desc)) + continue; - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { - dev_kfree_skb_irq(skb); - goto next_desc; + /* verify the packet layout is correct */ + if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + continue; } - ixgbevf_rx_checksum(rx_ring, staterr, skb); - /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; - - skb->protocol = eth_type_trans(skb, rx_ring->netdev); /* Workaround hardware that can't do proper VEPA multicast * source pruning. 
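 * (Aside, for readers: "source pruning" here means dropping frames that the
 * embedded VEPA switch reflected back with our own MAC address as the
 * source; the ether_addr_equal() check in the following hunk implements it.)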
@@ -565,32 +856,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, ether_addr_equal(rx_ring->netdev->dev_addr, eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); - goto next_desc; + continue; } - ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc); + /* populate checksum, VLAN, and protocol */ + ixgbevf_process_skb_fields(rx_ring, rx_desc, skb); -next_desc: - rx_desc->wb.upper.status_error = 0; + ixgbevf_rx_skb(q_vector, skb); - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { - ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; + /* reset skb pointer */ + skb = NULL; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + /* update budget accounting */ + total_rx_packets++; } - rx_ring->next_to_clean = i; - cleaned_count = ixgbevf_desc_unused(rx_ring); - - if (cleaned_count) - ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count); + /* place incomplete frames back on ring for completion */ + rx_ring->skb = skb; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -634,12 +916,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) else per_ring_budget = budget; - adapter->flags |= IXGBE_FLAG_IN_NETPOLL; ixgbevf_for_each_ring(ring, q_vector->rx) clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring, per_ring_budget) < per_ring_budget); - adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; #ifdef CONFIG_NET_RX_BUSY_POLL ixgbevf_qv_unlock_napi(q_vector); @@ -1229,19 +1509,15 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) { - struct ixgbevf_ring *rx_ring; struct ixgbe_hw *hw = &adapter->hw; u32 srrctl; - rx_ring = adapter->rx_ring[index]; - srrctl = IXGBE_SRRCTL_DROP_EN; + srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } @@ -1260,40 +1536,6 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); } -static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i; - u16 rx_buf_len; - - /* notify the PF of our intent to use this size of frame */ - ixgbevf_rlpml_set_vf(hw, max_frame); - - /* PF will allow an extra 4 bytes past for vlan tagged frames */ - max_frame += VLAN_HLEN; - - /* - * Allocate buffer sizes that fit well into 32K and - * take into account max frame size of 9.5K - */ - if ((hw->mac.type == ixgbe_mac_X540_vf) && - (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else if (max_frame <= IXGBEVF_RXBUFFER_2K) - rx_buf_len = IXGBEVF_RXBUFFER_2K; - else if (max_frame <= IXGBEVF_RXBUFFER_4K) - rx_buf_len = IXGBEVF_RXBUFFER_4K; - else if (max_frame <= IXGBEVF_RXBUFFER_8K) - rx_buf_len = IXGBEVF_RXBUFFER_8K; - else - rx_buf_len = IXGBEVF_RXBUFFER_10K; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->rx_buf_len = rx_buf_len; -} - #define IXGBEVF_MAX_RX_DESC_POLL 10 static void ixgbevf_disable_rx_queue(struct 
ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) @@ -1371,12 +1613,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, /* reset ntu and ntc to place SW in sync with hardware */ ring->next_to_clean = 0; ring->next_to_use = 0; + ring->next_to_alloc = 0; ixgbevf_configure_srrctl(adapter, reg_idx); - /* prevent DMA from exceeding buffer space available */ - rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; - rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN; + /* allow any size packet since we can handle overflow */ + rxdctl &= ~IXGBE_RXDCTL_RLPML_EN; + rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); @@ -1393,11 +1636,13 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) { int i; + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; ixgbevf_setup_psrtype(adapter); - /* set_rx_buffer_len must be called before ring initialization */ - ixgbevf_set_rx_buffer_len(adapter); + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -1702,32 +1947,32 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) **/ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring) { + struct device *dev = rx_ring->dev; unsigned long size; unsigned int i; + /* Free Rx ring sk_buff */ + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* ring already cleared, nothing to do */ if (!rx_ring->rx_buffer_info) return; - /* Free all the Rx ring sk_buffs */ + /* Free all the Rx ring pages */ for (i = 0; i < rx_ring->count; i++) { - struct ixgbevf_rx_buffer *rx_buffer_info; + struct ixgbevf_rx_buffer *rx_buffer; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - if (rx_buffer_info->dma) { - dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - } - if (rx_buffer_info->skb) { - struct sk_buff *skb = rx_buffer_info->skb; - rx_buffer_info->skb = NULL; - do { - struct sk_buff *this = skb; - skb = IXGBE_CB(skb)->prev; - dev_kfree_skb(this); - } while (skb); - } + rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->dma) + dma_unmap_page(dev, rx_buffer->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (rx_buffer->page) + __free_page(rx_buffer->page); + rx_buffer->page = NULL; } size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; @@ -3274,6 +3519,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; @@ -3282,7 +3528,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; default: - if (adapter->hw.mac.type == ixgbe_mac_X540_vf) + if (adapter->hw.mac.type != ixgbe_mac_82599_vf) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; } @@ -3291,17 +3537,35 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) if ((new_mtu < 68) || (max_frame > max_possible_frame)) return -EINVAL; - hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", + hw_dbg(hw, "changing MTU from %d to 
%d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - if (netif_running(netdev)) - ixgbevf_reinit_locked(adapter); + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, max_frame); return 0; } +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void ixgbevf_netpoll(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBEVF_DOWN, &adapter->state)) + return; + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -3438,6 +3702,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = ixgbevf_busy_poll_recv, #endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbevf_netpoll, +#endif }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) @@ -3465,6 +3732,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgbe_hw *hw = NULL; const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; int err, pci_using_dac; + bool disable_dev = false; err = pci_enable_device(pdev); if (err) @@ -3499,7 +3767,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(netdev, &pdev->dev); - pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; @@ -3588,16 +3855,28 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_register; + pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); ixgbevf_init_last_counter_stats(adapter); - /* print the MAC address */ - hw_dbg(hw, "%pM\n", netdev->dev_addr); + /* print the VF info */ + dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); + dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); - hw_dbg(hw, "MAC: %d\n", hw->mac.type); + switch (hw->mac.type) { + case ixgbe_mac_X550_vf: + dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); + break; + case ixgbe_mac_X540_vf: + dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); + break; + case ixgbe_mac_82599_vf: + default: + dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); + break; + } - hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); return 0; err_register: @@ -3606,12 +3885,13 @@ err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->io_addr); err_ioremap: + disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); err_pci_reg: err_dma: - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + if (!adapter || disable_dev) pci_disable_device(pdev); return err; } @@ -3628,7 +3908,13 @@ err_dma: static void ixgbevf_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_adapter *adapter; + bool disable_dev; + + if (!netdev) + return; + + adapter = netdev_priv(netdev); set_bit(__IXGBEVF_REMOVING, &adapter->state); @@ -3648,9 +3934,10 @@ static void ixgbevf_remove(struct pci_dev *pdev) 
hw_dbg(&adapter->hw, "Remove complete\n"); + disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); free_netdev(netdev); - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) + if (disable_dev) pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 9cddd56d02c3..cdb53be7d995 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -617,3 +617,13 @@ const struct ixgbevf_info ixgbevf_X540_vf_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_mac_ops, }; + +const struct ixgbevf_info ixgbevf_X550_vf_info = { + .mac = ixgbe_mac_X550_vf, + .mac_ops = &ixgbevf_mac_ops, +}; + +const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { + .mac = ixgbe_mac_X550EM_x_vf, + .mac_ops = &ixgbevf_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index aa8cc8dc25d1..5b172427f459 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -74,6 +74,8 @@ enum ixgbe_mac_type { ixgbe_mac_unknown = 0, ixgbe_mac_82599_vf, ixgbe_mac_X540_vf, + ixgbe_mac_X550_vf, + ixgbe_mac_X550EM_x_vf, ixgbe_num_macs }; |
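One idiom in the reworked ixgbevf_alloc_rx_buffers() above deserves a closing note: the refill loop biases its ring index by -count so the wrap test collapses to a cheap "if (!i)". A minimal standalone sketch of that pattern (illustrative only; the function name and demo harness below are not from the driver):

#include <stdio.h>

/* Walk a ring of 'count' slots starting at 'start' and refill 'n' of them.
 * The running index is biased into [-count, 0) so detecting the wrap point
 * is a simple !i test, mirroring ixgbevf_alloc_rx_buffers().
 */
static unsigned int refill_ring(unsigned int start, unsigned int count,
				unsigned int n)
{
	int i = (int)start - (int)count;	/* biased index */

	while (n--) {
		/* ... program the descriptor at slot (i + count) here ... */
		i++;
		if (!i)			/* reached the end of the ring */
			i -= count;	/* wrap back to slot 0 */
	}
	return i + count;		/* unbiased next_to_use */
}

int main(void)
{
	/* ring of 8 slots, refill 4 starting at slot 6 -> next_to_use is 2 */
	printf("%u\n", refill_ring(6, 8, 4));
	return 0;
}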