Diffstat (limited to 'drivers/net/ethernet/intel')
98 files changed, 5683 insertions, 2776 deletions
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index e9c3a87e5b11..1a450f4b6b12 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -414,7 +414,7 @@ enum cb_status { /** * cb_command - Command Block flags - * @cb_tx_nc: 0: controler does CRC (normal), 1: CRC from skb memory + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory */ enum cb_command { cb_nop = 0x0000, @@ -899,7 +899,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, /* Order is important otherwise we'll be in a race with h/w: * set S-bit in current first, then clear S-bit in previous. */ cb->command |= cpu_to_le16(cb_s); - wmb(); + dma_wmb(); cb->prev->command &= cpu_to_le16(~cb_s); while (nic->cb_to_send != nic->cb_to_use) { @@ -1843,7 +1843,7 @@ static int e100_tx_clean(struct nic *nic) for (cb = nic->cb_to_clean; cb->status & cpu_to_le16(cb_complete); cb = nic->cb_to_clean = cb->next) { - rmb(); /* read skb after status */ + dma_rmb(); /* read skb after status */ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, "cb[%d]->status = 0x%04X\n", (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), @@ -1993,7 +1993,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, "status=0x%04X\n", rfd_status); - rmb(); /* read size after status bit */ + dma_rmb(); /* read size after status bit */ /* If data isn't ready, nothing to indicate */ if (unlikely(!(rfd_status & cb_complete))) { diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 7f997d36948f..983eb4e6f7aa 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int cleaned_count); @@ -516,6 +521,7 @@ void e1000_down(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; u32 rctl, tctl; + netif_carrier_off(netdev); /* disable receives in the hardware */ rctl = er32(RCTL); @@ -544,7 +550,6 @@ void e1000_down(struct e1000_adapter *adapter) adapter->link_speed = 0; adapter->link_duplex = 0; - netif_carrier_off(netdev); e1000_reset(adapter); e1000_clean_all_tx_rings(adapter); @@ -1111,7 +1116,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (e1000_read_mac_addr(hw)) e_err(probe, "EEPROM Read Error\n"); } - /* don't block initalization here due to bad MAC address */ + /* don't block initialization here due to bad MAC address */ memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->dev_addr)) @@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) msleep(1); /* e1000_down has a dependency on max_frame_size */ hw->max_frame_size = max_frame; - if (netif_running(netdev)) + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; e1000_down(adapter); + } /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us 
to allocate from the next @@ -3848,7 +3856,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ + dma_rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -4146,7 +4154,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; @@ -4367,7 +4375,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; length = le16_to_cpu(rx_desc->length); diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index bb7ab3c321d6..0570c668ec3d 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -141,6 +141,7 @@ #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ #define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 9416e5a7e0c8..5d9ceb17b4cb 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -132,6 +132,7 @@ enum e1000_boards { board_pchlan, board_pch2lan, board_pch_lpt, + board_pch_spt }; struct e1000_ps_page { @@ -342,6 +343,7 @@ struct e1000_adapter { struct timecounter tc; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; + struct pm_qos_request pm_qos_req; u16 eee_advert; }; @@ -501,6 +503,7 @@ extern const struct e1000_info e1000_ich10_info; extern const struct e1000_info e1000_pch_info; extern const struct e1000_info e1000_pch2_info; extern const struct e1000_info e1000_pch_lpt_info; +extern const struct e1000_info e1000_pch_spt_info; extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 865ce45f9ec3..11f486e4ff7b 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: mask |= (1 << 18); break; default: break; } - if (mac->type == e1000_pch_lpt) + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> E1000_FWSM_WLOCK_MAC_SHIFT; for (i = 0; i < mac->rar_entry_count; i++) { - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || + (mac->type == e1000_pch_spt)) { /* Cannot test write-protected SHRAL[n] registers */ if ((wlock_mac == 1) || 
(wlock_mac && (i > wlock_mac))) continue; diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 72f5475c4b90..19e8c487db06 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -87,6 +87,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_I218_V2 0x15A1 #define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ #define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ #define E1000_REVISION_4 4 @@ -108,6 +112,7 @@ enum e1000_mac_type { e1000_pchlan, e1000_pch2lan, e1000_pch_lpt, + e1000_pch_spt, }; enum e1000_media_type { @@ -153,6 +158,7 @@ enum e1000_bus_width { e1000_bus_width_pcie_x1, e1000_bus_width_pcie_x2, e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, e1000_bus_width_32, e1000_bus_width_64, e1000_bus_width_reserved diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 48b74a549155..9d81c0317433 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data); +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data); +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 *data); +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, + u32 offset, u32 data); +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword); static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); @@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (ret_val) return false; out: - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Unforce SMBus mode in PHY */ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; @@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) /* fall-through */ case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@ -590,35 +601,54 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; - - /* Can't read flash registers if the register set isn't mapped. */ - if (!hw->flash_address) { - e_dbg("ERROR: Flash registers not mapped\n"); - return -E1000_ERR_CONFIG; - } + u32 nvm_size; nvm->type = e1000_nvm_flash_sw; - gfpreg = er32flash(ICH_FLASH_GFPREG); + if (hw->mac.type == e1000_pch_spt) { + /* in SPT, gfpreg doesn't exist. NVM size is taken from the + * STRAP register. 
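An aside on the rmb()/wmb() to dma_rmb()/dma_wmb() conversions in the e100 and e1000 hunks above: the dma_* barriers order CPU accesses against coherent DMA memory (the descriptor rings) only, not against MMIO, which is all these paths need, so they are cheaper than full rmb()/wmb() on architectures that distinguish the two. A minimal sketch of the ring pattern they protect; DD_BIT, S_BIT, desc, and process() are placeholders, not the drivers' real names:

    /* Consumer: the device DMA-writes the status word last. Check
     * ownership first, then order all later descriptor/buffer reads
     * after that check.
     */
    if (desc->status & cpu_to_le16(DD_BIT)) {
            dma_rmb();                 /* read rest of desc after status DD */
            len = le16_to_cpu(desc->length);
            process(rx_buf[i], len);
    }

    /* Producer: publish descriptor contents before flipping the bit
     * the hardware polls -- set S-bit in current first, then clear
     * S-bit in previous, exactly as the e100 comment says.
     */
    curr->command |= cpu_to_le16(S_BIT);
    dma_wmb();
    prev->command &= cpu_to_le16(~S_BIT);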
This is because in SPT the GbE Flash region + * is no longer accessed through the flash registers. Instead, + * the mechanism has changed, and the Flash region access + * registers are now implemented in GbE memory space. + */ + nvm->flash_base_addr = 0; + nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; + nvm->flash_bank_size = nvm_size / 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + /* Set the base address for flash register access */ + hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; + } else { + /* Can't read flash registers if register set isn't mapped. */ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } - /* sector_X_addr is a "sector"-aligned address (4096 bytes) - * Add 1 to sector_end_addr since this sector is included in - * the overall size. - */ - sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; - sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + gfpreg = er32flash(ICH_FLASH_GFPREG); - /* flash_base_addr is byte-aligned */ - nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; - /* find total size of the NVM, then cut in half since the total - * size represents two separate NVM banks. - */ - nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT); - nvm->flash_bank_size /= 2; - /* Adjust to word count */ - nvm->flash_bank_size /= sizeof(u16); + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr + << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + } nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; @@ -682,6 +712,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) mac->ops.rar_set = e1000_rar_set_pch2lan; /* fall-through */ case e1000_pch_lpt: + case e1000_pch_spt: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -699,7 +730,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) break; } - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = @@ -919,8 +950,9 @@ release: /* clear FEXTNVM6 bit 8 on link down or 10/100 */ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; - if (!link || ((status & E1000_STATUS_SPEED_100) && - (status & E1000_STATUS_FD))) + if ((hw->phy.revision > 5) || !link || + ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) goto update_fextnvm6; ret_val = e1e_rphy(hw, I217_INBAND_CTRL, ®); @@ -1100,6 +1132,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; + /* Si workaround for ULP entry flow on i127/rev6 h/w. 
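Putting numbers on the STRAP-based sizing in the e1000_init_nvm_params_ich8lan hunk above (a worked example; the strap value 7 is illustrative, not from the patch):

    u32 nvms     = (strap >> 1) & 0x1F;   /* NVMS field, e.g. 7            */
    u32 nvm_size = (nvms + 1) * 4096;     /* NVM_SIZE_MULTIPLIER units:    */
                                          /* 8 * 4 KiB = 32768 bytes       */
    u32 bank_b   = nvm_size / 2;          /* two NVM banks -> 16384 bytes  */
    u32 bank_w   = bank_b / sizeof(u16);  /* 8192 words per bank           */

The 4096 multiplier and the halving into two banks match NVM_SIZE_MULTIPLIER and the flash_bank_size computation added by the patch.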
Enable + * LPLU and disable Gig speed when entering ULP + */ + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, + &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + phy_reg); + if (ret_val) + goto release; + } + /* Force SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) @@ -1302,7 +1349,8 @@ out: static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; bool link; u16 phy_reg; @@ -1333,48 +1381,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * the IPG and reduce Rx latency in the PHY. */ if (((hw->mac.type == e1000_pch2lan) || - (hw->mac.type == e1000_pch_lpt)) && link) { + (hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && link) { u32 reg; reg = er32(STATUS); + tipg_reg = er32(TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { - u16 emi_addr; + tipg_reg |= 0xFF; + /* Reduce Rx latency in analog PHY */ + emi_val = 0; + } else { - reg = er32(TIPG); - reg &= ~E1000_TIPG_IPGT_MASK; - reg |= 0xFF; - ew32(TIPG, reg); + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } - /* Reduce Rx latency in analog PHY */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; + ew32(TIPG, tipg_reg); - if (hw->mac.type == e1000_pch2lan) - emi_addr = I82579_RX_CONFIG; - else - emi_addr = I217_RX_CONFIG; + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; - ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); - hw->phy.ops.release(hw); + hw->phy.ops.release(hw); - if (ret_val) - return ret_val; - } + if (ret_val) + return ret_val; } /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; } - - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) */ @@ -1386,6 +1441,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; + /* FEXTNVM6 K1-off workaround */ + if (hw->mac.type == e1000_pch_spt) { + u32 pcieanacfg = er32(PCIEANACFG); + u32 fextnvm6 = er32(FEXTNVM6); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + + ew32(FEXTNVM6, fextnvm6); + } + if (!link) return 0; /* No link detected */ @@ -1479,6 +1547,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: rc = e1000_init_phy_params_pchlan(hw); break; default: 
@@ -1929,6 +1998,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -2961,6 +3031,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) s32 ret_val; switch (hw->mac.type) { + /* In SPT, read from the CTRL_EXT reg instead of + * accessing the sector valid bits from the nvm + */ + case e1000_pch_spt: + *bank = er32(CTRL_EXT) + & E1000_CTRL_EXT_NVMVS; + if ((*bank == 0) || (*bank == 1)) { + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } else { + *bank = *bank - 2; + return 0; + } + break; case e1000_ich8lan: case e1000_ich9lan: eecd = er32(EECD); @@ -3008,6 +3092,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) } /** + * e1000_read_nvm_spt - NVM access for SPT + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words. + * @data: pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM + **/ +static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u32 dword = 0; + u16 offset_to_read; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + + for (i = 0; i < words; i += 2) { + if (words - i == 1) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = + dev_spec->shadow_ram[offset + i].value; + } else { + offset_to_read = act_offset + i - + ((act_offset + i) % 2); + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + if ((act_offset + i) % 2 == 0) + data[i] = (u16)(dword & 0xFFFF); + else + data[i] = (u16)((dword >> 16) & 0xFFFF); + } + } else { + offset_to_read = act_offset + i; + if (!(dev_spec->shadow_ram[offset + i].modified) || + !(dev_spec->shadow_ram[offset + i + 1].modified)) { + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + } + if (dev_spec->shadow_ram[offset + i].modified) + data[i] = + dev_spec->shadow_ram[offset + i].value; + else + data[i] = (u16)(dword & 0xFFFF); + if (dev_spec->shadow_ram[offset + i].modified) + data[i + 1] = + dev_spec->shadow_ram[offset + i + 1].value; + else + data[i + 1] = (u16)(dword >> 16 & 0xFFFF); + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** * e1000_read_nvm_ich8lan - Read word(s) from the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to read. 
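The "*bank = *bank - 2" in the SPT branch of e1000_valid_nvm_bank_detect_ich8lan above is the entire decode: E1000_CTRL_EXT_NVMVS masks a two-bit field (0x3, per the ich8lan.h hunk further down), giving the mapping implied by the code:

    /* CTRL_EXT.NVMVS   meaning               result
     *      00b         no valid NVM bank     -E1000_ERR_NVM
     *      01b         no valid NVM bank     -E1000_ERR_NVM
     *      10b         bank 0 is valid       *bank = 0
     *      11b         bank 1 is valid       *bank = 1
     */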
@@ -3090,8 +3267,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; - - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); /* Either we should have a hardware SPI cycle in progress * bit to check against, in order to start a new cycle or @@ -3107,7 +3286,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = 0; } else { s32 i; @@ -3128,7 +3310,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) * now set the Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); } else { e_dbg("Flash controller busy, cannot get access\n"); } @@ -3151,9 +3337,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) u32 i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* wait till FDONE bit is set to 1 */ do { @@ -3170,6 +3363,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) } /** + * e1000_read_flash_dword_ich8lan - Read dword from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash dword at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + /* Must convert word offset into bytes. */ + offset <<= 1; + return e1000_read_flash_data32_ich8lan(hw, offset, data); +} + +/** * e1000_read_flash_word_ich8lan - Read word from flash * @hw: pointer to the HW structure * @offset: offset to data location @@ -3201,7 +3411,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u16 word = 0; - ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + /* In SPT, only 32 bits access is supported, + * so this function should not be called. + */ + if (hw->mac.type == e1000_pch_spt) + return -E1000_ERR_NVM; + else + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) return ret_val; @@ -3287,6 +3504,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, } /** + * e1000_read_flash_data32_ich8lan - Read dword from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the dword to read. + * @data: Pointer to the dword to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. 
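A note on the er32flash(ICH_FLASH_HSFSTS) >> 16 pattern recurring in these SPT paths: with the flash registers remapped into GbE memory space, the 16-bit HSFSTS and HSFCTL registers are accessed as a single 32-bit word at the HSFSTS offset, HSFSTS in bits 15:0 and HSFCTL in bits 31:16, and only 32-bit accesses are supported. Schematically:

    hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;     /* HSFCTL half */
    hsflctl.hsf_ctrl.flcgo = 1;                             /* modify      */
    ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); /* write back  */

Writing zeros to the HSFSTS half on the way back is presumably harmless because the status bits of interest (FDONE, FCERR, AEL) are write-one-to-clear; the separate "hsfsts.regval & 0xFFFF" writes are the ones that actually clear them.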
+ **/ + +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || + hw->mac.type != e1000_pch_spt) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + *data = er32flash(ICH_FLASH_FDATA0); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** * e1000_write_nvm_ich8lan - Write word(s) to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to write. @@ -3321,7 +3614,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, } /** - * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * e1000_update_nvm_checksum_spt - Update the checksum for NVM * @hw: pointer to the HW structure * * The NVM checksum is updated by calling the generic update_nvm_checksum, @@ -3331,13 +3624,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, * After a successful commit, the shadow ram is cleared and is ready for * future writes. 
**/ -static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; - u16 data; + u32 dword = 0; ret_val = e1000e_update_nvm_checksum_generic(hw); if (ret_val) @@ -3371,12 +3664,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) goto release; } - - for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { /* Determine whether to write the value stored * in the other NVM bank or a modified value stored * in the shadow RAM */ + ret_val = e1000_read_flash_dword_ich8lan(hw, + i + old_bank_offset, + &dword); + + if (dev_spec->shadow_ram[i].modified) { + dword &= 0xffff0000; + dword |= (dev_spec->shadow_ram[i].value & 0xffff); + } + if (dev_spec->shadow_ram[i + 1].modified) { + dword &= 0x0000ffff; + dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) + << 16); + } + if (ret_val) + break; + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD - 1) + dword |= E1000_ICH_NVM_SIG_MASK << 16; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + + /* Write the data to the new bank. Offset in words */ + act_offset = i + new_bank_offset; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, + dword); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + + /*offset in words but we read dword */ + --act_offset; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0xBFFFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + /* offset in words but we read dword */ + act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0x00FFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. 
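The dword commit loop in e1000_update_nvm_checksum_spt above splices modified shadow-RAM words into the dword read back from the old bank, two 16-bit words per flash write. Equivalently (a sketch, with shadow[] standing in for dev_spec->shadow_ram[]):

    dword = old_bank_dword;
    if (shadow[i].modified)                 /* low word:  bits 15:0  */
            dword = (dword & 0xffff0000) | shadow[i].value;
    if (shadow[i + 1].modified)             /* high word: bits 31:16 */
            dword = (dword & 0x0000ffff) | ((u32)shadow[i + 1].value << 16);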
*/ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data = 0; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { if (dev_spec->shadow_ram[i].modified) { data = dev_spec->shadow_ram[i].value; } else { @@ -3498,6 +3954,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@ -3583,9 +4040,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, s32 ret_val; u8 count = 0; - if (size < 1 || size > 2 || data > size * 0xff || - offset > ICH_FLASH_LINEAR_ADDR_MASK) - return -E1000_ERR_NVM; + if (hw->mac.type == e1000_pch_spt) { + if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } else { + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); @@ -3596,12 +4057,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) break; + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
*/ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); ew32flash(ICH_FLASH_FADDR, flash_linear_addr); @@ -3640,6 +4114,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, } /** +* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM +* @hw: pointer to the HW structure +* @offset: The offset (in bytes) of the dwords to read. +* @data: The 4 bytes to write to the NVM. +* +* Writes one/two/four bytes to the NVM using the flash access registers. +**/ +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val; + u8 count = 0; + + if (hw->mac.type == e1000_pch_spt) { + if (offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) + >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ew32flash(ICH_FLASH_FDATA0, data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** * e1000_write_flash_byte_ich8lan - Write a single byte to NVM * @hw: pointer to the HW structure * @offset: The index of the byte to read. @@ -3656,6 +4214,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, } /** +* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM +* @hw: pointer to the HW structure +* @offset: The offset of the word to write. +* @dword: The dword to write to the NVM. +* +* Writes a single dword to the NVM using the flash access registers. +* Goes through a retry algorithm before giving up. 
+**/ +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword) +{ + s32 ret_val; + u16 program_retries; + + /* Must convert word offset into bytes. */ + offset <<= 1; + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + + if (!ret_val) + return ret_val; + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM * @hw: pointer to the HW structure * @offset: The offset of the byte to write. @@ -3759,9 +4351,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ - hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = + er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; - ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); /* Write the last 24 bits of an index within the * block into Flash Linear address field in Flash @@ -4180,7 +4781,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) ew32(RFCTL, reg); /* Enable ECC on Lynxpoint */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { reg = er32(PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; ew32(PBECCSTS, reg); @@ -4583,7 +5185,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (device_id == E1000_DEV_ID_PCH_I218_LM3) || - (device_id == E1000_DEV_ID_PCH_I218_V3)) { + (device_id == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { u32 fextnvm6 = er32(FEXTNVM6); ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); @@ -5058,6 +5661,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = { .write = e1000_write_nvm_ich8lan, }; +static const struct e1000_nvm_operations spt_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .read = e1000_read_nvm_spt, + .update = e1000_update_nvm_checksum_spt, + .reload = e1000e_reload_nvm_generic, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + const struct e1000_info e1000_ich8_info = { .mac = e1000_ich8lan, .flags = FLAG_HAS_WOL @@ -5166,3 +5780,23 @@ const struct e1000_info e1000_pch_lpt_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &ich8_nvm_ops, }; + +const struct e1000_info e1000_pch_spt_info = { + .mac = e1000_pch_spt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9018, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h 
b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 8066a498eaac..770a573b9eea 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -95,9 +95,18 @@ #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read */ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ +#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ +#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1e8c40fd5c3d..c509a5c900f5 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_pchlan] = &e1000_pch_info, [board_pch2lan] = &e1000_pch2_info, [board_pch_lpt] = &e1000_pch_lpt_info, + [board_pch_spt] = &e1000_pch_spt_info, }; struct e1000_reg_info { @@ -946,7 +947,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, if (*work_done >= work_to_do) break; (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ skb = buffer_info->skb; buffer_info->skb = NULL; @@ -1231,7 +1232,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) (count < tx_ring->count)) { bool cleaned = false; - rmb(); /* read buffer_info after eop_desc */ + dma_rmb(); /* read buffer_info after eop_desc */ for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1331,7 +1332,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, break; (*work_done)++; skb = buffer_info->skb; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ /* in the packet split case this is header only */ prefetch(skb->data - NET_IP_ALIGN); @@ -1535,7 +1536,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, if (*work_done >= work_to_do) break; (*work_done)++; - rmb(); /* read descriptor and rx_buffer_info after status DD */ + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ skb = buffer_info->skb; buffer_info->skb = NULL; @@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data) } /* Reset on uncorrectable ECC error */ - if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) if (adapter->msix_entries) { ew32(EIAC_82574, 
adapter->eiac_mask & E1000_EIAC_MASK_82574); ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); - } else if (hw->mac.type == e1000_pch_lpt) { + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); } else { ew32(IMS, IMS_ENABLE_MASK); @@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(TCTL, tctl); hw->mac.ops.config_collision_dist(hw); + + /* SPT Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { + u32 reg_val; + + reg_val = er32(IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + ew32(IOSFPC, reg_val); + + reg_val = er32(TARC(0)); + reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + ew32(TARC(0), reg_val); + } } /** @@ -3280,9 +3297,9 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ew32(RXDCTL(0), rxdctl | 0x3); } - pm_qos_update_request(&adapter->netdev->pm_qos_req, lat); + pm_qos_update_request(&adapter->pm_qos_req, lat); } else { - pm_qos_update_request(&adapter->netdev->pm_qos_req, + pm_qos_update_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE); } @@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) struct e1000_hw *hw = &adapter->hw; u32 incvalue, incperiod, shift; - /* Make sure clock is enabled on I217 before checking the frequency */ - if ((hw->mac.type == e1000_pch_lpt) && + /* Make sure clock is enabled on I217/I218/I219 before checking + * the frequency + */ + if (((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); @@ -3505,10 +3525,13 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - /* On I217, the clock frequency is 25MHz or 96MHz as - * indicated by the System Clock Frequency Indication + case e1000_pch_spt: + /* On I217, I218 and I219, the clock frequency is 25MHz + * or 96MHz as indicated by the System Clock Frequency + * Indication */ - if ((hw->mac.type != e1000_pch_lpt) || + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHz; @@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter) break; case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: fc->refresh_time = 0x0400; if (adapter->netdev->mtu <= ETH_DATA_LEN) { @@ -4060,6 +4084,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) */ set_bit(__E1000_DOWN, &adapter->state); + netif_carrier_off(netdev); + /* disable receives in the hardware */ rctl = er32(RCTL); if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) @@ -4084,8 +4110,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netif_carrier_off(netdev); - spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); spin_unlock(&adapter->stats64_lock); @@ -4379,7 +4403,7 @@ static int e1000_open(struct net_device *netdev) e1000_update_mng_vlan(adapter); /* DMA latency requirement to workaround jumbo issue */ - pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); /* before we allocate an interrupt, we must be ready to handle it. 
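On the pm_qos changes in the netdev.c hunks above: the DMA-latency request object moves out of struct net_device (netdev->pm_qos_req) into the driver's own adapter structure; the API only needs a caller-owned struct pm_qos_request, so nothing else changes. A minimal lifecycle sketch with a hypothetical my_adapter, using the period-correct API (later kernels renamed this machinery to cpu_latency_qos_*):

    #include <linux/pm_qos.h>

    struct my_adapter {
            struct pm_qos_request pm_qos_req;   /* caller-owned, as in the patch */
    };

    /* open: register with no constraint */
    pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
                       PM_QOS_DEFAULT_VALUE);

    /* runtime: bound the allowed CPU DMA latency (in usec), or relax it */
    pm_qos_update_request(&adapter->pm_qos_req, 55);
    pm_qos_update_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE);

    /* close: drop the request */
    pm_qos_remove_request(&adapter->pm_qos_req);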
@@ -4490,7 +4514,7 @@ static int e1000_close(struct net_device *netdev) !test_bit(__E1000_TESTING, &adapter->state)) e1000e_release_hw_control(adapter); - pm_qos_remove_request(&adapter->netdev->pm_qos_req); + pm_qos_remove_request(&adapter->pm_qos_req); pm_runtime_put_sync(&pdev->dev); @@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter) adapter->stats.mgpdc += er32(MGTPDC); /* Correctable ECC Errors */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { u32 pbeccsts = er32(PBECCSTS); adapter->corr_errors += @@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); - } else if (hw->mac.type == e1000_pch_lpt) { + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) /* ULP does not support wake from unicast, multicast * or broadcast. @@ -6807,7 +6833,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_ioremap; if ((adapter->flags & FLAG_HAS_FLASH) && - (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) && + (hw->mac.type < e1000_pch_spt)) { flash_start = pci_resource_start(pdev, 1); flash_len = pci_resource_len(pdev, 1); adapter->hw.flash_address = ioremap(flash_start, flash_len); @@ -6847,7 +6874,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_hw_init; if ((adapter->flags & FLAG_IS_ICH) && - (adapter->flags & FLAG_READ_ONLY_NVM)) + (adapter->flags & FLAG_READ_ONLY_NVM) && + (hw->mac.type < e1000_pch_spt)) e1000e_write_protect_nvm_ich8lan(&adapter->hw); hw->mac.ops.get_bus_info(&adapter->hw); @@ -7043,7 +7071,7 @@ err_hw_init: kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: - if (adapter->hw.flash_address) + if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); e1000e_reset_interrupt_capability(adapter); err_flashmap: @@ -7116,7 +7144,8 @@ static void e1000_remove(struct pci_dev *pdev) kfree(adapter->rx_ring); iounmap(adapter->hw.hw_addr); - if (adapter->hw.flash_address) + if ((adapter->hw.flash_address) && + (adapter->hw.mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); @@ -7213,6 +7242,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 978ef9c4a043..8d7b21dc7e19 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -106,20 +106,18 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) * Read the timecounter and return the correct value in ns after converting * it into a struct timespec. 
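Context for the ptp.c hunk below: this is the y2038-safe conversion of the PHC callbacks from 32-bit struct timespec to struct timespec64 (the gettime64/settime64 members of struct ptp_clock_info). A minimal gettime64 under those kernel APIs; my_clock and read_hw_ns() are hypothetical:

    static int my_phc_gettime(struct ptp_clock_info *ptp,
                              struct timespec64 *ts)
    {
            struct my_clock *clk = container_of(ptp, struct my_clock, info);
            u64 ns = read_hw_ns(clk);       /* hypothetical counter read */

            *ts = ns_to_timespec64(ns);     /* replaces div_u64_rem()    */
            return 0;
    }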
**/ -static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; - u32 remainder; u64 ns; spin_lock_irqsave(&adapter->systim_lock, flags); ns = timecounter_read(&adapter->tc); spin_unlock_irqrestore(&adapter->systim_lock, flags); - ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -133,14 +131,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) * wall timer value. **/ static int e1000e_phc_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; u64 ns; - ns = timespec_to_ns(ts); + ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->systim_lock, flags); @@ -171,11 +169,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work) struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, systim_overflow_work.work); struct e1000_hw *hw = &adapter->hw; - struct timespec ts; + struct timespec64 ts; - adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts); + adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts); - e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + e_dbg("SYSTIM overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); schedule_delayed_work(&adapter->systim_overflow_work, E1000_SYSTIM_OVERFLOW_PERIOD); @@ -190,8 +189,8 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = { .pps = 0, .adjfreq = e1000e_phc_adjfreq, .adjtime = e1000e_phc_adjtime, - .gettime = e1000e_phc_gettime, - .settime = e1000e_phc_settime, + .gettime64 = e1000e_phc_gettime, + .settime64 = e1000e_phc_settime, .enable = e1000e_phc_enable, }; @@ -221,7 +220,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - if ((hw->mac.type != e1000_pch_lpt) || + case e1000_pch_spt: + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; break; diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index ea235bbe50d3..85eefc4832ba 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -38,6 +38,7 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ @@ -67,6 +68,7 @@ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_IOSFPC 0x00F28 /* TX corrupted data */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ @@ -121,6 +123,7 @@ (0x054E4 + ((_i - 16) * 
8))) #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 42eb4344a9dc..c8c8c5baefda 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -235,6 +235,9 @@ struct fm10k_vxlan_port { __be16 port; }; +/* one work queue for entire driver */ +extern struct workqueue_struct *fm10k_workqueue; + struct fm10k_intfc { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct net_device *netdev; @@ -266,7 +269,6 @@ struct fm10k_intfc { u64 tx_csum_errors; u64 alloc_failed; u64 rx_csum_errors; - u64 rx_errors; u64 tx_bytes_nic; u64 tx_packets_nic; @@ -439,6 +441,7 @@ extern char fm10k_driver_name[]; extern const char fm10k_driver_version[]; int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); +__be16 fm10k_tx_encap_offload(struct sk_buff *skb); netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring); void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); @@ -457,6 +460,9 @@ void fm10k_down(struct fm10k_intfc *interface); void fm10k_update_stats(struct fm10k_intfc *interface); void fm10k_service_event_schedule(struct fm10k_intfc *interface); void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); +#ifdef CONFIG_NET_POLL_CONTROLLER +void fm10k_netpoll(struct net_device *netdev); +#endif /* Netdev */ struct net_device *fm10k_alloc_netdev(void); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index bf19dccd4288..6cfae6ac04ea 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -398,7 +398,7 @@ static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw, /* Retrieve RX Owner Data */ id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx)); - /* Process RX Ring*/ + /* Process RX Ring */ do { rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx), &q->rx_drops); @@ -466,7 +466,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q, * Function invalidates the index values for the queues so any updates that * may have happened are ignored and the base for the queue stats is reset. **/ - void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count) { u32 i; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 212a92dad222..5c7a4d7662d8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -128,7 +128,7 @@ static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) * * Returns that we support only IEEE DCB for this interface **/ -static u8 fm10k_dcbnl_getdcbx(struct net_device *dev) +static u8 fm10k_dcbnl_getdcbx(struct net_device __always_unused *dev) { return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; } @@ -140,7 +140,7 @@ static u8 fm10k_dcbnl_getdcbx(struct net_device *dev) * * Returns error on attempt to enable anything but IEEE DCB for this interface **/ -static u8 fm10k_dcbnl_setdcbx(struct net_device *dev, u8 mode) +static u8 fm10k_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode) { return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 4327f86218b9..f45b4d71adb8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,14 +36,16 @@ static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos) return (*pos < ring->count) ? pos : NULL; } -static void *fm10k_dbg_desc_seq_next(struct seq_file *s, void *v, loff_t *pos) +static void *fm10k_dbg_desc_seq_next(struct seq_file *s, + void __always_unused *v, loff_t *pos) { struct fm10k_ring *ring = s->private; return (++(*pos) < ring->count) ? pos : NULL; } -static void fm10k_dbg_desc_seq_stop(struct seq_file *s, void *v) +static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s, + __always_unused void *v) { /* Do nothing. */ } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 651f53bc7376..4b9d9f88af70 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -57,13 +57,12 @@ static const struct fm10k_stats fm10k_gstrings_net_stats[] = { .stat_offset = offsetof(struct fm10k_intfc, _stat) \ } -static const struct fm10k_stats fm10k_gstrings_stats[] = { +static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("tx_restart_queue", restart_queue), FM10K_STAT("tx_busy", tx_busy), FM10K_STAT("tx_csum_errors", tx_csum_errors), FM10K_STAT("rx_alloc_failed", alloc_failed), FM10K_STAT("rx_csum_errors", rx_csum_errors), - FM10K_STAT("rx_errors", rx_errors), FM10K_STAT("tx_packets_nic", tx_packets_nic), FM10K_STAT("tx_bytes_nic", tx_bytes_nic), @@ -73,38 +72,42 @@ static const struct fm10k_stats fm10k_gstrings_stats[] = { FM10K_STAT("rx_overrun_pf", rx_overrun_pf), FM10K_STAT("rx_overrun_vf", rx_overrun_vf), - FM10K_STAT("timeout", stats.timeout.count), - FM10K_STAT("ur", stats.ur.count), - FM10K_STAT("ca", stats.ca.count), - FM10K_STAT("um", stats.um.count), - FM10K_STAT("xec", stats.xec.count), - FM10K_STAT("vlan_drop", stats.vlan_drop.count), - FM10K_STAT("loopback_drop", stats.loopback_drop.count), - FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), - FM10K_STAT("swapi_status", hw.swapi.status), FM10K_STAT("mac_rules_used", hw.swapi.mac.used), FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), FM10K_STAT("mbx_tx_busy", hw.mbx.tx_busy), - FM10K_STAT("mbx_tx_dropped", hw.mbx.tx_dropped), + FM10K_STAT("mbx_tx_oversized", hw.mbx.tx_dropped), FM10K_STAT("mbx_tx_messages", hw.mbx.tx_messages), FM10K_STAT("mbx_tx_dwords", hw.mbx.tx_dwords), FM10K_STAT("mbx_rx_messages", hw.mbx.rx_messages), FM10K_STAT("mbx_rx_dwords", hw.mbx.rx_dwords), FM10K_STAT("mbx_rx_parse_err", hw.mbx.rx_parse_err), + FM10K_STAT("tx_hang_count", tx_timeout_count), + FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), }; -#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_stats) +static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { + FM10K_STAT("timeout", stats.timeout.count), + FM10K_STAT("ur", stats.ur.count), + FM10K_STAT("ca", stats.ca.count), + FM10K_STAT("um", stats.um.count), + FM10K_STAT("xec", stats.xec.count), + FM10K_STAT("vlan_drop", stats.vlan_drop.count), + FM10K_STAT("loopback_drop", stats.loopback_drop.count), + FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), +}; + +#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) +#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats) -#define FM10K_QUEUE_STATS_LEN \ - (MAX_QUEUES * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) +#define FM10K_QUEUE_STATS_LEN(_n) \ + ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) -#define FM10K_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ - FM10K_NETDEV_STATS_LEN + \ - FM10K_QUEUE_STATS_LEN) +#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ + FM10K_NETDEV_STATS_LEN) static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = { "Mailbox test (on/offline)" @@ -117,9 +120,9 @@ enum fm10k_self_test_types { FM10K_TEST_MAX = FM10K_TEST_LEN }; -static void fm10k_get_strings(struct net_device *dev, u32 stringset, - u8 *data) +static void fm10k_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct fm10k_intfc *interface = netdev_priv(dev); char *p = (char *)data; int i; @@ -135,12 +138,19 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, p += ETH_GSTRING_LEN; } for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - memcpy(p, 
fm10k_gstrings_stats[i].stat_string, + memcpy(p, fm10k_gstrings_global_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < MAX_QUEUES; i++) { + if (interface->hw.mac.type != fm10k_mac_vf) + for (i = 0; i < FM10K_PF_STATS_LEN; i++) { + memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < interface->hw.mac.max_queues; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "tx_queue_%u_bytes", i); @@ -156,18 +166,28 @@ static void fm10k_get_strings(struct net_device *dev, u32 stringset, static int fm10k_get_sset_count(struct net_device *dev, int sset) { + struct fm10k_intfc *interface = netdev_priv(dev); + struct fm10k_hw *hw = &interface->hw; + int stats_len = FM10K_STATIC_STATS_LEN; + switch (sset) { case ETH_SS_TEST: return FM10K_TEST_LEN; case ETH_SS_STATS: - return FM10K_STATS_LEN; + stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues); + + if (hw->mac.type != fm10k_mac_vf) + stats_len += FM10K_PF_STATS_LEN; + + return stats_len; default: return -EOPNOTSUPP; } } static void fm10k_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats __always_unused *stats, + u64 *data) { const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64); struct fm10k_intfc *interface = netdev_priv(netdev); @@ -184,12 +204,21 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, } for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - p = (char *)interface + fm10k_gstrings_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_stats[i].sizeof_stat == + p = (char *)interface + + fm10k_gstrings_global_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (i = 0; i < MAX_QUEUES; i++) { + if (interface->hw.mac.type != fm10k_mac_vf) + for (i = 0; i < FM10K_PF_STATS_LEN; i++) { + p = (char *)interface + + fm10k_gstrings_pf_stats[i].stat_offset; + *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (i = 0; i < interface->hw.mac.max_queues; i++) { struct fm10k_ring *ring; u64 *queue_stat; @@ -369,7 +398,7 @@ static void fm10k_get_drvinfo(struct net_device *dev, strncpy(info->bus_info, pci_name(interface->pdev), sizeof(info->bus_info) - 1); - info->n_stats = FM10K_STATS_LEN; + info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS); info->regdump_len = fm10k_get_regs_len(dev); } @@ -645,7 +674,7 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface, } static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, - u32 *rule_locs) + u32 __always_unused *rule_locs) { struct fm10k_intfc *interface = netdev_priv(dev); int ret = -EOPNOTSUPP; @@ -851,7 +880,7 @@ static void fm10k_self_test(struct net_device *dev, eth_test->flags |= ETH_TEST_FL_FAILED; } -static u32 fm10k_get_reta_size(struct net_device *netdev) +static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; } @@ -911,7 +940,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) return 0; } -static u32 fm10k_get_rssrk_size(struct net_device *netdev) +static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev) { return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG; } @@ -1019,7 +1048,7 @@ static int fm10k_set_channels(struct net_device *dev, } static int fm10k_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct ethtool_ts_info *info) { struct fm10k_intfc *interface = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 060190864238..5b08e6284a3c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -47,7 +47,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) { struct fm10k_hw *hw = &interface->hw; struct fm10k_iov_data *iov_data; - s64 mbicr, vflre; + s64 vflre; int i; /* if there is no iov_data then there is no mailboxes to process */ @@ -63,7 +63,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) goto read_unlock; if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR)) - goto process_mbx; + goto read_unlock; /* read VFLRE to determine if any VFs have been reset */ do { @@ -86,32 +86,6 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) } } while (i != iov_data->num_vfs); -process_mbx: - /* read MBICR to determine which VFs require attention */ - mbicr = fm10k_read_reg(hw, FM10K_MBICR(1)); - mbicr <<= 32; - mbicr |= fm10k_read_reg(hw, FM10K_MBICR(0)); - - i = iov_data->next_vf_mbx ? 
: iov_data->num_vfs; - - for (mbicr <<= 64 - i; i--; mbicr += mbicr) { - struct fm10k_mbx_info *mbx = &iov_data->vf_info[i].mbx; - - if (mbicr >= 0) - continue; - - if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) - break; - - mbx->ops.process(hw, mbx); - } - - if (i >= 0) { - iov_data->next_vf_mbx = i + 1; - } else if (iov_data->next_vf_mbx) { - iov_data->next_vf_mbx = 0; - goto process_mbx; - } read_unlock: rcu_read_unlock(); @@ -139,6 +113,13 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface) /* lock the mailbox for transmit and receive */ fm10k_mbx_lock(interface); + /* Most VF messages sent to the PF cause the PF to respond by + * requesting from the SM mailbox. This means that too many VF + * messages processed at once could cause a mailbox timeout on the PF. + * To prevent this, store a pointer to the next VF mbx to process. Use + * that as the start of the loop so that we don't starve whichever VF + * got ignored on the previous run. + */ process_mbx: for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) { struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; @@ -155,10 +136,6 @@ process_mbx: mbx->ops.connect(hw, mbx); } - /* no work pending, then just continue */ - if (mbx->ops.tx_complete(mbx) && !mbx->ops.rx_ready(mbx)) - continue; - /* guarantee we have free space in the SM mailbox */ if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) break; @@ -167,6 +144,10 @@ process_mbx: mbx->ops.process(hw, mbx); } + /* if we stopped processing mailboxes early, update next_vf_mbx. + * Otherwise, reset next_vf_mbx, and restart loop so that we process + * the remaining mailboxes we skipped at the start. + */ if (i >= 0) { iov_data->next_vf_mbx = i + 1; } else if (iov_data->next_vf_mbx) { @@ -275,7 +256,7 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid) if (vf_idx >= iov_data->num_vfs) return FM10K_ERR_PARAM; - /* determine if an update has occured and if so notify the VF */ + /* determine if an update has occurred and if so notify the VF */ vf_info = &iov_data->vf_info[vf_idx]; if (vf_info->sw_vid != pvid) { vf_info->sw_vid = pvid; @@ -488,8 +469,8 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, return 0; } -int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int unused, - int rate) +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, + int __always_unused unused, int rate) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_iov_data *iov_data = interface->iov_data; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 84ab9eea2768..1b0661e3573b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.12.2-k" +#define DRV_VERSION "0.15.2-k" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = @@ -41,6 +41,9 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +/* single workqueue for entire fm10k driver */ +struct workqueue_struct *fm10k_workqueue = NULL; + /** * fm10k_init_module - Driver Registration Routine * @@ -52,6 +55,10 @@ static int __init fm10k_init_module(void) pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version); pr_info("%s\n", fm10k_copyright); + /* create driver workqueue */ + if (!fm10k_workqueue) + fm10k_workqueue = 
create_workqueue("fm10k"); + fm10k_dbg_init(); return fm10k_register_pci_driver(); @@ -69,6 +76,11 @@ static void __exit fm10k_exit_module(void) fm10k_unregister_pci_driver(); fm10k_dbg_exit(); + + /* destroy driver workqueue */ + flush_workqueue(fm10k_workqueue); + destroy_workqueue(fm10k_workqueue); + fm10k_workqueue = NULL; } module_exit(fm10k_exit_module); @@ -209,7 +221,7 @@ static inline bool fm10k_page_is_reserved(struct page *page) static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, struct page *page, - unsigned int truesize) + unsigned int __maybe_unused truesize) { /* avoid re-using remote pages */ if (unlikely(fm10k_page_is_reserved(page))) @@ -240,7 +252,6 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, /** * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into @@ -253,8 +264,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, * The function will then update the page offset if necessary and return * true if the buffer can be reused by the interface. **/ -static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring, - struct fm10k_rx_buffer *rx_buffer, +static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, union fm10k_rx_desc *rx_desc, struct sk_buff *skb) { @@ -330,7 +340,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring, DMA_FROM_DEVICE); /* pull page into skb */ - if (fm10k_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) { /* hand second half of page back to the ring */ fm10k_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -412,7 +422,7 @@ static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, } static void fm10k_type_trans(struct fm10k_ring *rx_ring, - union fm10k_rx_desc *rx_desc, + union fm10k_rx_desc __maybe_unused *rx_desc, struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; @@ -509,8 +519,6 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, /** * fm10k_pull_tail - fm10k specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being adjusted * * This function is an fm10k specific version of __pskb_pull_tail. The @@ -520,9 +528,7 @@ static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring, * As a result we can do things like drop a frag and maintain an accurate * truesize for the skb. 
*/ -static void fm10k_pull_tail(struct fm10k_ring *rx_ring, - union fm10k_rx_desc *rx_desc, - struct sk_buff *skb) +static void fm10k_pull_tail(struct sk_buff *skb) { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; unsigned char *va; @@ -576,7 +582,7 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring, /* place header in linear portion of buffer */ if (skb_is_nonlinear(skb)) - fm10k_pull_tail(rx_ring, rx_desc, skb); + fm10k_pull_tail(skb); /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) @@ -711,10 +717,6 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS) return NULL; - /* verify protocol is transparent Ethernet bridging */ - if (nvgre_hdr->proto != htons(ETH_P_TEB)) - return NULL; - /* report start of ethernet header */ if (nvgre_hdr->flags & NVGRE_TNI) return (struct ethhdr *)(nvgre_hdr + 1); @@ -722,15 +724,13 @@ static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb) return (struct ethhdr *)(&nvgre_hdr->tni); } -static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) +__be16 fm10k_tx_encap_offload(struct sk_buff *skb) { + u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen; struct ethhdr *eth_hdr; - u8 l4_hdr = 0; -/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */ -#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET 164 - if (skb_inner_transport_header(skb) - skb_mac_header(skb) > - FM10K_MAX_ENCAP_TRANSPORT_OFFSET) + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) return 0; switch (vlan_get_protocol(skb)) { @@ -760,12 +760,33 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb) switch (eth_hdr->h_proto) { case htons(ETH_P_IP): + inner_l4_hdr = inner_ip_hdr(skb)->protocol; + break; case htons(ETH_P_IPV6): + inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr; + break; + default: + return 0; + } + + switch (inner_l4_hdr) { + case IPPROTO_TCP: + inner_l4_hlen = inner_tcp_hdrlen(skb); + break; + case IPPROTO_UDP: + inner_l4_hlen = 8; break; default: return 0; } + /* The hardware allows tunnel offloads only if the combined inner and + * outer header is 184 bytes or less + */ + if (skb_inner_transport_header(skb) + inner_l4_hlen - + skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH) + return 0; + return eth_hdr->h_proto; } @@ -934,10 +955,10 @@ static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ smp_mb(); - /* We need to check again in a case another CPU has just - * made room available. */ + /* Check again in a case another CPU has just made room available */ if (likely(fm10k_desc_unused(tx_ring) < size)) return -EBUSY; @@ -1182,7 +1203,6 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) { /* Do the reset outside of interrupt context */ if (!test_bit(__FM10K_DOWN, &interface->state)) { - netdev_err(interface->netdev, "Reset interface\n"); interface->tx_timeout_count++; interface->flags |= FM10K_FLAG_RESET_REQUESTED; fm10k_service_event_schedule(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 9f5457c9e627..1b2738380518 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -72,7 +72,7 @@ static bool fm10k_fifo_empty(struct fm10k_mbx_fifo *fifo) * @fifo: pointer to FIFO * @offset: offset to add to head * - * This function returns the indicies into the fifo based on head + offset + * This function returns the indices into the fifo based on head + offset **/ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -84,7 +84,7 @@ static u16 fm10k_fifo_head_offset(struct fm10k_mbx_fifo *fifo, u16 offset) * @fifo: pointer to FIFO * @offset: offset to add to tail * - * This function returns the indicies into the fifo based on tail + offset + * This function returns the indices into the fifo based on tail + offset **/ static u16 fm10k_fifo_tail_offset(struct fm10k_mbx_fifo *fifo, u16 offset) { @@ -126,6 +126,18 @@ static u16 fm10k_fifo_head_drop(struct fm10k_mbx_fifo *fifo) } /** + * fm10k_fifo_drop_all - Drop all messages in FIFO + * @fifo: pointer to FIFO + * + * This function resets the head pointer to drop all messages in the FIFO, + * and ensure the FIFO is empty. + **/ +static void fm10k_fifo_drop_all(struct fm10k_mbx_fifo *fifo) +{ + fifo->head = fifo->tail; +} + +/** * fm10k_mbx_index_len - Convert a head/tail index into a length value * @mbx: pointer to mailbox * @head: head index @@ -315,7 +327,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) } while (total_len < len); /* message extends out of pushed section, but fits in FIFO */ - if ((len < total_len) && (msg_len <= mbx->rx.size)) + if ((len < total_len) && (msg_len <= mbx->max_size)) return 0; /* return length of invalid section */ @@ -326,8 +338,7 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len) * fm10k_mbx_write_copy - pulls data off of Tx FIFO and places it in mbmem * @mbx: pointer to mailbox * - * This function will take a seciton of the Rx FIFO and copy it into the - mbx->tail--; + * This function will take a section of the Tx FIFO and copy it into the * mailbox memory. The offset in mbmem is based on the lower bits of the * tail and len determines the length to copy. **/ @@ -418,7 +429,7 @@ static void fm10k_mbx_pull_head(struct fm10k_hw *hw, * @hw: pointer to hardware structure * @mbx: pointer to mailbox * - * This function will take a seciton of the mailbox memory and copy it + * This function will take a section of the mailbox memory and copy it * into the Rx FIFO. The offset is based on the lower bits of the * head and len determines the length to copy. **/ @@ -464,7 +475,7 @@ static void fm10k_mbx_read_copy(struct fm10k_hw *hw, * @tail: tail index of message * * This function will first validate the tail index and size for the - * incoming message. It then updates the acknowlegment number and + * incoming message. It then updates the acknowledgment number and * copies the data into the FIFO. It will return the number of messages * dequeued on success and a negative value on error. 
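The new fm10k_fifo_drop_all() above can empty the ring with a single assignment because the FIFO's head and tail are counters that the fm10k_fifo_head_offset()/fm10k_fifo_tail_offset() helpers mask down to array indices on access. A sketch of that scheme with invented demo_* names (the driver's real structures differ in detail):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_fifo {
	u32 buffer[64];	/* a power of two, so "& (size - 1)" can mask */
	u16 head;	/* free-running counters; unsigned wrap is fine */
	u16 tail;
};

static u16 demo_fifo_used(const struct demo_fifo *fifo)
{
	return fifo->tail - fifo->head;
}

static int demo_fifo_enqueue(struct demo_fifo *fifo, u32 val)
{
	if (demo_fifo_used(fifo) == ARRAY_SIZE(fifo->buffer))
		return -ENOSPC;	/* full */
	fifo->buffer[fifo->tail++ & (ARRAY_SIZE(fifo->buffer) - 1)] = val;
	return 0;
}

static void demo_fifo_drop_all(struct demo_fifo *fifo)
{
	fifo->head = fifo->tail;	/* everything queued is now unreachable */
}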
**/ @@ -761,7 +772,7 @@ static s32 fm10k_mbx_enqueue_tx(struct fm10k_hw *hw, err = fm10k_fifo_enqueue(&mbx->tx, msg); } - /* if we failed trhead the error */ + /* if we failed treat the error */ if (err) { mbx->timeout = 0; mbx->tx_busy++; @@ -815,10 +826,10 @@ static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx) { u32 mbmem = mbx->mbmem_reg; - /* write new msg header to notify recepient of change */ + /* write new msg header to notify recipient of change */ fm10k_write_reg(hw, mbmem, mbx->mbx_hdr); - /* write mailbox to sent interrupt */ + /* write mailbox to send interrupt */ if (mbx->mbx_lock) fm10k_write_reg(hw, mbx->mbx_reg, mbx->mbx_lock); @@ -1052,8 +1063,11 @@ static void fm10k_mbx_reset_work(struct fm10k_mbx_info *mbx) * @mbx: pointer to mailbox * @size: new value for max_size * - * This function will update the max_size value and drop any outgoing messages - * from the head of the Tx FIFO that are larger than max_size. + * This function updates the max_size value and drops any outgoing messages + * at the head of the Tx FIFO if they are larger than max_size. It does not + * drop all messages, as this is too difficult to parse and remove them from + * the FIFO. Instead, rely on the checking to ensure that messages larger + * than max_size aren't pushed into the memory buffer. **/ static void fm10k_mbx_update_max_size(struct fm10k_mbx_info *mbx, u16 size) { @@ -1251,7 +1265,7 @@ static s32 fm10k_mbx_process_error(struct fm10k_hw *hw, /* we will need to pull all of the fields for verification */ head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); - /* we only have lower 10 bits of error number os add upper bits */ + /* we only have lower 10 bits of error number so add upper bits */ err_no = FM10K_MSG_HDR_FIELD_GET(*hdr, ERR_NO); err_no |= ~FM10K_MSG_HDR_MASK(ERR_NO); @@ -1371,9 +1385,11 @@ static void fm10k_mbx_disconnect(struct fm10k_hw *hw, timeout -= FM10K_MBX_POLL_DELAY; } while ((timeout > 0) && (mbx->state != FM10K_STATE_CLOSED)); - /* in case we didn't close just force the mailbox into shutdown */ + /* in case we didn't close, just force the mailbox into shutdown and + * drop all left over messages in the FIFO. + */ fm10k_mbx_connect_reset(mbx); - fm10k_mbx_update_max_size(mbx, 0); + fm10k_fifo_drop_all(&mbx->tx); fm10k_write_reg(hw, mbx->mbmem_reg, 0); } @@ -1548,7 +1564,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, mbx->timeout = 0; mbx->udelay = FM10K_MBX_INIT_DELAY; - /* initalize tail and head */ + /* initialize tail and head */ mbx->tail = 1; mbx->head = 1; @@ -1627,7 +1643,7 @@ static void fm10k_sm_mbx_connect_reset(struct fm10k_mbx_info *mbx) mbx->local = FM10K_SM_MBX_VERSION; mbx->remote = 0; - /* initalize tail and head */ + /* initialize tail and head */ mbx->tail = 1; mbx->head = 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index cfde8bac1aeb..2f4f41b7eae7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -356,7 +356,7 @@ static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface) * fm10k_request_glort_range - Request GLORTs for use in configuring rules * @interface: board private structure * - * This function allocates a range of glorts for this inteface to use. + * This function allocates a range of glorts for this interface to use. **/ static void fm10k_request_glort_range(struct fm10k_intfc *interface) { @@ -770,18 +770,18 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (hw->mac.vlan_override) return -EACCES; - /* if default VLAN is already present do nothing */ - if (vid == hw->mac.default_vid) - return -EBUSY; - /* update active_vlans bitmask */ set_bit(vid, interface->active_vlans); if (!set) clear_bit(vid, interface->active_vlans); + /* if default VLAN is already present do nothing */ + if (vid == hw->mac.default_vid) + return 0; + fm10k_mbx_lock(interface); - /* only need to update the VLAN if not in promiscous mode */ + /* only need to update the VLAN if not in promiscuous mode */ if (!(netdev->flags & IFF_PROMISC)) { err = hw->mac.ops.update_vlan(hw, vid, 0, set); if (err) @@ -970,14 +970,7 @@ static void fm10k_set_rx_mode(struct net_device *dev) fm10k_mbx_lock(interface); - /* syncronize all of the addresses */ - if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { - __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); - if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) - __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); - } - - /* if we aren't changing modes there is nothing to do */ + /* update xcast mode first, but only if it changed */ if (interface->xcast_mode != xcast_mode) { /* update VLAN table */ if (xcast_mode == FM10K_XCAST_MODE_PROMISC) @@ -992,6 +985,13 @@ static void fm10k_set_rx_mode(struct net_device *dev) interface->xcast_mode = xcast_mode; } + /* synchronize all of the addresses */ + if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { + __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); + if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) + __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); + } + fm10k_mbx_unlock(interface); } @@ -1051,16 +1051,16 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) vid, true, 0); } - /* syncronize all of the addresses */ + /* update xcast mode before syncronizing addresses */ + hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); + + /* synchronize all of the addresses */ if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); } - /* update xcast mode */ - hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); - fm10k_mbx_unlock(interface); /* record updated xcast mode state */ @@ -1126,7 +1126,7 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev, } for (i = 0; i < interface->num_tx_queues; i++) { - ring = ACCESS_ONCE(interface->rx_ring[i]); + ring = ACCESS_ONCE(interface->tx_ring[i]); if (!ring) continue; @@ -1350,6 +1350,16 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) } } +static netdev_features_t fm10k_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) + return features; + + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); +} + static const 
struct net_device_ops fm10k_netdev_ops = { .ndo_open = fm10k_open, .ndo_stop = fm10k_close, @@ -1372,6 +1382,10 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_do_ioctl = fm10k_ioctl, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fm10k_netpoll, +#endif + .ndo_features_check = fm10k_features_check, }; #define DEFAULT_DEBUG_LEVEL_SHIFT 3 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 4f5892cc32d7..df9fda38bdd1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -94,7 +94,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface) { if (!test_bit(__FM10K_SERVICE_DISABLE, &interface->state) && !test_and_set_bit(__FM10K_SERVICE_SCHED, &interface->state)) - schedule_work(&interface->service_task); + queue_work(fm10k_workqueue, &interface->service_task); } static void fm10k_service_event_complete(struct fm10k_intfc *interface) @@ -191,7 +191,6 @@ static void fm10k_reset_subtask(struct fm10k_intfc *interface) interface->flags &= ~FM10K_FLAG_RESET_REQUESTED; netdev_err(interface->netdev, "Reset interface\n"); - interface->tx_timeout_count++; fm10k_reinit(interface); } @@ -357,11 +356,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface) net_stats->rx_packets = pkts; interface->alloc_failed = alloc_failed; interface->rx_csum_errors = rx_csum_errors; - interface->rx_errors = rx_errors; hw->mac.ops.update_hw_stats(hw, &interface->stats); - for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) { + for (i = 0; i < hw->mac.max_queues; i++) { struct fm10k_hw_stats_q *q = &interface->stats.q[i]; tx_bytes_nic += q->tx_bytes.count; @@ -378,7 +376,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface) interface->rx_drops_nic = rx_drops_nic; /* Fill out the OS statistics structure */ - net_stats->rx_errors = interface->stats.xec.count; + net_stats->rx_errors = rx_errors; net_stats->rx_dropped = interface->stats.nodesc_drop.count; } @@ -648,7 +646,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, /* Configure the Rx buffer size for one buff without split */ srrctl |= FM10K_RX_BUFSZ >> FM10K_SRRCTL_BSIZEPKT_SHIFT; - /* Configure the Rx ring to supress loopback packets */ + /* Configure the Rx ring to suppress loopback packets */ srrctl |= FM10K_SRRCTL_LOOPBACK_SUPPRESS; fm10k_write_reg(hw, FM10K_SRRCTL(reg_idx), srrctl); @@ -808,7 +806,7 @@ static void fm10k_napi_enable_all(struct fm10k_intfc *interface) } } -static irqreturn_t fm10k_msix_clean_rings(int irq, void *data) +static irqreturn_t fm10k_msix_clean_rings(int __always_unused irq, void *data) { struct fm10k_q_vector *q_vector = data; @@ -818,7 +816,7 @@ static irqreturn_t fm10k_msix_clean_rings(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t fm10k_msix_mbx_vf(int irq, void *data) +static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) { struct fm10k_intfc *interface = data; struct fm10k_hw *hw = &interface->hw; @@ -840,6 +838,28 @@ static irqreturn_t fm10k_msix_mbx_vf(int irq, void *data) return IRQ_HANDLED; } +#ifdef CONFIG_NET_POLL_CONTROLLER 
+/** + * fm10k_netpoll - A Polling 'interrupt' handler + * @netdev: network interface device structure + * + * This is used by netconsole to send skbs without having to re-enable + * interrupts. It's not called while the normal interrupt routine is executing. + **/ +void fm10k_netpoll(struct net_device *netdev) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__FM10K_DOWN, &interface->state)) + return; + + for (i = 0; i < interface->num_q_vectors; i++) + fm10k_msix_clean_rings(0, interface->q_vector[i]); +} + +#endif #define FM10K_ERR_MSG(type) case (type): error = #type; break static void fm10k_print_fault(struct fm10k_intfc *interface, int type, struct fm10k_fault *fault) @@ -964,7 +984,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr) } } -static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data) +static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) { struct fm10k_intfc *interface = data; struct fm10k_hw *hw = &interface->hw; @@ -986,6 +1006,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data) /* service mailboxes */ if (fm10k_mbx_trylock(interface)) { mbx->ops.process(hw, mbx); + /* handle VFLRE events */ fm10k_iov_event(interface); fm10k_mbx_unlock(interface); } @@ -1002,6 +1023,8 @@ static irqreturn_t fm10k_msix_mbx_pf(int irq, void *data) /* we should validate host state after interrupt event */ hw->mac.get_host_state = 1; + + /* validate host state, and handle VF mailboxes in the service task */ fm10k_service_event_schedule(interface); /* re-enable mailbox interrupt and indicate 20us delay */ @@ -1069,7 +1092,7 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, } static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_intfc *interface; u64 timestamp; @@ -1089,7 +1112,7 @@ static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results, /* generic error handler for mailbox issues */ static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_intfc *interface; struct pci_dev *pdev; @@ -1165,7 +1188,7 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, } static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_intfc *interface; u16 glort, pvid; @@ -1206,7 +1229,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, } static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info *mbx) + struct fm10k_mbx_info __always_unused *mbx) { struct fm10k_swapi_1588_timestamp timestamp; struct fm10k_iov_data *iov_data; @@ -1488,7 +1511,7 @@ void fm10k_up(struct fm10k_intfc *interface) /* enable transmits */ netif_tx_start_all_queues(interface->netdev); - /* kick off the service timer */ + /* kick off the service timer now */ hw->mac.get_host_state = 1; mod_timer(&interface->service_timer, jiffies); } @@ -1528,8 +1551,6 @@ void fm10k_down(struct fm10k_intfc *interface) /* disable polling routines */ fm10k_napi_disable_all(interface); - del_timer_sync(&interface->service_timer); - /* capture stats one last time before stopping interface */ fm10k_update_stats(interface); @@ -1655,6 +1676,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, (unsigned long)interface); 
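Two related hunks here rework the service timer's lifetime: fm10k_down() above stops cancelling it, the fm10k_sw_init() hunk just below arms it as soon as the driver loads, and fm10k_remove() becomes the only place it is deleted -- so the service task keeps running while the interface is administratively down. A sketch of that arm-early, cancel-at-remove pattern, using invented demo_* names and the old setup_timer()-era callback signature this driver's vintage uses:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct timer_list service_timer;
};

static void demo_service_timer_fn(unsigned long data)
{
	struct demo_priv *priv = (struct demo_priv *)data;

	/* schedule the real work on a workqueue here, then re-arm */
	mod_timer(&priv->service_timer, jiffies + 2 * HZ);
}

static void demo_probe_init(struct demo_priv *priv)
{
	setup_timer(&priv->service_timer, demo_service_timer_fn,
		    (unsigned long)priv);

	/* armed immediately, even though the interface is still down */
	mod_timer(&priv->service_timer, jiffies + 2 * HZ);
}

static void demo_remove(struct demo_priv *priv)
{
	/* the only place the timer is stopped; ifdown no longer does this */
	del_timer_sync(&priv->service_timer);
}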
INIT_WORK(&interface->service_task, fm10k_service_task); + /* kick off service timer now, even when interface is down */ + mod_timer(&interface->service_timer, (HZ * 2) + jiffies); + /* Intitialize timestamp data */ fm10k_ts_init(interface); @@ -1871,6 +1895,8 @@ static void fm10k_remove(struct pci_dev *pdev) struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev; + del_timer_sync(&interface->service_timer); + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); cancel_work_sync(&interface->service_task); @@ -1984,7 +2010,8 @@ static int fm10k_resume(struct pci_dev *pdev) * a sleep state. The fm10k hardware does not support wake on lan so the * driver simply needs to shut down the device so it is in a low power state. **/ -static int fm10k_suspend(struct pci_dev *pdev, pm_message_t state) +static int fm10k_suspend(struct pci_dev *pdev, + pm_message_t __always_unused state) { struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 7e4711958e46..891e21874b2a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -234,8 +234,7 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) vid = (vid << 17) >> 17; /* verify the reserved 0 fields are 0 */ - if (len >= FM10K_VLAN_TABLE_VID_MAX || - vid >= FM10K_VLAN_TABLE_VID_MAX) + if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; /* Loop through the table updating all required VLANs */ @@ -312,7 +311,7 @@ bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort) } /** - * fm10k_update_uc_addr_pf - Update device unicast addresss + * fm10k_update_xc_addr_pf - Update device addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table @@ -330,6 +329,9 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, struct fm10k_mac_update mac_update; u32 msg[5]; + /* clear set bit from VLAN ID */ + vid &= ~FM10K_VLAN_CLEAR; + /* if glort or vlan are not valid return error */ if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX) return FM10K_ERR_PARAM; @@ -356,7 +358,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort, } /** - * fm10k_update_uc_addr_pf - Update device unicast addresss + * fm10k_update_uc_addr_pf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: base resource tag for this request * @mac: MAC address to add/remove from table @@ -454,7 +456,7 @@ static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw) break; } - /* always reset VFITR2[0] to point to last enabled PF vector*/ + /* always reset VFITR2[0] to point to last enabled PF vector */ fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i); /* reset ITR2[0] to point to last enabled PF vector */ @@ -677,7 +679,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, /* loop through unallocated rings assigning them back to PF */ for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) { fm10k_write_reg(hw, FM10K_TXDCTL(i), 0); - fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | vid); + fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF | + FM10K_TXQCTL_UNLIMITED_BW | vid); fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF); } @@ -812,7 +815,7 @@ static s32 
fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx) if (vf_idx >= hw->iov.num_vfs) return FM10K_ERR_PARAM; - /* determine vector offset and count*/ + /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); @@ -951,7 +954,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, if (vf_info->mbx.ops.disconnect) vf_info->mbx.ops.disconnect(hw, &vf_info->mbx); - /* determine vector offset and count*/ + /* determine vector offset and count */ vf_v_idx = fm10k_vf_vector_index(hw, vf_idx); vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw); @@ -1035,7 +1038,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, ((u32)vf_info->mac[2]); } - /* map queue pairs back to VF from last to first*/ + /* map queue pairs back to VF from last to first */ for (i = queues_per_pool; i--;) { fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal); fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah); @@ -1141,7 +1144,7 @@ static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, * * This function is a default handler for MSI-X requests from the VF. The * assumption is that in this case it is acceptable to just directly - * hand off the message form the VF to the underlying shared code. + * hand off the message from the VF to the underlying shared code. **/ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) @@ -1160,7 +1163,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, * * This function is a default handler for MAC/VLAN requests from the VF. * The assumption is that in this case it is acceptable to just directly - * hand off the message form the VF to the underlying shared code. + * hand off the message from the VF to the underlying shared code. **/ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) @@ -1250,8 +1253,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, } /* notify switch of request for new multicast address */ - err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, - !(vlan & FM10K_VLAN_CLEAR), 0); + err = hw->mac.ops.update_mc_addr(hw, vf_info->glort, mac, vlan, + !(vlan & FM10K_VLAN_CLEAR)); } return err; @@ -1404,7 +1407,7 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw, &stats->vlan_drop); loopback_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_LOOPBACK_DROP, - &stats->loopback_drop); + &stats->loopback_drop); nodesc_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_NODESC_DROP, &stats->nodesc_drop); @@ -1573,7 +1576,7 @@ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) s32 ret_val = 0; u32 dma_ctrl2; - /* verify the switch is ready for interraction */ + /* verify the switch is ready for interaction */ dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) goto out; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c index d966044e017a..9043633c3e50 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -285,7 +285,7 @@ static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct fm10k_intfc *interface; unsigned long flags; @@ -297,17 +297,17 @@ static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) now = fm10k_systime_read(interface) + interface->ptp_adjust; read_unlock_irqrestore(&interface->systime_lock, flags); - *ts = ns_to_timespec(now); + *ts = ns_to_timespec64(now); return 0; } static int fm10k_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct fm10k_intfc *interface; unsigned long flags; - u64 ns = timespec_to_ns(ts); + u64 ns = timespec64_to_ns(ts); interface = container_of(ptp, struct fm10k_intfc, ptp_caps); @@ -319,7 +319,8 @@ static int fm10k_ptp_settime(struct ptp_clock_info *ptp, } static int fm10k_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) + struct ptp_clock_request *rq, + int __always_unused on) { struct ptp_clock_time *t = &rq->perout.period; struct fm10k_intfc *interface; @@ -419,8 +420,8 @@ void fm10k_ptp_register(struct fm10k_intfc *interface) ptp_caps->max_adj = 976562; ptp_caps->adjfreq = fm10k_ptp_adjfreq; ptp_caps->adjtime = fm10k_ptp_adjtime; - ptp_caps->gettime = fm10k_ptp_gettime; - ptp_caps->settime = fm10k_ptp_settime; + ptp_caps->gettime64 = fm10k_ptp_gettime; + ptp_caps->settime64 = fm10k_ptp_settime; /* provide pins if BAR4 is accessible */ if (interface->sw_addr) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index fd0a05f011a8..9b29d7b0377a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -710,7 +710,7 @@ void fm10k_tlv_msg_test_create(u32 *msg, u32 attr_flags) /** * fm10k_tlv_msg_test - Validate all results on test message receive * @hw: Pointer to hardware structure - * @results: Pointer array to attributes in the mesage + * @results: Pointer array to attributes in the message * @mbx: Pointer to mailbox information structure * * This function does a check to verify all attributes match what the test diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 7c6d9d5a8ae5..4af96686c584 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -356,6 +356,9 @@ struct fm10k_hw; #define FM10K_QUEUE_DISABLE_TIMEOUT 100 #define FM10K_RESET_TIMEOUT 150 +/* Maximum supported combined inner and outer header length for encapsulation */ +#define FM10K_TUNNEL_HEADER_LENGTH 184 + /* VF registers */ #define FM10K_VFCTRL 0x00000 #define FM10K_VFCTRL_RST 0x00000008 @@ -593,7 +596,7 @@ struct fm10k_vf_info { u16 sw_vid; /* Switch API assigned VLAN */ u16 pf_vid; /* PF assigned Default VLAN */ u8 mac[ETH_ALEN]; /* PF Default MAC address */ - u8 vsi; /* VSI idenfifier */ + u8 vsi; /* VSI identifier */ u8 vf_idx; /* which VF this is */ u8 vf_flags; /* flags indicating what modes * are supported for the port diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index f0aa0f97b4a9..94f0f6a146d9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ 
b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,5 +1,5 @@ /* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -37,7 +37,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) if (err) return err; - /* If permenant address is set then we need to restore it */ + /* If permanent address is set then we need to restore it */ if (is_valid_ether_addr(perm_addr)) { bal = (((u32)perm_addr[3]) << 24) | (((u32)perm_addr[4]) << 16) | @@ -65,7 +65,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) * fm10k_reset_hw_vf - VF hardware reset * @hw: pointer to hardware structure * - * This function should return the hardare to a state similar to the + * This function should return the hardware to a state similar to the * one it is in after just being initialized. **/ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) @@ -124,6 +124,10 @@ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) /* record maximum queue count */ hw->mac.max_queues = i; + /* fetch default VLAN */ + hw->mac.default_vid = (fm10k_read_reg(hw, FM10K_TXQCTL(0)) & + FM10K_TXQCTL_VID_MASK) >> FM10K_TXQCTL_VID_SHIFT; + return 0; } @@ -252,7 +256,7 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) } /** - * fm10k_update_uc_addr_vf - Update device unicast address + * fm10k_update_uc_addr_vf - Update device unicast addresses * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table @@ -282,7 +286,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, memcmp(hw->mac.perm_addr, mac, ETH_ALEN)) return FM10K_ERR_PARAM; - /* add bit to notify us if this is a set of clear operation */ + /* add bit to notify us if this is a set or clear operation */ if (!add) vid |= FM10K_VLAN_CLEAR; @@ -295,7 +299,7 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort, } /** - * fm10k_update_mc_addr_vf - Update device multicast address + * fm10k_update_mc_addr_vf - Update device multicast addresses * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table @@ -319,7 +323,7 @@ static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort, if (!is_multicast_ether_addr(mac)) return FM10K_ERR_PARAM; - /* add bit to notify us if this is a set of clear operation */ + /* add bit to notify us if this is a set or clear operation */ if (!add) vid |= FM10K_VLAN_CLEAR; @@ -515,7 +519,7 @@ static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) * @hw: pointer to the hardware structure * * Function reads the content of 2 registers, combined to represent a 64 bit - * value measured in nanosecods. In order to guarantee the value is accurate + * value measured in nanoseconds. In order to guarantee the value is accurate * we check the 32 most significant bits both before and after reading the * 32 least significant bits to verify they didn't change as we were reading * the registers. diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index c40581999121..b4729ba57c9c 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel Ethernet Controller XL710 Family Linux Driver -# Copyright(c) 2013 - 2014 Intel Corporation. 
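The fm10k systime comment above (in fm10k_vf.c) describes the standard torn-read guard for a 64-bit counter exposed as two 32-bit registers: sample the high word, then the low word, then the high word again, and retry if the high word changed in between. A sketch with invented demo_* names and register offsets:

#include <linux/types.h>

struct demo_hw;					/* opaque device handle */
u32 demo_read_reg(struct demo_hw *hw, u32 off);	/* stand-in MMIO accessor */

#define DEMO_SYSTIME_LO	0x00
#define DEMO_SYSTIME_HI	0x04

static u64 demo_read_systime(struct demo_hw *hw)
{
	u32 hi, lo, hi2;

	do {
		hi  = demo_read_reg(hw, DEMO_SYSTIME_HI);
		lo  = demo_read_reg(hw, DEMO_SYSTIME_LO);
		hi2 = demo_read_reg(hw, DEMO_SYSTIME_HI);
		/* hi != hi2 means lo wrapped while we were reading */
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}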
+# Copyright(c) 2013 - 2015 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2b65cdcad6ba..33c35d3b7420 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -36,6 +36,7 @@ #include <linux/aer.h> #include <linux/netdevice.h> #include <linux/ioport.h> +#include <linux/iommu.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/string.h> @@ -49,6 +50,7 @@ #include <net/ip6_checksum.h> #include <linux/ethtool.h> #include <linux/if_vlan.h> +#include <linux/if_bridge.h> #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> @@ -70,6 +72,7 @@ #define I40E_MAX_NUM_DESCRIPTORS 4096 #define I40E_MAX_REGISTER 0x800000 +#define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024) #define I40E_DEFAULT_NUM_DESCRIPTORS 512 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32 #define I40E_MIN_NUM_DESCRIPTORS 64 @@ -94,6 +97,9 @@ #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 9) +/* Ethtool Private Flags */ +#define I40E_PRIV_FLAGS_NPAR_FLAG (1 << 0) + #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) #define I40E_NVM_VERSION_HI_SHIFT 12 @@ -140,6 +146,7 @@ enum i40e_state_t { __I40E_CORE_RESET_REQUESTED, __I40E_GLOBAL_RESET_REQUESTED, __I40E_EMP_RESET_REQUESTED, + __I40E_EMP_RESET_INTR_RECEIVED, __I40E_FILTER_OVERFLOW_PROMISC, __I40E_SUSPENDED, __I40E_PTP_TX_IN_PROGRESS, @@ -168,6 +175,9 @@ struct i40e_lump_tracking { #define I40E_FDIR_MAX_RAW_PACKET_SIZE 512 #define I40E_FDIR_BUFFER_FULL_MARGIN 10 #define I40E_FDIR_BUFFER_HEAD_ROOM 32 +#define I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR (I40E_FDIR_BUFFER_HEAD_ROOM * 4) + +#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -232,17 +242,17 @@ struct i40e_pf { bool fc_autoneg_status; u16 eeprom_version; - u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */ + u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ - u16 num_req_vfs; /* num vfs requested for this vf */ - u16 num_vf_qps; /* num queue pairs per vf */ + u16 num_req_vfs; /* num VFs requested for this VF */ + u16 num_vf_qps; /* num queue pairs per VF */ #ifdef I40E_FCOE - u16 num_fcoe_qps; /* num fcoe queues this pf has set up */ + u16 num_fcoe_qps; /* num fcoe queues this PF has set up */ u16 num_fcoe_msix; /* num queue vectors per fcoe pool */ #endif /* I40E_FCOE */ - u16 num_lan_qps; /* num lan queues this pf has set up */ - u16 num_lan_msix; /* num queue vectors for the base pf vsi */ + u16 num_lan_qps; /* num lan queues this PF has set up */ + u16 num_lan_msix; /* num queue vectors for the base PF vsi */ int queues_left; /* queues left unclaimed */ u16 rss_size; /* num queues in the RSS array */ u16 rss_size_max; /* HW defined max RSS queues */ @@ -269,7 +279,7 
@@ struct i40e_pf { enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; - u16 msg_enable; + u32 msg_enable; char int_name[I40E_INT_NAME_STR_LEN]; u16 adminq_work_limit; /* num of admin receive queue desc to process */ unsigned long service_timer_period; @@ -383,6 +393,9 @@ struct i40e_pf { bool ptp_tx; bool ptp_rx; u16 rss_table_size; + /* These are only valid in NPAR modes */ + u32 npar_max_bw; + u32 npar_min_bw; }; struct i40e_mac_filter { @@ -405,6 +418,7 @@ struct i40e_veb { u16 uplink_seid; u16 stats_idx; /* index of VEB parent */ u8 enabled_tc; + u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ u16 flags; u16 bw_limit; u8 bw_max_quanta; @@ -461,6 +475,9 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; + u16 rss_table_size; + u16 rss_size; + u16 max_frame; u16 rx_hdr_len; u16 rx_buf_len; @@ -478,6 +495,7 @@ struct i40e_vsi { u16 base_queue; /* vsi's first queue in hw array */ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */ + u16 req_queue_pairs; /* User requested queue pairs */ u16 num_queue_pairs; /* Used tx and rx pairs */ u16 num_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ @@ -504,6 +522,9 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + /* current rxnfc data */ + struct ethtool_rxnfc rxnfc; /* current rss hash opts */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -544,14 +565,14 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw) static char buf[32]; snprintf(buf, sizeof(buf), - "f%d.%d a%d.%d n%02x.%02x e%08x", - hw->aq.fw_maj_ver, hw->aq.fw_min_ver, + "f%d.%d.%05d a%d.%d n%x.%02x e%x", + hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT, (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT, - hw->nvm.eetrack); + (hw->nvm.eetrack & 0xffffff)); return buf; } @@ -593,7 +614,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw) /** * i40e_get_fd_cnt_all - get the total FD filter space available - * @pf: pointer to the pf struct + * @pf: pointer to the PF struct **/ static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) { @@ -607,6 +628,7 @@ extern const char i40e_driver_name[]; extern const char i40e_driver_version_str[]; void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags); +struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); void i40e_update_stats(struct i40e_vsi *vsi); void i40e_update_eth_stats(struct i40e_vsi *vsi); struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); @@ -618,9 +640,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, int i40e_add_del_fdir(struct i40e_vsi *vsi, struct i40e_fdir_filter *input, bool add); void i40e_fdir_check_and_reenable(struct i40e_pf *pf); -int i40e_get_current_fd_count(struct i40e_pf *pf); -int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); -int i40e_get_current_atr_cnt(struct i40e_pf *pf); +u32 i40e_get_current_fd_count(struct i40e_pf *pf); +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf); +u32 i40e_get_global_fd_count(struct i40e_pf *pf); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, @@ -680,6 +703,7 @@ 
int i40e_vlan_rx_add_vid(struct net_device *netdev, int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid); #endif +int i40e_open(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); @@ -690,7 +714,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE -int i40e_open(struct net_device *netdev); int i40e_close(struct net_device *netdev); int i40e_setup_tc(struct net_device *netdev, u8 tc); void i40e_netpoll(struct net_device *netdev); @@ -712,6 +735,7 @@ void i40e_fcoe_handle_status(struct i40e_ring *rx_ring, void i40e_vlan_stripping_enable(struct i40e_vsi *vsi); #ifdef CONFIG_I40E_DCB void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg); void i40e_dcbnl_set_all(struct i40e_vsi *vsi); void i40e_dcbnl_setup(struct i40e_vsi *vsi); @@ -727,4 +751,8 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr); int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf); +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 77f6254a89ac..3e0d20037675 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -592,6 +592,7 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) ret_code = i40e_aq_get_firmware_version(hw, &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver, + &hw->aq.fw_build, &hw->aq.api_maj_ver, &hw->aq.api_min_ver, NULL); @@ -605,7 +606,8 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) goto init_adminq_free_arq; /* get the NVM version info */ - i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version); + i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION, + &hw->nvm.version); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo); i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index de17b6fbcc4e..28e519a50de4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6aea65dae5ed..d596f6624025 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -51,6 +51,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_20G_KR2: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: @@ -85,46 +86,53 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len = le16_to_cpu(aq_desc->datalen); - u8 *aq_buffer = (u8 *)buffer; - u32 data[4]; - u32 i = 0; + u8 *buf = (u8 *)buffer; + u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { - memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - for (i = 0; i < len; i++) { - data[((i % 16) / 4)] |= - ((u32)aq_buffer[i]) << (8 * (i % 4)); - if ((i % 16) == 15) { - i40e_debug(hw, mask, - "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); - memset(data, 0, sizeof(data)); - } + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); } - if ((i % 16) != 0) - i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); } } @@ -534,7 +542,6 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { I40E_PTT_UNUSED_ENTRY(255) }; - /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure @@ -685,7 +692,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr) /** * i40e_pre_tx_queue_cfg - pre tx queue configure * @hw: pointer to the HW structure - * @queue: target pf queue index + * @queue: target PF queue index * @enable: state change request * * Handles hw requirement to indicate intention to enable @@ -827,12 +834,15 @@ static enum 
i40e_media_type i40e_get_media_type(struct i40e_hw *hw) case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_40GBASE_AOC: + case I40E_PHY_TYPE_10GBASE_AOC: media = I40E_MEDIA_TYPE_DA; break; case I40E_PHY_TYPE_1000BASE_KX: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_20GBASE_KR2: media = I40E_MEDIA_TYPE_BACKPLANE; break; case I40E_PHY_TYPE_SGMII: @@ -849,7 +859,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) } #define I40E_PF_RESET_WAIT_COUNT_A0 200 -#define I40E_PF_RESET_WAIT_COUNT 110 +#define I40E_PF_RESET_WAIT_COUNT 200 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure @@ -947,7 +957,7 @@ void i40e_clear_hw(struct i40e_hw *hw) u32 val; u32 eol = 0x7ff; - /* get number of interrupts, queues, and vfs */ + /* get number of interrupts, queues, and VFs */ val = rd32(hw, I40E_GLPCI_CNF2); num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; @@ -1076,8 +1086,11 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) return gpio_val; } -#define I40E_LED0 22 +#define I40E_COMBINED_ACTIVITY 0xA +#define I40E_FILTER_ACTIVITY 0xE #define I40E_LINK_ACTIVITY 0xC +#define I40E_MAC_ACTIVITY 0xD +#define I40E_LED0 22 /** * i40e_led_get - return current on/off mode @@ -1090,6 +1103,7 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) **/ u32 i40e_led_get(struct i40e_hw *hw) { + u32 current_mode = 0; u32 mode = 0; int i; @@ -1102,6 +1116,20 @@ u32 i40e_led_get(struct i40e_hw *hw) if (!gpio_val) continue; + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT; break; @@ -1121,6 +1149,7 @@ u32 i40e_led_get(struct i40e_hw *hw) **/ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) { + u32 current_mode = 0; int i; if (mode & 0xfffffff0) @@ -1135,6 +1164,20 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) if (!gpio_val) continue; + /* ignore gpio LED src mode entries related to the activity + * LEDs + */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) + >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + switch (current_mode) { + case I40E_COMBINED_ACTIVITY: + case I40E_FILTER_ACTIVITY: + case I40E_MAC_ACTIVITY: + continue; + default: + break; + } + gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK; /* this & is a bit of paranoia, but serves as a range check */ gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & @@ -1298,14 +1341,14 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; } /* Update the link info */ - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) { /* Wait a little bit (on 40G cards it sometimes takes a really * long time for link to come back from the atomic reset) * and try once more */ msleep(1000); - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); } if (status) *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; @@ -1441,6 +1484,10 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, else 
hw_link_info->lse_enable = false; + if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && + hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) + hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; + /* save link status information */ if (link) *link = *hw_link_info; @@ -1453,35 +1500,6 @@ aq_get_link_info_exit: } /** - * i40e_update_link_info - * @hw: pointer to the hw struct - * @enable_lse: enable/disable LinkStatusEvent reporting - * - * Returns the link status of the adapter - **/ -i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse) -{ - struct i40e_aq_get_phy_abilities_resp abilities; - i40e_status status; - - status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL); - if (status) - return status; - - status = i40e_aq_get_phy_capabilities(hw, false, false, - &abilities, NULL); - if (status) - return status; - - if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED) - hw->phy.link_info.an_enabled = true; - else - hw->phy.link_info.an_enabled = false; - - return status; -} - -/** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct * @mask: interrupt mask to be set @@ -1760,6 +1778,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, * @hw: pointer to the hw struct * @fw_major_version: firmware major version * @fw_minor_version: firmware minor version + * @fw_build: firmware build number * @api_major_version: major queue version * @api_minor_version: minor queue version * @cmd_details: pointer to command details structure or NULL @@ -1768,6 +1787,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, **/ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details) { @@ -1781,13 +1801,15 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { - if (fw_major_version != NULL) + if (fw_major_version) *fw_major_version = le16_to_cpu(resp->fw_major); - if (fw_minor_version != NULL) + if (fw_minor_version) *fw_minor_version = le16_to_cpu(resp->fw_minor); - if (api_major_version != NULL) + if (fw_build) + *fw_build = le32_to_cpu(resp->fw_build); + if (api_major_version) *api_major_version = le16_to_cpu(resp->api_major); - if (api_minor_version != NULL) + if (api_minor_version) *api_minor_version = le16_to_cpu(resp->api_minor); } @@ -1817,7 +1839,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI); + desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); cmd->driver_major_ver = dv->major_version; cmd->driver_minor_ver = dv->minor_version; cmd->driver_build_ver = dv->build_version; @@ -1997,7 +2019,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; - buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data); + buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); @@ -2039,7 +2061,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; - buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data); + buf_size = count * sizeof(*mv_list); /* prep the rest of the request */ i40e_fill_default_direct_cmd_desc(&desc, 
i40e_aqc_opc_remove_macvlan); @@ -2061,7 +2083,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure - * @vfid: vf id to send msg + * @vfid: VF id to send msg * @v_opcode: opcodes for VF-PF communication * @v_retval: return error code * @msg: pointer to the msg buffer @@ -2106,7 +2128,7 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, * Read the register using the admin queue commands **/ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, - u32 reg_addr, u64 *reg_val, + u32 reg_addr, u64 *reg_val, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -2117,17 +2139,15 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, if (reg_val == NULL) return I40E_ERR_PARAM; - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_debug_read_reg); + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); cmd_resp->address = cpu_to_le32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) { - *reg_val = ((u64)cmd_resp->value_high << 32) | - (u64)cmd_resp->value_low; - *reg_val = le64_to_cpu(*reg_val); + *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | + (u64)le32_to_cpu(cmd_resp->value_low); } return status; @@ -3377,6 +3397,47 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } /** + * i40e_aq_alternate_read + * @hw: pointer to the hardware structure + * @reg_addr0: address of first dword to be read + * @reg_val0: pointer for data read from 'reg_addr0' + * @reg_addr1: address of second dword to be read + * @reg_val1: pointer for data read from 'reg_addr1' + * + * Read one or two dwords from alternate structure. Fields are indicated + * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer + * is not passed then only register at 'reg_addr0' is read. 
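i40e_aq_alternate_read(), whose body follows, fetches one or two dwords from the alternate structure in a single admin-queue exchange; the second result pointer may be NULL. i40e_read_bw_from_alt_ram() further down uses it to pull the per-partition min/max bandwidth words. A standalone sketch of that address arithmetic and the valid-bit test; every offset and mask value here is a placeholder, the real constants live in i40e_type.h:

#include <stdint.h>
#include <stdio.h>

/* placeholder values -- the real constants are defined in i40e_type.h */
#define FIRST_PF_OFFSET   0x0800u
#define DWORDS_PER_PF     64u
#define MAX_BW_OFFSET     7u
#define MIN_BW_OFFSET     8u
#define BW_VALID_MASK     0x80000000u

int main(void)
{
	uint32_t pf_id = 2;	/* hypothetical partition */
	uint32_t max_bw_addr, min_bw_addr, min_bw;

	/* same shape as i40e_read_bw_from_alt_ram(): base + field + stride */
	max_bw_addr = FIRST_PF_OFFSET + MAX_BW_OFFSET + DWORDS_PER_PF * pf_id;
	min_bw_addr = FIRST_PF_OFFSET + MIN_BW_OFFSET + DWORDS_PER_PF * pf_id;

	min_bw = 0x80000032;	/* pretend result of the alternate read */
	printf("pf %u: max @ 0x%x, min @ 0x%x, min valid: %s\n",
	       pf_id, max_bw_addr, min_bw_addr,
	       (min_bw & BW_VALID_MASK) ? "yes" : "no");
	return 0;
}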
+ * + **/ +static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, + u32 reg_addr0, u32 *reg_val0, + u32 reg_addr1, u32 *reg_val1) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_alternate_write *cmd_resp = + (struct i40e_aqc_alternate_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val0) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); + cmd_resp->address0 = cpu_to_le32(reg_addr0); + cmd_resp->address1 = cpu_to_le32(reg_addr1); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); + + if (!status) { + *reg_val0 = le32_to_cpu(cmd_resp->data0); + + if (reg_val1) + *reg_val1 = le32_to_cpu(cmd_resp->data1); + } + + return status; +} + +/** * i40e_aq_resume_port_tx * @hw: pointer to the hardware structure * @cmd_details: pointer to command details structure or NULL @@ -3440,3 +3501,79 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) break; } } + +/** + * i40e_read_bw_from_alt_ram + * @hw: pointer to the hardware structure + * @max_bw: pointer for max_bw read + * @min_bw: pointer for min_bw read + * @min_valid: pointer for bool that is true if min_bw is a valid value + * @max_valid: pointer for bool that is true if max_bw is a valid value + * + * Read bw from the alternate ram for the given pf + **/ +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, + bool *min_valid, bool *max_valid) +{ + i40e_status status; + u32 max_bw_addr, min_bw_addr; + + /* Calculate the address of the min/max bw registers */ + max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MAX_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + + I40E_ALT_STRUCT_MIN_BW_OFFSET + + (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); + + /* Read the bandwidths from alt ram */ + status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, + min_bw_addr, min_bw); + + if (*min_bw & I40E_ALT_BW_VALID_MASK) + *min_valid = true; + else + *min_valid = false; + + if (*max_bw & I40E_ALT_BW_VALID_MASK) + *max_valid = true; + else + *max_valid = false; + + return status; +} + +/** + * i40e_aq_configure_partition_bw + * @hw: pointer to the hardware structure + * @bw_data: Buffer holding valid pfs and bw limits + * @cmd_details: pointer to command details + * + * Configure partitions guaranteed/max bw + **/ +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details) +{ + i40e_status status; + struct i40e_aq_desc desc; + u16 bwd_size = sizeof(*bw_data); + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_configure_partition_bw); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + if (bwd_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + + desc.datalen = cpu_to_le16(bwd_size); + + status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, + cmd_details); + + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 3ce43588592d..6e1466756760 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -459,7 +459,7 @@ static void i40e_cee_to_dcb_v1_config( sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & I40E_TLV_STATUS_OPER) ? 
1 : 0; /* Add APPs if Error is False and Oper/Sync is True */ - if (!err && sync && oper) { + if (!err) { /* CEE operating configuration supports FCoE/iSCSI/FIP only */ dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index a11c70ca5a28..bd5079d5c1b6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -178,6 +178,10 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return; + /* MFP mode but not an iSCSI PF so return */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + return; + dcbxcfg = &hw->local_dcbx_config; /* Set up all the App TLVs if DCBx is negotiated */ @@ -223,7 +227,7 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, /** * i40e_dcbnl_del_app - Delete APP on all VSIs - * @pf: the corresponding pf + * @pf: the corresponding PF * @app: APP to delete * * Delete given APP from all the VSIs for given PF @@ -268,23 +272,26 @@ static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg, /** * i40e_dcbnl_flush_apps - Delete all removed APPs - * @pf: the corresponding pf + * @pf: the corresponding PF + * @old_cfg: old DCBX configuration data * @new_cfg: new DCBX configuration data * * Find and delete all APPs that are not present in the passed * DCB configuration **/ void i40e_dcbnl_flush_apps(struct i40e_pf *pf, + struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg) { struct i40e_dcb_app_priority_table app; - struct i40e_dcbx_config *dcbxcfg; - struct i40e_hw *hw = &pf->hw; int i; - dcbxcfg = &hw->local_dcbx_config; - for (i = 0; i < dcbxcfg->numapps; i++) { - app = dcbxcfg->app[i]; + /* MFP mode but not an iSCSI PF so return */ + if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) + return; + + for (i = 0; i < old_cfg->numapps; i++) { + app = old_cfg->app[i]; /* The APP is not available anymore delete it */ if (!i40e_dcbnl_find_app(new_cfg, &app)) i40e_dcbnl_del_app(pf, &app); @@ -306,9 +313,7 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) return; - /* Do not setup DCB NL ops for MFP mode */ - if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - dev->dcbnl_ops = &dcbnl_ops; + dev->dcbnl_ops = &dcbnl_ops; /* Set initial IEEE DCB settings */ i40e_dcbnl_set_all(vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c17ee77100d3..daa88263af66 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -35,7 +35,7 @@ static struct dentry *i40e_dbg_root; /** * i40e_dbg_find_vsi - searches for the vsi with the given seid - * @pf - the pf structure to search for the vsi + * @pf - the PF structure to search for the vsi * @seid - seid of the vsi it is searching for **/ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) @@ -54,7 +54,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) /** * i40e_dbg_find_veb - searches for the veb with the given seid - * @pf - the pf structure to search for the veb + * @pf - the PF structure to search for the veb * @seid - seid of the veb it is searching for **/ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) @@ -112,7 +112,7 @@ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, /** * i40e_dbg_prep_dump_buf - * @pf: the pf we're working with + * @pf: the PF 
we're working with * @buflen: the desired buffer length * * Return positive if success, 0 if failed @@ -318,7 +318,7 @@ static const struct file_operations i40e_dbg_dump_fops = { * setup, adding or removing filters, or other things. Many of * these will be useful for some forms of unit testing. **************************************************************/ -static char i40e_dbg_command_buf[256] = "hello world"; +static char i40e_dbg_command_buf[256] = ""; /** * i40e_dbg_command_read - read for command datum @@ -390,6 +390,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", vsi->netdev_registered, vsi->current_netdev_flags, vsi->state, vsi->flags); + if (vsi == pf->vsi[pf->lan_vsi]) + dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + pf->hw.mac.addr, + pf->hw.mac.san_addr, + pf->hw.mac.port_addr); list_for_each_entry(f, &vsi->mac_filter_list, list) { dev_info(&pf->pdev->dev, " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", @@ -675,7 +680,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->info.resp_reserved[8], vsi->info.resp_reserved[9], vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]); if (vsi->back) - dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back); + dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back); dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); dev_info(&pf->pdev->dev, " tc_config: numtc = %d, enabled_tc = 0x%x\n", @@ -921,9 +926,10 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) return; } dev_info(&pf->pdev->dev, - "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n", + "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, - veb->uplink_seid); + veb->uplink_seid, + veb->bridge_mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); i40e_dbg_dump_eth_stats(pf, &veb->stats); } @@ -945,7 +951,7 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) /** * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR - * @pf: the pf that would be altered + * @pf: the PF that would be altered * @flag: flag that needs enabling or disabling * @enable: Enable/disable FD SD/ATR **/ @@ -957,7 +963,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) pf->flags &= ~flag; pf->auto_disable_flags |= flag; } - dev_info(&pf->pdev->dev, "requesting a pf reset\n"); + dev_info(&pf->pdev->dev, "requesting a PF reset\n"); i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); } @@ -1487,11 +1493,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); } - } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) { - i40e_pf_reset_stats(pf); - dev_info(&pf->pdev->dev, "pf clear stats called\n"); + } else if (strncmp(&cmd_buf[12], "port", 4) == 0) { + if (pf->hw.partition_id == 1) { + i40e_pf_reset_stats(pf); + dev_info(&pf->pdev->dev, "port stats cleared\n"); + } else { + dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); + } } else { - dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n"); + dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); } } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { struct i40e_aq_desc *desc; @@ -1897,7 +1907,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " read <reg>\n"); dev_info(&pf->pdev->dev, " write <reg> <value>\n"); dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); - dev_info(&pf->pdev->dev, " clear_stats pf\n"); + dev_info(&pf->pdev->dev, " clear_stats port\n"); dev_info(&pf->pdev->dev, " pfr\n"); dev_info(&pf->pdev->dev, " corer\n"); dev_info(&pf->pdev->dev, " globr\n"); @@ -1935,7 +1945,7 @@ static const struct file_operations i40e_dbg_command_fops = { * The netdev_ops entry in debugfs is for giving the driver commands * to be executed from the netdev operations. **************************************************************/ -static char i40e_dbg_netdev_ops_buf[256] = "hello world"; +static char i40e_dbg_netdev_ops_buf[256] = ""; /** * i40e_dbg_netdev_ops - read for netdev_ops datum @@ -2123,8 +2133,8 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = { }; /** - * i40e_dbg_pf_init - setup the debugfs directory for the pf - * @pf: the pf that is starting up + * i40e_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { @@ -2160,8 +2170,8 @@ create_failed: } /** - * i40e_dbg_pf_exit - clear out the pf's debugfs entries - * @pf: the pf that is stopping + * i40e_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping **/ void i40e_dbg_pf_exit(struct i40e_pf *pf) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b8230dc205ec..c848b1862512 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -113,7 +113,6 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), I40E_PF_STAT("tx_errors", stats.eth.tx_errors), I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), - I40E_PF_STAT("tx_dropped", stats.eth.tx_discards), I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), I40E_PF_STAT("crc_errors", stats.crc_errors), I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), @@ -218,6 +217,13 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) +static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { + "NPAR", +}; + +#define I40E_PRIV_FLAGS_STR_LEN \ + (sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN) + /** * i40e_partition_setting_complaint - generic complaint for MFP restriction * @pf: the PF struct @@ -229,73 +235,20 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) } /** - * i40e_get_settings - Get Link Speed and Duplex settings + * i40e_get_settings_link_up - Get the Link settings for when link is up + * @hw: hw structure + * @ecmd: ethtool command to fill in * @netdev: network interface device structure - * @ecmd: ethtool command * - * Reports speed/duplex settings based on media_type **/ -static int i40e_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static void i40e_get_settings_link_up(struct i40e_hw *hw, + struct ethtool_cmd *ecmd, + struct net_device *netdev) { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_pf *pf = np->vsi->back; - struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; - bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; u32 link_speed = hw_link_info->link_speed; - /* hardware is either in 40G mode or 10G mode - * NOTE: this section initializes supported and advertising - */ - if (!link_up) { - /* link is down and the driver needs to fall back on - * device ID to determine what kinds of info to display, - * it's mostly a guess that may change when link is up - */ - switch (hw->device_id) { - case I40E_DEV_ID_QSFP_A: - case I40E_DEV_ID_QSFP_B: - case I40E_DEV_ID_QSFP_C: - /* pluggable QSFP */ - ecmd->supported = SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseLR4_Full; - ecmd->advertising = ADVERTISED_40000baseSR4_Full | - ADVERTISED_40000baseCR4_Full | - ADVERTISED_40000baseLR4_Full; - break; - case I40E_DEV_ID_KX_B: - /* backplane 40G */ - ecmd->supported = SUPPORTED_40000baseKR4_Full; - ecmd->advertising = ADVERTISED_40000baseKR4_Full; - break; - case I40E_DEV_ID_KX_C: - /* backplane 10G */ - ecmd->supported = SUPPORTED_10000baseKR_Full; - ecmd->advertising = ADVERTISED_10000baseKR_Full; - break; - case I40E_DEV_ID_10G_BASE_T: - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; - default: - /* all the rest are 10G/1G */ - ecmd->supported = SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full; - break; - } - - /* skip phy_type use as it is zero when link is down */ - goto no_valid_phy_type; - } - + /* Initialize supported and advertised settings based on phy settings */ switch 
(hw_link_info->phy_type) { case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: @@ -304,6 +257,11 @@ static int i40e_get_settings(struct net_device *netdev, ecmd->advertising = ADVERTISED_Autoneg | ADVERTISED_40000baseCR4_Full; break; + case I40E_PHY_TYPE_XLAUI: + case I40E_PHY_TYPE_XLPPI: + case I40E_PHY_TYPE_40GBASE_AOC: + ecmd->supported = SUPPORTED_40000baseCR4_Full; + break; case I40E_PHY_TYPE_40GBASE_KR4: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_40000baseKR4_Full; @@ -311,13 +269,17 @@ static int i40e_get_settings(struct net_device *netdev, ADVERTISED_40000baseKR4_Full; break; case I40E_PHY_TYPE_40GBASE_SR4: - case I40E_PHY_TYPE_XLPPI: - case I40E_PHY_TYPE_XLAUI: ecmd->supported = SUPPORTED_40000baseSR4_Full; break; case I40E_PHY_TYPE_40GBASE_LR4: ecmd->supported = SUPPORTED_40000baseLR4_Full; break; + case I40E_PHY_TYPE_20GBASE_KR2: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_20000baseKR2_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_20000baseKR2_Full; + break; case I40E_PHY_TYPE_10GBASE_KX4: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseKX4_Full; @@ -334,55 +296,56 @@ static int i40e_get_settings(struct net_device *netdev, case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->supported |= SUPPORTED_1000baseT_Full; + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + case I40E_PHY_TYPE_1000BASE_KX: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_1000baseKX_Full; + ecmd->advertising = ADVERTISED_Autoneg | + ADVERTISED_1000baseKX_Full; break; - case I40E_PHY_TYPE_10GBASE_CR1_CU: - case I40E_PHY_TYPE_10GBASE_CR1: case I40E_PHY_TYPE_10GBASE_T: + case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + ecmd->supported = SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; + ADVERTISED_10000baseT_Full; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_SFI: case I40E_PHY_TYPE_10GBASE_SFPP_CU: + case I40E_PHY_TYPE_10GBASE_AOC: ecmd->supported = SUPPORTED_10000baseT_Full; break; - case I40E_PHY_TYPE_1000BASE_KX: - case I40E_PHY_TYPE_1000BASE_T: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; - case I40E_PHY_TYPE_100BASE_TX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - 
ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; - break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full; - ecmd->advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full | - ADVERTISED_100baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; break; default: /* if we got here and link is up something bad is afoot */ @@ -390,8 +353,126 @@ static int i40e_get_settings(struct net_device *netdev, hw_link_info->phy_type); } -no_valid_phy_type: - /* this is if autoneg is enabled or disabled */ + /* Set speed and duplex */ + switch (link_speed) { + case I40E_LINK_SPEED_40GB: + /* need a SPEED_40000 in ethtool.h */ + ethtool_cmd_speed_set(ecmd, 40000); + break; + case I40E_LINK_SPEED_20GB: + ethtool_cmd_speed_set(ecmd, SPEED_20000); + break; + case I40E_LINK_SPEED_10GB: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; + case I40E_LINK_SPEED_1GB: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case I40E_LINK_SPEED_100MB: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; + default: + break; + } + ecmd->duplex = DUPLEX_FULL; +} + +/** + * i40e_get_settings_link_down - Get the Link settings for when link is down + * @hw: hw structure + * @ecmd: ethtool command to fill in + * + * Reports link settings that can be determined when link is down + **/ +static void i40e_get_settings_link_down(struct i40e_hw *hw, + struct ethtool_cmd *ecmd) +{ + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + + /* link is down and the driver needs to fall back on + * device ID to determine what kinds of info to display, + * it's mostly a guess that may change when link is up + */ + switch (hw->device_id) { + case I40E_DEV_ID_QSFP_A: + case I40E_DEV_ID_QSFP_B: + case I40E_DEV_ID_QSFP_C: + /* pluggable QSFP */ + ecmd->supported = SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseLR4_Full; + ecmd->advertising = ADVERTISED_40000baseSR4_Full | + ADVERTISED_40000baseCR4_Full | + ADVERTISED_40000baseLR4_Full; + break; + case I40E_DEV_ID_KX_B: + /* backplane 40G */ + ecmd->supported = SUPPORTED_40000baseKR4_Full; + ecmd->advertising = ADVERTISED_40000baseKR4_Full; + break; + case I40E_DEV_ID_KX_C: + /* backplane 10G */ + ecmd->supported = SUPPORTED_10000baseKR_Full; + ecmd->advertising = ADVERTISED_10000baseKR_Full; + break; + case I40E_DEV_ID_10G_BASE_T: + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; + break; + case I40E_DEV_ID_20G_KR2: + /* backplane 20G */ + ecmd->supported = SUPPORTED_20000baseKR2_Full; + ecmd->advertising = ADVERTISED_20000baseKR2_Full; + break; + default: + /* all the rest are 10G/1G */ + ecmd->supported = SUPPORTED_10000baseT_Full | + SUPPORTED_1000baseT_Full; + /* Figure out what has been requested */ + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ecmd->advertising |= 
ADVERTISED_10000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + break; + } + + /* With no link speed and duplex are unknown */ + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; +} + +/** + * i40e_get_settings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ecmd: ethtool command + * + * Reports speed/duplex settings based on media_type + **/ +static int i40e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + struct i40e_link_status *hw_link_info = &hw->phy.link_info; + bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; + + if (link_up) + i40e_get_settings_link_up(hw, ecmd, netdev); + else + i40e_get_settings_link_down(hw, ecmd); + + /* Now set the settings that don't rely on link being up/down */ + + /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -424,11 +505,13 @@ no_valid_phy_type: break; } + /* Set transceiver */ ecmd->transceiver = XCVR_EXTERNAL; + /* Set flow control settings */ ecmd->supported |= SUPPORTED_Pause; - switch (hw->fc.current_mode) { + switch (hw->fc.requested_mode) { case I40E_FC_FULL: ecmd->advertising |= ADVERTISED_Pause; break; @@ -445,30 +528,6 @@ no_valid_phy_type: break; } - if (link_up) { - switch (link_speed) { - case I40E_LINK_SPEED_40GB: - /* need a SPEED_40000 in ethtool.h */ - ethtool_cmd_speed_set(ecmd, 40000); - break; - case I40E_LINK_SPEED_10GB: - ethtool_cmd_speed_set(ecmd, SPEED_10000); - break; - case I40E_LINK_SPEED_1GB: - ethtool_cmd_speed_set(ecmd, SPEED_1000); - break; - case I40E_LINK_SPEED_100MB: - ethtool_cmd_speed_set(ecmd, SPEED_100); - break; - default: - break; - } - ecmd->duplex = DUPLEX_FULL; - } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; - } - return 0; } @@ -588,6 +647,8 @@ static int i40e_set_settings(struct net_device *netdev, advertise & ADVERTISED_10000baseKX4_Full || advertise & ADVERTISED_10000baseKR_Full) config.link_speed |= I40E_LINK_SPEED_10GB; + if (advertise & ADVERTISED_20000baseKR2_Full) + config.link_speed |= I40E_LINK_SPEED_20GB; if (advertise & ADVERTISED_40000baseKR4_Full || advertise & ADVERTISED_40000baseCR4_Full || advertise & ADVERTISED_40000baseSR4_Full || @@ -601,6 +662,8 @@ static int i40e_set_settings(struct net_device *netdev, config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; + /* save the requested speeds */ + hw->phy.link_info.requested_speeds = config.link_speed; /* set link and auto negotiation so changes take effect */ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; /* If link is up put link down */ @@ -621,7 +684,7 @@ static int i40e_set_settings(struct net_device *netdev, return -EAGAIN; } - status = i40e_update_link_info(hw, true); + status = i40e_aq_get_link_info(hw, true, NULL, NULL); if (status) netdev_info(netdev, "Updating link info failed with error %d\n", status); @@ -767,7 +830,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, err = -EAGAIN; } if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) { - netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n", + netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n", status, hw->aq.asq_last_status); err = -EAGAIN; } @@ 
-870,7 +933,9 @@ static int i40e_get_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val) + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) || + (hw->debug_mask & I40E_DEBUG_NVM))) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -974,7 +1039,10 @@ static int i40e_set_eeprom(struct net_device *netdev, cmd = (struct i40e_nvm_access *)eeprom; ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) + if (ret_val && + ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM && + hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) || + (hw->debug_mask & I40E_DEBUG_NVM))) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -998,6 +1066,7 @@ static void i40e_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1176,7 +1245,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_TEST: return I40E_TEST_LEN; case ETH_SS_STATS: - if (vsi == pf->vsi[pf->lan_vsi]) { + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { int len = I40E_PF_STATS_LEN(netdev); if (pf->lan_veb != I40E_NO_VEB) @@ -1185,6 +1254,8 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } else { return I40E_VSI_STATS_LEN(netdev); } + case ETH_SS_PRIV_FLAGS: + return I40E_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1247,7 +1318,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i += 2; } rcu_read_unlock(); - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; if (pf->lan_veb != I40E_NO_VEB) { @@ -1320,7 +1391,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); p += ETH_GSTRING_LEN; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; if (pf->lan_veb != I40E_NO_VEB) { @@ -1358,6 +1429,15 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + default: + break; } } @@ -1473,6 +1553,7 @@ static void i40e_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); + bool if_running = netif_running(netdev); struct i40e_pf *pf = np->vsi->back; if (eth_test->flags == ETH_TEST_FL_OFFLINE) { @@ -1480,6 +1561,12 @@ static void i40e_diag_test(struct net_device *netdev, netif_info(pf, drv, netdev, "offline testing starting\n"); set_bit(__I40E_TESTING, &pf->state); + /* If the device is online then take it offline */ + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); /* Link test performed before hardware reset * so autoneg doesn't interfere with test result @@ -1502,6 +1589,9 @@ static void i40e_diag_test(struct 
net_device *netdev, clear_bit(__I40E_TESTING, &pf->state); i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + + if (if_running) + dev_open(netdev); } else { /* Online tests */ netif_info(pf, drv, netdev, "online testing starting\n"); @@ -1599,6 +1689,8 @@ static int i40e_set_phys_id(struct net_device *netdev, case ETHTOOL_ID_INACTIVE: i40e_led_set(hw, pf->led_status, false); break; + default: + break; } return 0; @@ -1703,6 +1795,11 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) { cmd->data = 0; + if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { + cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; + cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; + return 0; + } /* Report default options for RSS on i40e */ switch (cmd->flow_type) { case TCP_V4_FLOW: @@ -1974,6 +2071,9 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); + /* Save setting for future output/update */ + pf->vsi[pf->lan_vsi]->rxnfc = *nfc; + return 0; } @@ -2281,10 +2381,6 @@ static int i40e_set_channels(struct net_device *dev, /* update feature limits from largest to smallest supported values */ /* TODO: Flow director limit, DCB etc */ - /* cap RSS limit */ - if (count > pf->rss_size_max) - count = pf->rss_size_max; - /* use rss_reconfig to rebuild with new queue count and update traffic * class queue mapping */ @@ -2295,6 +2391,133 @@ static int i40e_set_channels(struct net_device *dev, return -EINVAL; } +#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +/** + * i40e_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40e_get_rxfh_key_size(struct net_device *netdev) +{ + return I40E_HKEY_ARRAY_SIZE; +} + +/** + * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) +{ + return I40E_HLUT_ARRAY_SIZE; +} + +static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HLUT(i)); + indir[j++] = reg_val & 0xff; + indir[j++] = (reg_val >> 8) & 0xff; + indir[j++] = (reg_val >> 16) & 0xff; + indir[j++] = (reg_val >> 24) & 0xff; + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = rd32(hw, I40E_PFQF_HKEY(i)); + key[j++] = (u8)(reg_val & 0xff); + key[j++] = (u8)((reg_val >> 8) & 0xff); + key[j++] = (u8)((reg_val >> 16) & 0xff); + key[j++] = (u8)((reg_val >> 24) & 0xff); + } + } + return 0; +} + +/** + * i40e_set_rxfh - set the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * + * Returns -EINVAL if the table specifies an inavlid queue id, otherwise + * returns 0 after programming the table. 
+ **/ +static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 reg_val; + int i, j; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!indir) + return 0; + + for (i = 0, j = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) { + reg_val = indir[j++]; + reg_val |= indir[j++] << 8; + reg_val |= indir[j++] << 16; + reg_val |= indir[j++] << 24; + wr32(hw, I40E_PFQF_HLUT(i), reg_val); + } + + if (key) { + for (i = 0, j = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg_val = key[j++]; + reg_val |= key[j++] << 8; + reg_val |= key[j++] << 16; + reg_val |= key[j++] << 24; + wr32(hw, I40E_PFQF_HKEY(i), reg_val); + } + } + return 0; +} + +/** + * i40e_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 i40e_get_priv_flags(struct net_device *dev) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + u32 ret_flags = 0; + + ret_flags |= pf->hw.func_caps.npar_enable ? + I40E_PRIV_FLAGS_NPAR_FLAG : 0; + + return ret_flags; +} + static const struct ethtool_ops i40e_ethtool_ops = { .get_settings = i40e_get_settings, .set_settings = i40e_set_settings, @@ -2323,9 +2546,14 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_ethtool_stats = i40e_get_ethtool_stats, .get_coalesce = i40e_get_coalesce, .set_coalesce = i40e_set_coalesce, + .get_rxfh_key_size = i40e_get_rxfh_key_size, + .get_rxfh_indir_size = i40e_get_rxfh_indir_size, + .get_rxfh = i40e_get_rxfh, + .set_rxfh = i40e_set_rxfh, .get_channels = i40e_get_channels, .set_channels = i40e_set_channels, .get_ts_info = i40e_get_ts_info, + .get_priv_flags = i40e_get_priv_flags, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 27c206e62da7..1803afeef23e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
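The get_rxfh/set_rxfh hooks added above expose the RSS indirection table and hash key by packing four table bytes into each 32-bit HLUT/HKEY register, least-significant byte first. A self-contained round-trip of that packing (the byte values are arbitrary):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t lut[4] = { 0x01, 0x02, 0x03, 0x04 };
	uint32_t reg;
	uint8_t out[4];
	int j = 0, k = 0;

	/* set path: fold four LUT bytes into one register value */
	reg  = lut[j++];
	reg |= lut[j++] << 8;
	reg |= lut[j++] << 16;
	reg |= lut[j++] << 24;
	assert(reg == 0x04030201);

	/* get path: unfold the register back into bytes */
	out[k++] = reg & 0xff;
	out[k++] = (reg >> 8) & 0xff;
	out[k++] = (reg >> 16) & 0xff;
	out[k++] = (reg >> 24) & 0xff;
	assert(out[0] == 0x01 && out[3] == 0x04);
	return 0;
}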
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -24,7 +24,6 @@ * ******************************************************************************/ - #include <linux/if_ether.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -150,7 +149,7 @@ static inline bool i40e_fcoe_xid_is_valid(u16 xid) /** * i40e_fcoe_ddp_unmap - unmap the mapped sglist associated - * @pf: pointer to pf + * @pf: pointer to PF * @ddp: sw DDP context * * Unmap the scatter-gather list associated with the given SW DDP context @@ -269,7 +268,7 @@ out: /** * i40e_fcoe_sw_init - sets up the HW for FCoE - * @pf: pointer to pf + * @pf: pointer to PF * * Returns 0 if FCoE is supported otherwise the error code **/ @@ -329,7 +328,7 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf) /** * i40e_get_fcoe_tc_map - Return TC map for FCoE APP - * @pf: pointer to pf + * @pf: pointer to PF * **/ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) @@ -381,12 +380,11 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) ctxt->pf_num = hw->pf_id; ctxt->vf_num = 0; ctxt->uplink_seid = vsi->uplink_seid; - ctxt->connection_type = 0x1; + ctxt->connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt->flags = I40E_AQ_VSI_TYPE_PF; /* FCoE VSI would need the following sections */ - info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID | - I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); + info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); /* FCoE VSI does not need these sections */ info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID | @@ -395,7 +393,12 @@ int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt) I40E_AQ_VSI_PROP_INGRESS_UP_VALID | I40E_AQ_VSI_PROP_EGRESS_UP_VALID)); - info->switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + info->valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + info->switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } enabled_tc = i40e_get_fcoe_tc_map(pf); i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true); @@ -1303,8 +1306,7 @@ static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring, /* MACLEN is ether header length in words not bytes */ td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; - return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); + i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); } /** @@ -1443,7 +1445,6 @@ static int i40e_fcoe_set_features(struct net_device *netdev, return 0; } - static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -1470,6 +1471,11 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_set_features = i40e_fcoe_set_features, }; +/* fcoe network device type */ +static struct device_type fcoe_netdev_type = { + .name = "fcoe", +}; + /** * i40e_fcoe_config_netdev - prepares the VSI context for creating a FCoE VSI * @vsi: pointer to the associated VSI struct @@ -1503,6 +1509,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1); netdev->mtu = FCOE_MTU; SET_NETDEV_DEV(netdev, &pf->pdev->dev); + SET_NETDEV_DEVTYPE(netdev, &fcoe_netdev_type); /* set different dev_port value 1 for FCoE netdev than the default * zero dev_port value for PF netdev, this helps biosdevname user * tool to differentiate them correctly while both attached to the @@ 
-1523,7 +1530,7 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) /** * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI - * @pf: the pf that VSI is associated with + * @pf: the PF that VSI is associated with * **/ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) @@ -1550,7 +1557,7 @@ void i40e_fcoe_vsi_setup(struct i40e_pf *pf) vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0); if (vsi) { dev_dbg(&pf->pdev->dev, - "Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n", + "Successfully created FCoE VSI seid %d id %d uplink_seid %d PF seid %d\n", vsi->seid, vsi->id, vsi->uplink_seid, seid); } else { dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h index 21e0f582031c..0d49e2d15d40 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h @@ -37,7 +37,6 @@ #define I40E_FILTER_CONTEXT_DESC(R, i) \ (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i])) - /* receive queue descriptor filter status for FCoE */ #define I40E_RX_DESC_FLTSTAT_FCMASK 0x3 #define I40E_RX_DESC_FLTSTAT_NOMTCH 0x0 /* no ddp context match */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index 4627588f4613..0079ad7bcd0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -856,7 +856,7 @@ static void i40e_write_dword(u8 *hmc_bits, if (ce_info->width < 32) mask = ((u32)1 << ce_info->width) - 1; else - mask = 0xFFFFFFFF; + mask = ~(u32)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines @@ -908,7 +908,7 @@ static void i40e_write_qword(u8 *hmc_bits, if (ce_info->width < 64) mask = ((u64)1 << ce_info->width) - 1; else - mask = 0xFFFFFFFFFFFFFFFF; + mask = ~(u64)0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index dadda3c5d658..63de3f4b7a94 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,8 +38,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 6 +#define DRV_VERSION_MINOR 3 +#define DRV_VERSION_BUILD 1 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -75,6 +75,7 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, /* required last entry */ {0, } }; @@ -249,6 +250,22 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) } /** + * i40e_find_vsi_from_id - searches for the vsi with the given id + * @pf - the pf structure to search for the vsi + * @id - id of the vsi it is searching for + **/ +struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) +{ + int i; + + for (i = 0; i < pf->num_alloc_vsi; i++) + if (pf->vsi[i] && (pf->vsi[i]->id == id)) + return pf->vsi[i]; + + return NULL; +} + +/** * i40e_service_event_schedule - Schedule the service task to wake up * @pf: board private structure * @@ -450,7 +467,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) } /** - * i40e_pf_reset_stats - Reset all of the stats for the given pf + * i40e_pf_reset_stats - Reset all of the stats for the given PF * @pf: the PF to be reset **/ void i40e_pf_reset_stats(struct i40e_pf *pf) @@ -896,7 +913,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) } /** - * i40e_update_pf_stats - Update the pf statistics counters. + * i40e_update_pf_stats - Update the PF statistics counters. * @pf: the PF to be updated **/ static void i40e_update_pf_stats(struct i40e_pf *pf) @@ -919,11 +936,6 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); - i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), - pf->stat_offsets_loaded, - &osd->eth.tx_discards, - &nsd->eth.tx_discards); - i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, @@ -1133,7 +1145,7 @@ void i40e_update_stats(struct i40e_vsi *vsi) * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL @@ -1161,7 +1173,7 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, * i40e_find_mac - Find a mac addr in the macvlan filters list * @vsi: the VSI to be searched * @macaddr: the MAC address we are searching for - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns the first filter with the provided MAC address or NULL if @@ -1209,7 +1221,7 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans * @vsi: the VSI to be searched * @macaddr: the mac address to be filtered - * @is_vf: true if it is a vf + * @is_vf: true if it is a VF * @is_netdev: true if it is a netdev * * Goes through all the macvlan filters and adds a @@ -1270,7 +1282,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure its a vf filter, else doesn't matter + * @is_vf: make sure its a VF filter, else doesn't matter * @is_netdev: make sure its a netdev filter, else doesn't matter * * Returns ptr to the filter object or NULL when no memory available. 
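The i40e_find_vsi_from_id() helper added in the hunk above is a plain linear scan over the PF's VSI pointer array, skipping unallocated slots. A minimal standalone sketch of the same lookup pattern follows; the struct vsi here is a simplified stand-in, not the driver's real struct i40e_vsi layout:

#include <stdio.h>

struct vsi {
	unsigned short id;
	const char *name;
};

/* Linear scan over a sparse pointer array, as i40e_find_vsi_from_id()
 * does over pf->vsi[]: NULL slots are legal and simply skipped.
 */
static struct vsi *find_vsi_from_id(struct vsi **table, int n,
				    unsigned short id)
{
	for (int i = 0; i < n; i++)
		if (table[i] && table[i]->id == id)
			return table[i];
	return NULL;	/* no VSI with that id */
}

int main(void)
{
	struct vsi a = { 3, "lan" }, b = { 7, "fcoe" };
	struct vsi *table[4] = { &a, NULL, &b, NULL };
	struct vsi *hit = find_vsi_from_id(table, 4, 7);

	printf("%s\n", hit ? hit->name : "not found");
	return 0;
}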
@@ -1330,7 +1342,7 @@ add_filter_out: * @vsi: the VSI to be searched * @macaddr: the MAC address * @vlan: the vlan - * @is_vf: make sure it's a vf filter, else doesn't matter + * @is_vf: make sure it's a VF filter, else doesn't matter * @is_netdev: make sure it's a netdev filter, else doesn't matter **/ void i40e_del_filter(struct i40e_vsi *vsi, @@ -1357,7 +1369,7 @@ void i40e_del_filter(struct i40e_vsi *vsi, f->counter--; } } else { - /* make sure we don't remove a filter in use by vf or netdev */ + /* make sure we don't remove a filter in use by VF or netdev */ int min_f = 0; min_f += (f->is_vf ? 1 : 0); min_f += (f->is_netdev ? 1 : 0); @@ -1546,7 +1558,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.tc_info[i].qoffset = offset; vsi->tc_config.tc_info[i].qcount = qcount; - /* find the power-of-2 of the number of queue pairs */ + /* find the next higher power-of-2 of num queue pairs */ num_qps = qcount; pow = 0; while (num_qps && ((1 << pow) < qcount)) { @@ -1576,6 +1588,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, /* Set actual Tx/Rx queue pairs */ vsi->num_queue_pairs = offset; + if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) { + if (vsi->req_queue_pairs > 0) + vsi->num_queue_pairs = vsi->req_queue_pairs; + else + vsi->num_queue_pairs = pf->num_lan_msix; + } /* Scheduler section valid can only be set for ADD VSI */ if (is_add) { @@ -1967,7 +1985,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi) I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; ctxt.seid = vsi->seid; - memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, @@ -1996,7 +2014,7 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi) I40E_AQ_VSI_PVLAN_EMOD_NOTHING; ctxt.seid = vsi->seid; - memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info = vsi->info; ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, @@ -2280,7 +2298,7 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid) I40E_AQ_VSI_PVLAN_EMOD_STR; ctxt.seid = vsi->seid; - memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info = vsi->info; aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (aq_ret) { dev_info(&vsi->back->pdev->dev, @@ -2398,20 +2416,20 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) struct i40e_vsi *vsi = ring->vsi; cpumask_var_t mask; - if (ring->q_vector && ring->netdev) { - /* Single TC mode enable XPS */ - if (vsi->tc_config.numtc <= 1 && - !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + if (!ring->q_vector || !ring->netdev) + return; + + /* Single TC mode enable XPS */ + if (vsi->tc_config.numtc <= 1) { + if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, ring->queue_index); - } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - /* Disable XPS to allow selection based on TC */ - bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); - netif_set_xps_queue(ring->netdev, mask, - ring->queue_index); - free_cpumask_var(mask); - } + } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + /* Disable XPS to allow selection based on TC */ + bitmap_zero(cpumask_bits(mask), nr_cpumask_bits); + netif_set_xps_queue(ring->netdev, mask, ring->queue_index); + free_cpumask_var(mask); } } @@ -2596,7 +2614,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + 
I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + if (ring_is_ps_enabled(ring)) { + i40e_alloc_rx_headers(ring); + i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); + } else { + i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); + } return 0; } @@ -3183,13 +3206,16 @@ static irqreturn_t i40e_intr(int irq, void *data) pf->globr_count++; } else if (val == I40E_RESET_EMPR) { pf->empr_count++; - set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); } } if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) { icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK; dev_info(&pf->pdev->dev, "HMC error interrupt\n"); + dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", + rd32(hw, I40E_PFHMC_ERRORINFO), + rd32(hw, I40E_PFHMC_ERRORDATA)); } if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { @@ -3825,6 +3851,8 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf) pci_disable_msix(pf->pdev); kfree(pf->msix_entries); pf->msix_entries = NULL; + kfree(pf->irq_pile); + pf->irq_pile = NULL; } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { pci_disable_msi(pf->pdev); } @@ -4021,7 +4049,7 @@ static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) #endif /** * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP - * @pf: pointer to pf + * @pf: pointer to PF * * Get TC map for ISCSI PF type that will include iSCSI TC * and LAN TC. @@ -4119,7 +4147,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) if (pf->hw.func_caps.iscsi) enabled_tc = i40e_get_iscsi_tc_map(pf); else - enabled_tc = pf->hw.func_caps.enabled_tcmap; + return 1; /* Only TC0 */ /* At least have TC0 */ enabled_tc = (enabled_tc ? enabled_tc : 0x1); @@ -4169,11 +4197,11 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); - /* MPF enabled and iSCSI PF type */ + /* MFP enabled and iSCSI PF type */ if (pf->hw.func_caps.iscsi) return i40e_get_iscsi_tc_map(pf); else - return pf->hw.func_caps.enabled_tcmap; + return i40e_pf_get_default_tc(pf); } /** @@ -4196,7 +4224,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); if (aq_ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi bw config, err %d, aq_err %d\n", + "couldn't get PF vsi bw config, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); return -EINVAL; } @@ -4206,7 +4234,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) NULL); if (aq_ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi ets bw config, err %d, aq_err %d\n", + "couldn't get PF vsi ets bw config, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); return -EINVAL; } @@ -4383,7 +4411,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ctxt.pf_num = vsi->back->hw.pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info)); + ctxt.info = vsi->info; i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); /* Update the VSI after updating the VSI queue-mapping information */ @@ -4563,6 +4591,11 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err = 0; + /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) + goto out; + /* Get the initial DCB configuration */ err = i40e_init_dcb(hw); if (!err) { @@ -4626,6 
+4659,9 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) case I40E_LINK_SPEED_40GB: strlcpy(speed, "40 Gbps", SPEED_SIZE); break; + case I40E_LINK_SPEED_20GB: + strncpy(speed, "20 Gbps", SPEED_SIZE); + break; case I40E_LINK_SPEED_10GB: strlcpy(speed, "10 Gbps", SPEED_SIZE); break; @@ -4853,11 +4889,7 @@ exit: * * Returns 0 on success, negative value on failure **/ -#ifdef I40E_FCOE int i40e_open(struct net_device *netdev) -#else -static int i40e_open(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -4967,7 +4999,7 @@ err_setup_tx: /** * i40e_fdir_filter_exit - Cleans up the Flow Director accounting - * @pf: Pointer to pf + * @pf: Pointer to PF * * This function destroys the hlist where all the Flow Director * filters were saved. @@ -5055,24 +5087,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) { - - /* Request a Firmware Reset - * - * Same as Global reset, plus restarting the - * embedded firmware engine. - */ - /* enable EMP Reset */ - val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP); - val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK; - wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val); - - /* force the reset */ - val = rd32(&pf->hw, I40E_GLGEN_RTRIG); - val |= I40E_GLGEN_RTRIG_EMPFWR_MASK; - wr32(&pf->hw, I40E_GLGEN_RTRIG, val); - i40e_flush(&pf->hw); - } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) { /* Request a PF Reset @@ -5195,7 +5209,6 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, struct i40e_aqc_lldp_get_mib *mib = (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw; struct i40e_hw *hw = &pf->hw; - struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; struct i40e_dcbx_config tmp_dcbx_cfg; bool need_reconfig = false; int ret = 0; @@ -5226,10 +5239,11 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, goto exit; } - memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg)); /* Store the old configuration */ - tmp_dcbx_cfg = *dcbx_cfg; + tmp_dcbx_cfg = hw->local_dcbx_config; + /* Reset the old DCBx configuration data */ + memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config)); /* Get updated DCBX data from firmware */ ret = i40e_get_dcb_config(&pf->hw); if (ret) { @@ -5238,20 +5252,22 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, } /* No change detected in DCBX configs */ - if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { + if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config, + sizeof(tmp_dcbx_cfg))) { dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); goto exit; } - need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, dcbx_cfg); + need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, + &hw->local_dcbx_config); - i40e_dcbnl_flush_apps(pf, dcbx_cfg); + i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); if (!need_reconfig) goto exit; /* Enable DCB tagging only when more than one TC */ - if (i40e_dcb_get_num_tc(dcbx_cfg) > 1) + if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; else pf->flags &= ~I40E_FLAG_DCB_ENABLED; @@ -5351,9 +5367,9 @@ static void i40e_service_event_complete(struct i40e_pf *pf) * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters * @pf: board private structure **/ -int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) +u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) { - int val, 
fcnt_prog; + u32 val, fcnt_prog; val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK); @@ -5361,12 +5377,13 @@ int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) } /** - * i40e_get_current_fd_count - Get the count of total FD filters programmed + * i40e_get_current_fd_count - Get total FD filters programmed for this PF * @pf: board private structure **/ -int i40e_get_current_fd_count(struct i40e_pf *pf) +u32 i40e_get_current_fd_count(struct i40e_pf *pf) { - int val, fcnt_prog; + u32 val, fcnt_prog; + val = rd32(&pf->hw, I40E_PFQF_FDSTAT); fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) + ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >> @@ -5375,6 +5392,21 @@ int i40e_get_current_fd_count(struct i40e_pf *pf) } /** + * i40e_get_global_fd_count - Get total FD filters programmed on device + * @pf: board private structure + **/ +u32 i40e_get_global_fd_count(struct i40e_pf *pf) +{ + u32 val, fcnt_prog; + + val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); + fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) + + ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >> + I40E_GLQF_FDCNT_0_BESTCNT_SHIFT); + return fcnt_prog; +} + +/** * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled * @pf: board private structure **/ @@ -5388,7 +5420,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) /* Check if, FD SB or ATR was auto disabled and if there is enough room * to re-enable */ - fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); + fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || (pf->fd_add_err == 0) || @@ -5410,13 +5442,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) } #define I40E_MIN_FD_FLUSH_INTERVAL 10 +#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30 /** * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB * @pf: board private structure **/ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) { + unsigned long min_flush_time; int flush_wait_retry = 50; + bool disable_atr = false; + int fd_room; int reg; if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) @@ -5424,9 +5460,20 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (time_after(jiffies, pf->fd_flush_timestamp + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { - set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + /* If the flush is happening too quick and we have mostly + * SB rules we should not re-enable ATR for some time. 
+ */ + min_flush_time = pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ); + fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; + + if (!(time_after(jiffies, min_flush_time)) && + (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { + dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); + disable_atr = true; + } + pf->fd_flush_timestamp = jiffies; - pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; /* flush all filters */ wr32(&pf->hw, I40E_PFQF_CTL_1, @@ -5446,10 +5493,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) } else { /* replay sideband filters */ i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); - - pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + if (!disable_atr) + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } @@ -5460,7 +5505,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed * @pf: board private structure **/ -int i40e_get_current_atr_cnt(struct i40e_pf *pf) +u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) { return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } @@ -5486,9 +5531,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) return; - if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) && - (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) && - (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count)) + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) i40e_fdir_flush_and_replay(pf); i40e_fdir_check_and_reenable(pf); @@ -5757,11 +5800,9 @@ static void i40e_handle_link_event(struct i40e_pf *pf, struct i40e_hw *hw = &pf->hw; struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; - struct i40e_link_status *hw_link_info = &hw->phy.link_info; /* save off old link status information */ - memcpy(&pf->hw.phy.link_info_old, hw_link_info, - sizeof(pf->hw.phy.link_info_old)); + hw->phy.link_info_old = hw->phy.link_info; /* Do a new status request to re-enable LSE reporting * and load new status information into the hw struct @@ -5875,6 +5916,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) case i40e_aqc_opc_send_msg_to_peer: dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); break; + case i40e_aqc_opc_nvm_erase: + case i40e_aqc_opc_nvm_update: + i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); + break; default: dev_info(&pf->pdev->dev, "ARQ Error: Unknown event 0x%04x received\n", @@ -5919,6 +5964,94 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) } /** + * i40e_enable_pf_switch_lb + * @pf: pointer to the PF structure + * + * enable switch loop back or die - no point in a return value + **/ +static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + 
ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_disable_pf_switch_lb + * @pf: pointer to the PF structure + * + * disable switch loop back or die - no point in a return value + **/ +static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi_context ctxt; + int aq_ret; + + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s couldn't get PF vsi config, err %d, aq_err %d\n", + __func__, aq_ret, pf->hw.aq.asq_last_status); + return; + } + ctxt.flags = I40E_AQ_VSI_TYPE_PF; + ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + + aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "%s: update vsi switch failed, aq_err=%d\n", + __func__, vsi->back->hw.aq.asq_last_status); + } +} + +/** + * i40e_config_bridge_mode - Configure the HW bridge mode + * @veb: pointer to the bridge instance + * + * Configure the loop back mode for the LAN VSI that is downlink to the + * specified HW bridge instance. It is expected this function is called + * when a new HW bridge is instantiated. + **/ +static void i40e_config_bridge_mode(struct i40e_veb *veb) +{ + struct i40e_pf *pf = veb->pf; + + dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", + veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + if (veb->bridge_mode & BRIDGE_MODE_VEPA) + i40e_disable_pf_switch_lb(pf); + else + i40e_enable_pf_switch_lb(pf); +} + +/** * i40e_reconstitute_veb - rebuild the VEB and anything connected to it * @veb: pointer to the VEB instance * @@ -5964,8 +6097,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; - /* Enable LB mode for the main VSI now that it is on a VEB */ - i40e_enable_pf_switch_lb(pf); + i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ for (v = 0; v < pf->num_alloc_vsi; v++) { @@ -6137,7 +6269,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure * - * Close up the VFs and other things in prep for pf Reset. + * Close up the VFs and other things in prep for PF Reset. 
**/ static void i40e_prep_for_reset(struct i40e_pf *pf) { @@ -6222,10 +6354,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } /* re-verify the eeprom if we just had an EMP reset */ - if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) { - clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state); + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) i40e_verify_eeprom(pf); - } i40e_clear_pxe_mode(hw); ret = i40e_get_capabilities(pf); @@ -6335,13 +6465,14 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) } } - msleep(75); - ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); - if (ret) { - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (ret) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); } - /* reinit the misc interrupt */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) ret = i40e_setup_misc_vector(pf); @@ -6364,7 +6495,7 @@ clear_recovery: } /** - * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild + * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild * @pf: board private structure * * Close up the VFs and other things in prep for a Core Reset, @@ -6378,7 +6509,7 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf) /** * i40e_handle_mdd_event - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * Called from the MDD irq handler to identify possibly malicious vfs **/ @@ -6407,7 +6538,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_TX_QUEUE_SHIFT) - pf->hw.func_caps.base_queue; if (netif_msg_tx_err(pf)) - dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", event, queue, pf_num, vf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; @@ -6493,7 +6624,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; i40e_status ret; - u8 filter_index; __be16 port; int i; @@ -6506,22 +6636,20 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) if (pf->pending_vxlan_bitmap & (1 << i)) { pf->pending_vxlan_bitmap &= ~(1 << i); port = pf->vxlan_ports[i]; - ret = port ? - i40e_aq_add_udp_tunnel(hw, ntohs(port), + if (port) + ret = i40e_aq_add_udp_tunnel(hw, ntohs(port), I40E_AQC_TUNNEL_TYPE_VXLAN, - &filter_index, NULL) - : i40e_aq_del_udp_tunnel(hw, i, NULL); + NULL, NULL); + else + ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { - dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n", - port ? "adding" : "deleting", - ntohs(port), port ? i : i); - + dev_info(&pf->pdev->dev, + "%s vxlan port %d, index %d failed, err %d, aq_err %d\n", + port ? "add" : "delete", + ntohs(port), i, ret, + pf->hw.aq.asq_last_status); pf->vxlan_ports[i] = 0; - } else { - dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n", - port ? "Added" : "Deleted", - ntohs(port), port ? 
i : filter_index); } } } @@ -6728,6 +6856,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) vsi->idx = vsi_idx; vsi->rx_itr_setting = pf->rx_itr_default; vsi->tx_itr_setting = pf->tx_itr_default; + vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? + pf->rss_table_size : 64; vsi->netdev_registered = false; vsi->work_limit = I40E_DEFAULT_IRQ_WORK; INIT_LIST_HEAD(&vsi->mac_filter_list); @@ -6808,7 +6938,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi) goto unlock_vsi; } - /* updates the pf for this cleared vsi */ + /* updates the PF for this cleared vsi */ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); @@ -6921,15 +7051,14 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) * * Work with the OS to set up the MSIX vectors needed. * - * Returns 0 on success, negative on failure + * Returns the number of vectors reserved or negative on failure **/ static int i40e_init_msix(struct i40e_pf *pf) { - i40e_status err = 0; struct i40e_hw *hw = &pf->hw; - int other_vecs = 0; + int vectors_left; int v_budget, i; - int vec; + int v_actual; if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) return -ENODEV; @@ -6951,24 +7080,62 @@ static int i40e_init_msix(struct i40e_pf *pf) * If we can't get what we want, we'll simplify to nearly nothing * and try again. If that still fails, we punt. */ - pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size); - pf->num_vmdq_msix = pf->num_vmdq_qps; - other_vecs = 1; - other_vecs += (pf->num_vmdq_vsis * pf->num_vmdq_msix); - if (pf->flags & I40E_FLAG_FD_SB_ENABLED) - other_vecs++; - - /* Scale down if necessary, and the rings will share vectors */ - pf->num_lan_msix = min_t(int, pf->num_lan_msix, - (hw->func_caps.num_msix_vectors - other_vecs)); - v_budget = pf->num_lan_msix + other_vecs; + vectors_left = hw->func_caps.num_msix_vectors; + v_budget = 0; + + /* reserve one vector for miscellaneous handler */ + if (vectors_left) { + v_budget++; + vectors_left--; + } + + /* reserve vectors for the main PF traffic queues */ + pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); + vectors_left -= pf->num_lan_msix; + v_budget += pf->num_lan_msix; + + /* reserve one vector for sideband flow director */ + if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { + if (vectors_left) { + v_budget++; + vectors_left--; + } else { + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + } + } #ifdef I40E_FCOE + /* can we reserve enough for FCoE? */ if (pf->flags & I40E_FLAG_FCOE_ENABLED) { - pf->num_fcoe_msix = pf->num_fcoe_qps; + if (!vectors_left) + pf->num_fcoe_msix = 0; + else if (vectors_left >= pf->num_fcoe_qps) + pf->num_fcoe_msix = pf->num_fcoe_qps; + else + pf->num_fcoe_msix = 1; v_budget += pf->num_fcoe_msix; + vectors_left -= pf->num_fcoe_msix; } + #endif + /* any vectors left over go for VMDq support */ + if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { + int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; + int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); + + /* if we're short on vectors for what's desired, we limit + * the queues per vmdq. 
If this is still more than are + * available, the user will need to change the number of + * queues/vectors used by the PF later with the ethtool + * channels command + */ + if (vmdq_vecs < vmdq_vecs_wanted) + pf->num_vmdq_qps = 1; + pf->num_vmdq_msix = pf->num_vmdq_qps; + + v_budget += vmdq_vecs; + vectors_left -= vmdq_vecs; + } pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); @@ -6977,9 +7144,9 @@ static int i40e_init_msix(struct i40e_pf *pf) for (i = 0; i < v_budget; i++) pf->msix_entries[i].entry = i; - vec = i40e_reserve_msix_vectors(pf, v_budget); + v_actual = i40e_reserve_msix_vectors(pf, v_budget); - if (vec != v_budget) { + if (v_actual != v_budget) { /* If we have limited resources, we will start with no vectors * for the special features and then allocate vectors to some * of these features based on the policy and at the end disable @@ -6992,26 +7159,30 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_vmdq_msix = 0; } - if (vec < I40E_MIN_MSIX) { + if (v_actual < I40E_MIN_MSIX) { pf->flags &= ~I40E_FLAG_MSIX_ENABLED; kfree(pf->msix_entries); pf->msix_entries = NULL; return -ENODEV; - } else if (vec == I40E_MIN_MSIX) { + } else if (v_actual == I40E_MIN_MSIX) { /* Adjust for minimal MSIX use */ pf->num_vmdq_vsis = 0; pf->num_vmdq_qps = 0; pf->num_lan_qps = 1; pf->num_lan_msix = 1; - } else if (vec != v_budget) { + } else if (v_actual != v_budget) { + int vec; + /* reserve the misc vector */ - vec--; + vec = v_actual - 1; /* Scale vector usage down */ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ pf->num_vmdq_vsis = 1; + pf->num_vmdq_qps = 1; + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; /* partition out the remaining vectors */ switch (vec) { @@ -7037,10 +7208,8 @@ static int i40e_init_msix(struct i40e_pf *pf) vec--; } #endif - pf->num_lan_msix = min_t(int, (vec / 2), - pf->num_lan_qps); - pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix), - I40E_DEFAULT_NUM_VMDQ_VSI); + /* give the rest to the PF */ + pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); break; } } @@ -7057,7 +7226,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_FCOE_ENABLED; } #endif - return err; + return v_actual; } /** @@ -7134,11 +7303,12 @@ err_out: **/ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) { - int err = 0; + int vectors = 0; + ssize_t size; if (pf->flags & I40E_FLAG_MSIX_ENABLED) { - err = i40e_init_msix(pf); - if (err) { + vectors = i40e_init_msix(pf); + if (vectors < 0) { pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | #ifdef I40E_FCOE I40E_FLAG_FCOE_ENABLED | @@ -7158,18 +7328,26 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf) if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && (pf->flags & I40E_FLAG_MSI_ENABLED)) { dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); - err = pci_enable_msi(pf->pdev); - if (err) { - dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); + vectors = pci_enable_msi(pf->pdev); + if (vectors < 0) { + dev_info(&pf->pdev->dev, "MSI init failed - %d\n", + vectors); pf->flags &= ~I40E_FLAG_MSI_ENABLED; } + vectors = 1; /* one MSI or Legacy vector */ } if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); + /* set up vector assignment tracking */ + size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + pf->irq_pile->num_entries = vectors; + pf->irq_pile->search_hint = 0; + /* track first vector for misc interrupts 
*/ - err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); + (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); } /** @@ -7219,6 +7397,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) static int i40e_config_rss(struct i40e_pf *pf) { u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1]; + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; u32 lut = 0; int i, j; @@ -7236,15 +7415,14 @@ static int i40e_config_rss(struct i40e_pf *pf) wr32(hw, I40E_PFQF_HENA(0), (u32)hena); wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); + /* Check capability and Set table size and register per hw expectation*/ reg_val = rd32(hw, I40E_PFQF_CTL_0); - if (hw->func_caps.rss_table_size == 512) { + if (pf->rss_table_size == 512) reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512; - pf->rss_table_size = 512; - } else { - pf->rss_table_size = 128; + else reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512; - } wr32(hw, I40E_PFQF_CTL_0, reg_val); /* Populate the LUT with max no. of queues in round robin fashion */ @@ -7257,7 +7435,7 @@ static int i40e_config_rss(struct i40e_pf *pf) * If LAN VSI is the only consumer for RSS then this requirement * is not necessary. */ - if (j == pf->rss_size) + if (j == vsi->rss_size) j = 0; /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (j & @@ -7281,15 +7459,19 @@ static int i40e_config_rss(struct i40e_pf *pf) **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + int new_rss_size; + if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; - queue_count = min_t(int, queue_count, pf->rss_size_max); + new_rss_size = min_t(int, queue_count, pf->rss_size_max); - if (queue_count != pf->rss_size) { + if (queue_count != vsi->num_queue_pairs) { + vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); - pf->rss_size = queue_count; + pf->rss_size = new_rss_size; i40e_reset_and_rebuild(pf, true); i40e_config_rss(pf); @@ -7299,6 +7481,128 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) } /** + * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) +{ + i40e_status status; + bool min_valid, max_valid; + u32 max_bw, min_bw; + + status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, + &min_valid, &max_valid); + + if (!status) { + if (min_valid) + pf->npar_min_bw = min_bw; + if (max_valid) + pf->npar_max_bw = max_bw; + } + + return status; +} + +/** + * i40e_set_npar_bw_setting - Set BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) +{ + struct i40e_aqc_configure_partition_bw_data bw_data; + i40e_status status; + + /* Set the valid bit for this PF */ + bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); + bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; + bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; + + /* Set the new bandwidths */ + status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); + + return status; +} + +/** + * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition + * @pf: board private structure + **/ +i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) +{ + /* Commit temporary BW setting to permanent NVM image */ + enum i40e_admin_queue_err last_aq_status; + i40e_status ret; + u16 nvm_word; + + 
if (pf->hw.partition_id != 1) { + dev_info(&pf->pdev->dev, + "Commit BW only works on partition 1! This is partition %d", + pf->hw.partition_id); + ret = I40E_NOT_SUPPORTED; + goto bw_commit_out; + } + + /* Acquire NVM for read access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for read access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Read word 0x10 of NVM - SW compatibility word 1 */ + ret = i40e_aq_read_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), &nvm_word, + false, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) { + dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + + /* Wait a bit for NVM release to complete */ + msleep(50); + + /* Acquire NVM for write access */ + ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot acquire NVM for write access, err %d: aq_err %d\n", + ret, last_aq_status); + goto bw_commit_out; + } + /* Write it back out unchanged to initiate update NVM, + * which will force a write of the shadow (alt) RAM to + * the NVM - thus storing the bandwidth values permanently. + */ + ret = i40e_aq_update_nvm(&pf->hw, + I40E_SR_NVM_CONTROL_WORD, + 0x10, sizeof(nvm_word), + &nvm_word, true, NULL); + /* Save off last admin queue command status before releasing + * the NVM + */ + last_aq_status = pf->hw.aq.asq_last_status; + i40e_release_nvm(&pf->hw); + if (ret) + dev_info(&pf->pdev->dev, + "BW settings NOT SAVED, err %d aq_err %d\n", + ret, last_aq_status); +bw_commit_out: + + return ret; +} + +/** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize * @@ -7324,8 +7628,12 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | - I40E_FLAG_MSIX_ENABLED | - I40E_FLAG_RX_1BUF_ENABLED; + I40E_FLAG_MSIX_ENABLED; + + if (iommu_present(&pci_bus_type)) + pf->flags |= I40E_FLAG_RX_PS_ENABLED; + else + pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; /* Set default ITR */ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; @@ -7336,6 +7644,7 @@ static int i40e_sw_init(struct i40e_pf *pf) */ pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; pf->rss_size = 1; + pf->rss_table_size = pf->hw.func_caps.rss_table_size; pf->rss_size_max = min_t(int, pf->rss_size_max, pf->hw.func_caps.num_tx_qp); if (pf->hw.func_caps.rss) { @@ -7347,6 +7656,13 @@ static int i40e_sw_init(struct i40e_pf *pf) if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { pf->flags |= I40E_FLAG_MFP_ENABLED; dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); + if (i40e_get_npar_bw_setting(pf)) + dev_warn(&pf->pdev->dev, + "Could not get NPAR bw settings\n"); + else + dev_info(&pf->pdev->dev, + "Min BW = %8.8x, Max BW = %8.8x\n", + pf->npar_min_bw, pf->npar_max_bw); } /* FW/NVM is not yet fixed in this regard */ @@ -7354,11 +7670,11 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; - /* Setup a counter for fd_atr per pf */ + /* Setup a counter for fd_atr per 
PF */ pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; - /* Setup a counter for fd_sb per pf */ + /* Setup a counter for fd_sb per PF */ pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); } else { dev_info(&pf->pdev->dev, @@ -7406,22 +7722,14 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; pf->qp_pile->search_hint = 0; - /* set up vector assignment tracking */ - size = sizeof(struct i40e_lump_tracking) - + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors); - pf->irq_pile = kzalloc(size, GFP_KERNEL); - if (!pf->irq_pile) { - kfree(pf->qp_pile); - err = -ENOMEM; - goto sw_init_done; - } - pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors; - pf->irq_pile->search_hint = 0; - pf->tx_timeout_recovery_level = 1; mutex_init(&pf->switch_mutex); + /* If NPAR is enabled nudge the Tx scheduler */ + if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) + i40e_set_npar_bw_setting(pf); + sw_init_done: return err; } @@ -7534,7 +7842,8 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "Port %d already offloaded\n", ntohs(port)); + netdev_info(netdev, "vxlan port %d already offloaded\n", + ntohs(port)); return; } @@ -7542,7 +7851,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, next_idx = i40e_get_vxlan_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n", + netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", ntohs(port)); return; } @@ -7550,8 +7859,9 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* New port: add it and mark its index in the bitmap */ pf->vxlan_ports[next_idx] = port; pf->pending_vxlan_bitmap |= (1 << next_idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port)); } /** @@ -7579,12 +7889,13 @@ static void i40e_del_vxlan_port(struct net_device *netdev, * and make it pending */ pf->vxlan_ports[idx] = 0; - pf->pending_vxlan_bitmap |= (1 << idx); - pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; + + dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", + ntohs(port)); } else { - netdev_warn(netdev, "Port %d was not found, not deleting\n", + netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } } @@ -7653,6 +7964,118 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return err; } +#ifdef HAVE_BRIDGE_ATTRIBS +/** + * i40e_ndo_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * + * Inserts a new hardware bridge if not already created and + * enables the bridging mode requested (VEB or VEPA). If the + * hardware bridge has already been inserted and the request + * is to change the mode then that requires a PF reset to + * allow rebuild of the components with required hardware + * bridge mode enabled. 
+ **/ +static int i40e_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + struct nlattr *attr, *br_spec; + int i, rem; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if ((mode != BRIDGE_MODE_VEPA) && + (mode != BRIDGE_MODE_VEB)) + return -EINVAL; + + /* Insert a new HW bridge */ + if (!veb) { + veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + vsi->tc_config.enabled_tc); + if (veb) { + veb->bridge_mode = mode; + i40e_config_bridge_mode(veb); + } else { + /* No Bridge HW offload available */ + return -ENOENT; + } + break; + } else if (mode != veb->bridge_mode) { + /* Existing HW bridge but different mode needs reset */ + veb->bridge_mode = mode; + i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + break; + } + } + + return 0; +} + +/** + * i40e_ndo_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq # + * @dev: the netdev being configured + * @filter_mask: unused + * + * Return the mode in which the hardware bridge is operating in + * i.e VEB or VEPA. + **/ +#ifdef HAVE_BRIDGE_FILTER +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __always_unused filter_mask) +#else +static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +#endif /* HAVE_BRIDGE_FILTER */ +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_veb *veb = NULL; + int i; + + /* Only for PF VSI for now */ + if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + return -EOPNOTSUPP; + + /* Find the HW bridge for the PF VSI */ + for (i = 0; i < I40E_MAX_VEB && !veb; i++) { + if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) + veb = pf->veb[i]; + } + + if (!veb) + return 0; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); +} +#endif /* HAVE_BRIDGE_ATTRIBS */ + static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -7687,6 +8110,10 @@ static const struct net_device_ops i40e_netdev_ops = { #endif .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, +#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_getlink = i40e_ndo_bridge_getlink, + .ndo_bridge_setlink = i40e_ndo_bridge_setlink, +#endif /* HAVE_BRIDGE_ATTRIBS */ }; /** @@ -7799,6 +8226,30 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi) } /** + * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB + * @vsi: the VSI being queried + * + * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode + **/ +int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) +{ + struct i40e_veb *veb; + struct i40e_pf *pf = vsi->back; + + /* Uplink is not a bridge so default to VEB */ + if (vsi->veb_idx == I40E_NO_VEB) + return 1; + + veb = pf->veb[vsi->veb_idx]; + /* Uplink is a bridge in VEPA mode */ + if (veb && 
(veb->bridge_mode & BRIDGE_MODE_VEPA)) + return 0; + + /* Uplink is a bridge in VEB mode */ + return 1; +} + +/** * i40e_add_vsi - Add a VSI to the switch * @vsi: the VSI being configured * @@ -7830,11 +8281,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.flags = I40E_AQ_VSI_TYPE_PF; if (ret) { dev_info(&pf->pdev->dev, - "couldn't get pf vsi config, err %d, aq_err %d\n", + "couldn't get PF vsi config, err %d, aq_err %d\n", ret, pf->hw.aq.asq_last_status); return -ENOENT; } - memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); + vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; @@ -7883,12 +8334,14 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections |= + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id = + ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -7896,16 +8349,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; - ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - /* This VSI is connected to VEB so the switch_id * should be set to zero by default. */ - ctxt.info.switch_id = 0; - ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } /* Setup the VSI tx/rx queue map for TC0 only for now */ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); @@ -7915,15 +8370,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.pf_num = hw->pf_id; ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; ctxt.uplink_seid = vsi->uplink_seid; - ctxt.connection_type = 0x1; /* regular data port */ + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_VF; - ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - /* This VSI is connected to VEB so the switch_id * should be set to zero by default. 
*/ - ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + if (i40e_is_vsi_uplink_mode_veb(vsi)) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; @@ -7961,7 +8419,7 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ret = -ENOENT; goto err; } - memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); + vsi->info = ctxt.info; vsi->info.valid_sections = 0; vsi->seid = ctxt.seid; vsi->id = ctxt.vsi_number; @@ -8281,7 +8739,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, __func__); return NULL; } - i40e_enable_pf_switch_lb(pf); + i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) @@ -8724,7 +9182,7 @@ err_alloc: } /** - * i40e_setup_pf_switch_element - set pf vars based on switch type + * i40e_setup_pf_switch_element - set PF vars based on switch type * @pf: board private structure * @ele: element we are building info from * @num_reported: total number of elements @@ -8930,15 +9388,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_config_rss(pf); /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw, true); - i40e_link_event(pf); - - /* Initialize user-specific link properties */ - pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & - I40E_AQ_AN_COMPLETED) ? true : false); - - /* fill in link information and enable LSE reporting */ - i40e_update_link_info(&pf->hw, true); + i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); i40e_link_event(pf); /* Initialize user-specific link properties */ @@ -9008,7 +9458,11 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) pf->flags &= ~I40E_FLAG_DCB_CAPABLE; dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } - pf->num_lan_qps = pf->rss_size_max; + pf->num_lan_qps = max_t(int, pf->rss_size_max, + num_online_cpus()); + pf->num_lan_qps = min_t(int, pf->num_lan_qps, + pf->hw.func_caps.num_tx_qp); + queues_left -= pf->num_lan_qps; } @@ -9061,7 +9515,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) * i40e_setup_pf_filter_control - Setup PF static filter control * @pf: PF to be setup * - * i40e_setup_pf_filter_control sets up a pf's initial filter control + * i40e_setup_pf_filter_control sets up a PF's initial filter control * settings. If PE/FCoE are enabled then it will also set the per PF * based filter sizes required for them. It also enables Flow director, * ethertype and macvlan type filter settings for the pf. @@ -9106,8 +9560,10 @@ static void i40e_print_features(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); #endif - buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs); + buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ", + pf->hw.func_caps.num_vsis, + pf->vsi[pf->lan_vsi]->num_queue_pairs, + pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); if (pf->flags & I40E_FLAG_RSS_ENABLED) buf += sprintf(buf, "RSS "); @@ -9136,14 +9592,16 @@ static void i40e_print_features(struct i40e_pf *pf) * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl * - * i40e_probe initializes a pf identified by a pci_dev structure. 
- * The OS initialization, configuring of the pf private structure, + * i40e_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, * and a hardware reset occur. * * Returns 0 on success, negative on failure **/ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct i40e_aq_get_phy_abilities_resp abilities; + unsigned long ioremap_len; struct i40e_pf *pf; struct i40e_hw *hw; static u16 pfs_found; @@ -9195,8 +9653,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &pf->hw; hw->back = pf; - hw->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + + ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0), + I40E_MAX_CSR_SPACE); + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len); if (!hw->hw_addr) { err = -EIO; dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", @@ -9274,7 +9735,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); - i40e_verify_eeprom(pf); /* Rev 0 hardware was never productized */ @@ -9409,13 +9869,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); - msleep(75); - err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); - if (err) { - dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", - pf->hw.aq.asq_last_status); + if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4)) { + msleep(75); + err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); + if (err) + dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); } - /* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector * ends up disabled forever. 
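The probe hunk above gates the manual link-restart workaround on the admin-queue firmware version, using the same "older than 4.33" test that appears in the rebuild path and that also disables DCB init on pre-4.33 firmware. A small self-contained sketch of that comparison, with made-up version numbers for the demo:

#include <stdbool.h>
#include <stdio.h>

/* True when firmware fw_maj.fw_min is older than want_maj.want_min --
 * equivalent to the driver's
 *   ((fw_maj_ver == 4 && fw_min_ver < 33) || fw_maj_ver < 4)
 * check for want 4.33.
 */
static bool fw_older_than(unsigned int fw_maj, unsigned int fw_min,
			  unsigned int want_maj, unsigned int want_min)
{
	return fw_maj < want_maj ||
	       (fw_maj == want_maj && fw_min < want_min);
}

int main(void)
{
	printf("%d\n", fw_older_than(4, 32, 4, 33)); /* 1: apply workaround */
	printf("%d\n", fw_older_than(4, 33, 4, 33)); /* 0: new enough */
	printf("%d\n", fw_older_than(3, 99, 4, 33)); /* 1: apply workaround */
	return 0;
}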
@@ -9499,6 +9960,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } + /* get the requested speeds from the fw */ + err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL); + if (err) + dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", + err); + pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + /* print a string summarizing features */ i40e_print_features(pf); @@ -9517,7 +9985,6 @@ err_configure_lan_hmc: (void)i40e_shutdown_lan_hmc(hw); err_init_lan_hmc: kfree(pf->qp_pile); - kfree(pf->irq_pile); err_sw_init: err_adminq_setup: (void)i40e_shutdown_adminq(hw); @@ -9617,7 +10084,6 @@ static void i40e_remove(struct pci_dev *pdev) } kfree(pf->qp_pile); - kfree(pf->irq_pile); kfree(pf->vsi); iounmap(pf->hw.hw_addr); @@ -9760,6 +10226,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); + rtnl_lock(); i40e_prep_for_reset(pf); rtnl_unlock(); @@ -9844,6 +10312,7 @@ static int __init i40e_init_module(void) pr_info("%s: %s - version %s\n", i40e_driver_name, i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + i40e_dbg_init(); return pci_register_driver(&i40e_driver); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 5defe0d63514..e49acd2accd3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -164,15 +164,15 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) } /** - * i40e_read_nvm_word - Reads Shadow RAM + * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. **/ -i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data) +static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, + u16 *data) { i40e_status ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; @@ -212,7 +212,21 @@ read_nvm_exit: } /** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * i40e_read_nvm_word - Reads Shadow RAM + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM + * + * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + **/ +i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) +{ + return i40e_read_nvm_word_srctl(hw, offset, data); +} + +/** + * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). * @words: (in) number of words to read; (out) number of words actually read @@ -222,8 +236,8 @@ read_nvm_exit: * method. The buffer read is preceded by the NVM ownership take * and followed by the release. 
**/ -i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { i40e_status ret_code = 0; u16 index, word; @@ -231,7 +245,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, /* Loop thru the selected region */ for (word = 0; word < *words; word++) { index = offset + word; - ret_code = i40e_read_nvm_word(hw, index, &data[word]); + ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]); if (ret_code) break; } @@ -243,6 +257,23 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, } /** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); +} + +/** * i40e_write_nvm_aq - Writes Shadow RAM. * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning @@ -302,11 +333,18 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) { i40e_status ret_code = 0; + struct i40e_virt_mem vmem; u16 pcie_alt_module = 0; u16 checksum_local = 0; u16 vpd_module = 0; - u16 word = 0; - u32 i = 0; + u16 *data; + u16 i = 0; + + ret_code = i40e_allocate_virt_mem(hw, &vmem, + I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16)); + if (ret_code) + goto i40e_calc_nvm_checksum_exit; + data = (u16 *)vmem.va; /* read pointer to VPD area */ ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); @@ -317,7 +355,7 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, /* read pointer to PCIe Alt Auto-load module */ ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, - &pcie_alt_module); + &pcie_alt_module); if (ret_code) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; @@ -327,33 +365,40 @@ static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, * except the VPD and PCIe ALT Auto-load modules */ for (i = 0; i < hw->nvm.sr_size; i++) { + /* Read SR page */ + if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) { + u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS; + + ret_code = i40e_read_nvm_buffer(hw, i, &words, data); + if (ret_code) { + ret_code = I40E_ERR_NVM_CHECKSUM; + goto i40e_calc_nvm_checksum_exit; + } + } + /* Skip Checksum word */ if (i == I40E_SR_SW_CHECKSUM_WORD) - i++; + continue; /* Skip VPD module (convert byte size to word count) */ - if (i == (u32)vpd_module) { - i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2); - if (i >= hw->nvm.sr_size) - break; + if ((i >= (u32)vpd_module) && + (i < ((u32)vpd_module + + (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) { + continue; } /* Skip PCIe ALT module (convert byte size to word count) */ - if (i == (u32)pcie_alt_module) { - i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2); - if (i >= hw->nvm.sr_size) - break; + if ((i >= (u32)pcie_alt_module) && + (i < ((u32)pcie_alt_module + + (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) { + continue; } - ret_code = i40e_read_nvm_word(hw, (u16)i, &word); - if (ret_code) { - ret_code = I40E_ERR_NVM_CHECKSUM; - goto 
i40e_calc_nvm_checksum_exit; - } - checksum_local += word; + checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS]; } *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local; i40e_calc_nvm_checksum_exit: + i40e_free_virt_mem(hw, &vmem); return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 68e852a96680..fea0d37ecc72 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -66,6 +66,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, u16 *fw_major_version, u16 *fw_minor_version, + u32 *fw_build, u16 *api_major_version, u16 *api_minor_version, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, @@ -97,7 +98,6 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, bool enable_lse, struct i40e_link_status *link, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse); i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw, u64 advt_reg, struct i40e_asq_cmd_details *cmd_details); @@ -247,6 +247,12 @@ void i40e_clear_hw(struct i40e_hw *hw); void i40e_clear_pxe_mode(struct i40e_hw *hw); bool i40e_get_link_status(struct i40e_hw *hw); i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr); +i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, + u32 *max_bw, u32 *min_bw, bool *min_valid, + bool *max_valid); +i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, + struct i40e_aqc_configure_partition_bw_data *bw_data, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size); @@ -260,8 +266,6 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw); i40e_status i40e_acquire_nvm(struct i40e_hw *hw, enum i40e_aq_resource_access_type access); void i40e_release_nvm(struct i40e_hw *hw); -i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset, - u16 *data); i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data); i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index fabcfa1b45b2..a92b7725dec3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -57,7 +57,7 @@ * timespec. However, since the registers are 64 bits of nanoseconds, we must * convert the result to a timespec before we can return. 
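The reworked i40e_calc_nvm_checksum above replaces per-word reads with one buffered read per sector and range-based skips. A standalone sketch of that scheme under toy sizes; the array, constants, and helper names are all stand-ins, not driver API, and a real read can fail where this one cannot:

#include <stdint.h>
#include <stdio.h>

#define SR_SIZE		64	/* shadow-RAM words, toy value */
#define SECTOR_WORDS	16	/* words per buffered read */
#define CHECKSUM_WORD	10	/* slot holding the stored checksum */
#define CHECKSUM_BASE	0xBABA

static uint16_t sr[SR_SIZE];	/* stands in for the NVM shadow RAM */

static void read_buffer(uint32_t off, uint16_t *dst, uint32_t words)
{
	for (uint32_t i = 0; i < words; i++)
		dst[i] = sr[off + i];
}

static int in_range(uint32_t i, uint32_t start, uint32_t len)
{
	return i >= start && i < start + len;
}

static uint16_t calc_checksum(uint32_t skip1, uint32_t len1,
			      uint32_t skip2, uint32_t len2)
{
	uint16_t data[SECTOR_WORDS], sum = 0;

	for (uint32_t i = 0; i < SR_SIZE; i++) {
		if (i % SECTOR_WORDS == 0)	/* refill once per sector */
			read_buffer(i, data, SECTOR_WORDS);
		if (i == CHECKSUM_WORD ||
		    in_range(i, skip1, len1) || in_range(i, skip2, len2))
			continue;	/* skipped slots, like VPD/PCIe-ALT */
		sum += data[i % SECTOR_WORDS];
	}
	return (uint16_t)(CHECKSUM_BASE - sum);
}

int main(void)
{
	for (uint32_t i = 0; i < SR_SIZE; i++)
		sr[i] = (uint16_t)(i * 3 + 1);
	printf("checksum = 0x%04X\n", calc_checksum(20, 4, 40, 8));
	return 0;
}

Note how continue-based skipping over [start, start+len) ranges also fixes the old code's pattern of jumping the index forward and re-checking bounds by hand.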
**/ -static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts) { struct i40e_hw *hw = &pf->hw; u32 hi, lo; @@ -69,7 +69,7 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) ns = (((u64)hi) << 32) | lo; - *ts = ns_to_timespec(ns); + *ts = ns_to_timespec64(ns); } /** @@ -81,10 +81,10 @@ static void i40e_ptp_read(struct i40e_pf *pf, struct timespec *ts) * we receive a timespec from the stack, we must convert that timespec into * nanoseconds before programming the registers. **/ -static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec *ts) +static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts) { struct i40e_hw *hw = &pf->hw; - u64 ns = timespec_to_ns(ts); + u64 ns = timespec64_to_ns(ts); /* The timer will not update until the high register is written, so * write the low register first. @@ -159,14 +159,14 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec now, then = ns_to_timespec(delta); + struct timespec64 now, then = ns_to_timespec64(delta); unsigned long flags; spin_lock_irqsave(&pf->tmreg_lock, flags); i40e_ptp_read(pf, &now); - now = timespec_add(now, then); - i40e_ptp_write(pf, (const struct timespec *)&now); + now = timespec64_add(now, then); + i40e_ptp_write(pf, (const struct timespec64 *)&now); spin_unlock_irqrestore(&pf->tmreg_lock, flags); @@ -181,7 +181,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) * Read the device clock and return the correct value on ns, after converting it * into a timespec struct. **/ -static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); unsigned long flags; @@ -202,7 +202,7 @@ static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) * to ns happens in the write function. **/ static int i40e_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); unsigned long flags; @@ -613,8 +613,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) pf->ptp_caps.pps = 0; pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; pf->ptp_caps.adjtime = i40e_ptp_adjtime; - pf->ptp_caps.gettime = i40e_ptp_gettime; - pf->ptp_caps.settime = i40e_ptp_settime; + pf->ptp_caps.gettime64 = i40e_ptp_gettime; + pf->ptp_caps.settime64 = i40e_ptp_settime; pf->ptp_caps.enable = i40e_ptp_feature_enable; /* Attempt to register the clock before enabling the hardware. */ @@ -673,7 +673,7 @@ void i40e_ptp_init(struct i40e_pf *pf) dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", __func__); } else { - struct timespec ts; + struct timespec64 ts; u32 regval; dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, @@ -695,7 +695,7 @@ void i40e_ptp_init(struct i40e_pf *pf) i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config); /* Set the clock value. 
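The PTP hunks above move the driver from timespec to timespec64, the 64-bit-seconds type that stays valid past 2038. A userspace model of the i40e_ptp_read pattern itself: two 32-bit register reads are combined into 64-bit nanoseconds and then split into a timespec-style struct. The register values and the ts64 struct are made up for the example:

#include <stdint.h>
#include <stdio.h>

struct ts64 {
	int64_t tv_sec;
	long tv_nsec;
};

static struct ts64 ns_to_ts64(uint64_t ns)
{
	struct ts64 ts;

	ts.tv_sec = (int64_t)(ns / 1000000000ULL);
	ts.tv_nsec = (long)(ns % 1000000000ULL);
	return ts;
}

int main(void)
{
	uint32_t lo = 0x80000000u, hi = 0x2u;	/* fake TIME_L/TIME_H reads */
	uint64_t ns = ((uint64_t)hi << 32) | lo;
	struct ts64 ts = ns_to_ts64(ns);

	printf("%lld.%09lds\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}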
*/ - ts = ktime_to_timespec(ktime_get_real()); + ts = ktime_to_timespec64(ktime_get_real()); i40e_ptp_settime(&pf->ptp_caps, &ts); } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 65d3c8bb2d5b..522d6df51330 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) @@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 +1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define 
I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 
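All of the register definitions in this file follow one SHIFT/MASK convention: I40E_MASK(m, s) pre-shifts the mask into place, so extracting a field is (reg & MASK) >> SHIFT. A self-contained illustration with an invented two-bit field at bit 9 (the register value is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT	9
#define FIELD_MASK	(0x3u << FIELD_SHIFT)	/* two-bit field at bit 9 */

int main(void)
{
	uint32_t reg = 0x00000600;	/* field = 3, all other bits 0 */
	uint32_t field = (reg & FIELD_MASK) >> FIELD_SHIFT;

	printf("field = %u\n", field);		/* prints 3 */

	/* read-modify-write the same field to 1 */
	reg = (reg & ~FIELD_MASK) | ((1u << FIELD_SHIFT) & FIELD_MASK);
	printf("reg   = 0x%08X\n", reg);	/* 0x00000200 */
	return 0;
}

Keeping the mask pre-shifted is why renames like BPRCH_UPRCH -> BPRCH_BPRCH below are pure spelling fixes: the numeric layout never changes.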
-#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index bbf1b1247ac4..4bd3a80aba82 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40e.h" #include "i40e_prototype.h" @@ -44,7 +45,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, * i40e_program_fdir_filter - Program a Flow Director filter * @fdir_data: Packet data that will be filter parameters * @raw_packet: the pre-allocated packet buffer for FDir - * @pf: The pf pointer + * @pf: The PF pointer * @add: True for add/update, False for remove **/ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, @@ -227,7 +228,7 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", @@ -302,7 +303,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send 
failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", fd_data->pctype, fd_data->fd_id); @@ -375,7 +376,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", fd_data->pctype, fd_data->fd_id, ret); err = true; - } else { + } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { if (add) dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", @@ -470,12 +471,27 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", rx_desc->wb.qword0.hi_dword.fd_id); + /* Check if the programming error is for ATR. + * If so, auto disable ATR and set a state for + * flush in progress. Next time we come here if flush is in + * progress do nothing, once flush is complete the state will + * be cleared. + */ + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return; + pf->fd_add_err++; /* store the current atr filter count */ pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); + if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) && + (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { + pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; + set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + } + /* filter programming failed most likely due to table full */ - fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); + fcnt_prog = i40e_get_global_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; /* If ATR is running fcnt_prog can quickly change, * if we are very close to full, it makes sense to disable @@ -754,6 +770,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, 0); } + prefetch(tx_desc); + /* update budget accounting */ budget--; } while (likely(budget)); @@ -841,6 +859,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ @@ -1031,6 +1050,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -1089,6 +1124,37 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40e_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. 
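A compressed model of the guard added in i40e_fd_handle_status above: on a filter-programming error, do nothing while a flush is already pending; otherwise count the error and, for the ATR case (fd_id == 0), flag ATR for auto-disable and request a flush. The flag values and struct are invented for the sketch:

#include <stdio.h>

#define STATE_FLUSH_REQUESTED	0x1
#define FLAG_FD_SB_ENABLED	0x2
#define FLAG_FD_ATR_ENABLED	0x4

struct pf {
	unsigned int state;
	unsigned int auto_disable_flags;
	unsigned int fd_add_err;
};

static void fd_handle_error(struct pf *pf, unsigned int fd_id)
{
	if (pf->state & STATE_FLUSH_REQUESTED)
		return;		/* flush already in progress: do nothing */
	pf->fd_add_err++;
	if (fd_id == 0 && (pf->auto_disable_flags & FLAG_FD_SB_ENABLED)) {
		pf->auto_disable_flags |= FLAG_FD_ATR_ENABLED;
		pf->state |= STATE_FLUSH_REQUESTED;
	}
}

int main(void)
{
	struct pf pf = { .auto_disable_flags = FLAG_FD_SB_ENABLED };

	fd_handle_error(&pf, 0);
	fd_handle_error(&pf, 0);	/* ignored: flush pending */
	printf("errors=%u state=%#x flags=%#x\n",
	       pf.fd_add_err, pf.state, pf.auto_disable_flags);
	return 0;
}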
*/ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -1148,11 +1214,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40e_alloc_rx_buffers - Replace used receive buffers; packet split + * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -1192,40 +1323,8 @@ void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
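A minimal model of the i40e_alloc_rx_headers allocation above: one backing block is carved into per-descriptor header buffers, each rounded up to 256 bytes so no buffer straddles that boundary. malloc stands in for dma_alloc_coherent and the sizes are arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int hdr_len = 96, count = 8;
	size_t buf_size = ALIGN_UP(hdr_len, 256);	/* -> 256 */
	uint8_t *slab = malloc(buf_size * count);

	if (!slab)
		return 1;
	for (unsigned int i = 0; i < count; i++)
		printf("desc %u: header at offset %zu\n", i, i * buf_size);
	free(slab);
	return 0;
}

One coherent allocation sliced this way is cheaper than per-descriptor allocations, which is presumably why the teardown in i40e_clean_rx_ring frees only rx_bi[0].hdr_buf and merely clears the per-descriptor pointers.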
- */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -1279,10 +1378,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -1410,13 +1509,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -1432,25 +1531,54 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (budget <= 0) return 0; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
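The dma_rmb() that follows the DD-bit test below enforces that no other descriptor field is read until the done bit has been observed. A userspace analogue of that ordering using a C11 acquire load (an analogue only, not a claim that dma_rmb() is implemented this way); the descriptor layout is invented:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint32_t len;
	atomic_uint status;	/* bit 0 = DD ("descriptor done") */
};

static int poll_desc(struct desc *d, uint32_t *len)
{
	if (!(atomic_load_explicit(&d->status, memory_order_acquire) & 1))
		return 0;	/* not done yet: try again later */
	/* the acquire above orders this read after the DD check */
	*len = d->len;
	return 1;
}

int main(void)
{
	struct desc d = { .len = 1500 };
	uint32_t len;

	atomic_store_explicit(&d.status, 1, memory_order_release);
	if (poll_desc(&d, &len))
		printf("got %u bytes\n", len);
	return 0;
}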
+ */ + dma_rmb(); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc); - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); - goto next_desc; + I40E_RX_INCREMENT(rx_ring, i); + continue; } rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -1465,40 +1593,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? + skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1520,22 +1638,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1544,7 +1656,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? 
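A toy model of the header-split receive path above: a small header is memcpy'd into the skb's linear area while the bulk of the payload is attached by reference (skb_fill_page_desc in the driver; here just recorded). The struct and sizes are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_skb {
	uint8_t head[128];	/* linear area, like the skb head */
	unsigned int head_len;
	const uint8_t *frag;	/* attached payload, like a page frag */
	unsigned int frag_len;
};

static void rx_header_split(struct toy_skb *skb, const uint8_t *hdr_buf,
			    unsigned int hdr_len, const uint8_t *page,
			    unsigned int pkt_len)
{
	memcpy(skb->head, hdr_buf, hdr_len);	/* small copy */
	skb->head_len = hdr_len;
	skb->frag = page;			/* zero-copy attach */
	skb->frag_len = pkt_len;
}

int main(void)
{
	uint8_t hdr[64] = { 0x45 }, payload[1400] = { 0xAB };
	struct toy_skb skb = { 0 };

	rx_header_split(&skb, hdr, 54, payload, sizeof(payload));
	printf("linear=%u frag=%u first=0x%02X\n",
	       skb.head_len, skb.frag_len, skb.head[0]);
	return 0;
}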
*/ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1570,33 +1682,149 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) #ifdef I40E_FCOE if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { dev_kfree_skb_any(skb); - goto next_desc; + continue; } #endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers(rx_ring, cleaned_count); + i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + dma_rmb(); + + if (i40e_rx_is_programming_status(qword)) { + i40e_clean_programming_status(rx_ring, rx_desc); + I40E_RX_INCREMENT(rx_ring, i); + continue; + } + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? 
+ */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { + i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & + I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); + rx_ring->last_rx_timestamp = jiffies; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1604,10 +1832,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40e_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1628,6 +1853,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1647,8 +1873,14 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { @@ -1715,6 +1947,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) return; + if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) + return; + /* if sampling is disabled do nothing */ if (!tx_ring->atr_sample_rate) return; @@ -1822,6 +2057,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, __be16 protocol = skb->protocol; u32 tx_flags = 0; + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off by the user the + * stack sets the protocol to 8021q so that the driver + * can take any steps required to support the SW only + * VLAN handling. In our case the driver doesn't need + * to take any further steps so just set the protocol + * to the encapsulated ethertype. 
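With the clean functions now returning a packet count, i40e_napi_poll above divides the NAPI budget across ring pairs and treats a ring that consumed its whole share as evidence of remaining work. A standalone model of that accounting; clean_ring() is a stand-in that pretends ring 0 still has a backlog:

#include <stdbool.h>
#include <stdio.h>

static int clean_ring(int ring, int budget)
{
	return ring == 0 ? budget : budget / 2;
}

int main(void)
{
	int budget = 64, num_ringpairs = 4;
	int budget_per_ring = budget / num_ringpairs;
	bool clean_complete = true;

	if (budget_per_ring < 1)
		budget_per_ring = 1;	/* max(budget/num_ringpairs, 1) */

	for (int r = 0; r < num_ringpairs; r++) {
		int cleaned = clean_ring(r, budget_per_ring);
		/* a ring that used its full share probably has more work */
		clean_complete &= (cleaned != budget_per_ring);
	}
	printf("clean_complete = %d\n", clean_complete);	/* 0 */
	return 0;
}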
+ */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + /* if we have a HW VLAN tag being added, default to the HW one */ if (skb_vlan_tag_present(skb)) { tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; @@ -1838,6 +2086,9 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_SW_VLAN; } + if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) + goto out; + /* Insert 802.1p priority into VLAN header */ if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) || (skb->priority != TC_PRIO_CONTROL)) { @@ -1858,6 +2109,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_HW_VLAN; } } + +out: *flags = tx_flags; return 0; } @@ -1982,8 +2235,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + u32 l4_tunnel = 0; if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } network_hdr_len = skb_inner_network_header_len(skb); this_ip_hdr = inner_ip_hdr(skb); this_ipv6_hdr = inner_ipv6_hdr(skb); @@ -2006,8 +2267,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - I40E_TXD_CTX_UDP_TUNNELING | + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index dff0baeb1ecc..4b0b8102cdc3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -152,6 +160,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -224,8 +233,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -281,7 +290,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40e_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index e9901ef06a63..67c7bc9e9c21 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
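The I40E_RX_INCREMENT macro introduced above advances a ring index, wraps it at the ring size, and publishes it as next_to_clean in one step. A toy version with an invented ring struct, showing the wrap behavior:

#include <stdio.h>

struct ring {
	unsigned int count;
	unsigned int next_to_clean;
};

#define RX_INCREMENT(r, i)			\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			(i) = 0;		\
		(r)->next_to_clean = (i);	\
	} while (0)

int main(void)
{
	struct ring r = { .count = 4, .next_to_clean = 0 };
	unsigned int i = r.next_to_clean;

	for (int step = 0; step < 6; step++) {
		RX_INCREMENT(&r, i);
		printf("i=%u next_to_clean=%u\n", i, r.next_to_clean);
	}
	return 0;	/* i cycles 1,2,3,0,1,2 */
}

Publishing next_to_clean on every step (rather than once at loop exit, as the old code did) keeps the ring state consistent across the early continue paths the new clean loops rely on.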
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -44,6 +44,7 @@ #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 #define I40E_DEV_ID_10G_BASE_T 0x1586 +#define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -175,12 +176,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { @@ -1143,7 +1144,7 @@ struct i40e_hw_port_stats { #define I40E_SR_EMP_MODULE_PTR 0x0F #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 -#define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D @@ -1401,6 +1402,19 @@ struct i40e_lldp_variables { u16 crc8; }; +/* Offsets into Alternate Ram */ +#define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ +#define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ +#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ +#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ +#define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ +#define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ + +/* Alternate Ram Bandwidth Masks */ +#define I40E_ALT_BW_VALUE_MASK 0xFF +#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 +#define I40E_ALT_BW_VALID_MASK 0x80000000 + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index 61dd1b187624..2d20af290fbf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. */ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. 
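A plausible reading of the virtchnl renumbering just below: these opcodes cross the PF/VF driver boundary, so each value is pinned explicitly instead of being left to implicit enum numbering, where inserting a member would silently shift everything after it. A shortened illustration (names abbreviated; only the values mirror the diff):

#include <stdio.h>

enum op {
	OP_UNKNOWN = 0,
	OP_VERSION = 1,		/* must ALWAYS be 1 */
	OP_RESET   = 2,
	/* ... */
	OP_EVENT   = 17,
	OP_CONFIG_RSS = 18,	/* later addition takes the next free slot */
};

int main(void)
{
	printf("OP_EVENT = %d\n", OP_EVENT);	/* stable across additions */
	return 0;
}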
- */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 40f042af4131..4d69e1f04901 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -30,8 +30,8 @@ /** * i40e_vc_disable_vf - * @pf: pointer to the pf info - * @vf: pointer to the vf info + * @pf: pointer to the PF info + * @vf: pointer to the VF info * * Disable the VF through a SW reset **/ @@ -48,38 +48,40 @@ static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) /** * i40e_vc_isvalid_vsi_id - * @vf: pointer to the vf info - * @vsi_id: vf relative vsi id + * @vf: pointer to the VF info + * @vsi_id: VF relative VSI id * - * check for the valid vsi id + * check for the valid VSI id **/ -static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id) +static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) { struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); - return pf->vsi[vsi_id]->vf_id == vf->vf_id; + return (vsi && (vsi->vf_id == vf->vf_id)); } /** * i40e_vc_isvalid_queue_id - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @vsi_id: vsi id * @qid: vsi relative queue id * * check for the valid queue id **/ -static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id, +static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, u8 qid) { struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); - return qid < pf->vsi[vsi_id]->alloc_queue_pairs; + return (vsi && (qid < vsi->alloc_queue_pairs)); } /** * i40e_vc_isvalid_vector_id - * @vf: pointer to the vf info - * @vector_id: vf relative vector id + * @vf: pointer to the VF info + * @vector_id: VF relative vector id * * check for the valid vector id **/ @@ -94,19 +96,22 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) /** * i40e_vc_get_pf_queue_id - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue id * - * return pf relative queue id + * return PF relative queue id **/ -static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, +static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id, u8 
vsi_queue_id) { struct i40e_pf *pf = vf->pf; - struct i40e_vsi *vsi = pf->vsi[vsi_idx]; + struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); u16 pf_queue_id = I40E_QUEUE_END_OF_LIST; + if (!vsi) + return pf_queue_id; + if (le16_to_cpu(vsi->info.mapping_flags) & I40E_AQ_VSI_QUE_MAP_NONCONTIG) pf_queue_id = @@ -120,13 +125,13 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx, /** * i40e_config_irq_link_list - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct + * @vf: pointer to the VF info + * @vsi_id: id of VSI as given by the FW * @vecmap: irq map info * * configure irq link list from the map **/ -static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, +static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, struct i40e_virtchnl_vector_map *vecmap) { unsigned long linklistmap = 0, tempmap; @@ -171,7 +176,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, I40E_VIRTCHNL_SUPPORTED_QTYPES)); vsi_queue_id = next_q/I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q%I40E_VIRTCHNL_SUPPORTED_QTYPES; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id); wr32(hw, reg_idx, reg); @@ -198,7 +203,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx, (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); } else { pf_queue_id = I40E_QUEUE_END_OF_LIST; @@ -220,25 +225,27 @@ irq_list_done: /** * i40e_config_vsi_tx_queue - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue index * @info: config. info * * configure tx queue **/ -static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, +static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct i40e_virtchnl_txq_info *info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_hmc_obj_txq tx_ctx; + struct i40e_vsi *vsi; u16 pf_queue_id; u32 qtx_ctl; int ret = 0; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); + vsi = i40e_find_vsi_from_id(pf, vsi_id); /* clear the context structure first */ memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq)); @@ -246,7 +253,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx, /* only set the required fields */ tx_ctx.base = info->dma_ring_addr / 128; tx_ctx.qlen = info->ring_len; - tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); + tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]); tx_ctx.rdylist_act = 0; tx_ctx.head_wb_ena = info->headwb_enabled; tx_ctx.head_wb_addr = info->dma_headwb_addr; @@ -287,14 +294,14 @@ error_context: /** * i40e_config_vsi_rx_queue - * @vf: pointer to the vf info - * @vsi_idx: index of VSI in PF struct + * @vf: pointer to the VF info + * @vsi_id: id of VSI as provided by the FW * @vsi_queue_id: vsi relative queue index * @info: config. 
info * * configure rx queue **/ -static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, +static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, struct i40e_virtchnl_rxq_info *info) { @@ -304,7 +311,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, u16 pf_queue_id; int ret = 0; - pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id); + pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq)); @@ -378,10 +385,10 @@ error_param: /** * i40e_alloc_vsi_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @type: type of VSI to allocate * - * alloc vf vsi context & resources + * alloc VF vsi context & resources **/ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) { @@ -394,18 +401,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) if (!vsi) { dev_err(&pf->pdev->dev, - "add vsi failed for vf %d, aq_err %d\n", + "add vsi failed for VF %d, aq_err %d\n", vf->vf_id, pf->hw.aq.asq_last_status); ret = -ENOENT; goto error_alloc_vsi_res; } if (type == I40E_VSI_SRIOV) { u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - vf->lan_vsi_index = vsi->idx; + vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; - dev_info(&pf->pdev->dev, - "VF %d assigned LAN VSI index %d, VSI id %d\n", - vf->vf_id, vsi->idx, vsi->id); /* If the port VLAN has been configured and then the * VF driver was removed then the VSI port VLAN * configuration was destroyed. Check if there is @@ -446,9 +450,9 @@ error_alloc_vsi_res: /** * i40e_enable_vf_mappings - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * enable vf mappings + * enable VF mappings **/ static void i40e_enable_vf_mappings(struct i40e_vf *vf) { @@ -469,8 +473,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); /* map PF queues to VF queues */ - for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) { - u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); + for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { + u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j); reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); total_queue_pairs++; @@ -478,13 +482,13 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /* map PF queues to VSI */ for (j = 0; j < 7; j++) { - if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) { + if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) { reg = 0x07FF07FF; /* unused */ } else { - u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, + u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j * 2); reg = qid; - qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, + qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, (j * 2) + 1); reg |= qid << 16; } @@ -496,9 +500,9 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /** * i40e_disable_vf_mappings - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * disable vf mappings + * disable VF mappings **/ static void i40e_disable_vf_mappings(struct i40e_vf *vf) { @@ -516,9 +520,9 @@ static void i40e_disable_vf_mappings(struct i40e_vf *vf) /** * i40e_free_vf_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * free vf resources + * free VF resources **/ static void i40e_free_vf_res(struct i40e_vf *vf) { @@ -528,9 
+532,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf) int i, msix_vf; /* free vsi & disconnect it from the parent uplink */ - if (vf->lan_vsi_index) { - i40e_vsi_release(pf->vsi[vf->lan_vsi_index]); - vf->lan_vsi_index = 0; + if (vf->lan_vsi_idx) { + i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); + vf->lan_vsi_idx = 0; vf->lan_vsi_id = 0; } msix_vf = pf->hw.func_caps.num_msix_vectors_vf; @@ -571,9 +575,9 @@ static void i40e_free_vf_res(struct i40e_vf *vf) /** * i40e_alloc_vf_res - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * allocate vf resources + * allocate VF resources **/ static int i40e_alloc_vf_res(struct i40e_vf *vf) { @@ -585,15 +589,15 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); if (ret) goto error_alloc; - total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; + total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); /* store the total qps number for the runtime - * vf req validation + * VF req validation */ vf->num_queue_pairs = total_queue_pairs; - /* vf is now completely initialized */ + /* VF is now completely initialized */ set_bit(I40E_VF_STAT_INIT, &vf->vf_states); error_alloc: @@ -607,7 +611,7 @@ error_alloc: #define VF_TRANS_PENDING_MASK 0x20 /** * i40e_quiesce_vf_pci - * @vf: pointer to the vf structure + * @vf: pointer to the VF structure * * Wait for VF PCI transactions to be cleared after reset. Returns -EIO * if the transactions never clear. @@ -634,10 +638,10 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) /** * i40e_reset_vf - * @vf: pointer to the vf structure + * @vf: pointer to the VF structure * @flr: VFLR was issued or not * - * reset the vf + * reset the VF **/ void i40e_reset_vf(struct i40e_vf *vf, bool flr) { @@ -657,7 +661,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) * just need to clean up, so don't hit the VFRTRIG register. 
*/ if (!flr) { - /* reset vf using VPGEN_VFRTRIG reg */ + /* reset VF using VPGEN_VFRTRIG reg */ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); @@ -695,12 +699,12 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); /* On initial reset, we won't have any queues */ - if (vf->lan_vsi_index == 0) + if (vf->lan_vsi_idx == 0) goto complete_reset; - i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_index], false); + i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false); complete_reset: - /* reallocate vf resources to reset the VSI state */ + /* reallocate VF resources to reset the VSI state */ i40e_free_vf_res(vf); i40e_alloc_vf_res(vf); i40e_enable_vf_mappings(vf); @@ -713,78 +717,10 @@ complete_reset: } /** - * i40e_enable_pf_switch_lb - * @pf: pointer to the pf structure - * - * enable switch loop back or die - no point in a return value - **/ -void i40e_enable_pf_switch_lb(struct i40e_pf *pf) -{ - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - struct i40e_vsi_context ctxt; - int aq_ret; - - ctxt.seid = pf->main_vsi_seid; - ctxt.pf_num = pf->hw.pf_id; - ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s couldn't get pf vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); - return; - } - ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); - } -} - -/** - * i40e_disable_pf_switch_lb - * @pf: pointer to the pf structure - * - * disable switch loop back or die - no point in a return value - **/ -static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) -{ - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; - struct i40e_vsi_context ctxt; - int aq_ret; - - ctxt.seid = pf->main_vsi_seid; - ctxt.pf_num = pf->hw.pf_id; - ctxt.vf_num = 0; - aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s couldn't get pf vsi config, err %d, aq_err %d\n", - __func__, aq_ret, pf->hw.aq.asq_last_status); - return; - } - ctxt.flags = I40E_AQ_VSI_TYPE_PF; - ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); - ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); - - aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); - if (aq_ret) { - dev_info(&pf->pdev->dev, - "%s: update vsi switch failed, aq_err=%d\n", - __func__, vsi->back->hw.aq.asq_last_status); - } -} - -/** * i40e_free_vfs - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * - * free vf resources + * free VF resources **/ void i40e_free_vfs(struct i40e_pf *pf) { @@ -803,10 +739,12 @@ void i40e_free_vfs(struct i40e_pf *pf) */ if (!pci_vfs_assigned(pf->pdev)) pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); msleep(20); /* let any messages in transit get finished up */ - /* free up vf resources */ + /* free up VF resources */ tmp = pf->num_alloc_vfs; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { @@ -832,10 +770,6 @@ void i40e_free_vfs(struct i40e_pf *pf) bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; wr32(hw, 
I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); } - i40e_disable_pf_switch_lb(pf); - } else { - dev_warn(&pf->pdev->dev, - "unable to disable SR-IOV because VFs are assigned.\n"); } clear_bit(__I40E_VF_DISABLE, &pf->state); } @@ -843,10 +777,10 @@ void i40e_free_vfs(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV /** * i40e_alloc_vfs - * @pf: pointer to the pf structure - * @num_alloc_vfs: number of vfs to allocate + * @pf: pointer to the PF structure + * @num_alloc_vfs: number of VFs to allocate * - * allocate vf resources + * allocate VF resources **/ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) { @@ -883,15 +817,14 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) /* assign default capabilities */ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps); vfs[i].spoofchk = true; - /* vf resources get allocated during reset */ + /* VF resources get allocated during reset */ i40e_reset_vf(&vfs[i], false); - /* enable vf vplan_qtable mappings */ + /* enable VF vplan_qtable mappings */ i40e_enable_vf_mappings(&vfs[i]); } pf->num_alloc_vfs = num_alloc_vfs; - i40e_enable_pf_switch_lb(pf); err_alloc: if (ret) i40e_free_vfs(pf); @@ -905,7 +838,7 @@ err_iov: /** * i40e_pci_sriov_enable * @pdev: pointer to a pci_dev structure - * @num_vfs: number of vfs to allocate + * @num_vfs: number of VFs to allocate * * Enable or change the number of VFs **/ @@ -945,7 +878,7 @@ err_out: /** * i40e_pci_sriov_configure * @pdev: pointer to a pci_dev structure - * @num_vfs: number of vfs to allocate + * @num_vfs: number of VFs to allocate * * Enable or change the number of VFs. Called when the user updates the number * of VFs in sysfs. @@ -970,13 +903,13 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) /** * i40e_vc_send_msg_to_vf - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length * - * send msg to vf + * send msg to VF **/ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) @@ -1025,11 +958,11 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, /** * i40e_vc_send_resp_to_vf - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @opcode: operation code * @retval: return value * - * send resp msg to vf + * send resp msg to VF **/ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, enum i40e_virtchnl_ops opcode, @@ -1040,9 +973,9 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, /** * i40e_vc_get_version_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * - * called from the vf to request the API version used by the PF + * called from the VF to request the API version used by the PF **/ static int i40e_vc_get_version_msg(struct i40e_vf *vf) { @@ -1058,11 +991,11 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf) /** * i40e_vc_get_vf_resources_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to request its resources + * called from the VF to request its resources **/ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) { @@ -1090,18 +1023,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) } vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; vfres->num_vsis = 
num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; - if (vf->lan_vsi_index) { - vfres->vsi_res[i].vsi_id = vf->lan_vsi_index; + if (vf->lan_vsi_idx) { + vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; vfres->vsi_res[i].num_queue_pairs = - pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; + pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; memcpy(vfres->vsi_res[i].default_mac_addr, vf->default_lan_addr.addr, ETH_ALEN); i++; @@ -1109,7 +1042,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); err: - /* send the response back to the vf */ + /* send the response back to the VF */ ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, (u8 *)vfres, len); @@ -1119,13 +1052,13 @@ err: /** * i40e_vc_reset_vf_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to reset itself, - * unlike other virtchnl messages, pf driver - * doesn't send the response back to the vf + * called from the VF to reset itself, + * unlike other virtchnl messages, PF driver + * doesn't send the response back to the VF **/ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) { @@ -1135,12 +1068,12 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) /** * i40e_vc_config_promiscuous_mode_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the promiscuous mode of - * vf vsis + * called from the VF to configure the promiscuous mode of + * VF vsis **/ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1153,21 +1086,21 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, bool allmulti = false; i40e_status aq_ret; + vsi = i40e_find_vsi_from_id(pf, info->vsi_id); if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || - (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) { + (vsi->type != I40E_VSI_FCOE)) { aq_ret = I40E_ERR_PARAM; goto error_param; } - vsi = pf->vsi[info->vsi_id]; if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) allmulti = true; aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti, NULL); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, aq_ret); @@ -1175,11 +1108,11 @@ error_param: /** * i40e_vc_config_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the rx/tx + * called from the VF to configure the rx/tx * queues **/ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1221,22 +1154,22 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } - /* set vsi num_queue_pairs in use to num configured by vf */ - pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs; + /* set vsi num_queue_pairs in use to num configured by VF */ + pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, 
aq_ret); } /** * i40e_vc_config_irq_map_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to configure the irq to + * called from the VF to configure the irq to * queue map **/ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1288,18 +1221,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_config_irq_link_list(vf, vsi_id, map); } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret); } /** * i40e_vc_enable_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to enable all or specific queue(s) + * called from the VF to enable all or specific queue(s) **/ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { @@ -1323,21 +1256,22 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = I40E_ERR_PARAM; goto error_param; } - if (i40e_vsi_control_rings(pf->vsi[vsi_id], true)) + + if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true)) aq_ret = I40E_ERR_TIMEOUT; error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, aq_ret); } /** * i40e_vc_disable_queues_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to disable all or specific + * called from the VF to disable all or specific * queue(s) **/ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) @@ -1345,7 +1279,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_virtchnl_queue_select *vqs = (struct i40e_virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; - u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { @@ -1362,22 +1295,23 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) aq_ret = I40E_ERR_PARAM; goto error_param; } - if (i40e_vsi_control_rings(pf->vsi[vsi_id], false)) + + if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false)) aq_ret = I40E_ERR_TIMEOUT; error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, aq_ret); } /** * i40e_vc_get_stats_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * - * called from the vf to get vsi stats + * called from the VF to get vsi stats **/ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { @@ -1400,7 +1334,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - vsi = pf->vsi[vqs->vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -1409,14 +1343,14 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) stats = vsi->eth_stats; error_param: - /* send the response back to the vf */ + /* send the response back to the VF */ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, (u8 *)&stats, sizeof(stats)); } /** * i40e_check_vf_permission - * @vf: pointer to the vf info + * @vf: pointer to the 
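
A pattern repeated through these message handlers is worth calling out: the vsi_id carried in a virtchnl message is the firmware's identifier, not an index into pf->vsi[], so the old code that indexed the array with the wire value was conflating two different namespaces. The handlers now converge on the same shape, sketched here with the driver's own names (the validation helper already exists; the snippet just condenses what the hunks do):

	/* validate the id the VF sent, but never use it as an array index */
	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	vsi = pf->vsi[vf->lan_vsi_idx];	/* trusted PF-side bookkeeping */

The lan_vsi_index -> lan_vsi_idx rename earlier in the patch serves the same point: "idx" is the PF's array slot, "id" is what the firmware (and the VF) talks about.
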
VF info * @macaddr: pointer to the MAC Address being checked * * Check if the VF has permission to add or delete unicast MAC address @@ -1450,7 +1384,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) /** * i40e_vc_add_mac_addr_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1478,7 +1412,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) if (ret) goto error_param; } - vsi = pf->vsi[vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; /* add new addresses to the list */ for (i = 0; i < al->num_elements; i++) { @@ -1507,14 +1441,14 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, ret); } /** * i40e_vc_del_mac_addr_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1546,7 +1480,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } - vsi = pf->vsi[vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; /* delete addresses from the list */ for (i = 0; i < al->num_elements; i++) @@ -1558,14 +1492,14 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, ret); } /** * i40e_vc_add_vlan_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1596,7 +1530,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } - vsi = pf->vsi[vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (vsi->info.pvid) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -1613,13 +1547,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); } /** * i40e_vc_remove_vlan_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1649,7 +1583,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } } - vsi = pf->vsi[vsi_id]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (vsi->info.pvid) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -1664,13 +1598,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) } error_param: - /* send the response to the vf */ + /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); } /** * i40e_vc_validate_vf_msg - * @vf: pointer to the vf info + * @vf: pointer to the VF info * @msg: pointer to the msg buffer * @msglen: msg length * @msghndl: msg handle @@ -1776,14 +1710,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, /** * i40e_vc_process_vf_msg - * @pf: pointer to the pf structure - * @vf_id: source vf id + * @pf: pointer to the PF structure + * @vf_id: source VF id * @msg: pointer to the msg buffer * @msglen: msg length * @msghndl: msg handle * * called from the common aeq/arq handler to - * process 
request from vf + * process request from VF **/ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) @@ -1801,7 +1735,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); if (ret) { - dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n", + dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", local_vf_id, v_opcode, msglen); return ret; } @@ -1849,7 +1783,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, break; case I40E_VIRTCHNL_OP_UNKNOWN: default: - dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n", + dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, local_vf_id); ret = i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_NOT_IMPLEMENTED); @@ -1861,10 +1795,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, /** * i40e_vc_process_vflr_event - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * called from the vlfr irq handler to - * free up vf resources and state variables + * free up VF resources and state variables **/ int i40e_vc_process_vflr_event(struct i40e_pf *pf) { @@ -1885,7 +1819,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32; bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; - /* read GLGEN_VFLRSTAT register to find out the flr vfs */ + /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); if (reg & (1 << bit_idx)) { @@ -1902,7 +1836,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) /** * i40e_vc_vf_broadcast - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * @opcode: operation code * @retval: return value * @msg: pointer to the msg buffer @@ -1921,7 +1855,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - /* Not all vfs are enabled so skip the ones that are not */ + /* Not all VFs are enabled so skip the ones that are not */ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) continue; @@ -1936,7 +1870,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, /** * i40e_vc_notify_link_state - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * send a link status message to all VFs on a given PF **/ @@ -1969,7 +1903,7 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf) /** * i40e_vc_notify_reset - * @pf: pointer to the pf structure + * @pf: pointer to the PF structure * * indicate a pending reset to all VFs on a given PF **/ @@ -1985,7 +1919,7 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) /** * i40e_vc_notify_vf_reset - * @vf: pointer to the vf structure + * @vf: pointer to the VF structure * * indicate a pending reset to the given VF **/ @@ -2015,10 +1949,10 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) /** * i40e_ndo_set_vf_mac * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @mac: mac address * - * program vf mac address + * program VF mac address **/ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { @@ -2038,7 +1972,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } vf = 
&(pf->vf[vf_id]); - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); @@ -2083,11 +2017,11 @@ error_param: /** * i40e_ndo_set_vf_port_vlan * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @vlan_id: mac address * @qos: priority setting * - * program vf vlan id and/or qos + * program VF vlan id and/or qos **/ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos) @@ -2112,7 +2046,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, } vf = &(pf->vf[vf_id]); - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); ret = -EINVAL; @@ -2196,10 +2130,10 @@ error_pvid: /** * i40e_ndo_set_vf_bw * @netdev: network interface device structure - * @vf_id: vf identifier - * @tx_rate: tx rate + * @vf_id: VF identifier + * @tx_rate: Tx rate * - * configure vf tx rate + * configure VF Tx rate **/ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) @@ -2219,13 +2153,13 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, } if (min_tx_rate) { - dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n", + dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", min_tx_rate, vf_id); return -EINVAL; } vf = &(pf->vf[vf_id]); - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); ret = -EINVAL; @@ -2247,7 +2181,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, } if (max_tx_rate > speed) { - dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.", + dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", max_tx_rate, vf->vf_id); ret = -EINVAL; goto error; @@ -2276,10 +2210,10 @@ error: /** * i40e_ndo_get_vf_config * @netdev: network interface device structure - * @vf_id: vf identifier - * @ivi: vf configuration structure + * @vf_id: VF identifier + * @ivi: VF configuration structure * - * return vf configuration + * return VF configuration **/ int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) @@ -2299,7 +2233,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, vf = &(pf->vf[vf_id]); /* first vsi is always the LAN vsi */ - vsi = pf->vsi[vf->lan_vsi_index]; + vsi = pf->vsi[vf->lan_vsi_idx]; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); ret = -EINVAL; @@ -2331,7 +2265,7 @@ error_param: /** * i40e_ndo_set_vf_link_state * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @link: required link state * * Set the link state of a specified VF, regardless of physical link state @@ -2394,7 +2328,7 @@ error_out: /** * i40e_ndo_set_vf_spoofchk * @netdev: network interface device structure - * @vf_id: vf identifier + * @vf_id: VF identifier * @enable: flag to enable or disable feature * * Enable or disable VF spoof checking @@ -2423,11 +2357,12 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) vf->spoofchk = enable; memset(&ctxt, 0, sizeof(ctxt)); - ctxt.seid = 
pf->vsi[vf->lan_vsi_index]->seid; + ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; ctxt.pf_num = pf->hw.pf_id; ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); if (enable) - ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK | + I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK); ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 9452f5247cff..09043c1aae54 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -71,12 +71,12 @@ enum i40e_vf_capabilities { struct i40e_vf { struct i40e_pf *pf; - /* vf id in the pf space */ + /* VF id in the PF space */ u16 vf_id; - /* all vf vsis connect to the same parent */ + /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; - /* vf Port Extender (PE) stag if used */ + /* VF Port Extender (PE) stag if used */ u16 stag; struct i40e_virtchnl_ether_addr default_lan_addr; @@ -88,10 +88,10 @@ struct i40e_vf { * When assigned, these will be non-zero, because VSI 0 is always * the main LAN VSI for the PF. */ - u8 lan_vsi_index; /* index into PF struct */ + u8 lan_vsi_idx; /* index into PF struct */ u8 lan_vsi_id; /* ID as used by firmware */ - u8 num_queue_pairs; /* num of qps assigned to vf vsis */ + u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */ u64 num_valid_msgs; /* num of valid msgs detected */ @@ -100,7 +100,7 @@ struct i40e_vf { unsigned long vf_states; /* vf's runtime states */ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ bool link_forced; - bool link_up; /* only valid if vf link is forced */ + bool link_up; /* only valid if VF link is forced */ bool spoofchk; }; @@ -113,7 +113,7 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf); void i40e_reset_vf(struct i40e_vf *vf, bool flr); void i40e_vc_notify_vf_reset(struct i40e_vf *vf); -/* vf configuration related iplink handlers */ +/* VF configuration related iplink handlers */ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos); @@ -126,6 +126,5 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); -void i40e_enable_pf_switch_lb(struct i40e_pf *pf); #endif /* _I40E_VIRTCHNL_PF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index 60f04e96a80e..ef43d68f67b3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -93,6 +93,7 @@ struct i40e_adminq_info { u16 asq_buf_size; /* send queue buffer size */ u16 fw_maj_ver; /* firmware major version */ u16 
fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ bool nvm_release_on_done; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 28c40c57d4f5..39fcb1dc4ea6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -51,6 +51,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: case I40E_DEV_ID_10G_BASE_T: + case I40E_DEV_ID_20G_KR2: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: @@ -85,46 +86,53 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; u16 len = le16_to_cpu(aq_desc->datalen); - u8 *aq_buffer = (u8 *)buffer; - u32 data[4]; - u32 i = 0; + u8 *buf = (u8 *)buffer; + u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - aq_desc->opcode, aq_desc->flags, aq_desc->datalen, - aq_desc->retval); + le16_to_cpu(aq_desc->opcode), + le16_to_cpu(aq_desc->flags), + le16_to_cpu(aq_desc->datalen), + le16_to_cpu(aq_desc->retval)); i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n", - aq_desc->cookie_high, aq_desc->cookie_low); + le32_to_cpu(aq_desc->cookie_high), + le32_to_cpu(aq_desc->cookie_low)); i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n", - aq_desc->params.internal.param0, - aq_desc->params.internal.param1); + le32_to_cpu(aq_desc->params.internal.param0), + le32_to_cpu(aq_desc->params.internal.param1)); i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n", - aq_desc->params.external.addr_high, - aq_desc->params.external.addr_low); + le32_to_cpu(aq_desc->params.external.addr_high), + le32_to_cpu(aq_desc->params.external.addr_low)); if ((buffer != NULL) && (aq_desc->datalen != 0)) { - memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); if (buf_len < len) len = buf_len; - for (i = 0; i < len; i++) { - data[((i % 16) / 4)] |= - ((u32)aq_buffer[i]) << (8 * (i % 4)); - if ((i % 16) == 15) { - i40e_debug(hw, mask, - "\t0x%04X %08X %08X %08X %08X\n", - i - 15, data[0], data[1], data[2], - data[3]); - memset(data, 0, sizeof(data)); - } + /* write the full 16-byte chunks */ + for (i = 0; i < (len - 16); i += 16) + i40e_debug(hw, mask, + "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", + i, buf[i], buf[i + 1], buf[i + 2], + buf[i + 3], buf[i + 4], buf[i + 5], + buf[i + 6], buf[i + 7], buf[i + 8], + buf[i + 9], buf[i + 10], buf[i + 11], + buf[i + 12], buf[i + 13], buf[i + 14], + buf[i + 15]); + /* write whatever's left over without overrunning the buffer */ + if (i < len) { + char d_buf[80]; + int j = 0; + + memset(d_buf, 0, sizeof(d_buf)); + j += sprintf(d_buf, "\t0x%04X ", i); + while (i < len) + j += sprintf(&d_buf[j], " %02X", buf[i++]); + i40e_debug(hw, mask, "%s\n", d_buf); } - if ((i % 16) != 0) - i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n", - i - (i % 16), data[0], data[1], data[2], - data[3]); } } @@ -535,7 +543,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { I40E_PTT_UNUSED_ENTRY(255) }; - /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 
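
One note on the i40evf_debug_aq() rewrite above: the old loop packed bytes into u32s and printed them with %08X, which rendered each 4-byte group with byte 0 in the low (rightmost) digits; the new loop prints bytes in address order. Assuming a 20-byte buffer of incrementing values, the output would look roughly like:

	AQ CMD Buffer:
		0x0000 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F
		0x0010  10 11 12 13

(The trailing partial line comes from the sprintf() fall-back path, which also catches the final full chunk when len is an exact multiple of 16, since the main loop runs only while i < len - 16.)
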
9173834825ac..58e37a44b80a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -59,8 +59,7 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); bool i40evf_check_asq_alive(struct i40e_hw *hw); -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, - bool unloading); +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); i40e_status i40e_set_mac_type(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index c1f6a59bfea0..3cc737629bf7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -310,6 +310,10 @@ #define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 #define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) +#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 +#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 +#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) #define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 #define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) @@ -421,6 +425,8 @@ #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26 +#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT) #define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) @@ -484,7 +490,9 @@ #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 +#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 @@ -548,9 +556,6 @@ #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) -#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */ -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0 -#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) @@ -1066,7 
+1071,7 @@ #define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 #define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */ +#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ @@ -1171,7 +1176,7 @@ #define I40E_VFINT_ITRN_MAX_INDEX 2 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) @@ -1803,9 +1808,6 @@ #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) -#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */ -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0 -#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 #define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) @@ -1902,6 +1904,11 @@ #define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) +#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */ +#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 +#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) +#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 +#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) @@ -2374,20 +2381,20 @@ #define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 +#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 +#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define 
I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 +#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0 -#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT) +#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 +#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_CRCERRS_MAX_INDEX 3 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 @@ -2620,10 +2627,6 @@ #define I40E_GLPRT_TDOLD_MAX_INDEX 3 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) -#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDPC_MAX_INDEX 3 -#define I40E_GLPRT_TDPC_TDPC_SHIFT 0 -#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_GLPRT_UPRCH_MAX_INDEX 3 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 @@ -2990,9 +2993,6 @@ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) -#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */ -#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0 -#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3258,7 +3258,7 @@ #define I40E_VFINT_ITRN1_MAX_INDEX 2 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 #define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */ +#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) #define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 708891571dae..b077e02a0cc7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -25,6 +25,7 @@ ******************************************************************************/ #include <linux/prefetch.h> +#include <net/busy_poll.h> #include "i40evf.h" #include "i40e_prototype.h" @@ -288,6 +289,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_desc = I40E_TX_DESC(tx_ring, 0); } + prefetch(tx_desc); + /* update budget accounting */ budget--; } while (likely(budget)); @@ -368,6 +371,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* 
set noitr */ I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; /* allow 00 to be written to the index */ @@ -529,6 +533,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; + if (ring_is_ps_enabled(rx_ring)) { + int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; + + rx_bi = &rx_ring->rx_bi[0]; + if (rx_bi->hdr_buf) { + dma_free_coherent(dev, + bufsz, + rx_bi->hdr_buf, + rx_bi->dma); + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = 0; + rx_bi->hdr_buf = NULL; + } + } + } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { rx_bi = &rx_ring->rx_bi[i]; @@ -587,6 +607,37 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** + * i40evf_alloc_rx_headers - allocate rx header buffers + * @rx_ring: ring to alloc buffers + * + * Allocate rx header buffers for the entire ring. As these are static, + * this is only called when setting up a new ring. + **/ +void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + struct i40e_rx_buffer *rx_bi; + dma_addr_t dma; + void *buffer; + int buf_size; + int i; + + if (rx_ring->rx_bi[0].hdr_buf) + return; + /* Make sure the buffers don't cross cache line boundaries. */ + buf_size = ALIGN(rx_ring->rx_hdr_len, 256); + buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, + &dma, GFP_KERNEL); + if (!buffer) + return; + for (i = 0; i < rx_ring->count; i++) { + rx_bi = &rx_ring->rx_bi[i]; + rx_bi->dma = dma + (i * buf_size); + rx_bi->hdr_buf = buffer + (i * buf_size); + } +} + +/** * i40evf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -646,11 +697,76 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40evf_alloc_rx_buffers - Replace used receive buffers; packet split + * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +{ + u16 i = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return; + + while (cleaned_count--) { + rx_desc = I40E_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_bi[i]; + + if (bi->skb) /* desc is in use */ + goto no_buffers; + if (!bi->page) { + bi->page = alloc_page(GFP_ATOMIC); + if (!bi->page) { + rx_ring->rx_stats.alloc_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(rx_ring->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(rx_ring->dev, + bi->page_dma)) { + rx_ring->rx_stats.alloc_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + dma_sync_single_range_for_device(rx_ring->dev, + bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
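
To make the header-split bookkeeping concrete, here is the layout i40evf_alloc_rx_headers() (earlier in this file) sets up, with illustrative numbers only:

	/* say rx_hdr_len = 96 and the ring has 512 descriptors */
	buf_size = ALIGN(96, 256);	/* -> 256 bytes per slot */
	/* one dma_alloc_coherent() of 256 * 512 = 128 KiB for the ring */
	rx_bi[i].dma     = dma    + i * buf_size;	/* device view */
	rx_bi[i].hdr_buf = buffer + i * buf_size;	/* CPU view    */

Rounding each slot up to 256 bytes guarantees no header buffer straddles a 256-byte boundary (the "cache line" concern in the code's comment), and carving everything from one coherent block is what lets the i40evf_clean_rx_ring() hunk free all of it with a single dma_free_coherent() on slot 0's addresses.
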
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); + i++; + if (i == rx_ring->count) + i = 0; + } + +no_buffers: + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); +} + +/** + * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ -void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -690,40 +806,8 @@ void i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) } } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. - */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; i++; if (i == rx_ring->count) i = 0; @@ -777,10 +861,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct iphdr *iph; __sum16 csum; - ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); skb->ip_summed = CHECKSUM_NONE; @@ -831,9 +915,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * so the total length of IPv4 header is IHL*4 bytes * The UDP_0 bit *may* bet set if the *inner* header is UDP */ - if (ipv4_tunnel && - (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) && - !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { + if (ipv4_tunnel) { skb->transport_header = skb->mac_header + sizeof(struct ethhdr) + (ip_hdr(skb)->ihl * 4); @@ -843,15 +925,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, skb->protocol == htons(ETH_P_8021AD)) ? 
VLAN_HLEN : 0; - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic( - iph->saddr, iph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && + (udp_hdr(skb)->check != 0)) { + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic(iph->saddr, iph->daddr, + (skb->len - + skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); + + if (udp_hdr(skb)->check != csum) + goto checksum_fail; - if (udp_hdr(skb)->check != csum) - goto checksum_fail; + } /* else its GRE and so no outer UDP header */ } skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -906,13 +992,13 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype) } /** - * i40e_clean_rx_irq - Reclaim resources after receive completes + * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split * @rx_ring: rx ring to clean * @budget: how many cleans we're allowed * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -925,20 +1011,49 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u8 rx_ptype; u64 qword; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - - while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { - union i40e_rx_desc *next_rxd; + do { struct i40e_rx_buffer *rx_bi; struct sk_buff *skb; u16 vlan_tag; + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + cleaned_count = 0; + } + + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. 
+ */ + dma_rmb(); rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; - prefetch(skb->data); + if (likely(!skb)) { + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->dma, + 0, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); + } rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> @@ -953,40 +1068,30 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + prefetch(rx_bi->page); rx_bi->skb = NULL; - - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * STATUS_DD bit is set - */ - rmb(); - - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - if (rx_bi->dma) { - u16 len; - + cleaned_count++; + if (rx_hbo || rx_sph) { + int len; if (rx_hbo) len = I40E_RX_HDR_SIZE; - else if (rx_sph) - len = rx_header_len; - else if (rx_packet_len) - len = rx_packet_len; /* 1buf/no split found */ else - len = rx_header_len; /* split always mode */ - - skb_put(skb, len); - dma_unmap_single(rx_ring->dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; + len = rx_header_len; + memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); + } else if (skb->len == 0) { + int len; + + len = (rx_packet_len > skb_headlen(skb) ? + skb_headlen(skb) : rx_packet_len); + memcpy(__skb_put(skb, len), + rx_bi->page + rx_bi->page_offset, + len); + rx_bi->page_offset += len; + rx_packet_len -= len; } /* Get the rest of the data if this was a header split */ - if (ring_is_ps_enabled(rx_ring) && rx_packet_len) { - + if (rx_packet_len) { skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_bi->page, rx_bi->page_offset, @@ -1008,22 +1113,16 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) DMA_FROM_DEVICE); rx_bi->page_dma = 0; } - I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd); + I40E_RX_INCREMENT(rx_ring, i); if (unlikely( !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { struct i40e_rx_buffer *next_buffer; next_buffer = &rx_ring->rx_bi[i]; - - if (ring_is_ps_enabled(rx_ring)) { - rx_bi->skb = next_buffer->skb; - rx_bi->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } + next_buffer->skb = skb; rx_ring->rx_stats.non_eop_descs++; - goto next_desc; + continue; } /* ERR_MASK will only have valid bits if EOP set */ @@ -1032,7 +1131,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* TODO: shouldn't we increment a counter indicating the * drop? */ - goto next_desc; + continue; } skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), @@ -1048,30 +1147,134 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ? 
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; +#ifdef I40E_FCOE + if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + dev_kfree_skb_any(skb); + continue; + } +#endif + skb_mark_napi_id(skb, &rx_ring->q_vector->napi); i40e_receive_skb(rx_ring, skb, vlan_tag); rx_ring->netdev->last_rx = jiffies; - budget--; -next_desc: rx_desc->wb.qword1.status_error_len = 0; - if (!budget) - break; - cleaned_count++; + } while (likely(total_rx_packets < budget)); + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + + return total_rx_packets; +} + +/** + * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer + * @rx_ring: rx ring to clean + * @budget: how many cleans we're allowed + * + * Returns number of packets cleaned + **/ +static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct i40e_vsi *vsi = rx_ring->vsi; + union i40e_rx_desc *rx_desc; + u32 rx_error, rx_status; + u16 rx_packet_len; + u8 rx_ptype; + u64 qword; + u16 i; + + do { + struct i40e_rx_buffer *rx_bi; + struct sk_buff *skb; + u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); + i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); cleaned_count = 0; } - /* use prefetched values */ - rx_desc = next_rxd; + i = rx_ring->next_to_clean; + rx_desc = I40E_RX_DESC(rx_ring, i); qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; - } + I40E_RXD_QW1_STATUS_SHIFT; + + if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * DD bit is set. + */ + dma_rmb(); + + rx_bi = &rx_ring->rx_bi[i]; + skb = rx_bi->skb; + prefetch(skb->data); + + rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT); + + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + rx_bi->skb = NULL; + cleaned_count++; + + /* Get the header and possibly the whole packet + * If this is an skb from previous receive dma will be 0 + */ + skb_put(skb, rx_packet_len); + dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + rx_bi->dma = 0; + + I40E_RX_INCREMENT(rx_ring, i); + + if (unlikely( + !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) { + rx_ring->rx_stats.non_eop_descs++; + continue; + } + + /* ERR_MASK will only have valid bits if EOP set */ + if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + dev_kfree_skb_any(skb); + /* TODO: shouldn't we increment a counter indicating the + * drop? 
+ */ + continue; + } + + skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc), + i40e_ptype_to_hash(rx_ptype)); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + + i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + + vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) + ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) + : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + + rx_ring->netdev->last_rx = jiffies; + rx_desc->wb.qword1.status_error_len = 0; + } while (likely(total_rx_packets < budget)); - rx_ring->next_to_clean = i; u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1079,10 +1282,7 @@ next_desc: rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - i40evf_alloc_rx_buffers(rx_ring, cleaned_count); - - return budget > 0; + return total_rx_packets; } /** @@ -1103,6 +1303,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) bool clean_complete = true; bool arm_wb = false; int budget_per_ring; + int cleaned; if (test_bit(__I40E_DOWN, &vsi->state)) { napi_complete(napi); @@ -1122,8 +1323,14 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) */ budget_per_ring = max(budget/q_vector->num_ringpairs, 1); - i40e_for_each_ring(ring, q_vector->rx) - clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); + i40e_for_each_ring(ring, q_vector->rx) { + if (ring_is_ps_enabled(ring)) + cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); + else + cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + /* if we didn't clean as many as budgeted, we must be done */ + clean_complete &= (budget_per_ring != cleaned); + } /* If work not completed, return budget and polling will return */ if (!clean_complete) { @@ -1163,6 +1370,19 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, __be16 protocol = skb->protocol; u32 tx_flags = 0; + if (protocol == htons(ETH_P_8021Q) && + !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off by the user the + * stack sets the protocol to 8021q so that the driver + * can take any steps required to support the SW only + * VLAN handling. In our case the driver doesn't need + * to take any further steps so just set the protocol + * to the encapsulated ethertype. 
+ */ + skb->protocol = vlan_get_protocol(skb); + goto out; + } + /* if we have a HW VLAN tag being added, default to the HW one */ if (skb_vlan_tag_present(skb)) { tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT; @@ -1179,6 +1399,7 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, tx_flags |= I40E_TX_FLAGS_SW_VLAN; } +out: *flags = tx_flags; return 0; } @@ -1262,8 +1483,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + u32 l4_tunnel = 0; if (skb->encapsulation) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_UDP: + l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + break; + default: + return; + } network_hdr_len = skb_inner_network_header_len(skb); this_ip_hdr = inner_ip_hdr(skb); this_ipv6_hdr = inner_ipv6_hdr(skb); @@ -1286,8 +1515,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - I40E_TXD_CTX_UDP_TUNNELING | + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | + l4_tunnel | ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index c950a038237c..1e49bb1fbac1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -96,6 +96,14 @@ enum i40e_dyn_idx_t { /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_INCREMENT(r, i) \ + do { \ + (i)++; \ + if ((i) == (r)->count) \ + i = 0; \ + r->next_to_clean = i; \ + } while (0) + #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ (i)++; \ @@ -151,6 +159,7 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; + void *hdr_buf; dma_addr_t dma; struct page *page; dma_addr_t page_dma; @@ -223,8 +232,8 @@ struct i40e_ring { u16 rx_buf_len; u8 dtype; #define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 1 -#define I40E_RX_DTYPE_HEADER_SPLIT 2 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 @@ -278,7 +287,9 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +void i40evf_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 3d0fdaab5cc8..9c79cb6abb2b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -44,7 +44,8 @@ #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 #define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_VF 0x154C +#define I40E_DEV_ID_20G_KR2 0x1587 +#define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ @@ -175,12 +176,12 @@ struct i40e_link_status { u8 an_info; u8 ext_info; u8 loopback; - bool an_enabled; /* is Link Status Event notification to SW enabled */ bool lse_enable; u16 max_frame_size; bool crc_enable; u8 pacing; + u8 requested_speeds; }; struct i40e_phy_info { @@ -1116,7 +1117,7 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 #define I40E_SR_EMP_MODULE_PTR 0x0F -#define I40E_SR_NVM_IMAGE_VERSION 0x18 +#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index e0c8208138f4..59f62f0e65dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -59,31 +59,29 @@ * of the virtchnl_msg structure. */ enum i40e_virtchnl_ops { -/* VF sends req. to pf for the following - * ops. +/* The PF sends status change events to VFs using + * the I40E_VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. */ I40E_VIRTCHNL_OP_UNKNOWN = 0, I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, - I40E_VIRTCHNL_OP_ENABLE_QUEUES, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, - I40E_VIRTCHNL_OP_ADD_VLAN, - I40E_VIRTCHNL_OP_DEL_VLAN, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - I40E_VIRTCHNL_OP_GET_STATS, - I40E_VIRTCHNL_OP_FCOE, - I40E_VIRTCHNL_OP_CONFIG_RSS, -/* PF sends status change events to vfs using - * the following op. - */ - I40E_VIRTCHNL_OP_EVENT, + I40E_VIRTCHNL_OP_RESET_VF = 2, + I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, + I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, + I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, + I40E_VIRTCHNL_OP_ADD_VLAN = 12, + I40E_VIRTCHNL_OP_DEL_VLAN = 13, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + I40E_VIRTCHNL_OP_GET_STATS = 15, + I40E_VIRTCHNL_OP_FCOE = 16, + I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_CONFIG_RSS = 18, }; /* Virtual channel message descriptor. 
This overlays the admin queue diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 981224743c73..34c8565031f6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -272,6 +272,8 @@ void i40evf_update_stats(struct i40evf_adapter *adapter); void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter); int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter); void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask); +void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter); +void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter); void i40e_napi_add_all(struct i40evf_adapter *adapter); void i40e_napi_del_all(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 69b97bac182c..f4e77665bc54 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,7 +29,6 @@ #include <linux/uaccess.h> - struct i40evf_stats { char stat_string[ETH_GSTRING_LEN]; int stat_offset; @@ -180,7 +179,7 @@ static u32 i40evf_get_msglevel(struct net_device *netdev) } /** - * i40evf_get_msglevel - Set debug message level + * i40evf_set_msglevel - Set debug message level * @netdev: network interface device structure * @data: message level * @@ -191,6 +190,8 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data) { struct i40evf_adapter *adapter = netdev_priv(netdev); + if (I40E_DEBUG_USER & data) + adapter->hw.debug_mask = data; adapter->msg_enable = data; } @@ -208,7 +209,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, i40evf_driver_name, 32); strlcpy(drvinfo->version, i40evf_driver_version, 32); - + strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); } @@ -640,12 +641,14 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); - indir[j++] = hlut_val & 0xff; - indir[j++] = (hlut_val >> 8) & 0xff; - indir[j++] = (hlut_val >> 16) & 0xff; - indir[j++] = (hlut_val >> 24) & 0xff; + if (indir) { + for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { + hlut_val = rd32(hw, I40E_VFQF_HLUT(i)); + indir[j++] = hlut_val & 0xff; + indir[j++] = (hlut_val >> 8) & 0xff; + indir[j++] = (hlut_val >> 16) & 0xff; + indir[j++] = (hlut_val >> 24) & 0xff; + } } return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8d8c201c63c1..6d5f3b21c68a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. 
+ * Copyright(c) 2013 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -28,15 +28,13 @@ #include "i40e_prototype.h" static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter); static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter); -static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter); -static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter); static int i40evf_close(struct net_device *netdev); char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "1.2.0" +#define DRV_VERSION "1.2.25" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; @@ -244,6 +242,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) if (mask & (1 << (i - 1))) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); } } @@ -263,6 +262,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & 1) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); } @@ -270,6 +270,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) if (mask & (1 << i)) { dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | I40E_VFINT_DYN_CTLN_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); } @@ -524,7 +525,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter) int err; snprintf(adapter->misc_vector_name, - sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx"); + sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx", + dev_name(&adapter->pdev->dev)); err = request_irq(adapter->msix_entries[0].vector, &i40evf_msix_aq, 0, adapter->misc_vector_name, netdev); @@ -662,13 +664,21 @@ i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) static struct i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) { - struct i40evf_vlan_filter *f; + struct i40evf_vlan_filter *f = NULL; + int count = 50; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + udelay(1); + if (--count == 0) + goto out; + } f = i40evf_find_vlan(adapter, vlan); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); if (!f) - return NULL; + goto clearout; f->vlan = vlan; @@ -678,6 +688,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; } +clearout: + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +out: return f; } @@ -689,12 +702,21 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) { struct i40evf_vlan_filter *f; + int count = 50; + + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) { + udelay(1); + if (--count == 0) + return; + } f = i40evf_find_vlan(adapter, vlan); if (f) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; } + clear_bit(__I40EVF_IN_CRITICAL_TASK, 
&adapter->crit_section); } /** @@ -761,13 +783,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, u8 *macaddr) { struct i40evf_mac_filter *f; + int count = 50; if (!macaddr) return NULL; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) + return NULL; + } f = i40evf_find_filter(adapter, macaddr); if (!f) { @@ -828,6 +854,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct i40evf_mac_filter *f, *ftmp; struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; + int count = 50; /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { @@ -838,8 +865,14 @@ static void i40evf_set_rx_mode(struct net_device *netdev) } while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + &adapter->crit_section)) { udelay(1); + if (--count == 0) { + dev_err(&adapter->pdev->dev, + "Failed to get lock in %s\n", __func__); + return; + } + } /* remove filter if not in netdev list */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { bool found = false; @@ -920,7 +953,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = adapter->rx_rings[i]; - i40evf_alloc_rx_buffers(ring, ring->count); + i40evf_alloc_rx_buffers_1buf(ring, ring->count); ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -958,6 +991,9 @@ void i40evf_down(struct i40evf_adapter *adapter) &adapter->crit_section)) usleep_range(500, 1000); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + i40evf_napi_disable_all(adapter); i40evf_irq_disable(adapter); /* remove all MAC filters */ @@ -981,15 +1017,7 @@ void i40evf_down(struct i40evf_adapter *adapter) adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; } - netif_tx_disable(netdev); - - netif_tx_stop_all_queues(netdev); - - i40evf_napi_disable_all(adapter); - - msleep(20); - netif_carrier_off(netdev); clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } @@ -1344,6 +1372,11 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { + i40evf_disable_queues(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) { i40evf_map_queues(adapter); goto watchdog_done; @@ -1369,11 +1402,6 @@ static void i40evf_watchdog_task(struct work_struct *work) goto watchdog_done; } - if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) { - i40evf_disable_queues(adapter); - goto watchdog_done; - } - if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) { i40evf_configure_queues(adapter); goto watchdog_done; @@ -1407,41 +1435,22 @@ restart_watchdog: } /** - * next_queue - increment to next available tx queue - * @adapter: board private structure - * @j: queue counter - * - * Helper function for RSS programming to increment through available - * queus. Returns the next queue value. - **/ -static int next_queue(struct i40evf_adapter *adapter, int j) -{ - j += 1; - - return j >= adapter->num_active_queues ? 
0 : j; -} - -/** - * i40evf_configure_rss - Prepare for RSS if used + * i40evf_configure_rss - Prepare for RSS * @adapter: board private structure **/ static void i40evf_configure_rss(struct i40evf_adapter *adapter) { u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1]; struct i40e_hw *hw = &adapter->hw; + u32 cqueue = 0; u32 lut = 0; int i, j; u64 hena; - /* No RSS for single queue. */ - if (adapter->num_active_queues == 1) { - wr32(hw, I40E_VFQF_HENA(0), 0); - wr32(hw, I40E_VFQF_HENA(1), 0); - return; - } - /* Hash type is configured by the PF - we just supply the key */ netdev_rss_key_fill(rss_key, sizeof(rss_key)); + + /* Fill out hash function seed */ for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]); @@ -1451,16 +1460,14 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. of queues in round robin fashion */ - j = adapter->num_active_queues; for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - j = next_queue(adapter, j); - lut = j; - j = next_queue(adapter, j); - lut |= j << 8; - j = next_queue(adapter, j); - lut |= j << 16; - j = next_queue(adapter, j); - lut |= j << 24; + lut = 0; + for (j = 0; j < 4; j++) { + if (cqueue == adapter->vsi_res->num_queue_pairs) + cqueue = 0; + lut |= ((cqueue) << (8 * j)); + cqueue++; + } wr32(hw, I40E_VFQF_HLUT(i), lut); } i40e_flush(hw); @@ -1481,9 +1488,11 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_adapter *adapter = container_of(work, struct i40evf_adapter, reset_task); + struct net_device *netdev = adapter->netdev; struct i40e_hw *hw = &adapter->hw; - int i = 0, err; + struct i40evf_mac_filter *f; uint32_t rstat_val; + int i = 0, err; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) @@ -1528,7 +1537,11 @@ static void i40evf_reset_task(struct work_struct *work) if (netif_running(adapter->netdev)) { set_bit(__I40E_DOWN, &adapter->vsi.state); - i40evf_down(adapter); + i40evf_irq_disable(adapter); + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); i40evf_free_traffic_irqs(adapter); i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); @@ -1560,22 +1573,38 @@ static void i40evf_reset_task(struct work_struct *work) continue_reset: adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - i40evf_down(adapter); + i40evf_irq_disable(adapter); + + if (netif_running(adapter->netdev)) { + i40evf_napi_disable_all(adapter); + netif_tx_disable(netdev); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + adapter->state = __I40EVF_RESETTING; /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) - dev_warn(&adapter->pdev->dev, - "%s: Failed to destroy the Admin Queue resources\n", - __func__); + dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n"); + adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; err = i40evf_init_adminq(hw); if (err) - dev_info(&adapter->pdev->dev, "%s: init_adminq failed: %d\n", - __func__, err); + dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", + err); - adapter->aq_pending = 0; - adapter->aq_required = 0; i40evf_map_queues(adapter); + + /* re-add all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) { + f->add = true; + } + /* re-add all VLAN filters */ + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + f->add = true; + } + adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; + 
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1688,7 +1717,7 @@ out: * * Free all transmit software resources **/ -static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) +void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter) { int i; @@ -1758,7 +1787,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter) * * Free all receive software resources **/ -static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) +void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter) { int i; @@ -1788,7 +1817,7 @@ static int i40evf_open(struct net_device *netdev) dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } - if (adapter->state != __I40EVF_DOWN) + if (adapter->state != __I40EVF_DOWN || adapter->aq_required) return -EBUSY; /* allocate transmit descriptors */ @@ -1852,9 +1881,6 @@ static int i40evf_close(struct net_device *netdev) adapter->state = __I40EVF_DOWN; i40evf_free_traffic_irqs(adapter); - i40evf_free_all_tx_resources(adapter); - i40evf_free_all_rx_resources(adapter); - return 0; } @@ -1977,7 +2003,7 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) * * This task completes the work that was begun in probe. Due to the nature * of VF-PF communications, we may need to wait tens of milliseconds to get - * reponses back from the PF. Rather than busy-wait in probe and bog down the + * responses back from the PF. Rather than busy-wait in probe and bog down the * whole system, we'll do it in a task so we can sleep. * This task only runs during driver init. Once we've established * communications with the PF driver and set up our netdev, the watchdog @@ -2368,7 +2394,7 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * i40evf_resume - Power managment resume routine + * i40evf_resume - Power management resume routine * @pdev: PCI device information struct * * Called when the system (VM) is resumed from sleep/suspend. 
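The LUT fill in i40evf_configure_rss() above replaces the old next_queue() helper with a nested loop that packs four byte-wide queue indices into each 32-bit I40E_VFQF_HLUT register, wrapping back to queue 0 when num_queue_pairs is reached. A condensed sketch of just that packing step (the helper name is hypothetical; the logic mirrors the hunk above):

/* Pack four byte-wide queue indices into one RSS LUT word, wrapping
 * round-robin at num_queues, as i40evf_configure_rss() does above.
 */
static u32 rss_lut_word(u32 *cqueue, u32 num_queues)
{
	u32 lut = 0;
	int j;

	for (j = 0; j < 4; j++) {
		if (*cqueue == num_queues)
			*cqueue = 0;
		lut |= (*cqueue)++ << (8 * j);
	}
	return lut;
}

With three queue pairs, successive words come out as 0,1,2,0 then 1,2,0,1 and so on, so every queue appears in the table with near-equal weight.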
@@ -2468,6 +2494,8 @@ static void i40evf_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); pci_release_regions(pdev); + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); i40evf_free_queues(adapter); kfree(adapter->vf_res); /* If we got removed before an up/down sequence, we've got a filter diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 3f0c85ecbca6..4240a496dc50 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -761,6 +761,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, break; case I40E_VIRTCHNL_OP_DISABLE_QUEUES: adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES); + i40evf_free_all_tx_resources(adapter); + i40evf_free_all_rx_resources(adapter); break; case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f366b3b96d03..8457d0306e3a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1776,6 +1776,7 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); /* disable transmits in the hardware */ @@ -1797,12 +1798,9 @@ void igb_down(struct igb_adapter *adapter) } } - del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netif_carrier_off(netdev); - /* record the stats before reset*/ spin_lock(&adapter->stats64_lock); igb_update_stats(adapter, &adapter->stats64); @@ -2095,6 +2093,7 @@ static const struct net_device_ops igb_netdev_ops = { #endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, + .ndo_features_check = passthru_features_check, }; /** diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index d20fc8ed11f1..e3b9b63ad010 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -30,7 +30,7 @@ * * Neither the 82576 nor the 82580 offer registers wide enough to hold * nanoseconds time values for very long. For the 82580, SYSTIM always - * counts nanoseconds, but the upper 24 bits are not availible. The + * counts nanoseconds, but the upper 24 bits are not available. The * frequency is adjusted by changing the 32 bit fractional nanoseconds * register, TIMINCA. 
* @@ -116,7 +116,8 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) } /* SYSTIM read access for I210/I211 */ -static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) +static void igb_ptp_read_i210(struct igb_adapter *adapter, + struct timespec64 *ts) { struct e1000_hw *hw = &adapter->hw; u32 sec, nsec; @@ -134,7 +135,7 @@ static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) } static void igb_ptp_write_i210(struct igb_adapter *adapter, - const struct timespec *ts) + const struct timespec64 *ts) { struct e1000_hw *hw = &adapter->hw; @@ -269,13 +270,13 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); unsigned long flags; - struct timespec now, then = ns_to_timespec(delta); + struct timespec64 now, then = ns_to_timespec64(delta); spin_lock_irqsave(&igb->tmreg_lock, flags); igb_ptp_read_i210(igb, &now); - now = timespec_add(now, then); - igb_ptp_write_i210(igb, (const struct timespec *)&now); + now = timespec64_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec64 *)&now); spin_unlock_irqrestore(&igb->tmreg_lock, flags); @@ -283,13 +284,12 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) } static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, - struct timespec *ts) + struct timespec64 *ts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); unsigned long flags; u64 ns; - u32 remainder; spin_lock_irqsave(&igb->tmreg_lock, flags); @@ -297,14 +297,13 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, spin_unlock_irqrestore(&igb->tmreg_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, - struct timespec *ts) + struct timespec64 *ts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); @@ -320,15 +319,14 @@ static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, } static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); unsigned long flags; u64 ns; - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); spin_lock_irqsave(&igb->tmreg_lock, flags); @@ -340,7 +338,7 @@ static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, } static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); @@ -358,7 +356,7 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) { u32 *ptr = pin < 2 ? 
ctrl : ctrl_ext; - u32 mask[IGB_N_SDP] = { + static const u32 mask[IGB_N_SDP] = { E1000_CTRL_SDP0_DIR, E1000_CTRL_SDP1_DIR, E1000_CTRL_EXT_SDP2_DIR, @@ -373,16 +371,16 @@ static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext) static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) { - struct e1000_hw *hw = &igb->hw; - u32 aux0_sel_sdp[IGB_N_SDP] = { + static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, }; - u32 aux1_sel_sdp[IGB_N_SDP] = { + static const u32 aux1_sel_sdp[IGB_N_SDP] = { AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, }; - u32 ts_sdp_en[IGB_N_SDP] = { + static const u32 ts_sdp_en[IGB_N_SDP] = { TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, }; + struct e1000_hw *hw = &igb->hw; u32 ctrl, ctrl_ext, tssdp = 0; ctrl = rd32(E1000_CTRL); @@ -409,28 +407,28 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) { - struct e1000_hw *hw = &igb->hw; - u32 aux0_sel_sdp[IGB_N_SDP] = { + static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, }; - u32 aux1_sel_sdp[IGB_N_SDP] = { + static const u32 aux1_sel_sdp[IGB_N_SDP] = { AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3, }; - u32 ts_sdp_en[IGB_N_SDP] = { + static const u32 ts_sdp_en[IGB_N_SDP] = { TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN, }; - u32 ts_sdp_sel_tt0[IGB_N_SDP] = { + static const u32 ts_sdp_sel_tt0[IGB_N_SDP] = { TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0, TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0, }; - u32 ts_sdp_sel_tt1[IGB_N_SDP] = { + static const u32 ts_sdp_sel_tt1[IGB_N_SDP] = { TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, }; - u32 ts_sdp_sel_clr[IGB_N_SDP] = { + static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, }; + struct e1000_hw *hw = &igb->hw; u32 ctrl, ctrl_ext, tssdp = 0; ctrl = rd32(E1000_CTRL); @@ -468,7 +466,7 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh; unsigned long flags; struct timespec ts; - int pin; + int pin = -1; s64 ns; switch (rq->type) { @@ -627,11 +625,12 @@ static void igb_ptp_overflow_check(struct work_struct *work) { struct igb_adapter *igb = container_of(work, struct igb_adapter, ptp_overflow_work.work); - struct timespec ts; + struct timespec64 ts; - igb->ptp_caps.gettime(&igb->ptp_caps, &ts); + igb->ptp_caps.gettime64(&igb->ptp_caps, &ts); - pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + pr_debug("igb overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); schedule_delayed_work(&igb->ptp_overflow_work, IGB_SYSTIM_OVERFLOW_PERIOD); @@ -989,8 +988,8 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; - adapter->ptp_caps.gettime = igb_ptp_gettime_82576; - adapter->ptp_caps.settime = igb_ptp_settime_82576; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; adapter->ptp_caps.enable = igb_ptp_feature_enable; adapter->cc.read = igb_ptp_read_82576; adapter->cc.mask = CYCLECOUNTER_MASK(64); @@ -1009,8 +1008,8 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; adapter->ptp_caps.adjtime = 
igb_ptp_adjtime_82576; - adapter->ptp_caps.gettime = igb_ptp_gettime_82576; - adapter->ptp_caps.settime = igb_ptp_settime_82576; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.settime64 = igb_ptp_settime_82576; adapter->ptp_caps.enable = igb_ptp_feature_enable; adapter->cc.read = igb_ptp_read_82580; adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); @@ -1038,8 +1037,8 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pin_config = adapter->sdp_config; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; - adapter->ptp_caps.gettime = igb_ptp_gettime_i210; - adapter->ptp_caps.settime = igb_ptp_settime_i210; + adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210; + adapter->ptp_caps.settime64 = igb_ptp_settime_i210; adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; adapter->ptp_caps.verify = igb_ptp_verify_pin; /* Enable the timer functions by clearing bit 31. */ @@ -1057,7 +1056,7 @@ void igb_ptp_init(struct igb_adapter *adapter) /* Initialize the clock and overflow work for devices that need it. */ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { - struct timespec ts = ktime_to_timespec(ktime_get_real()); + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); igb_ptp_settime_i210(&adapter->ptp_caps, &ts); } else { @@ -1171,7 +1170,7 @@ void igb_ptp_reset(struct igb_adapter *adapter) /* Re-initialize the timer. */ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { - struct timespec ts = ktime_to_timespec(ktime_get_real()); + struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); igb_ptp_write_i210(adapter, &ts); } else { diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index d9fa999b1685..ae3f28332fa0 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
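The igb_ptp.c changes above migrate the PTP callbacks from timespec to the y2038-safe timespec64, replacing open-coded second/nanosecond splits with ns_to_timespec64() and timespec64_add(). A minimal sketch of the adjtime flow those helpers enable (read_hw_clock()/write_hw_clock() are hypothetical stand-ins for the i210 accessors):

/* Shift a hardware clock by delta nanoseconds using timespec64
 * arithmetic, mirroring igb_ptp_adjtime_i210() above. Locking around
 * the read-modify-write is omitted for brevity.
 */
static void ptp_adjtime_sketch(struct igb_adapter *igb, s64 delta)
{
	struct timespec64 now, then = ns_to_timespec64(delta);

	read_hw_clock(igb, &now);
	now = timespec64_add(now, then);
	write_hw_clock(igb, &now);
}

On 32-bit builds timespec's tv_sec overflows in 2038; timespec64 keeps the seconds field 64-bit, which is why the ptp_clock_info ops move to gettime64/settime64 here.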
@@ -29,94 +28,93 @@ #define _E1000_DEFINES_H_ /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 /* IVAR valid bit */ -#define E1000_IVAR_VALID 0x80 +#define E1000_IVAR_VALID 0x80 /* Receive Descriptor bit definitions */ -#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ -#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ -#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ -#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ -#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ - -#define E1000_RXDEXT_STATERR_LB 0x00040000 -#define E1000_RXDEXT_STATERR_CE 0x01000000 -#define E1000_RXDEXT_STATERR_SE 0x02000000 -#define E1000_RXDEXT_STATERR_SEQ 0x04000000 -#define E1000_RXDEXT_STATERR_CXE 0x10000000 -#define E1000_RXDEXT_STATERR_TCPE 0x20000000 -#define E1000_RXDEXT_STATERR_IPE 0x40000000 -#define E1000_RXDEXT_STATERR_RXE 0x80000000 - +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ - E1000_RXDEXT_STATERR_CE | \ - E1000_RXDEXT_STATERR_SE | \ - E1000_RXDEXT_STATERR_SEQ | \ - E1000_RXDEXT_STATERR_CXE | \ - E1000_RXDEXT_STATERR_RXE) + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) /* Device Control */ -#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ /* Device Status */ -#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ -#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ -#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ -#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ -#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ -#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ - -#define SPEED_10 10 -#define SPEED_100 100 -#define SPEED_1000 1000 -#define HALF_DUPLEX 1 -#define FULL_DUPLEX 2 +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define 
E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 /* Transmit Descriptor bit definitions */ -#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Desc Done */ -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 /* 802.1q VLAN Packet Size */ -#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ /* Error Codes */ -#define E1000_SUCCESS 0 -#define E1000_ERR_CONFIG 3 -#define E1000_ERR_MAC_INIT 5 -#define E1000_ERR_MBX 15 +#define E1000_SUCCESS 0 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_MBX 15 /* SRRCTL bit definitions */ -#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 -#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 -#define E1000_SRRCTL_DROP_EN 0x80000000 +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 -#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 /* Additional Descriptor Control definitions */ -#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Que */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ /* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ #endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 2178f87e9f61..c6996feb1cb4 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
@@ -36,7 +35,6 @@ #include "igbvf.h" #include <linux/if_vlan.h> - struct igbvf_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -74,7 +72,7 @@ static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { #define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) static int igbvf_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -111,18 +109,18 @@ static int igbvf_get_settings(struct net_device *netdev, } static int igbvf_set_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) + struct ethtool_cmd *ecmd) { return -EOPNOTSUPP; } static void igbvf_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { } static int igbvf_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { return -EOPNOTSUPP; } @@ -130,12 +128,14 @@ static int igbvf_set_pauseparam(struct net_device *netdev, static u32 igbvf_get_msglevel(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void igbvf_set_msglevel(struct net_device *netdev, u32 data) { struct igbvf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -146,7 +146,7 @@ static int igbvf_get_regs_len(struct net_device *netdev) } static void igbvf_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) + struct ethtool_regs *regs, void *p) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -175,19 +175,19 @@ static int igbvf_get_eeprom_len(struct net_device *netdev) } static int igbvf_get_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static int igbvf_set_eeprom(struct net_device *netdev, - struct ethtool_eeprom *eeprom, u8 *bytes) + struct ethtool_eeprom *eeprom, u8 *bytes) { return -EOPNOTSUPP; } static void igbvf_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) + struct ethtool_drvinfo *drvinfo) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -201,7 +201,7 @@ static void igbvf_get_drvinfo(struct net_device *netdev, } static void igbvf_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -214,7 +214,7 @@ static void igbvf_get_ringparam(struct net_device *netdev, } static int igbvf_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) + struct ethtool_ringparam *ring) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *temp_ring; @@ -224,12 +224,12 @@ static int igbvf_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); + new_rx_count = max_t(u32, ring->rx_pending, IGBVF_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IGBVF_MAX_RXD); new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); - new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD); + new_tx_count = max_t(u32, ring->tx_pending, IGBVF_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, IGBVF_MAX_TXD); 
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring->count) && @@ -239,7 +239,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (!netif_running(adapter->netdev)) { adapter->tx_ring->count = new_tx_count; @@ -255,10 +255,9 @@ static int igbvf_set_ringparam(struct net_device *netdev, igbvf_down(adapter); - /* - * We can't just free everything and then setup again, + /* We can't just free everything and then setup again, * because the ISRs in MSI-X mode get passed pointers - * to the tx and rx ring structs. + * to the Tx and Rx ring structs. */ if (new_tx_count != adapter->tx_ring->count) { memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); @@ -283,7 +282,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, igbvf_free_rx_resources(adapter->rx_ring); - memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); + memcpy(adapter->rx_ring, temp_ring, sizeof(struct igbvf_ring)); } err_setup: igbvf_up(adapter); @@ -307,14 +306,13 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) } static void igbvf_diag_test(struct net_device *netdev, - struct ethtool_test *eth_test, u64 *data) + struct ethtool_test *eth_test, u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); set_bit(__IGBVF_TESTING, &adapter->state); - /* - * Link test performed before hardware reset so autoneg doesn't + /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ if (igbvf_link_test(adapter, &data[0])) @@ -325,20 +323,20 @@ static void igbvf_diag_test(struct net_device *netdev, } static void igbvf_get_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; } static int igbvf_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) + struct ethtool_wolinfo *wol) { return -EOPNOTSUPP; } static int igbvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -351,13 +349,13 @@ static int igbvf_get_coalesce(struct net_device *netdev, } static int igbvf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && - (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { + (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { adapter->current_itr = ec->rx_coalesce_usecs << 2; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); @@ -366,8 +364,7 @@ static int igbvf_set_coalesce(struct net_device *netdev, adapter->current_itr = IGBVF_START_ITR; adapter->requested_itr = ec->rx_coalesce_usecs; } else if (ec->rx_coalesce_usecs == 0) { - /* - * The user's desire is to turn off interrupt throttling + /* The user's desire is to turn off interrupt throttling * altogether, but due to HW limitations, we can't do that. 
* Instead we set a very small value in EITR, which would * allow ~967k interrupts per second, but allow the adapter's @@ -376,8 +373,9 @@ static int igbvf_set_coalesce(struct net_device *netdev, adapter->current_itr = 4; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); - } else + } else { return -EINVAL; + } writel(adapter->current_itr, hw->hw_addr + adapter->rx_ring->itr_register); @@ -388,15 +386,15 @@ static int igbvf_set_coalesce(struct net_device *netdev, static int igbvf_nway_reset(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) igbvf_reinit_locked(adapter); return 0; } - static void igbvf_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, - u64 *data) + struct ethtool_stats *stats, + u64 *data) { struct igbvf_adapter *adapter = netdev_priv(netdev); int i; @@ -404,19 +402,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev, igbvf_update_stats(adapter); for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { char *p = (char *)adapter + - igbvf_gstrings_stats[i].stat_offset; + igbvf_gstrings_stats[i].stat_offset; char *b = (char *)adapter + - igbvf_gstrings_stats[i].base_stat_offset; + igbvf_gstrings_stats[i].base_stat_offset; data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : - (*(u32 *)p - *(u32 *)b)); + sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : + (*(u32 *)p - *(u32 *)b)); } - } static int igbvf_get_sset_count(struct net_device *dev, int stringset) { - switch(stringset) { + switch (stringset) { case ETH_SS_TEST: return IGBVF_TEST_LEN; case ETH_SS_STATS: @@ -427,7 +424,7 @@ static int igbvf_get_sset_count(struct net_device *dev, int stringset) } static void igbvf_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) + u8 *data) { u8 *p = data; int i; diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index 7d6a25c8f889..f166baab8d7e 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
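The coalescing math in igbvf_set_coalesce() above works in EITR units of 256 ns: shifting rx_coalesce_usecs left by two approximates the microseconds-to-register conversion (4 x 256 ns ~= 1 us), and the resulting interrupt ceiling is 10^9 / (itr * 256). A stand-alone sketch of that arithmetic (helper names are hypothetical):

/* EITR counts in 256 ns units; usecs << 2 approximates the us -> EITR
 * conversion used by igbvf_set_coalesce() above.
 */
static u32 usecs_to_eitr(u32 usecs)
{
	return usecs << 2;
}

static u32 eitr_to_ints_per_sec(u32 eitr)
{
	return 1000000000U / (eitr * 256U);
}

/* Example: 10 us (IGBVF_MIN_ITR_USECS) -> eitr = 40 ->
 * 1000000000 / 10240 = 97656 interrupts/sec, close to the nominal
 * 100000 irq/sec quoted in igbvf.h.
 */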
@@ -43,10 +42,10 @@ struct igbvf_info; struct igbvf_adapter; /* Interrupt defines */ -#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ -#define IGBVF_4K_ITR 980 -#define IGBVF_20K_ITR 196 -#define IGBVF_70K_ITR 56 +#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ +#define IGBVF_4K_ITR 980 +#define IGBVF_20K_ITR 196 +#define IGBVF_70K_ITR 56 enum latency_range { lowest_latency = 0, @@ -55,56 +54,55 @@ enum latency_range { latency_invalid = 255 }; - /* Interrupt modes, as used by the IntMode parameter */ -#define IGBVF_INT_MODE_LEGACY 0 -#define IGBVF_INT_MODE_MSI 1 -#define IGBVF_INT_MODE_MSIX 2 +#define IGBVF_INT_MODE_LEGACY 0 +#define IGBVF_INT_MODE_MSI 1 +#define IGBVF_INT_MODE_MSIX 2 /* Tx/Rx descriptor defines */ -#define IGBVF_DEFAULT_TXD 256 -#define IGBVF_MAX_TXD 4096 -#define IGBVF_MIN_TXD 80 +#define IGBVF_DEFAULT_TXD 256 +#define IGBVF_MAX_TXD 4096 +#define IGBVF_MIN_TXD 80 -#define IGBVF_DEFAULT_RXD 256 -#define IGBVF_MAX_RXD 4096 -#define IGBVF_MIN_RXD 80 +#define IGBVF_DEFAULT_RXD 256 +#define IGBVF_MAX_RXD 4096 +#define IGBVF_MIN_RXD 80 -#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ -#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ +#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */ /* RX descriptor control thresholds. * PTHRESH - MAC will consider prefetch if it has fewer than this number of - * descriptors available in its onboard memory. - * Setting this to 0 disables RX descriptor prefetch. + * descriptors available in its onboard memory. + * Setting this to 0 disables RX descriptor prefetch. * HTHRESH - MAC will only prefetch if there are at least this many descriptors - * available in host memory. - * If PTHRESH is 0, this should also be 0. + * available in host memory. + * If PTHRESH is 0, this should also be 0. * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back - * descriptors until either it has this many to write back, or the - * ITR timer expires. + * descriptors until either it has this many to write back, or the + * ITR timer expires. */ -#define IGBVF_RX_PTHRESH 16 -#define IGBVF_RX_HTHRESH 8 -#define IGBVF_RX_WTHRESH 1 +#define IGBVF_RX_PTHRESH 16 +#define IGBVF_RX_HTHRESH 8 +#define IGBVF_RX_WTHRESH 1 /* this is the size past which hardware will drop packets when setting LPE=0 */ -#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 -#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ +#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */ /* How many Tx Descriptors do we need to call netif_wake_queue ? */ -#define IGBVF_TX_QUEUE_WAKE 32 +#define IGBVF_TX_QUEUE_WAKE 32 /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ -#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define AUTO_ALL_MODES 0 -#define IGBVF_EEPROM_APME 0x0400 +#define AUTO_ALL_MODES 0 +#define IGBVF_EEPROM_APME 0x0400 -#define IGBVF_MNG_VLAN_NONE (-1) +#define IGBVF_MNG_VLAN_NONE (-1) /* Number of packet split data buffers (not including the header buffer) */ -#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) enum igbvf_boards { board_vf, @@ -116,8 +114,7 @@ struct igbvf_queue_stats { u64 bytes; }; -/* - * wrappers around a pointer to a socket buffer, +/* wrappers around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ struct igbvf_buffer { @@ -148,10 +145,10 @@ union igbvf_desc { struct igbvf_ring { struct igbvf_adapter *adapter; /* backlink */ - union igbvf_desc *desc; /* pointer to ring memory */ - dma_addr_t dma; /* phys address of ring */ - unsigned int size; /* length of ring in bytes */ - unsigned int count; /* number of desc. in ring */ + union igbvf_desc *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. in ring */ u16 next_to_use; u16 next_to_clean; @@ -202,9 +199,7 @@ struct igbvf_adapter { u32 requested_itr; /* ints/sec or adaptive */ u32 current_itr; /* Actual ITR register value, not ints/sec */ - /* - * Tx - */ + /* Tx */ struct igbvf_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; @@ -226,9 +221,7 @@ struct igbvf_adapter { u32 tx_fifo_size; u32 tx_dma_failed; - /* - * Rx - */ + /* Rx */ struct igbvf_ring *rx_ring; u32 rx_int_delay; @@ -249,7 +242,7 @@ struct igbvf_adapter { struct net_device *netdev; struct pci_dev *pdev; struct net_device_stats net_stats; - spinlock_t stats_lock; /* prevent concurrent stats updates */ + spinlock_t stats_lock; /* prevent concurrent stats updates */ /* structs defined in e1000_hw.h */ struct e1000_hw hw; @@ -286,16 +279,16 @@ struct igbvf_adapter { }; struct igbvf_info { - enum e1000_mac_type mac; - unsigned int flags; - u32 pba; - void (*init_ops)(struct e1000_hw *); - s32 (*get_variants)(struct igbvf_adapter *); + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + void (*init_ops)(struct e1000_hw *); + s32 (*get_variants)(struct igbvf_adapter *); }; /* hardware capability, feature, and workaround flags */ -#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) -#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) +#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) +#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) #define IGBVF_RX_DESC_ADV(R, i) \ (&((((R).desc))[i].rx_desc)) #define IGBVF_TX_DESC_ADV(R, i) \ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index b4b65bc9fc5d..7b6cb4c3764c 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
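The igbvf_buffer comment above ("so a DMA handle can be stored along with the buffer") reflects a standard driver pattern: dma_map_single() returns a bus address that must be retained for the later unmap. A minimal sketch of that pairing (the struct and helper names are illustrative; the DMA API calls are the real ones):

/* Keep the DMA handle next to the skb so the buffer can be unmapped
 * when its descriptor completes.
 */
struct rx_buffer_sketch {
	struct sk_buff *skb;
	dma_addr_t dma;
};

static int map_rx_buffer(struct device *dev, struct rx_buffer_sketch *bi,
			 unsigned int len)
{
	bi->dma = dma_map_single(dev, bi->skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, bi->dma))
		return -ENOMEM;
	return 0;
}

The completion path then hands the stored bi->dma back to dma_unmap_single() with the same length and direction.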
@@ -54,10 +53,10 @@ out: } /** - * e1000_poll_for_ack - Wait for message acknowledgement + * e1000_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * - * returns SUCCESS if it successfully received a message acknowledgement + * returns SUCCESS if it successfully received a message acknowledgment **/ static s32 e1000_poll_for_ack(struct e1000_hw *hw) { @@ -218,7 +217,7 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) s32 ret_val = -E1000_ERR_MBX; if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | - E1000_V2PMAILBOX_RSTI))) { + E1000_V2PMAILBOX_RSTI))) { ret_val = E1000_SUCCESS; hw->mbx.stats.rsts++; } @@ -239,7 +238,7 @@ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) /* Take ownership of the buffer */ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - /* reserve mailbox for vf use */ + /* reserve mailbox for VF use */ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) ret_val = E1000_SUCCESS; @@ -283,7 +282,7 @@ out_no_write: } /** - * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * e1000_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -315,17 +314,18 @@ out_no_read: } /** - * e1000_init_mbx_params_vf - set initial values for vf mailbox + * e1000_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * - * Initializes the hw->mbx struct to correct values for vf mailbox + * Initializes the hw->mbx struct to correct values for VF mailbox */ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout - * value to begin communications */ + * value to begin communications + */ mbx->timeout = 0; mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; @@ -347,4 +347,3 @@ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) return E1000_SUCCESS; } - diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 24370bcb0e22..f800bf8eedae 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
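The e1000_obtain_mbx_lock_vf hunk above is the heart of the VF/PF mailbox arbitration: the VF writes the VFU bit to claim the shared buffer, then reads the register back, because the write only sticks when the PF side (PFU) is clear. Condensed into a standalone sketch (error handling and stats trimmed; an illustration, not a replacement for the driver code):

/* Claim-and-verify pattern for the shared VF<->PF mailbox buffer */
static s32 vf_try_lock_mailbox(struct e1000_hw *hw)
{
	/* ask for ownership by setting VF Usage */
	ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);

	/* read back: VFU is only granted if the PF does not hold PFU */
	if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
		return E1000_SUCCESS;

	return -E1000_ERR_MBX;	/* PF currently owns the buffer */
}
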
@@ -30,44 +29,44 @@ #include "vf.h" -#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is E1000_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - clear to send requests */ +/* Messages below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 /* We have a total wait time of 1s for vf mailbox posted messages */ -#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */ -#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mbx timeout */ +#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */ -#define E1000_VT_MSGINFO_SHIFT 16 +#define E1000_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for extra info for certain messages */ -#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) -#define E1000_VF_RESET 0x01 /* VF requests reset */ -#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define E1000_PF_CONTROL_MSG 
0x0100 /* PF control message */ void e1000_init_mbx_ops_generic(struct e1000_hw *hw); s32 e1000_init_mbx_params_vf(struct e1000_hw *); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index ebf9d4a42fdd..95af14e139d7 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -66,26 +65,27 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *); static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); static struct igbvf_info igbvf_vf_info = { - .mac = e1000_vfadapt, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, + .mac = e1000_vfadapt, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, }; static struct igbvf_info igbvf_i350_vf_info = { - .mac = e1000_vfadapt_i350, - .flags = 0, - .pba = 10, - .init_ops = e1000_init_function_pointers_vf, + .mac = e1000_vfadapt_i350, + .flags = 0, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, }; static const struct igbvf_info *igbvf_info_tbl[] = { - [board_vf] = &igbvf_vf_info, - [board_i350_vf] = &igbvf_i350_vf_info, + [board_vf] = &igbvf_vf_info, + [board_i350_vf] = &igbvf_i350_vf_info, }; /** * igbvf_desc_unused - calculate if we have unused descriptors + * @rx_ring: address of receive ring structure **/ static int igbvf_desc_unused(struct igbvf_ring *ring) { @@ -103,9 +103,9 @@ static int igbvf_desc_unused(struct igbvf_ring *ring) * @skb: pointer to sk_buff to be indicated to stack **/ static void igbvf_receive_skb(struct igbvf_adapter *adapter, - struct net_device *netdev, - struct sk_buff *skb, - u32 status, u16 vlan) + struct net_device *netdev, + struct sk_buff *skb, + u32 status, u16 vlan) { u16 vid; @@ -123,7 +123,7 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter, } static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, - u32 status_err, struct sk_buff *skb) + u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); @@ -153,7 +153,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, * @cleaned_count: number of buffers to repopulate **/ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, - int cleaned_count) + int cleaned_count) { struct igbvf_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; @@ -188,8 +188,8 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } buffer_info->page_dma = dma_map_page(&pdev->dev, buffer_info->page, - buffer_info->page_offset, - PAGE_SIZE / 2, + buffer_info->page_offset, + PAGE_SIZE / 2, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->page_dma)) { @@ -209,7 +209,7 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, buffer_info->skb = skb; buffer_info->dma = dma_map_single(&pdev->dev, skb->data, - bufsz, + bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { dev_kfree_skb(buffer_info->skb); @@ -219,14 +219,14 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, } } /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases 
this info. */ + * each write-back erases this info. + */ if (adapter->rx_ps_hdr_size) { rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); } else { - rx_desc->read.pkt_addr = - cpu_to_le64(buffer_info->dma); + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); rx_desc->read.hdr_addr = 0; } @@ -247,7 +247,8 @@ no_buffers: /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). + */ wmb(); writel(i, adapter->hw.hw_addr + rx_ring->tail); } @@ -261,7 +262,7 @@ no_buffers: * is no guarantee that everything was cleaned **/ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, - int *work_done, int work_to_do) + int *work_done, int work_to_do) { struct igbvf_ring *rx_ring = adapter->rx_ring; struct net_device *netdev = adapter->netdev; @@ -292,8 +293,9 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, * that case, it fills the header buffer and spills the rest * into the page. */ - hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & - E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) + & E1000_RXDADV_HDRBUFLEN_MASK) >> + E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > adapter->rx_ps_hdr_size) hlen = adapter->rx_ps_hdr_size; @@ -306,7 +308,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, buffer_info->skb = NULL; if (!adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; skb_put(skb, length); @@ -315,21 +317,21 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, if (!skb_shinfo(skb)->nr_frags) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, + adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); skb_put(skb, hlen); } if (length) { dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE / 2, DMA_FROM_DEVICE); buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - buffer_info->page, - buffer_info->page_offset, - length); + buffer_info->page, + buffer_info->page_offset, + length); if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || (page_count(buffer_info->page) != 1)) @@ -370,7 +372,7 @@ send_up: skb->protocol = eth_type_trans(skb, netdev); igbvf_receive_skb(adapter, netdev, skb, staterr, - rx_desc->wb.upper.vlan); + rx_desc->wb.upper.vlan); next_desc: rx_desc->wb.upper.status_error = 0; @@ -402,7 +404,7 @@ next_desc: } static void igbvf_put_txbuf(struct igbvf_adapter *adapter, - struct igbvf_buffer *buffer_info) + struct igbvf_buffer *buffer_info) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) @@ -431,7 +433,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter, * Return 0 on success, negative on failure **/ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring) + struct igbvf_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; @@ -458,7 +460,7 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, err: vfree(tx_ring->buffer_info); dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the transmit descriptor ring\n"); + "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } @@ -501,7 +503,7 @@ err: vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; 
dev_err(&adapter->pdev->dev, - "Unable to allocate memory for the receive descriptor ring\n"); + "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } @@ -578,13 +580,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if (buffer_info->dma) { - if (adapter->rx_ps_hdr_size){ + if (adapter->rx_ps_hdr_size) { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_ps_hdr_size, + adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); } else { dma_unmap_single(&pdev->dev, buffer_info->dma, - adapter->rx_buffer_len, + adapter->rx_buffer_len, DMA_FROM_DEVICE); } buffer_info->dma = 0; @@ -599,7 +601,7 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) if (buffer_info->page_dma) dma_unmap_page(&pdev->dev, buffer_info->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE / 2, DMA_FROM_DEVICE); put_page(buffer_info->page); buffer_info->page = NULL; @@ -638,7 +640,7 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) rx_ring->buffer_info = NULL; dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); + rx_ring->dma); rx_ring->desc = NULL; } @@ -649,13 +651,12 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) * @packets: the number of packets during this measurement interval * @bytes: the number of bytes during this measurement interval * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. + * Stores a new ITR value based on packets and byte counts during the last + * interrupt. The advantage of per interrupt computation is faster updates + * and more accurate ITR for the current traffic pattern. Constants in this + * function were computed based on theoretical maximum wire speed and thresholds + * were set based on testing data as well as attempting to minimize response + * time while increasing bulk throughput. **/ static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, enum latency_range itr_setting, @@ -744,17 +745,15 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); - if (new_itr != adapter->tx_ring->itr_val) { u32 current_itr = adapter->tx_ring->itr_val; - /* - * this attempts to bias the interrupt rate towards Bulk + /* this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps when interrupt rate is * increasing */ new_itr = new_itr > current_itr ? - min(current_itr + (new_itr >> 2), new_itr) : - new_itr; + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; adapter->tx_ring->itr_val = new_itr; adapter->tx_ring->set_itr = 1; @@ -772,9 +771,10 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) if (new_itr != adapter->rx_ring->itr_val) { u32 current_itr = adapter->rx_ring->itr_val; + new_itr = new_itr > current_itr ? 
- min(current_itr + (new_itr >> 2), new_itr) : - new_itr; + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; adapter->rx_ring->itr_val = new_itr; adapter->rx_ring->set_itr = 1; @@ -829,7 +829,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) segs = skb_shinfo(skb)->gso_segs ?: 1; /* multiply data chunks by size of headers */ bytecount = ((segs - 1) * skb_headlen(skb)) + - skb->len; + skb->len; total_packets += segs; total_bytes += bytecount; } @@ -849,9 +849,8 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) tx_ring->next_to_clean = i; - if (unlikely(count && - netif_carrier_ok(netdev) && - igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { + if (unlikely(count && netif_carrier_ok(netdev) && + igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -902,8 +901,9 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; - /* auto mask will automatically reenable the interrupt when we write - * EICS */ + /* auto mask will automatically re-enable the interrupt when we write + * EICS + */ if (!igbvf_clean_tx_irq(tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ ew32(EICS, tx_ring->eims_value); @@ -941,15 +941,16 @@ static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) #define IGBVF_NO_QUEUE -1 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, - int tx_queue, int msix_vector) + int tx_queue, int msix_vector) { struct e1000_hw *hw = &adapter->hw; u32 ivar, index; /* 82576 uses a table-based method for assigning vectors. - Each queue has a single entry in the table to which we write - a vector number along with a "valid" bit. Sadly, the layout - of the table is somewhat counterintuitive. */ + * Each queue has a single entry in the table to which we write + * a vector number along with a "valid" bit. Sadly, the layout + * of the table is somewhat counterintuitive. + */ if (rx_queue > IGBVF_NO_QUEUE) { index = (rx_queue >> 1); ivar = array_er32(IVAR0, index); @@ -984,6 +985,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, /** * igbvf_configure_msix - Configure MSI-X hardware + * @adapter: board private structure * * igbvf_configure_msix sets up the hardware to properly * generate MSI-X interrupts. @@ -1027,6 +1029,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) /** * igbvf_set_interrupt_capability - set MSI or MSI-X if supported + * @adapter: board private structure * * Attempt to configure interrupts using the best available * capabilities of the hardware and kernel. 
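The new_itr arithmetic reindented in the hunks above is easy to misread: when the computed value is larger than the current one (i.e. the driver wants fewer interrupts, the bulk end of the range), it only steps a quarter of the way there per pass via min(current_itr + (new_itr >> 2), new_itr), while decreases are applied immediately. A small worked sketch of the ramp, with values taken from the ITR defines:

#include <linux/kernel.h>	/* for min() */

/* Same stepping as the driver's set_itr path. Ramping from 196
 * (~20000 ints/sec) toward 980 (~4000 ints/sec) goes
 * 196 -> 441 -> 686 -> 931 -> 980; dropping back is one step.
 */
static u32 itr_step(u32 current_itr, u32 new_itr)
{
	if (new_itr > current_itr)
		return min(current_itr + (new_itr >> 2), new_itr);
	return new_itr;
}
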
@@ -1036,27 +1039,28 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) int err = -ENOMEM; int i; - /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ + /* we allocate 3 vectors, 1 for Tx, 1 for Rx, one for PF messages */ adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), - GFP_KERNEL); + GFP_KERNEL); if (adapter->msix_entries) { for (i = 0; i < 3; i++) adapter->msix_entries[i].entry = i; err = pci_enable_msix_range(adapter->pdev, - adapter->msix_entries, 3, 3); + adapter->msix_entries, 3, 3); } if (err < 0) { /* MSI-X failed */ dev_err(&adapter->pdev->dev, - "Failed to initialize MSI-X interrupts.\n"); + "Failed to initialize MSI-X interrupts.\n"); igbvf_reset_interrupt_capability(adapter); } } /** * igbvf_request_msix - Initialize MSI-X interrupts + * @adapter: board private structure * * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the * kernel. @@ -1075,8 +1079,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) } err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_tx, 0, adapter->tx_ring->name, - netdev); + igbvf_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); if (err) goto out; @@ -1085,8 +1089,8 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) vector++; err = request_irq(adapter->msix_entries[vector].vector, - igbvf_intr_msix_rx, 0, adapter->rx_ring->name, - netdev); + igbvf_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); if (err) goto out; @@ -1095,7 +1099,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) vector++; err = request_irq(adapter->msix_entries[vector].vector, - igbvf_msix_other, 0, netdev->name, netdev); + igbvf_msix_other, 0, netdev->name, netdev); if (err) goto out; @@ -1130,6 +1134,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter) /** * igbvf_request_irq - initialize interrupts + * @adapter: board private structure * * Attempts to configure interrupts using the best available * capabilities of the hardware and kernel. @@ -1146,7 +1151,7 @@ static int igbvf_request_irq(struct igbvf_adapter *adapter) return err; dev_err(&adapter->pdev->dev, - "Unable to allocate interrupt, Error: %d\n", err); + "Unable to allocate interrupt, Error: %d\n", err); return err; } @@ -1164,6 +1169,7 @@ static void igbvf_free_irq(struct igbvf_adapter *adapter) /** * igbvf_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure **/ static void igbvf_irq_disable(struct igbvf_adapter *adapter) { @@ -1177,6 +1183,7 @@ static void igbvf_irq_disable(struct igbvf_adapter *adapter) /** * igbvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure **/ static void igbvf_irq_enable(struct igbvf_adapter *adapter) { @@ -1252,7 +1259,7 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, - "Failed to remove vlan id %d\n", vid); + "Failed to remove vlan id %d\n", vid); return -EINVAL; } clear_bit(vid, adapter->active_vlans); @@ -1298,7 +1305,7 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) /* Turn off Relaxed Ordering on head write-backs. The writebacks * MUST be delivered in order or it will completely screw up - * our bookeeping. + * our bookkeeping. 
*/ dca_txctrl = er32(DCA_TXCTRL(0)); dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; @@ -1325,15 +1332,15 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) u32 srrctl = 0; srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | - E1000_SRRCTL_BSIZEHDR_MASK | - E1000_SRRCTL_BSIZEPKT_MASK); + E1000_SRRCTL_BSIZEHDR_MASK | + E1000_SRRCTL_BSIZEPKT_MASK); /* Enable queue drop to avoid head of line blocking */ srrctl |= E1000_SRRCTL_DROP_EN; /* Setup buffer sizes */ srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> - E1000_SRRCTL_BSIZEPKT_SHIFT; + E1000_SRRCTL_BSIZEPKT_SHIFT; if (adapter->rx_buffer_len < 2048) { adapter->rx_ps_hdr_size = 0; @@ -1341,7 +1348,7 @@ static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) } else { adapter->rx_ps_hdr_size = 128; srrctl |= adapter->rx_ps_hdr_size << - E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } @@ -1369,8 +1376,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); - /* - * Setup the HW Rx Head and Tail Descriptor Pointers and + /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ rdba = rx_ring->dma; @@ -1441,10 +1447,11 @@ static void igbvf_configure(struct igbvf_adapter *adapter) igbvf_setup_srrctl(adapter); igbvf_configure_rx(adapter); igbvf_alloc_rx_buffers(adapter->rx_ring, - igbvf_desc_unused(adapter->rx_ring)); + igbvf_desc_unused(adapter->rx_ring)); } /* igbvf_reset - bring the hardware into a known good state + * @adapter: private board structure * * This function boots the hardware and enables some settings that * require a configuration cycle of the hardware - those cannot be @@ -1494,7 +1501,6 @@ int igbvf_up(struct igbvf_adapter *adapter) hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies + 1); - return 0; } @@ -1504,8 +1510,7 @@ void igbvf_down(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; u32 rxdctl, txdctl; - /* - * signal that we're down so the interrupt handler does not + /* signal that we're down so the interrupt handler does not * reschedule our watchdog timer */ set_bit(__IGBVF_DOWN, &adapter->state); @@ -1514,6 +1519,7 @@ void igbvf_down(struct igbvf_adapter *adapter) rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + netif_carrier_off(netdev); netif_stop_queue(netdev); /* disable transmits in the hardware */ @@ -1530,8 +1536,6 @@ void igbvf_down(struct igbvf_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); - netif_carrier_off(netdev); - /* record the stats before reset*/ igbvf_update_stats(adapter); @@ -1547,7 +1551,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter) { might_sleep(); while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); igbvf_down(adapter); igbvf_up(adapter); clear_bit(__IGBVF_RESETTING, &adapter->state); @@ -1662,8 +1666,7 @@ static int igbvf_open(struct net_device *netdev) if (err) goto err_setup_rx; - /* - * before we allocate an interrupt, we must be ready to handle it. + /* before we allocate an interrupt, we must be ready to handle it. * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt * as soon as we call pci_request_irq, so we have to setup our * clean_rx handler before we do so. 
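igbvf_setup_srrctl, reindented above, packs the whole Rx buffer policy into one register value: queue drop enabled, the packet buffer size programmed in 1 KB units through the BSIZEPKT shift, and, once buffers reach 2 KB, an always-split layout with a 128 byte header buffer. A compact restatement of that logic (a sketch of what the hunk shows; the descriptor-type bit for the small-buffer branch is elided in the diff and omitted here too):

static u32 igbvf_srrctl_value(struct igbvf_adapter *adapter)
{
	/* drop on ring overrun rather than head-of-line block others */
	u32 srrctl = E1000_SRRCTL_DROP_EN;

	/* packet buffer size is programmed in 1 KB granularity */
	srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
		  E1000_SRRCTL_BSIZEPKT_SHIFT;

	if (adapter->rx_buffer_len < 2048) {
		adapter->rx_ps_hdr_size = 0;	/* no header split */
	} else {
		adapter->rx_ps_hdr_size = 128;	/* 128 B header buffer */
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	}

	return srrctl;
}
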
@@ -1725,6 +1728,7 @@ static int igbvf_close(struct net_device *netdev) return 0; } + /** * igbvf_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -1753,15 +1757,15 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) return 0; } -#define UPDATE_VF_COUNTER(reg, name) \ - { \ - u32 current_counter = er32(reg); \ - if (current_counter < adapter->stats.last_##name) \ - adapter->stats.name += 0x100000000LL; \ - adapter->stats.last_##name = current_counter; \ - adapter->stats.name &= 0xFFFFFFFF00000000LL; \ - adapter->stats.name |= current_counter; \ - } +#define UPDATE_VF_COUNTER(reg, name) \ +{ \ + u32 current_counter = er32(reg); \ + if (current_counter < adapter->stats.last_##name) \ + adapter->stats.name += 0x100000000LL; \ + adapter->stats.last_##name = current_counter; \ + adapter->stats.name &= 0xFFFFFFFF00000000LL; \ + adapter->stats.name |= current_counter; \ +} /** * igbvf_update_stats - Update the board statistics counters @@ -1772,8 +1776,7 @@ void igbvf_update_stats(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - /* - * Prevent stats update while adapter is being reset, link is down + /* Prevent stats update while adapter is being reset, link is down * or if the pci connection is down. */ if (adapter->link_speed == 0) @@ -1832,7 +1835,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) **/ static void igbvf_watchdog(unsigned long data) { - struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; + struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -1841,8 +1844,8 @@ static void igbvf_watchdog(unsigned long data) static void igbvf_watchdog_task(struct work_struct *work) { struct igbvf_adapter *adapter = container_of(work, - struct igbvf_adapter, - watchdog_task); + struct igbvf_adapter, + watchdog_task); struct net_device *netdev = adapter->netdev; struct e1000_mac_info *mac = &adapter->hw.mac; struct igbvf_ring *tx_ring = adapter->tx_ring; @@ -1855,8 +1858,8 @@ static void igbvf_watchdog_task(struct work_struct *work) if (link) { if (!netif_carrier_ok(netdev)) { mac->ops.get_link_up_info(&adapter->hw, - &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_speed, + &adapter->link_duplex); igbvf_print_link_info(adapter); netif_carrier_on(netdev); @@ -1876,10 +1879,9 @@ static void igbvf_watchdog_task(struct work_struct *work) igbvf_update_stats(adapter); } else { tx_pending = (igbvf_desc_unused(tx_ring) + 1 < - tx_ring->count); + tx_ring->count); if (tx_pending) { - /* - * We've lost link, so the controller stops DMA, + /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. * (Do the reset outside of interrupt context). 
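The UPDATE_VF_COUNTER macro reformatted above implements the usual widening of a wrapping 32-bit hardware counter into a 64-bit statistic: a read that comes back smaller than the previous one means the counter rolled over, so a 2^32 carry is added before the low word is spliced back in. The same logic as a plain function, for readability (names are illustrative):

/* Extend a wrapping 32-bit HW counter into a monotonic 64-bit stat;
 * *last is the previous raw read, *stat the accumulated value.
 */
static void update_vf_counter(u32 current_counter, u32 *last, u64 *stat)
{
	if (current_counter < *last)		/* 32-bit rollover */
		*stat += 0x100000000ULL;
	*last = current_counter;
	*stat &= 0xFFFFFFFF00000000ULL;		/* keep the carried high word */
	*stat |= current_counter;		/* splice in the new low word */
}
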
@@ -1898,15 +1900,15 @@ static void igbvf_watchdog_task(struct work_struct *work) round_jiffies(jiffies + (2 * HZ))); } -#define IGBVF_TX_FLAGS_CSUM 0x00000001 -#define IGBVF_TX_FLAGS_VLAN 0x00000002 -#define IGBVF_TX_FLAGS_TSO 0x00000004 -#define IGBVF_TX_FLAGS_IPV4 0x00000008 -#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 -#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 +#define IGBVF_TX_FLAGS_CSUM 0x00000001 +#define IGBVF_TX_FLAGS_VLAN 0x00000002 +#define IGBVF_TX_FLAGS_TSO 0x00000004 +#define IGBVF_TX_FLAGS_IPV4 0x00000008 +#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 static int igbvf_tso(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, __be16 protocol) { @@ -1930,17 +1932,18 @@ static int igbvf_tso(struct igbvf_adapter *adapter, if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + iph->daddr, 0, + IPPROTO_TCP, + 0); } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); } i = tx_ring->next_to_use; @@ -1984,7 +1987,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter, } static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, __be16 protocol) { @@ -2005,8 +2008,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); if (skb->ip_summed == CHECKSUM_PARTIAL) info |= (skb_transport_header(skb) - - skb_network_header(skb)); - + skb_network_header(skb)); context_desc->vlan_macip_lens = cpu_to_le32(info); @@ -2055,6 +2057,10 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. 
+ */ smp_mb(); /* We need to check again just in case room has been made available */ @@ -2067,11 +2073,11 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) return 0; } -#define IGBVF_MAX_TXD_PWR 16 -#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) +#define IGBVF_MAX_TXD_PWR 16 +#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, struct sk_buff *skb) { struct igbvf_buffer *buffer_info; @@ -2093,7 +2099,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { const struct skb_frag_struct *frag; @@ -2111,7 +2116,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, buffer_info->time_stamp = jiffies; buffer_info->mapped_as_page = true; buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, - DMA_TO_DEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) goto dma_error; } @@ -2133,7 +2138,7 @@ dma_error: /* clear timestamp and dma mappings for remaining portion of packet */ while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; @@ -2144,10 +2149,10 @@ dma_error: } static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, + struct igbvf_ring *tx_ring, int tx_flags, int count, unsigned int first, u32 paylen, - u8 hdr_len) + u8 hdr_len) { union e1000_adv_tx_desc *tx_desc = NULL; struct igbvf_buffer *buffer_info; @@ -2155,7 +2160,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, unsigned int i; cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | - E1000_ADVTXD_DCMD_DEXT); + E1000_ADVTXD_DCMD_DEXT); if (tx_flags & IGBVF_TX_FLAGS_VLAN) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; @@ -2182,7 +2187,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | buffer_info->length); + cpu_to_le32(cmd_type_len | buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); i++; if (i == tx_ring->count) @@ -2193,14 +2198,16 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, - * such as IA-64). */ + * such as IA-64). 
+ */ wmb(); tx_ring->buffer_info[first].next_to_watch = tx_desc; tx_ring->next_to_use = i; writel(i, adapter->hw.hw_addr + tx_ring->tail); /* we need this if more than one processor can write to our tail - * at a time, it syncronizes IO on IA64/Altix systems */ + * at a time, it synchronizes IO on IA64/Altix systems + */ mmiowb(); } @@ -2225,11 +2232,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, return NETDEV_TX_OK; } - /* - * need: count + 4 desc gap to keep tail from touching - * + 2 desc gap to keep tail from touching head, - * + 1 desc for skb->data, - * + 1 desc for context descriptor, + /* need: count + 4 desc gap to keep tail from touching + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor, * head, otherwise try next time */ if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { @@ -2258,11 +2264,10 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && - (skb->ip_summed == CHECKSUM_PARTIAL)) + (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; - /* - * count reflects descriptors mapped, if 0 then mapping error + /* count reflects descriptors mapped, if 0 then mapping error * has occurred and we need to rewind the descriptor queue */ count = igbvf_tx_map_adv(adapter, tx_ring, skb); @@ -2313,6 +2318,7 @@ static void igbvf_tx_timeout(struct net_device *netdev) static void igbvf_reset_task(struct work_struct *work) { struct igbvf_adapter *adapter; + adapter = container_of(work, struct igbvf_adapter, reset_task); igbvf_reinit_locked(adapter); @@ -2356,14 +2362,13 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) } while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); /* igbvf_down has a dependency on max_frame_size */ adapter->max_frame_size = max_frame; if (netif_running(netdev)) igbvf_down(adapter); - /* - * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN * means we reserve 2 more, this pushes us to allocate from the next * larger slab size. * i.e. RXBUFFER_2048 --> size-4096 slab @@ -2382,15 +2387,14 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = PAGE_SIZE / 2; #endif - /* adjust allocation if LPE protects us, and we aren't using SBP */ if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || - (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + - ETH_FCS_LEN; + ETH_FCS_LEN; dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", - netdev->mtu, new_mtu); + netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) @@ -2477,8 +2481,7 @@ static void igbvf_shutdown(struct pci_dev *pdev) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs +/* Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ @@ -2503,7 +2506,7 @@ static void igbvf_netpoll(struct net_device *netdev) * this device has been detected. 
*/ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2583,7 +2586,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter) } static int igbvf_set_features(struct net_device *netdev, - netdev_features_t features) + netdev_features_t features) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -2596,21 +2599,21 @@ static int igbvf_set_features(struct net_device *netdev, } static const struct net_device_ops igbvf_netdev_ops = { - .ndo_open = igbvf_open, - .ndo_stop = igbvf_close, - .ndo_start_xmit = igbvf_xmit_frame, - .ndo_get_stats = igbvf_get_stats, - .ndo_set_rx_mode = igbvf_set_multi, - .ndo_set_mac_address = igbvf_set_mac, - .ndo_change_mtu = igbvf_change_mtu, - .ndo_do_ioctl = igbvf_ioctl, - .ndo_tx_timeout = igbvf_tx_timeout, - .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, + .ndo_open = igbvf_open, + .ndo_stop = igbvf_close, + .ndo_start_xmit = igbvf_xmit_frame, + .ndo_get_stats = igbvf_get_stats, + .ndo_set_rx_mode = igbvf_set_multi, + .ndo_set_mac_address = igbvf_set_mac, + .ndo_change_mtu = igbvf_change_mtu, + .ndo_do_ioctl = igbvf_ioctl, + .ndo_tx_timeout = igbvf_tx_timeout, + .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igbvf_netpoll, + .ndo_poll_controller = igbvf_netpoll, #endif - .ndo_set_features = igbvf_set_features, + .ndo_set_features = igbvf_set_features, }; /** @@ -2645,8 +2648,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); goto err_dma; } } @@ -2686,7 +2689,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -EIO; adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + pci_resource_len(pdev, 0)); if (!adapter->hw.hw_addr) goto err_ioremap; @@ -2712,16 +2715,16 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->bd_number = cards_found++; netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; @@ -2742,7 +2745,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) dev_info(&pdev->dev, "Error reading MAC address.\n"); else if (is_zero_ether_addr(adapter->hw.mac.addr)) - dev_info(&pdev->dev, "MAC address not assigned by administrator.\n"); + dev_info(&pdev->dev, + "MAC address not assigned by administrator.\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); } @@ -2751,11 +2755,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "Assigning random MAC address.\n"); eth_hw_addr_random(netdev); memcpy(adapter->hw.mac.addr, netdev->dev_addr, - netdev->addr_len); + netdev->addr_len); } 
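The igbvf_set_features hunk above only reindents the function's signature; the body is elided. For context, the RXCSUM handling it wraps amounts to toggling the driver-private flag defined in igbvf.h. A plausible reconstruction under that assumption, not the verbatim driver code:

/* Sketch: mirror the stack's NETIF_F_RXCSUM request into the
 * IGBVF_FLAG_RX_CSUM_DISABLED flag consulted on the Rx path.
 */
static void vf_apply_rxcsum_feature(struct igbvf_adapter *adapter,
				    netdev_features_t features)
{
	if (features & NETIF_F_RXCSUM)
		adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
	else
		adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
}
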
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, - (unsigned long) adapter); + (unsigned long)adapter); INIT_WORK(&adapter->reset_task, igbvf_reset_task); INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); @@ -2818,8 +2822,7 @@ static void igbvf_remove(struct pci_dev *pdev) struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - /* - * The watchdog timer may be rescheduled, so explicitly + /* The watchdog timer may be rescheduled, so explicitly * disable it from being rescheduled. */ set_bit(__IGBVF_DOWN, &adapter->state); @@ -2832,9 +2835,8 @@ static void igbvf_remove(struct pci_dev *pdev) igbvf_reset_interrupt_capability(adapter); - /* - * it is important to delete the napi struct prior to freeing the - * rx ring so that you do not end up with null pointer refs + /* it is important to delete the NAPI struct prior to freeing the + * Rx ring so that you do not end up with null pointer refs */ netif_napi_del(&adapter->rx_ring->napi); kfree(adapter->tx_ring); @@ -2866,17 +2868,17 @@ MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); /* PCI Device API Driver */ static struct pci_driver igbvf_driver = { - .name = igbvf_driver_name, - .id_table = igbvf_pci_tbl, - .probe = igbvf_probe, - .remove = igbvf_remove, + .name = igbvf_driver_name, + .id_table = igbvf_pci_tbl, + .probe = igbvf_probe, + .remove = igbvf_remove, #ifdef CONFIG_PM /* Power Management Hooks */ - .suspend = igbvf_suspend, - .resume = igbvf_resume, + .suspend = igbvf_suspend, + .resume = igbvf_resume, #endif - .shutdown = igbvf_shutdown, - .err_handler = &igbvf_err_handler + .shutdown = igbvf_shutdown, + .err_handler = &igbvf_err_handler }; /** @@ -2888,6 +2890,7 @@ static struct pci_driver igbvf_driver = { static int __init igbvf_init_module(void) { int ret; + pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); pr_info("%s\n", igbvf_copyright); @@ -2909,7 +2912,6 @@ static void __exit igbvf_exit_module(void) } module_exit(igbvf_exit_module); - MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 7dc6341715dc..86a7c120b574 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -28,81 +27,81 @@ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ -#define E1000_CTRL 0x00000 /* Device Control - RW */ -#define E1000_STATUS 0x00008 /* Device Status - RO */ -#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ -#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ -#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) -#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ -#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ -#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ -#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ -#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ -#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ -#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ -/* - * Convenience macros +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ + +/* Convenience macros * * Note: "_n" is the queue number of the register to be written to. * * Example usage: * E1000_RDBAL_REG(current_rx_queue) */ -#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ - (0x0C000 + ((_n) * 0x40))) -#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ - (0x0C004 + ((_n) * 0x40))) -#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ - (0x0C008 + ((_n) * 0x40))) -#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ - (0x0C00C + ((_n) * 0x40))) -#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ - (0x0C010 + ((_n) * 0x40))) -#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ - (0x0C018 + ((_n) * 0x40))) -#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ - (0x0C028 + ((_n) * 0x40))) -#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ - (0x0E000 + ((_n) * 0x40))) -#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ - (0x0E004 + ((_n) * 0x40))) -#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ - (0x0E008 + ((_n) * 0x40))) -#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ - (0x0E010 + ((_n) * 0x40))) -#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ - (0x0E018 + ((_n) * 0x40))) -#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ - (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) -#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ - (0x054E0 + ((_i - 16) * 8))) -#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ - (0x054E4 + ((_i - 16) * 8))) +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? 
(0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) /* Statistics registers */ -#define E1000_VFGPRC 0x00F10 -#define E1000_VFGORC 0x00F18 -#define E1000_VFMPRC 0x00F3C -#define E1000_VFGPTC 0x00F14 -#define E1000_VFGOTC 0x00F34 -#define E1000_VFGOTLBC 0x00F50 -#define E1000_VFGPTLBC 0x00F44 -#define E1000_VFGORLBC 0x00F48 -#define E1000_VFGPRLBC 0x00F40 +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 /* These act per VF so an array friendly macro is used */ -#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) -#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) /* Define macros for handling registers */ -#define er32(reg) readl(hw->hw_addr + E1000_##reg) -#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) +#define er32(reg) readl(hw->hw_addr + E1000_##reg) +#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) #define array_er32(reg, offset) \ readl(hw->hw_addr + E1000_##reg + (offset << 2)) #define array_ew32(reg, offset, val) \ writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) -#define e1e_flush() er32(STATUS) +#define e1e_flush() er32(STATUS) #endif diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 955ad8c2c534..a13baa90ae20 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,17 +24,16 @@ *******************************************************************************/ - #include "vf.h" static s32 e1000_check_for_link_vf(struct e1000_hw *hw); static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex); + u16 *duplex); static s32 e1000_init_hw_vf(struct e1000_hw *hw); static s32 e1000_reset_hw_vf(struct e1000_hw *hw); static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, - u32, u32, u32); + u32, u32, u32); static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); static s32 e1000_read_mac_addr_vf(struct e1000_hw *); static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); @@ -94,7 +92,7 @@ void e1000_init_function_pointers_vf(struct e1000_hw *hw) * the status register's data which is often stale and inaccurate. 
**/ static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, - u16 *duplex) + u16 *duplex) { s32 status; @@ -130,7 +128,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) u8 *addr = (u8 *)(&msgbuf[1]); u32 ctrl; - /* assert vf queue/interrupt reset */ + /* assert VF queue/interrupt reset */ ctrl = er32(CTRL); ew32(CTRL, ctrl | E1000_CTRL_RST); @@ -144,7 +142,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* mailbox timeout can now become active */ mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; - /* notify pf of vf reset completion */ + /* notify PF of VF reset completion */ msgbuf[0] = E1000_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); @@ -153,7 +151,8 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { - if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) + if (msgbuf[0] == (E1000_VF_RESET | + E1000_VT_MSGTYPE_ACK)) memcpy(hw->mac.perm_addr, addr, ETH_ALEN); else ret_val = -E1000_ERR_MAC_INIT; @@ -194,15 +193,14 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) /* Register count multiplied by bits per register */ hash_mask = (hw->mac.mta_reg_count * 32) - 1; - /* - * The bit_shift is the number of left-shifts + /* The bit_shift is the number of left-shifts * where 0xFF would still fall within the hash mask. */ while (hash_mask >> bit_shift != 0xFF) bit_shift++; hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); + (((u16)mc_addr[5]) << bit_shift))); return hash_value; } @@ -221,8 +219,8 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) * unless there are workarounds that change this. **/ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 rar_used_count, u32 rar_count) + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[E1000_VFMAILBOX_SIZE]; @@ -305,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) * @addr: pointer to the receive address * @index: receive address array register **/ -static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) +static void e1000_rar_set_vf(struct e1000_hw *hw, u8 *addr, u32 index) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[3]; @@ -354,8 +352,7 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) s32 ret_val = E1000_SUCCESS; u32 in_msg = 0; - /* - * We only want to run this if there has been a rst asserted. + /* We only want to run this if there has been a rst asserted. 
* in this case that could mean a link change, device reset, * or a virtual function reset */ @@ -367,31 +364,33 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw) if (!mac->get_link_status) goto out; - /* if link status is down no point in checking to see if pf is up */ + /* if link status is down no point in checking to see if PF is up */ if (!(er32(STATUS) & E1000_STATUS_LU)) goto out; /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error */ + * until we are called again and don't report an error + */ if (mbx->ops.read(hw, &in_msg, 1)) goto out; /* if incoming message isn't clear to send we are waiting on response */ if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { - /* message is not CTS and is NACK we must have lost CTS status */ + /* msg is not CTS and is NACK we must have lost CTS status */ if (in_msg & E1000_VT_MSGTYPE_NACK) ret_val = -E1000_ERR_MAC_INIT; goto out; } - /* the pf is talking, if we timed out in the past we reinit */ + /* the PF is talking, if we timed out in the past we reinit */ if (!mbx->timeout) { ret_val = -E1000_ERR_MAC_INIT; goto out; } /* if we passed all the tests above then the link is up and we no - * longer need to check for link */ + * longer need to check for link + */ mac->get_link_status = false; out: diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 57db3c68dfcd..0f1eca639f68 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -38,30 +37,29 @@ struct e1000_hw; -#define E1000_DEV_ID_82576_VF 0x10CA -#define E1000_DEV_ID_I350_VF 0x1520 -#define E1000_REVISION_0 0 -#define E1000_REVISION_1 1 -#define E1000_REVISION_2 2 -#define E1000_REVISION_3 3 -#define E1000_REVISION_4 4 +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 -#define E1000_FUNC_0 0 -#define E1000_FUNC_1 1 +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 -/* - * Receive Address Register Count +/* Receive Address Register Count * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * These entries are also used for MAC-based filtering. 
*/ -#define E1000_RAR_ENTRIES_VF 1 +#define E1000_RAR_ENTRIES_VF 1 /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { - u64 pkt_addr; /* Packet buffer address */ - u64 hdr_addr; /* Header buffer address */ + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ } read; struct { struct { @@ -69,53 +67,53 @@ union e1000_adv_rx_desc { u32 data; struct { u16 pkt_info; /* RSS/Packet type */ - u16 hdr_info; /* Split Header, - * hdr buffer length */ + /* Split Header, hdr buffer length */ + u16 hdr_info; } hs_rss; } lo_dword; union { - u32 rss; /* RSS Hash */ + u32 rss; /* RSS Hash */ struct { - u16 ip_id; /* IP id */ - u16 csum; /* Packet Checksum */ + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { - u32 status_error; /* ext status/error */ - u16 length; /* Packet length */ - u16 vlan; /* VLAN tag */ + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; -#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 -#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { struct { - u64 buffer_addr; /* Address of descriptor's data buf */ + u64 buffer_addr; /* Address of descriptor's data buf */ u32 cmd_type_len; u32 olinfo_status; } read; struct { - u64 rsvd; /* Reserved */ + u64 rsvd; /* Reserved */ u32 nxtseq_seed; u32 status; } wb; }; /* Adv Transmit Descriptor Config Masks */ -#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ -#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ -#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ -#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ -#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ -#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ /* Context descriptors */ struct e1000_adv_tx_context_desc { @@ -125,11 +123,11 @@ struct e1000_adv_tx_context_desc { u32 mss_l4len_idx; }; -#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define 
E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ enum e1000_mac_type { e1000_undefined = 0, @@ -262,5 +260,4 @@ struct e1000_hw { void e1000_rlpml_set_vf(struct e1000_hw *, u16); void e1000_init_function_pointers_vf(struct e1000_hw *hw); - #endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 11a1bdbe3fd9..31f91459312f 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -285,6 +285,8 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) /* prevent the interrupt handler from restarting watchdog */ set_bit(__IXGB_DOWN, &adapter->flags); + netif_carrier_off(netdev); + napi_disable(&adapter->napi); /* waiting for NAPI to complete can re-enable interrupts */ ixgb_irq_disable(adapter); @@ -298,7 +300,6 @@ ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog) adapter->link_speed = 0; adapter->link_duplex = 0; - netif_carrier_off(netdev); netif_stop_queue(netdev); ixgb_reset(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 7dcbbec09a70..636f9e350162 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -151,6 +151,7 @@ struct vf_data_storage { u16 tx_rate; u16 vlan_count; u8 spoofchk_enabled; + bool rss_query_enabled; unsigned int vf_api; }; @@ -613,7 +614,6 @@ struct ixgbe_adapter { #define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) #define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) #define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7) #define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) #define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) #define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) @@ -643,7 +643,6 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11) /* Tx fast path data */ int num_tx_queues; @@ -723,6 +722,8 @@ struct ixgbe_adapter { u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 wol; + u16 bridge_mode; + u16 eeprom_verh; u16 eeprom_verl; u16 eeprom_cap; @@ -766,6 +767,15 @@ struct ixgbe_adapter { u8 default_up; unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ + +/* maximum number of RETA entries among all devices supported by ixgbe + * driver: currently it's the X550 device in non-SRIOV mode + */ +#define IXGBE_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; + +#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -955,4 +965,5 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring); +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index c5c97b483d7c..824a7ab79ab6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -171,17 +171,21 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw 
function. - * Disables relaxed ordering Then set pcie completion timeout + * Disables relaxed ordering for archs other than SPARC, + * then sets the PCIe completion timeout + * **/ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) { +#ifndef CONFIG_SPARC u32 regval; u32 i; +#endif s32 ret_val; ret_val = ixgbe_start_hw_generic(hw); +#ifndef CONFIG_SPARC /* Disable relaxed ordering */ for (i = 0; ((i < hw->mac.max_tx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { @@ -197,7 +201,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } - +#endif if (ret_val) return ret_val; @@ -1193,6 +1197,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_82598 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index cf55a0df877b..e0c363948bf4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1977,7 +1977,10 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) */ hw->mac.ops.disable_rx_buff(hw); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); hw->mac.ops.enable_rx_buff(hw); @@ -2336,6 +2339,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, .prot_autoc_read = &prot_autoc_read_82599, .prot_autoc_write = &prot_autoc_write_82599, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_82599 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 9c66babd4edd..06d8f3cfa099 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -312,7 +312,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; - u32 regval; /* Clear the rate limiters */ for (i = 0; i < hw->mac.max_tx_queues; i++) { @@ -321,20 +320,25 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) } IXGBE_WRITE_FLUSH(hw); +#ifndef CONFIG_SPARC /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { + u32 regval; + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); } for (i = 0; i < hw->mac.max_rx_queues; i++) { + u32 regval; + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } - +#endif return 0; } @@ -703,7 +707,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) hw->adapter_stopped = true; /* Disable the receive unit */ - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); + hw->mac.ops.disable_rx(hw); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); @@ -2639,7 +2643,10 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 
regval); + if (regval & IXGBE_RXCTRL_RXEN) + hw->mac.ops.enable_rx(hw); + else + hw->mac.ops.disable_rx(hw); return 0; } @@ -3850,3 +3857,44 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) return 0; } +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) +{ + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + if (hw->mac.type != ixgbe_mac_82598EB) { + u32 pfdtxgswc; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + hw->mac.set_lben = false; + } + } + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } +} + +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) +{ + u32 rxctrl; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); + + if (hw->mac.type != ixgbe_mac_82598EB) { + if (hw->mac.set_lben) { + u32 pfdtxgswc; + + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = false; + } + } +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 8cfadcb2676e..f21f8a165ec4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -130,6 +130,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); +void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); #define IXGBE_FAILED_READ_REG 0xffffffffU #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e5be0dd508de..eafa9ec802ba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1351,7 +1351,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, if (ixgbe_removed(adapter->hw.hw_addr)) { *data = 1; - return 1; + return true; } for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { before = ixgbe_read_reg(&adapter->hw, reg); @@ -1376,7 +1376,7 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, if (ixgbe_removed(adapter->hw.hw_addr)) { *data = 1; - return 1; + return true; } before = ixgbe_read_reg(&adapter->hw, reg); ixgbe_write_reg(&adapter->hw, reg, write & mask); @@ -1637,9 +1637,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) /* shut down the DMA engines now so they can be reinitialized later */ /* first Rx */ - reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - reg_ctl &= ~IXGBE_RXCTRL_RXEN; - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); + hw->mac.ops.disable_rx(hw); ixgbe_disable_rx_queue(adapter, rx_ring); /* now Tx */ @@ -1670,6 +1668,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) { struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; + struct ixgbe_hw *hw = &adapter->hw; u32 rctl, reg_data; int ret_val; int err; @@ -1713,14 +1712,16 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) goto err_nomem; } - rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); - 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); ixgbe_configure_rx_ring(adapter, rx_ring); - rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; + rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); + rctl |= IXGBE_RXCTRL_DMBYPS; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); + hw->mac.ops.enable_rx(hw); + return 0; err_nomem: @@ -2852,6 +2853,45 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 ixgbe_rss_indir_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return ixgbe_rss_indir_tbl_entries(adapter); +} + +static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + ixgbe_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); + + return 0; +} + static int ixgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { @@ -3109,6 +3149,9 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_coalesce = ixgbe_set_coalesce, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, + .get_rxfh_indir_size = ixgbe_rss_indir_size, + .get_rxfh_key_size = ixgbe_get_rxfh_key_size, + .get_rxfh = ixgbe_get_rxfh, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 2ad91cb04dab..631c603fc966 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -71,6 +71,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) struct ixgbe_fcoe *fcoe; struct ixgbe_adapter *adapter; struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_hw *hw; u32 fcbuff; if (!netdev) @@ -85,25 +86,51 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) if (!ddp->udl) return 0; + hw = &adapter->hw; len = ddp->len; - /* if there an error, force to invalidate ddp context */ - if (ddp->err) { + /* if no error then skip ddp context invalidation */ + if (!ddp->err) + goto skip_ddpinv; + + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP FCoE lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), + (xid | IXGBE_FCFLTRW_WE)); + + /* program FCBUFF */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); + + /* program FCDMARW */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_WE)); + + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); + } else { + /* other hardware requires DDP FCoE lock */ spin_lock_bh(&fcoe->lock); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW, + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, (xid | IXGBE_FCFLTRW_WE)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); - 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_WE)); /* guaranteed to be invalidated after 100us */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); spin_unlock_bh(&fcoe->lock); - if (fcbuff & IXGBE_FCBUFF_VALID) - udelay(100); - } + } + + if (fcbuff & IXGBE_FCBUFF_VALID) + usleep_range(100, 150); + +skip_ddpinv: if (ddp->sgl) dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); @@ -272,7 +299,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, /* program DMA context */ hw = &adapter->hw; - spin_lock_bh(&fcoe->lock); /* turn on last frame indication for target mode as FCP_RSPtarget is * supposed to send FCP_RSP when it is done. */ @@ -283,16 +309,33 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); } - IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); - IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); - IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); - /* program filter context */ - IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); - IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), + ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); + } else { + /* DDP lock for indirect DDP context access */ + spin_lock_bh(&fcoe->lock); + + IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); - spin_unlock_bh(&fcoe->lock); + spin_unlock_bh(&fcoe->lock); + } return 1; @@ -371,6 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, struct fcoe_crc_eof *crc; __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); __le32 ddp_err; + int ddp_max; u32 fctl; u16 xid; @@ -392,7 +436,11 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, else xid = be16_to_cpu(fh->fh_rx_id); - if (xid >= IXGBE_FCOE_DDP_MAX) + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + if (xid >= ddp_max) return -EINVAL; fcoe = &adapter->fcoe; @@ -612,7 +660,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) { struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_hw *hw = &adapter->hw; - int i, fcoe_q, fcoe_i; + int i, fcoe_q, fcoe_i, fcoe_q_h = 0; + int fcreta_size; u32 etqf; /* Minimal functionality for FCoE requires at least CRC offloads */ @@ -633,10 +682,23 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) return; /* 
Use one or more Rx queues for FCoE by redirection table */ - for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { + fcreta_size = IXGBE_FCRETA_SIZE; + if (adapter->hw.mac.type == ixgbe_mac_X550) + fcreta_size = IXGBE_FCRETA_SIZE_X550; + + for (i = 0; i < fcreta_size; i++) { + if (adapter->hw.mac.type == ixgbe_mac_X550) { + int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % + fcoe->indices); + fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; + fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & + IXGBE_FCRETA_ENTRY_HIGH_MASK; + } + fcoe_i = fcoe->offset + (i % fcoe->indices); fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + fcoe_q |= fcoe_q_h; IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); } IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); @@ -672,13 +734,18 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; - int cpu, i; + int cpu, i, ddp_max; /* do nothing if no DDP pools were allocated */ if (!fcoe->ddp_pool) return; - for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + + for (i = 0; i < ddp_max; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); for_each_possible_cpu(cpu) @@ -758,6 +825,9 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) } adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 0772b7730fce..38385876effb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -46,6 +46,7 @@ #define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ #define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ #define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ +#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ /* Default traffic class to use for FCoE */ #define IXGBE_FCOE_DEFTC 3 @@ -77,7 +78,7 @@ struct ixgbe_fcoe { struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; atomic_t refcnt; spinlock_t lock; - struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; + struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550]; void *extra_ddp_buffer; dma_addr_t extra_ddp_buffer_dma; unsigned long mode; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 70cc4c5c0a01..d3f4b0ceb3f7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1619,14 +1619,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb) { - struct ixgbe_adapter *adapter = q_vector->adapter; - if (ixgbe_qv_busy_polling(q_vector)) netif_receive_skb(skb); - else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(&q_vector->napi, skb); else - netif_rx(skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -2609,7 +2605,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) eicr = IXGBE_READ_REG(hw, IXGBE_EICS); /* The lower 16bits of the EICR register are for the queue interrupts - * which should be masked here in order to not accidently clear them if + * which should be masked here in order to not accidentally clear them 
if * the bits are high when ixgbe_msix_other is called. There is a race * condition otherwise which results in possible performance loss * especially if the ixgbe_msix_other interrupt is triggering @@ -3232,89 +3228,148 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } -static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed) +/** + * ixgbe_rss_indir_tbl_entries - Return the number of entries in the RSS indirection table + * + * @adapter: device handle + * + * - 82598/82599/X540: 128 + * - X550(non-SRIOV mode): 512 + * - X550(SRIOV mode): 64 + */ +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type < ixgbe_mac_X550) + return 128; + else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 512; +} + +/** + * ixgbe_store_reta - Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +static void ixgbe_store_reta(struct ixgbe_adapter *adapter) { + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 reta = 0; - int i, j; - int reta_entries = 128; - u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - int indices_multi; - - /* - * Program table for at least 2 queues w/ SR-IOV so that VFs can - * make full use of any rings they may have. We will use the - * PSRTYPE register to control how many rings we use within the PF. - */ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) - rss_i = 2; - - /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + u32 indices_multi; + u8 *indir_tbl = adapter->rss_indir_tbl; /* Fill out the redirection table as follows: - * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices - * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index - * X550: 512 (8 bit wide) entries containing 6 bit RSS index + * - 82598: 8 bit wide entries containing pair of 4 bit RSS + * indices. + * - 82599/X540: 8 bit wide entries containing 4 bit RSS index + * - X550: 8 bit wide entries containing 6 bit RSS index */ if (adapter->hw.mac.type == ixgbe_mac_82598EB) indices_multi = 0x11; else indices_multi = 0x1; - switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - reta_entries = 512; - default: - break; - } - - /* Fill out redirection table */ - for (i = 0, j = 0; i < reta_entries; i++, j++) { - if (j == rss_i) - j = 0; - reta = (reta << 8) | (j * indices_multi); + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; if ((i & 3) == 3) { if (i < 128) IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); else IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); + reta = 0; } } } -static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed) +/** + * ixgbe_store_vfreta - Write the RETA table to HW (for x550 devices in SRIOV mode) + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. 
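The indices_multi packing in ixgbe_store_reta() above is compact but easy to misread: on 82598 each 8-bit RETA entry holds a pair of 4-bit RSS indices, so multiplying the queue index by 0x11 mirrors it into both nibbles, while 82599/X540/X550 use a multiplier of 1; four entries then accumulate into each 32-bit RETA/ERETA register write. A minimal user-space model of that loop (illustrative only, not part of the patch):

/* Standalone model of the ixgbe_store_reta() packing loop. */
#include <stdint.h>
#include <stdio.h>

static void pack_reta(const uint8_t *indir_tbl, unsigned int entries,
                      uint32_t indices_multi)
{
        uint32_t reta = 0;
        unsigned int i;

        for (i = 0; i < entries; i++) {
                /* entry i lands in byte (i & 3) of the accumulator */
                reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
                if ((i & 3) == 3) {
                        /* stands in for IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta) */
                        printf("RETA[%u] = 0x%08x\n", i >> 2, (unsigned int)reta);
                        reta = 0;
                }
        }
}

int main(void)
{
        /* round-robin fill over rss_i = 4 queues, as ixgbe_setup_reta() does */
        const uint8_t tbl[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };

        pack_reta(tbl, 8, 0x11);        /* 82598: prints 0x33221100 twice */
        return 0;
}

Together with the get_rxfh/get_rxfh_indir_size/get_rxfh_key_size ethtool hooks added earlier, the cached rss_indir_tbl[] and rss_key[] that this function writes are what "ethtool -x <iface>" now reports.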
+ */ +static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) { + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 vfreta = 0; + unsigned int pf_pool = adapter->num_vfs; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + vfreta); + vfreta = 0; + } + } +} + +static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 2; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ixgbe_store_reta(adapter); +} + +static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; unsigned int pf_pool = adapter->num_vfs; int i, j; /* Fill out hash function seeds */ for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]); + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), + adapter->rss_key[i]); /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { if (j == rss_i) j = 0; - vfreta = (vfreta << 8) | j; - if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), - vfreta); + + adapter->rss_indir_tbl[i] = j; } + + ixgbe_store_vfreta(adapter); } static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 mrqc = 0, rss_field = 0, vfmrqc = 0; - u32 rss_key[10]; u32 rxcsum; /* Disable indicating checksum in descriptor, enables RSS hash */ @@ -3358,7 +3413,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - netdev_rss_key_fill(rss_key, sizeof(rss_key)); + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { unsigned int pf_pool = adapter->num_vfs; @@ -3368,12 +3423,12 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); /* Setup RSS through the VF registers */ - ixgbe_setup_vfreta(adapter, rss_key); + ixgbe_setup_vfreta(adapter); vfmrqc = IXGBE_MRQC_RSSEN; vfmrqc |= rss_field; IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); } else { - ixgbe_setup_reta(adapter, rss_key); + ixgbe_setup_reta(adapter); mrqc |= rss_field; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } @@ -3557,7 +3612,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) + 
if (adapter->bridge_mode == BRIDGE_MODE_VEB) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ @@ -3603,6 +3658,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) /* enable ethertype anti spoofing if hw supports it */ if (hw->mac.ops.set_ethertype_anti_spoofing) hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + + /* Enable/Disable RSS query feature */ + ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, + adapter->vfinfo[i].rss_query_enabled); } } @@ -3705,8 +3764,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) u32 rxctrl, rfctl; /* disable receives while setting up the descriptors */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); ixgbe_setup_psrtype(adapter); ixgbe_setup_rdrxctl(adapter); @@ -3731,6 +3789,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); /* disable drop enable for 82598 parts */ if (hw->mac.type == ixgbe_mac_82598EB) rxctrl |= IXGBE_RXCTRL_DMBYPS; @@ -3924,7 +3983,7 @@ static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) for (i = 0; i < hw->mac.num_rar_entries; i++) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; } ixgbe_sync_mac_table(adapter); @@ -3992,7 +4051,7 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) adapter->mac_table[i].queue == queue) { adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; ixgbe_sync_mac_table(adapter); return 0; @@ -5014,7 +5073,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *upper; struct list_head *iter; - u32 rxctrl; int i; /* signal that we are down to the interrupt handler */ @@ -5022,8 +5080,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) return; /* do nothing if already down */ /* disable receives */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + hw->mac.ops.disable_rx(hw); /* disable all enabled rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) @@ -6174,7 +6231,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) /* Cause software interrupt to ensure rings are cleaned */ ixgbe_irq_rearm_queues(adapter, eics); - } /** @@ -7507,14 +7563,9 @@ static void ixgbe_netpoll(struct net_device *netdev) if (test_bit(__IXGBE_DOWN, &adapter->state)) return; - adapter->flags |= IXGBE_FLAG_IN_NETPOLL; - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - for (i = 0; i < adapter->num_q_vectors; i++) - ixgbe_msix_clean_rings(0, adapter->q_vector[i]); - } else { - ixgbe_intr(adapter->pdev->irq, netdev); - } - adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; + /* loop through and schedule all active queues */ + for (i = 0; i < adapter->num_q_vectors; i++) + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); } #endif @@ -7882,6 +7933,80 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr 
*tb[], return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } +/** + * ixgbe_configure_bridge_mode - set various bridge modes + * @adapter: the private structure + * @mode: requested bridge mode + * + * Configure some settings required for various bridge modes. + **/ +static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, + __u16 mode) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int p, num_pools; + u32 vmdctl; + + switch (mode) { + case BRIDGE_MODE_VEPA: + /* disable Tx loopback, rely on switch hairpin mode */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); + + /* must enable Rx switching replication to allow multicast + * packet reception on all VFs, and to enable source address + * pruning. + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + true, + p); + } + break; + case BRIDGE_MODE_VEB: + /* enable Tx loopback for internal VF/PF communication */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, + IXGBE_PFDTXGSWC_VT_LBEN); + + /* disable Rx switching replication unless we have SR-IOV + * virtual functions + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + if (!adapter->num_vfs) + vmdctl &= ~IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + false, + p); + } + break; + default: + return -EINVAL; + } + + adapter->bridge_mode = mode; + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + + return 0; +} + static int ixgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { @@ -7897,8 +8022,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { + u32 status; __u16 mode; - u32 reg = 0; if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; @@ -7907,19 +8032,11 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EINVAL; mode = nla_get_u16(attr); - if (mode == BRIDGE_MODE_VEPA) { - reg = 0; - adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB; - } else if (mode == BRIDGE_MODE_VEB) { - reg = IXGBE_PFDTXGSWC_VT_LBEN; - adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; - } else - return -EINVAL; + status = ixgbe_configure_bridge_mode(adapter, mode); + if (status) + return status; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg); - - e_info(drv, "enabling bridge mode: %s\n", - mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + break; } return 0; @@ -7930,17 +8047,12 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, u32 filter_mask) { struct ixgbe_adapter *adapter = netdev_priv(dev); - u16 mode; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; - if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) - mode = BRIDGE_MODE_VEB; - else - mode = BRIDGE_MODE_VEPA; - - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + adapter->bridge_mode, 0, 0); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) @@ -8052,6 +8164,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_IXGBE_DCB @@ -8406,7 +8519,6 @@ skip_sriov: NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | @@ -8428,6 +8540,7 @@ skip_sriov: } netdev->hw_features |= NETIF_F_RXALL; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; @@ -8989,8 +9102,6 @@ static void __exit ixgbe_exit_module(void) pci_unregister_driver(&ixgbe_driver); ixgbe_dbg_exit(); - - rcu_barrier(); /* Wait for completion of call_rcu()'s */ } #ifdef CONFIG_IXGBE_DCA diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index a5cb755de3a9..b1e4703ff2a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -73,6 +73,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -97,6 +98,10 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 79c00f57d3e7..e5ba04025e2b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -279,20 +279,18 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) * read the timecounter and return the correct value on ns, * after converting it into a struct timespec. 
*/ -static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); u64 ns; - u32 remainder; unsigned long flags; spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_read(&adapter->tc); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -306,15 +304,14 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) * wall timer value. */ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec *ts) + const struct timespec64 *ts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); u64 ns; unsigned long flags; - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); /* reset the timecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); @@ -407,7 +404,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->last_overflow_check + IXGBE_OVERFLOW_PERIOD); - struct timespec ts; + struct timespec64 ts; if (timeout) { ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); @@ -488,7 +485,7 @@ static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) * @work: pointer to the work struct * * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware - * timestamp has been taken for the current skb. It is necesary, because the + * timestamp has been taken for the current skb. It is necessary, because the * descriptor's "done" bit does not correlate with the timestamp event. 
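The gettime64/settime64 conversion above is part of the y2038 work: struct timespec carries a 32-bit tv_sec on 32-bit kernels, while timespec64 does not, and ns_to_timespec64()/timespec64_to_ns() replace the open-coded div_u64_rem() split. A small stand-alone demonstration of the overflow being avoided (our example, not the patch's):

/* Why a 32-bit tv_sec is not enough for a PTP clock. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        /* roughly 2040-01-01, expressed in nanoseconds since the epoch */
        uint64_t ns = 2208988800ULL * NSEC_PER_SEC;
        int32_t sec32 = (int32_t)(ns / NSEC_PER_SEC);   /* old 32-bit tv_sec */
        int64_t sec64 = (int64_t)(ns / NSEC_PER_SEC);   /* timespec64 tv_sec */

        printf("32-bit tv_sec: %d\n", (int)sec32);      /* wraps negative past 2038 */
        printf("64-bit tv_sec: %lld\n", (long long)sec64);
        return 0;
}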
*/ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) @@ -874,8 +871,8 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime = ixgbe_ptp_gettime; - adapter->ptp_caps.settime = ixgbe_ptp_settime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; case ixgbe_mac_82599EB: @@ -890,8 +887,8 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime = ixgbe_ptp_gettime; - adapter->ptp_caps.settime = ixgbe_ptp_settime; + adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7f37fe7269a7..1d17b5872dd1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -36,6 +36,7 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/ipv6.h> +#include <linux/if_bridge.h> #ifdef NETIF_F_HW_VLAN_CTAG_TX #include <linux/if_vlan.h> #endif @@ -79,7 +80,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) /* Initialize default switching mode VEB */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; + adapter->bridge_mode = BRIDGE_MODE_VEB; /* If call to enable VFs succeeded then allocate memory * for per VF control structures. @@ -105,9 +106,18 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | IXGBE_FLAG2_RSC_ENABLED); - /* enable spoof checking for all VFs */ - for (i = 0; i < adapter->num_vfs; i++) + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; + + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default. + */ + adapter->vfinfo[i].rss_query_enabled = 0; + } + return 0; } @@ -141,7 +151,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) * The 82599 supports up to 64 VFs per physical function * but this implementation limits allocation to 63 so that * basic networking resources are still available to the - * physical function. If the user requests greater thn + * physical function. If the user requests greater than * 63 VFs then it is an error - reset to default of zero. 
*/ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT); @@ -424,6 +434,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) #endif /* CONFIG_FCOE */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: /* * Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are @@ -891,6 +902,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, switch (api) { case ixgbe_mbox_api_10: case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -914,6 +926,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_20: case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: break; default: return -1; @@ -941,6 +954,53 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u32 i, j; + u32 *out_buf = &msgbuf[1]; + const u8 *reta = adapter->rss_indir_tbl; + u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + /* Check if operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + /* This mailbox command is supported (required) only for 82599 and x540 + * VFs which support up to 4 RSS queues. Therefore we will compress the + * RETA by saving only 2 bits from each entry. This way we will be able + * to transfer the whole RETA in a single mailbox operation. + */ + for (i = 0; i < reta_size / 16; i++) { + out_buf[i] = 0; + for (j = 0; j < 16; j++) + out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); + } + + return 0; +} + +static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *rss_key = &msgbuf[1]; + + /* Check if the operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -997,6 +1057,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_QUEUES: retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); break; + case IXGBE_VF_GET_RETA: + retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_RSS_KEY: + retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1330,6 +1396,26 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) return 0; } +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* This operation is currently supported only for 82599 and x540 + * devices. 
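The compression comment in ixgbe_get_vf_reta() above deserves a worked check: 82599/X540 VFs see at most 4 RSS queues, so every RETA entry fits in 2 bits and all 128 entries pack into 8 mailbox words, i.e. a single mailbox transfer. A self-contained round-trip sketch of the packing and the matching VF-side unpacking (our code, not the patch's):

/* Round trip of the 2-bit RETA compression; builds as a user-space program. */
#include <assert.h>
#include <stdint.h>

#define RETA_SIZE 128   /* 82599/X540 RETA entries; 4 RSS queues -> 2 bits each */

static void pf_pack(const uint8_t *reta, uint32_t *out_buf)
{
        unsigned int i, j;

        for (i = 0; i < RETA_SIZE / 16; i++) {
                out_buf[i] = 0;
                for (j = 0; j < 16; j++)
                        out_buf[i] |= (uint32_t)(reta[16 * i + j] & 0x3) << (2 * j);
        }
}

static void vf_unpack(const uint32_t *msgbuf, uint8_t *reta)
{
        unsigned int i, j;

        for (i = 0; i < RETA_SIZE / 16; i++)
                for (j = 0; j < 16; j++)
                        reta[16 * i + j] = (msgbuf[i] >> (2 * j)) & 0x3;
}

int main(void)
{
        uint8_t reta[RETA_SIZE], copy[RETA_SIZE];
        uint32_t buf[RETA_SIZE / 16];   /* 8 words -- fits one mailbox operation */
        unsigned int i;

        for (i = 0; i < RETA_SIZE; i++) /* round-robin over 4 queues */
                reta[i] = i % 4;
        pf_pack(reta, buf);
        vf_unpack(buf, copy);
        for (i = 0; i < RETA_SIZE; i++)
                assert(copy[i] == reta[i]);
        return 0;
}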
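Similarly, ixgbe_get_vf_rss_key() implies a VF-side consumer that this patch does not include. A hedged sketch of what such a request might look like, modeled on the existing ixgbevf mailbox conventions; the helper name, the 11-word reply layout (status word plus the 40-byte key), and the exact error handling are our assumptions, not something this patch defines:

/* Hypothetical VF-side request sketch (kernel context, ixgbevf). */
static s32 ixgbevf_get_rss_key_sketch(struct ixgbe_hw *hw, u8 *rss_key)
{
        u32 msgbuf[11]; /* command word + 40-byte hash key */
        s32 err;

        msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
        err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
        if (err)
                return err;

        err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
        if (err)
                return err;

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
        /* the PF NACKs the request when rss_query_enabled is off for this VF */
        if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
                return IXGBE_ERR_MBX;

        memcpy(rss_key, msgbuf + 1, 40);
        return 0;
}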
+ */ + if (adapter->hw.mac.type < ixgbe_mac_82599EB || + adapter->hw.mac.type >= ixgbe_mac_X550) + return -EOPNOTSUPP; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].rss_query_enabled = setting; + + return 0; +} + int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -1343,5 +1429,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 32c26d586c01..2c197e6d1fe7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -47,6 +47,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate); int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index fc5ecee56ca8..dd6ba5916dfe 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -285,6 +285,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ #define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 #define IXGBE_VT_CTL 0x051B0 #define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ #define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ @@ -608,6 +610,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_RTTBCNRM 0x04980 #define IXGBE_RTTQCNRM 0x04980 +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) /* FCoE DMA Context Registers */ #define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ #define IXGBE_FCPTRH 0x02414 /* FC USer Desc. 
PTR High */ @@ -634,6 +638,9 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ #define IXGBE_REOFF 0x05158 /* Rx FC EOF */ #define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) /* FCoE Filter Context Registers */ #define IXGBE_FCFLT 0x05108 /* FC FLT Context */ #define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ @@ -664,6 +671,10 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ #define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ #define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 /* Stats registers */ #define IXGBE_CRCERRS 0x04000 @@ -1690,7 +1701,7 @@ enum { #define IXGBE_MACC_FS 0x00040000 #define IXGBE_MAC_RX2TX_LPBK 0x00000002 -/* Veto Bit definiton */ +/* Veto Bit definition */ #define IXGBE_MMNGC_MNG_VETO 0x00000001 /* LINKS Bit Masks */ @@ -2462,8 +2473,8 @@ struct ixgbe_hic_read_shadow_ram { struct ixgbe_hic_write_shadow_ram { union ixgbe_hic_hdr2 hdr; - u32 address; - u16 length; + __be32 address; + __be16 length; u16 pad2; u16 data; u16 pad3; @@ -3067,6 +3078,10 @@ struct ixgbe_mac_operations { s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + void (*disable_rx)(struct ixgbe_hw *hw); + void (*enable_rx)(struct ixgbe_hw *hw); + void (*set_source_address_pruning)(struct ixgbe_hw *, bool, + unsigned int); void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); /* DMA Coalescing */ @@ -3137,6 +3152,7 @@ struct ixgbe_mac_info { u8 flags; u8 san_mac_rar_index; struct ixgbe_thermal_sensor_data thermal_sensor_data; + bool set_lben; }; struct ixgbe_phy_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 49395420c9b3..f5f948d08b43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -820,6 +820,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_write = &prot_autoc_write_generic, + .enable_rx = &ixgbe_enable_rx_generic, + .disable_rx = &ixgbe_disable_rx_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_X540 = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 50bf81908dd6..cf5cf819a6b8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -557,6 +557,47 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) return status; } +/** ixgbe_disable_rx_x550 - Disable RX unit + * @hw: pointer to hardware structure + * + * Disables the Rx DMA unit for x550 + **/ +static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) +{ + u32 rxctrl, pfdtxgswc; + s32 status; + struct ixgbe_hic_disable_rxen fw_cmd; + + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { + pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); + hw->mac.set_lben = true; + } else { + 
hw->mac.set_lben = false; + } + + fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; + fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; + fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + fw_cmd.port_number = (u8)hw->bus.lan_id; + + status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + sizeof(struct ixgbe_hic_disable_rxen), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* If we fail - disable RX using register write */ + if (status) { + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (rxctrl & IXGBE_RXCTRL_RXEN) { + rxctrl &= ~IXGBE_RXCTRL_RXEN; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); + } + } + } +} + /** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash * @hw: pointer to hardware structure * @@ -1306,8 +1347,8 @@ mac_reset_top: * @enable: enable or disable switch for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing **/ -void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, - int vf) +static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, + bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; @@ -1322,6 +1363,33 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } +/** ixgbe_set_source_address_pruning_X550 - Enable/Disable src address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool to set source address pruning for + **/ +static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, + bool enable, + unsigned int pool) +{ + u64 pfflp; + + /* max rx pool is 63 */ + if (pool > 63) + return; + + pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); + pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + + if (enable) + pfflp |= (1ULL << pool); + else + pfflp &= ~(1ULL << pool); + + IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); + IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); +} + #define X550_COMMON_MAC \ .init_hw = &ixgbe_init_hw_generic, \ .start_hw = &ixgbe_start_hw_X540, \ @@ -1356,6 +1424,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, .init_uta_tables = &ixgbe_init_uta_tables_generic, \ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ + .set_source_address_pruning = \ + &ixgbe_set_source_address_pruning_X550, \ .set_ethertype_anti_spoofing = \ &ixgbe_set_ethertype_anti_spoofing_X550, \ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ @@ -1366,6 +1436,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, .init_thermal_sensor_thresh = NULL, \ .prot_autoc_read = &prot_autoc_read_generic, \ .prot_autoc_write = &prot_autoc_write_generic, \ + .enable_rx = &ixgbe_enable_rx_generic, \ + .disable_rx = &ixgbe_disable_rx_x550, \ static struct ixgbe_mac_operations mac_ops_X550 = { X550_COMMON_MAC diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 7412d378b77b..770e21a64388 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -29,138 +28,138 @@ #define _IXGBEVF_DEFINES_H_ /* Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_VF_IRQ_CLEAR_MASK 7 -#define IXGBE_VF_MAX_TX_QUEUES 8 -#define IXGBE_VF_MAX_RX_QUEUES 8 +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 8 +#define IXGBE_VF_MAX_RX_QUEUES 8 /* DCB define */ #define IXGBE_VF_MAX_TRAFFIC_CLASS 8 /* Link speed */ typedef u32 ixgbe_link_speed; -#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 -#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 #define IXGBE_LINK_SPEED_100_FULL 0x0008 -#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ -#define IXGBE_LINKS_UP 0x40000000 -#define IXGBE_LINKS_SPEED_82599 0x30000000 -#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 -#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 -#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ -#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 -#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 /* Interrupt Vector Allocation Registers */ -#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ /* Receive Config masks */ -#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ -#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ -#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ -#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ -#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ -#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_RXDCTL_RLPMLMASK 
0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 /* DCA Control */ #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ /* PSRTYPE bit definitions */ -#define IXGBE_PSRTYPE_TCPHDR 0x00000010 -#define IXGBE_PSRTYPE_UDPHDR 0x00000020 -#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 -#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 -#define IXGBE_PSRTYPE_L2HDR 0x00001000 +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 /* SRRCTL bit definitions */ -#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ -#define IXGBE_SRRCTL_RDMTS_SHIFT 22 -#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 -#define IXGBE_SRRCTL_DROP_EN 0x10000000 -#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F -#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 -#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 -#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 -#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 /* Receive Descriptor bit definitions */ -#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ -#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ -#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ -#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ -#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ -#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ -#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ -#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ -#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ -#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ -#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ -#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ -#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ -#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ -#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ -#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ -#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ -#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ -#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ -#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ -#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ -#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ -#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ -#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ -#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ -#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ -#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ -#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ -#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* 
Oversize Error */ -#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ -#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ -#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ -#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ -#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define IXGBE_RXD_PRI_SHIFT 13 -#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define IXGBE_RXD_CFI_SHIFT 12 - -#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ -#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ -#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ -#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ -#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ -#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ -#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ -#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ -#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ -#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ - -#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F -#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 -#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 -#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 -#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 -#define IXGBE_RXDADV_RSCCNT_SHIFT 17 -#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 -#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 -#define IXGBE_RXDADV_SPH 0x8000 +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize 
Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ IXGBE_RXD_ERR_CE | \ @@ -176,16 +175,16 @@ typedef u32 ixgbe_link_speed; IXGBE_RXDADV_ERR_OSE | \ IXGBE_RXDADV_ERR_USE) -#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ -#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ -#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ -#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ -#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ -#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ -#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ -#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ -#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ -#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { @@ -241,44 +240,44 @@ struct ixgbe_adv_tx_context_desc { }; /* Adv Transmit Descriptor Config Masks */ -#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ -#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ -#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ -#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ -#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ 
-#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ -#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ -#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ -#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ -#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ -#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ -#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ /* Interrupt register bitmasks */ -#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 #define IXGBE_MAX_EITR 0x00000FF8 #define IXGBE_MIN_EITR 8 /* Error Codes */ -#define IXGBE_ERR_INVALID_MAC_ADDR -1 -#define IXGBE_ERR_RESET_FAILED -2 -#define IXGBE_ERR_INVALID_ARGUMENT -3 +#define IXGBE_ERR_INVALID_MAC_ADDR -1 +#define IXGBE_ERR_RESET_FAILED -2 +#define IXGBE_ERR_INVALID_ARGUMENT -3 /* Transmit Config masks */ #define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c 
b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index cc0e5b7ff041..b2f5b161d792 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -100,6 +99,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Link test (on/offline)" }; + #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) static int ixgbevf_get_settings(struct net_device *netdev, @@ -120,6 +120,7 @@ static int ixgbevf_get_settings(struct net_device *netdev, if (link_up) { __u32 speed = SPEED_10000; + switch (link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: speed = SPEED_10000; @@ -145,12 +146,14 @@ static int ixgbevf_get_settings(struct net_device *netdev, static u32 ixgbevf_get_msglevel(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -185,7 +188,8 @@ static void ixgbevf_get_regs(struct net_device *netdev, /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead - * read EICS which is a shadow but doesn't clear EICR */ + * read EICS which is a shadow but doesn't clear EICR + */ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); @@ -390,21 +394,21 @@ clear_reset: static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) { - switch (stringset) { - case ETH_SS_TEST: - return IXGBE_TEST_LEN; - case ETH_SS_STATS: - return IXGBE_GLOBAL_STATS_LEN; - default: - return -EINVAL; - } + switch (stringset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_GLOBAL_STATS_LEN; + default: + return -EINVAL; + } } static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - char *base = (char *) adapter; + char *base = (char *)adapter; int i; #ifdef BP_EXTENDED_STATS u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, @@ -594,8 +598,7 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) } test = reg_test_vf; - /* - * Perform the register test, looping through the test table + /* Perform the register test, looping through the test table * until we either fail or reach the null entry. 
*/ while (test->reg) { @@ -617,8 +620,8 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) break; case WRITE_NO_TEST: ixgbe_write_reg(&adapter->hw, - test->reg + (i * 0x40), - test->write); + test->reg + (i * 0x40), + test->write); break; case TABLE32_TEST: b = reg_pattern_test(adapter, data, @@ -670,7 +673,8 @@ static void ixgbevf_diag_test(struct net_device *netdev, hw_dbg(&adapter->hw, "offline testing starting\n"); /* Link test performed before hardware reset so autoneg doesn't - * interfere with test result */ + * interfere with test result + */ if (ixgbevf_link_test(adapter, &data[1])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -724,7 +728,7 @@ static int ixgbevf_get_coalesce(struct net_device *netdev, else ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; - /* if in mixed tx/rx queues per vector mode, report only rx settings */ + /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) return 0; @@ -745,12 +749,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, int num_vectors, i; u16 tx_itr_param, rx_itr_param; - /* don't accept tx specific changes if we've got mixed RxTx vectors */ - if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count - && ec->tx_coalesce_usecs) + /* don't accept Tx specific changes if we've got mixed RxTx vectors */ + if (adapter->q_vector[0]->tx.count && + adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs) return -EINVAL; - if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) return -EINVAL; @@ -765,7 +768,6 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, else rx_itr_param = adapter->rx_itr_setting; - if (ec->tx_coalesce_usecs > 1) adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; else @@ -781,10 +783,10 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, for (i = 0; i < num_vectors; i++) { q_vector = adapter->q_vector[i]; if (q_vector->tx.count && !q_vector->rx.count) - /* tx only */ + /* Tx only */ q_vector->itr = tx_itr_param; else - /* rx only or mixed */ + /* Rx only or mixed */ q_vector->itr = rx_itr_param; ixgbevf_write_eitr(q_vector); } @@ -792,23 +794,92 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, return 0; } +static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct ixgbevf_adapter *adapter = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adapter->num_rx_queues; + return 0; + default: + hw_dbg(&adapter->hw, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* We support this operation only for 82599 and x540 at the moment */ + if (adapter->hw.mac.type < ixgbe_mac_X550_vf) + return IXGBEVF_82599_RETA_SIZE; + + return 0; +} + +static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + /* We support this operation only for 82599 and x540 at the moment */ + if (adapter->hw.mac.type < ixgbe_mac_X550_vf) + return IXGBEVF_RSS_HASH_KEY_SIZE; + + return 0; +} + +static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* If neither indirection table nor 
hash key was requested - just + * return a success avoiding taking any locks. + */ + if (!indir && !key) + return 0; + + spin_lock_bh(&adapter->mbx_lock); + if (indir) + err = ixgbevf_get_reta_locked(&adapter->hw, indir, + adapter->num_rx_queues); + + if (!err && key) + err = ixgbevf_get_rss_key_locked(&adapter->hw, key); + + spin_unlock_bh(&adapter->mbx_lock); + + return err; +} + static const struct ethtool_ops ixgbevf_ethtool_ops = { - .get_settings = ixgbevf_get_settings, - .get_drvinfo = ixgbevf_get_drvinfo, - .get_regs_len = ixgbevf_get_regs_len, - .get_regs = ixgbevf_get_regs, - .nway_reset = ixgbevf_nway_reset, - .get_link = ethtool_op_get_link, - .get_ringparam = ixgbevf_get_ringparam, - .set_ringparam = ixgbevf_set_ringparam, - .get_msglevel = ixgbevf_get_msglevel, - .set_msglevel = ixgbevf_set_msglevel, - .self_test = ixgbevf_diag_test, - .get_sset_count = ixgbevf_get_sset_count, - .get_strings = ixgbevf_get_strings, - .get_ethtool_stats = ixgbevf_get_ethtool_stats, - .get_coalesce = ixgbevf_get_coalesce, - .set_coalesce = ixgbevf_set_coalesce, + .get_settings = ixgbevf_get_settings, + .get_drvinfo = ixgbevf_get_drvinfo, + .get_regs_len = ixgbevf_get_regs_len, + .get_regs = ixgbevf_get_regs, + .nway_reset = ixgbevf_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = ixgbevf_get_ringparam, + .set_ringparam = ixgbevf_set_ringparam, + .get_msglevel = ixgbevf_get_msglevel, + .set_msglevel = ixgbevf_set_msglevel, + .self_test = ixgbevf_diag_test, + .get_sset_count = ixgbevf_get_sset_count, + .get_strings = ixgbevf_get_strings, + .get_ethtool_stats = ixgbevf_get_ethtool_stats, + .get_coalesce = ixgbevf_get_coalesce, + .set_coalesce = ixgbevf_set_coalesce, + .get_rxnfc = ixgbevf_get_rxnfc, + .get_rxfh_indir_size = ixgbevf_get_rxfh_indir_size, + .get_rxfh_key_size = ixgbevf_get_rxfh_key_size, + .get_rxfh = ixgbevf_get_rxfh, }; void ixgbevf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 3a9b356dff01..775d08900949 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
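With the get_rxnfc/get_rxfh_indir_size/get_rxfh_key_size/get_rxfh hooks wired into ixgbevf_ethtool_ops above, userspace can query the VF's Rx flow steering and RSS state (for example with "ethtool -x <iface>"). On 82599/X540 VFs the indirection table has IXGBEVF_82599_RETA_SIZE (128) entries and the hash key is IXGBEVF_RSS_HASH_KEY_SIZE (40) bytes, per the defines added to ixgbevf.h below. A simplified software model of how such a table steers packets, purely for illustration (the real lookup happens in hardware):

/* Model only: the NIC indexes the RETA with the low-order bits of the
 * Toeplitz hash computed over the packet's flow tuple; the entry read
 * back selects the Rx queue.
 */
static u32 rss_pick_rx_queue(const u32 *reta, u32 toeplitz_hash)
{
	return reta[toeplitz_hash & (IXGBEVF_82599_RETA_SIZE - 1)];
}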
@@ -51,7 +50,8 @@ #define DESC_NEEDED (MAX_SKB_FRAGS + 4) /* wrapper around a pointer to a socket buffer, - * so a DMA handle can be stored along with the buffer */ + * so a DMA handle can be stored along with the buffer + */ struct ixgbevf_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch; unsigned long time_stamp; @@ -132,9 +132,10 @@ struct ixgbevf_ring { u8 __iomem *tail; struct sk_buff *skb; - u16 reg_idx; /* holds the special value that gets the hardware register - * offset associated with this ring, which is different - * for DCB and RSS modes */ + /* holds the special value that gets the hardware register offset + * associated with this ring, which is different for DCB and RSS modes + */ + u16 reg_idx; int queue_index; /* needed for multiqueue queue management */ }; @@ -143,21 +144,23 @@ struct ixgbevf_ring { #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES -#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_MAX_RSS_QUEUES 2 +#define IXGBEVF_82599_RETA_SIZE 128 +#define IXGBEVF_RSS_HASH_KEY_SIZE 40 -#define IXGBEVF_DEFAULT_TXD 1024 -#define IXGBEVF_DEFAULT_RXD 512 -#define IXGBEVF_MAX_TXD 4096 -#define IXGBEVF_MIN_TXD 64 -#define IXGBEVF_MAX_RXD 4096 -#define IXGBEVF_MIN_RXD 64 +#define IXGBEVF_DEFAULT_TXD 1024 +#define IXGBEVF_DEFAULT_RXD 512 +#define IXGBEVF_MAX_TXD 4096 +#define IXGBEVF_MIN_TXD 64 +#define IXGBEVF_MAX_RXD 4096 +#define IXGBEVF_MIN_RXD 64 /* Supported Rx Buffer Sizes */ -#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2048 2048 +#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ +#define IXGBEVF_RXBUFFER_2048 2048 -#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 -#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 +#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 +#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) @@ -186,10 +189,11 @@ struct ixgbevf_ring_container { */ struct ixgbevf_q_vector { struct ixgbevf_adapter *adapter; - u16 v_idx; /* index of q_vector within array, also used for - * finding the bit in EICR and friends that - * represents the vector for this ring */ - u16 itr; /* Interrupt throttle rate written to EITR */ + /* index of q_vector within array, also used for finding the bit in + * EICR and friends that represents the vector for this ring + */ + u16 v_idx; + u16 itr; /* Interrupt throttle rate written to EITR */ struct napi_struct napi; struct ixgbevf_ring_container rx, tx; char name[IFNAMSIZ + 9]; @@ -199,19 +203,21 @@ struct ixgbevf_q_vector { #define IXGBEVF_QV_STATE_NAPI 1 /* NAPI owns this QV */ #define IXGBEVF_QV_STATE_POLL 2 /* poll owns this QV */ #define IXGBEVF_QV_STATE_DISABLED 4 /* QV is disabled */ -#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) -#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) +#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL) +#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) #define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ #define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ -#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD) -#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \ + IXGBEVF_QV_STATE_POLL_YIELD) +#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \ + IXGBEVF_QV_STATE_POLL_YIELD) spinlock_t lock; 
#endif /* CONFIG_NET_RX_BUSY_POLL */ }; + #ifdef CONFIG_NET_RX_BUSY_POLL static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) { - spin_lock_init(&q_vector->lock); q_vector->state = IXGBEVF_QV_STATE_IDLE; } @@ -220,6 +226,7 @@ static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if (q_vector->state & IXGBEVF_QV_LOCKED) { WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); @@ -240,6 +247,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) { int rc = false; + spin_lock_bh(&q_vector->lock); WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_NAPI_YIELD)); @@ -256,6 +264,7 @@ static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if ((q_vector->state & IXGBEVF_QV_LOCKED)) { q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; @@ -275,6 +284,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) { int rc = false; + spin_lock_bh(&q_vector->lock); WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); @@ -297,6 +307,7 @@ static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) { int rc = true; + spin_lock_bh(&q_vector->lock); if (q_vector->state & IXGBEVF_QV_OWNED) rc = false; @@ -307,8 +318,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) #endif /* CONFIG_NET_RX_BUSY_POLL */ -/* - * microsecond values for various ITR rates shifted by 2 to fit itr register +/* microsecond values for various ITR rates shifted by 2 to fit itr register * with the first 3 bits reserved 0 */ #define IXGBE_MIN_RSC_ITR 24 @@ -345,22 +355,22 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value) writel(value, ring->tail); } -#define IXGBEVF_RX_DESC(R, i) \ +#define IXGBEVF_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBEVF_TX_DESC(R, i) \ +#define IXGBEVF_TX_DESC(R, i) \ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBEVF_TX_CTXTDESC(R, i) \ +#define IXGBEVF_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) #define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ -#define OTHER_VECTOR 1 -#define NON_Q_VECTORS (OTHER_VECTOR) +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) -#define MAX_MSIX_Q_VECTORS 2 +#define MAX_MSIX_Q_VECTORS 2 -#define MIN_MSIX_Q_VECTORS 1 -#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +#define MIN_MSIX_Q_VECTORS 1 +#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) /* board specific private data structure */ struct ixgbevf_adapter { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 4186981e562d..a16d267fbce4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. 
+ Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -25,7 +24,6 @@ *******************************************************************************/ - /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code ******************************************************************************/ @@ -170,12 +168,13 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to * @msix_vector: the vector to map to the corresponding queue - */ + **/ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, u8 queue, u8 msix_vector) { u32 ivar, index; struct ixgbe_hw *hw = &adapter->hw; + if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -184,7 +183,7 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, ivar |= msix_vector; IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); } else { - /* tx or rx causes */ + /* Tx or Rx causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); @@ -458,11 +457,12 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } -/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum +/** + * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified - */ + **/ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -492,7 +492,8 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, skb->ip_summed = CHECKSUM_UNNECESSARY; } -/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor +/** + * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated @@ -500,7 +501,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, * This function checks the ring, descriptor, and packet information in * order to populate the checksum, VLAN, protocol, and other fields within * the skb.
- */ + **/ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -647,7 +648,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, } } -/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail +/** + * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail * @rx_ring: rx descriptor ring packet is being transacted on * @skb: pointer to current skb being adjusted * @@ -657,7 +659,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, * that allow for significant optimizations versus the standard function. * As a result we can do things like drop a frag and maintain an accurate * truesize for the skb. - */ + **/ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, struct sk_buff *skb) { @@ -686,7 +688,8 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, skb->tail += pull_len; } -/* ixgbevf_cleanup_headers - Correct corrupted or empty headers +/** + * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being fixed @@ -702,7 +705,7 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, * it is large enough to qualify as a valid Ethernet frame. * * Returns true if an error was encountered and skb was freed. - */ + **/ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -729,12 +732,13 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, return false; } -/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring +/** + * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * * Synchronizes page for reuse by the adapter - */ + **/ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *old_buff) { @@ -764,7 +768,8 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; } -/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff +/** + * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff * @rx_ring: rx descriptor ring to transact packets on * @rx_buffer: buffer containing page to add * @rx_desc: descriptor containing length of buffer written by hardware @@ -777,7 +782,7 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) * * The function will then update the page offset if necessary and return * true if the buffer can be reused by the adapter. - */ + **/ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, struct ixgbevf_rx_buffer *rx_buffer, union ixgbe_adv_rx_desc *rx_desc, @@ -958,7 +963,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, * source pruning. 
*/ if ((skb->pkt_type == PACKET_BROADCAST || - skb->pkt_type == PACKET_MULTICAST) && + skb->pkt_type == PACKET_MULTICAST) && ether_addr_equal(rx_ring->netdev->dev_addr, eth_hdr(skb)->h_source)) { dev_kfree_skb_irq(skb); @@ -1016,7 +1021,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) #endif /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling */ + * the budget to go below 1 because we'll exit polling + */ if (q_vector->rx.count > 1) per_ring_budget = max(budget/q_vector->rx.count, 1); else @@ -1049,7 +1055,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) /** * ixgbevf_write_eitr - write VTEITR register in hardware specific way * @q_vector: structure containing interrupt and ring information - */ + **/ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) { struct ixgbevf_adapter *adapter = q_vector->adapter; @@ -1057,8 +1063,7 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) int v_idx = q_vector->v_idx; u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; - /* - * set the WDIS bit to not clear the timer bits and cause an + /* set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt */ itr_reg |= IXGBE_EITR_CNT_WDIS; @@ -1115,12 +1120,12 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; adapter->eims_enable_mask = 0; - /* - * Populate the IVAR table and set the ITR values to the + /* Populate the IVAR table and set the ITR values to the * corresponding register. */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { struct ixgbevf_ring *ring; + q_vector = adapter->q_vector[v_idx]; ixgbevf_for_each_ring(ring, q_vector->rx) @@ -1130,13 +1135,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); if (q_vector->tx.ring && !q_vector->rx.ring) { - /* tx only vector */ + /* Tx only vector */ if (adapter->tx_itr_setting == 1) q_vector->itr = IXGBE_10K_ITR; else q_vector->itr = adapter->tx_itr_setting; } else { - /* rx or rx/tx vector */ + /* Rx or Rx/Tx vector */ if (adapter->rx_itr_setting == 1) q_vector->itr = IXGBE_20K_ITR; else @@ -1167,13 +1172,13 @@ enum latency_range { * @q_vector: structure containing interrupt and ring information * @ring_container: structure containing ring performance data * - * Stores a new ITR value based on packets and byte - * counts during the last interrupt. The advantage of per interrupt - * computation is faster updates and more accurate ITR for the current - * traffic pattern. Constants in this function were computed - * based on theoretical maximum wire speed and thresholds were set based - * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
**/ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring_container *ring_container) @@ -1187,7 +1192,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, if (packets == 0) return; - /* simple throttlerate management + /* simple throttle rate management * 0-20MB/s lowest (100000 ints/s) * 20-100MB/s low (20000 ints/s) * 100-1249MB/s bulk (8000 ints/s) @@ -1330,8 +1335,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - /* - * The ideal configuration... + /* The ideal configuration... * We have enough vectors to map one per queue. */ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { @@ -1343,8 +1347,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) goto out; } - /* - * If we don't have enough vectors for a 1-to-1 + /* If we don't have enough vectors for a 1-to-1 * mapping, we'll have to group them so there are * multiple queues per vector. */ @@ -1406,8 +1409,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) q_vector->name, q_vector); if (err) { hw_dbg(&adapter->hw, - "request_irq failed for MSIX interrupt " - "Error: %d\n", err); + "request_irq failed for MSIX interrupt Error: %d\n", + err); goto free_queue_irqs; } } @@ -1415,8 +1418,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, &ixgbevf_msix_other, 0, netdev->name, adapter); if (err) { - hw_dbg(&adapter->hw, - "request_irq for msix_other failed: %d\n", err); + hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", + err); goto free_queue_irqs; } @@ -1448,6 +1451,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) for (i = 0; i < q_vectors; i++) { struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; + q_vector->rx.ring = NULL; q_vector->tx.ring = NULL; q_vector->rx.count = 0; @@ -1469,8 +1473,7 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) err = ixgbevf_request_msix_irqs(adapter); if (err) - hw_dbg(&adapter->hw, - "request_irq failed, Error %d\n", err); + hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); return err; } @@ -1659,7 +1662,7 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, /* write value back with RXDCTL.ENABLE bit cleared */ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); - /* the hardware may take up to 100us to really disable the rx queue */ + /* the hardware may take up to 100us to really disable the Rx queue */ do { udelay(10); rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); @@ -1786,7 +1789,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and - * the Base and Length of the Rx Descriptor Ring */ + * the Base and Length of the Rx Descriptor Ring + */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); } @@ -1858,14 +1862,14 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; + netdev_for_each_uc_addr(ha, netdev) { hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); udelay(200); } } else { - /* - * If the list is empty then send message to PF driver to - * clear all macvlans on this VF. 
+ /* If the list is empty then send message to PF driver to + * clear all MAC VLANs on this VF. */ hw->mac.ops.set_uc_addr(hw, 0, NULL); } @@ -2026,7 +2030,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_11, + int api[] = { ixgbe_mbox_api_12, + ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; int err = 0, idx = 0; @@ -2184,7 +2189,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) return; /* do nothing if already down */ - /* disable all enabled rx queues */ + /* disable all enabled Rx queues */ for (i = 0; i < adapter->num_rx_queues; i++) ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); @@ -2328,6 +2333,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) switch (hw->api_version) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: adapter->num_rx_queues = rss; adapter->num_tx_queues = rss; default: @@ -2406,8 +2412,7 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) int err = 0; int vector, v_budget; - /* - * It's easy to be greedy for MSI-X vectors, but it really + /* It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors * than CPU's. So let's be conservative and only ask for * (roughly) the same number of vectors as there are CPU's. @@ -2418,7 +2423,8 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) v_budget += NON_Q_VECTORS; /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. */ + * mean we disable MSI-X capabilities of the adapter. + */ adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); if (!adapter->msix_entries) { @@ -2544,8 +2550,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) err = ixgbevf_alloc_q_vectors(adapter); if (err) { - hw_dbg(&adapter->hw, "Unable to allocate memory for queue " - "vectors\n"); + hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); goto err_alloc_q_vectors; } @@ -2555,8 +2560,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) goto err_alloc_queues; } - hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " - "Tx Queue count = %u\n", + hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); @@ -2600,7 +2604,6 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) /** * ixgbevf_sw_init - Initialize general software structures - * (struct ixgbevf_adapter) * @adapter: board private structure to initialize * * ixgbevf_sw_init initializes the Adapter private data structure. 
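The ixgbevf_negotiate_api hunk above prepends ixgbe_mbox_api_12 to the candidate list, so the VF now offers the newest mailbox API to the PF first and steps down one version at a time. The negotiation loop itself falls outside the hunk; a sketch of the pattern, assuming the ixgbevf_negotiate_api_version() helper from vf.c:

/* walk the api[] list from newest to oldest until the PF accepts one;
 * the list is terminated by ixgbe_mbox_api_unknown
 */
static void negotiate_newest_first(struct ixgbe_hw *hw, const int *api)
{
	int idx = 0;

	while (api[idx] != ixgbe_mbox_api_unknown) {
		if (!ixgbevf_negotiate_api_version(hw, api[idx]))
			break;	/* PF acknowledged this version */
		idx++;
	}
}

Ordering the list newest-first means an old PF driver silently degrades the VF to an API it understands instead of failing outright.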
@@ -2615,7 +2618,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) int err; /* PCI config space info */ - hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; hw->revision_id = pdev->revision; @@ -2686,8 +2688,8 @@ out: { \ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ - u64 current_counter = (current_counter_msb << 32) | \ - current_counter_lsb; \ + u64 current_counter = (current_counter_msb << 32) | \ + current_counter_lsb; \ if (current_counter < last_counter) \ counter += 0x1000000000LL; \ last_counter = current_counter; \ @@ -2758,14 +2760,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) ixgbevf_reinit_locked(adapter); } -/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts - * @adapter - pointer to the device adapter structure +/** + * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. Secondly it sets the * bits needed to check for TX hangs. As a result we should immediately * determine if a hang has occurred. - */ + **/ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -2783,7 +2786,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) set_check_for_tx_hang(adapter->tx_ring[i]); } - /* get one bit for every active tx/rx interrupt vector */ + /* get one bit for every active Tx/Rx interrupt vector */ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { struct ixgbevf_q_vector *qv = adapter->q_vector[i]; @@ -2797,7 +2800,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_update_link - update the link status - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) { @@ -2825,7 +2828,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_link_is_up - update netif_carrier status and * print link up message - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) { @@ -2850,7 +2853,7 @@ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) /** * ixgbevf_watchdog_link_is_down - update netif_carrier status and * print link down message - * @adapter - pointer to the adapter structure + * @adapter: pointer to the adapter structure **/ static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) { @@ -2956,7 +2959,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) /** * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) - * @tx_ring: tx descriptor ring (for a specific queue) to setup + * @tx_ring: Tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ @@ -2983,8 +2986,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; - hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " - "descriptor ring\n"); + hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); return 
-ENOMEM; } @@ -3006,8 +3008,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, - "Allocation for Tx Queue %u failed\n", i); + hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); break; } @@ -3016,7 +3017,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) /** * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) - * @rx_ring: rx descriptor ring (for a specific queue) to setup + * @rx_ring: Rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ @@ -3065,8 +3066,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); if (!err) continue; - hw_dbg(&adapter->hw, - "Allocation for Rx Queue %u failed\n", i); + hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); break; } return err; @@ -3136,11 +3136,11 @@ static int ixgbevf_open(struct net_device *netdev) if (hw->adapter_stopped) { ixgbevf_reset(adapter); /* if adapter is still stopped then PF isn't up and - * the vf can't start. */ + * the VF can't start. + */ if (hw->adapter_stopped) { err = IXGBE_ERR_MBX; - pr_err("Unable to start - perhaps the PF Driver isn't " - "up yet\n"); + pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); goto err_setup_reset; } } @@ -3163,8 +3163,7 @@ static int ixgbevf_open(struct net_device *netdev) ixgbevf_configure(adapter); - /* - * Map the Tx/Rx rings to the vectors we were allotted. + /* Map the Tx/Rx rings to the vectors we were allotted. * if request_irq will be called in this function map_rings * must be called *before* up_complete */ @@ -3288,6 +3287,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (first->protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, @@ -3313,7 +3313,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, *hdr_len += l4len; *hdr_len = skb_transport_offset(skb) + l4len; - /* update gso size and bytecount with header size */ + /* update GSO size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; @@ -3343,6 +3343,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_hdr = 0; + switch (first->protocol) { case htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); @@ -3356,8 +3357,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); + "partial checksum but proto=%x!\n", + first->protocol); } break; } @@ -3380,8 +3381,8 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum but l4 proto=%x!\n", + l4_hdr); } break; } @@ -3405,7 +3406,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); - /* set HW vlan bit if vlan is present */ + /* set HW VLAN bit if VLAN is present */ if (tx_flags & IXGBE_TX_FLAGS_VLAN) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); @@ -3572,11 +3573,13 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) netif_stop_subqueue(tx_ring->netdev, 
tx_ring->queue_index); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ + * but since that doesn't exist yet, just open code it. + */ smp_mb(); /* We need to check again in case another CPU has just - * made room available. */ + * made room available. + */ if (likely(ixgbevf_desc_unused(tx_ring) < size)) return -EBUSY; @@ -3615,8 +3618,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_ring = adapter->tx_ring[skb->queue_mapping]; - /* - * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, @@ -3712,6 +3714,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; break; default: @@ -3794,8 +3797,7 @@ static int ixgbevf_resume(struct pci_dev *pdev) u32 err; pci_restore_state(pdev); - /* - * pci_restore_state clears dev->state_saved so call + /* pci_restore_state clears dev->state_saved so call * pci_save_state to restore it. */ pci_save_state(pdev); @@ -3930,8 +3932,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "No usable DMA " - "configuration, aborting\n"); + dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_dma; } pci_using_dac = 0; @@ -3962,8 +3963,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); - /* - * call save state here in standalone driver because it relies on + /* call save state here in standalone driver because it relies on * adapter struct to exist, and needs to call netdev_priv */ pci_save_state(pdev); @@ -3978,7 +3978,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgbevf_assign_netdev_ops(netdev); - /* Setup hw api */ + /* Setup HW API */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; @@ -3998,11 +3998,11 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM; + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | @@ -4131,7 +4131,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) * * This function is called after a PCI bus error affecting * this device has been detected. - */ + **/ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -4166,7 +4166,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, * * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the ixgbevf_resume routine.
- */ + **/ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4194,7 +4194,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) * This callback is called when the error recovery driver tells us that * its OK to resume normal operation. Implementation resembles the * second-half of the ixgbevf_resume routine. - */ + **/ static void ixgbevf_io_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4214,17 +4214,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = { }; static struct pci_driver ixgbevf_driver = { - .name = ixgbevf_driver_name, - .id_table = ixgbevf_pci_tbl, - .probe = ixgbevf_probe, - .remove = ixgbevf_remove, + .name = ixgbevf_driver_name, + .id_table = ixgbevf_pci_tbl, + .probe = ixgbevf_probe, + .remove = ixgbevf_remove, #ifdef CONFIG_PM /* Power Management Hooks */ - .suspend = ixgbevf_suspend, - .resume = ixgbevf_resume, + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, #endif - .shutdown = ixgbevf_shutdown, - .err_handler = &ixgbevf_err_handler + .shutdown = ixgbevf_shutdown, + .err_handler = &ixgbevf_err_handler }; /** @@ -4236,6 +4236,7 @@ static struct pci_driver ixgbevf_driver = { static int __init ixgbevf_init_module(void) { int ret; + pr_info("%s - version %s\n", ixgbevf_driver_string, ixgbevf_driver_version); @@ -4266,6 +4267,7 @@ static void __exit ixgbevf_exit_module(void) char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) { struct ixgbevf_adapter *adapter = hw->back; + return adapter->netdev->name; } diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index d5028ddf4b31..dc68fea4894b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
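The subtlest piece of ixgbevf_main.c touched above is __ixgbevf_maybe_stop_tx(), where the comment reflow sits around an open-coded smp_mb(): that barrier is what makes the stop/wake handshake with the Tx cleanup path safe. The following standalone C11 model is a minimal sketch of the handshake, with invented names and sizes rather than the driver's (the kernel equivalents are netif_stop_subqueue(), ixgbevf_desc_unused() and the wake-threshold check in the cleanup routine):

/* model of the stop/wake handshake guarded by smp_mb();
 * build with: cc -std=c11. All names here are illustrative. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int free_desc;	/* models ixgbevf_desc_unused() */
static atomic_bool stopped;	/* models the stopped subqueue state */

static bool xmit_maybe_stop(int needed)
{
	if (atomic_load(&free_desc) >= needed)
		return true;			/* enough room, transmit */

	atomic_store(&stopped, true);		/* netif_stop_subqueue() */
	/* the open-coded smp_mb(): order the store above against the
	 * re-read below so a concurrent refill cannot be missed */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&free_desc) < needed)
		return false;			/* still full: -EBUSY */

	atomic_store(&stopped, false);		/* room appeared, restart */
	return true;
}

static void clean_irq(int cleaned, int wake_threshold)
{
	atomic_fetch_add(&free_desc, cleaned);	/* descriptors reclaimed */
	/* mirror-image barrier: publish the refill before re-reading
	 * the stopped flag */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&stopped) &&
	    atomic_load(&free_desc) >= wake_threshold)
		atomic_store(&stopped, false);	/* netif_wake_subqueue() */
}

int main(void)
{
	atomic_store(&free_desc, 1);
	printf("xmit needing 4: %s\n", xmit_maybe_stop(4) ? "ok" : "busy");
	clean_irq(8, 4);
	printf("stopped: %s\n", atomic_load(&stopped) ? "yes" : "no");
	return 0;
}

Drop either fence and the store/load pairs may be reordered, so the transmit path can miss the cleaner's refill while the cleaner misses the freshly set stopped flag, leaving the queue stalled with free descriptors available.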
@@ -52,10 +51,10 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) } /** - * ixgbevf_poll_for_ack - Wait for message acknowledgement + * ixgbevf_poll_for_ack - Wait for message acknowledgment * @hw: pointer to the HW structure * - * returns 0 if it successfully received a message acknowledgement + * returns 0 if it successfully received a message acknowledgment **/ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) { @@ -213,7 +212,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw) s32 ret_val = IXGBE_ERR_MBX; if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | - IXGBE_VFMAILBOX_RSTI))) { + IXGBE_VFMAILBOX_RSTI))) { ret_val = 0; hw->mbx.stats.rsts++; } @@ -234,7 +233,7 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) /* Take ownership of the buffer */ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); - /* reserve mailbox for vf use */ + /* reserve mailbox for VF use */ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) ret_val = 0; @@ -254,8 +253,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) s32 ret_val; u16 i; - - /* lock the mailbox to prevent pf/vf race condition */ + /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_write; @@ -279,7 +277,7 @@ out_no_write: } /** - * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf + * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer @@ -291,7 +289,7 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) s32 ret_val = 0; u16 i; - /* lock the mailbox to prevent pf/vf race condition */ + /* lock the mailbox to prevent PF/VF race condition */ ret_val = ixgbevf_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_read; @@ -311,17 +309,18 @@ out_no_read: } /** - * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox + * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox * @hw: pointer to the HW structure * - * Initializes the hw->mbx struct to correct values for vf mailbox + * Initializes the hw->mbx struct to correct values for VF mailbox */ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout - * value to begin communications */ + * value to begin communications + */ mbx->timeout = 0; mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; @@ -337,13 +336,13 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) } const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { - .init_params = ixgbevf_init_mbx_params_vf, - .read = ixgbevf_read_mbx_vf, - .write = ixgbevf_write_mbx_vf, - .read_posted = ixgbevf_read_posted_mbx, - .write_posted = ixgbevf_write_posted_mbx, - .check_for_msg = ixgbevf_check_for_msg_vf, - .check_for_ack = ixgbevf_check_for_ack_vf, - .check_for_rst = ixgbevf_check_for_rst_vf, + .init_params = ixgbevf_init_mbx_params_vf, + .read = ixgbevf_read_mbx_vf, + .write = ixgbevf_write_mbx_vf, + .read_posted = ixgbevf_read_posted_mbx, + .write_posted = ixgbevf_write_posted_mbx, + .check_for_msg = ixgbevf_check_for_msg_vf, + .check_for_ack = ixgbevf_check_for_ack_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 0bc30058ff82..82f44e06e5fc 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ 
b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -30,71 +29,70 @@ #include "vf.h" -#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ -#define IXGBE_ERR_MBX -100 +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 -#define IXGBE_VFMAILBOX 0x002FC -#define IXGBE_VFMBMEM 0x00200 +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 /* Define mailbox register bits */ -#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ -#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ -#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ -#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ -#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ -#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ -#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) -#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) +#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x))) +#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn))) -#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ -#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ -#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ -#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ #define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ -#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFREQ_VF1 
0x00000001 /* bit for VF 1 message */ #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ -#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ - +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the * PF. The reverse is true if it is IXGBE_PF_*. * Message ACK's are the value or'd with 0xF0000000 */ -#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with - * this are the ACK */ -#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with - * this are the NACK */ -#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still - * clear to send requests */ -#define IXGBE_VT_MSGINFO_SHIFT 16 +/* Messages below or'd with this are the ACK */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with this are the NACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 +#define IXGBE_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for exra info for certain messages */ -#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) /* definitions to support mailbox API version negotiation */ -/* - * each element denotes a version of the API; existing numbers may not +/* each element denotes a version of the API; existing numbers may not * change; any additions must go at the end */ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; /* mailbox API, legacy requests */ -#define IXGBE_VF_RESET 0x01 /* VF requests reset */ -#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ -#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ -#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ /* mailbox API, version 1.0 VF requests */ #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ @@ -105,20 +103,24 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */ /* GET_QUEUES return data indices within the mailbox */ -#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ -#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ -#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ -#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ +#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */ +#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS hash key */ /* length of permanent address message returned from PF */ -#define IXGBE_VF_PERMADDR_MSG_LEN 4 +#define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in 
permanent address message with the current multicast type */ -#define IXGBE_VF_MC_TYPE_WORD 3 +#define IXGBE_VF_MC_TYPE_WORD 3 -#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ -#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ -#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ /* forward declaration of the HW struct */ struct ixgbe_hw; diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 3e712fd6e695..2764fd16261f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -28,58 +27,58 @@ #ifndef _IXGBEVF_REGS_H_ #define _IXGBEVF_REGS_H_ -#define IXGBE_VFCTRL 0x00000 -#define IXGBE_VFSTATUS 0x00008 -#define IXGBE_VFLINKS 0x00010 -#define IXGBE_VFFRTIMER 0x00048 -#define IXGBE_VFRXMEMWRAP 0x03190 -#define IXGBE_VTEICR 0x00100 -#define IXGBE_VTEICS 0x00104 -#define IXGBE_VTEIMS 0x00108 -#define IXGBE_VTEIMC 0x0010C -#define IXGBE_VTEIAC 0x00110 -#define IXGBE_VTEIAM 0x00114 -#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) -#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) -#define IXGBE_VTIVAR_MISC 0x00140 -#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) -#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) -#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) -#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) -#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) -#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) -#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) -#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) -#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) -#define IXGBE_VFPSRTYPE 0x00300 -#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) -#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) -#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) -#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) -#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) -#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) -#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) -#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) -#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) -#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) -#define IXGBE_VFGPRC 0x0101C -#define IXGBE_VFGPTC 0x0201C -#define IXGBE_VFGORC_LSB 0x01020 -#define IXGBE_VFGORC_MSB 0x01024 -#define IXGBE_VFGOTC_LSB 0x02020 -#define IXGBE_VFGOTC_MSB 0x02024 -#define IXGBE_VFMPRC 0x01034 -#define IXGBE_VFMRQC 0x3000 -#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) -#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 
0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * (x))) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x))) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x))) +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x))) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x))) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x))) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x))) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x))) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x))) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x))) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x))) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x))) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x))) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x))) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x))) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x))) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x))) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x))) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x))) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x))) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x))) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 +#define IXGBE_VFMRQC 0x3000 +#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4)) +#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4)) /* VFMRQC bits */ -#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ -#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 -#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_VFMRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP 0x00200000 -#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) +#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) #endif /* _IXGBEVF_REGS_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index cdb53be7d995..d1339b050627 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2012 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". 
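Everything vf.c does below rides on the message layout mbx.h defines above: one 32-bit word carrying the command code in its low bits, optional extra info in bits 23:16 (IXGBE_VT_MSGINFO_SHIFT/MASK), and the PF's verdict or'd into the top bits as ACK, NACK or CTS. A self-contained sketch of that encoding follows; the constants are copied from the header above, while the helper names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* values as defined in mbx.h above */
#define IXGBE_VT_MSGTYPE_ACK	0x80000000u
#define IXGBE_VT_MSGTYPE_NACK	0x40000000u
#define IXGBE_VT_MSGTYPE_CTS	0x20000000u
#define IXGBE_VT_MSGINFO_SHIFT	16
#define IXGBE_VT_MSGINFO_MASK	(0xFFu << IXGBE_VT_MSGINFO_SHIFT)
#define IXGBE_VF_GET_RETA	0x0a

/* compose a request: command code plus extra info in bits 23:16,
 * the way ixgbevf_set_uc_addr_vf() folds its index in */
static uint32_t vf_request(uint32_t cmd, uint32_t info)
{
	return cmd | ((info << IXGBE_VT_MSGINFO_SHIFT) & IXGBE_VT_MSGINFO_MASK);
}

/* classify a PF reply the way the vf.c helpers do: strip CTS, then
 * match the echoed command against ACK or NACK */
static int vf_check_reply(uint32_t reply, uint32_t cmd)
{
	reply &= ~IXGBE_VT_MSGTYPE_CTS;
	if (reply == (cmd | IXGBE_VT_MSGTYPE_ACK))
		return 0;	/* PF accepted the request */
	if (reply == (cmd | IXGBE_VT_MSGTYPE_NACK))
		return -1;	/* PF refused (-EPERM in the driver) */
	return -2;		/* anything else is a mailbox error */
}

int main(void)
{
	uint32_t req = vf_request(IXGBE_VF_GET_RETA, 0);
	/* a PF that accepts may leave CTS set alongside the ACK */
	uint32_t reply = IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK |
			 IXGBE_VT_MSGTYPE_CTS;

	printf("request=0x%08x status=%d\n", (unsigned)req,
	       vf_check_reply(reply, IXGBE_VF_GET_RETA));
	return 0;
}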
@@ -65,7 +64,7 @@ static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw) * ixgbevf_reset_hw_vf - Performs hardware reset * @hw: pointer to hardware structure * - * Resets the hardware by reseting the transmit and receive units, masks and + * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts. **/ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) @@ -102,9 +101,10 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) mdelay(10); - /* set our "perm_addr" based on info provided by PF */ - /* also set up the mc_filter_type which is piggy backed - * on the mac address in word 3 */ + /* set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggy backed + * on the mac address in word 3 + */ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); if (ret_val) return ret_val; @@ -117,7 +117,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_INVALID_MAC_ADDR; - memcpy(hw->mac.perm_addr, addr, ETH_ALEN); + ether_addr_copy(hw->mac.perm_addr, addr); hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return 0; @@ -138,8 +138,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) u32 reg_val; u16 i; - /* - * Set the adapter_stopped flag so other driver functions stop touching + /* Set the adapter_stopped flag so other driver functions stop touching * the hardware */ hw->adapter_stopped = true; @@ -182,7 +181,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) * * Extracts the 12 bits, from a multicast address, to determine which * bit-vector to set in the multicast table. The hardware uses 12 bits, from - * incoming rx multicast addresses, to determine the bit-vector to check in + * incoming Rx multicast addresses, to determine the bit-vector to check in * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. @@ -220,7 +219,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) **/ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) { - memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); + ether_addr_copy(mac_addr, hw->mac.perm_addr); return 0; } @@ -233,8 +232,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) s32 ret_val; memset(msgbuf, 0, sizeof(msgbuf)); - /* - * If index is one then this is the start of a new list and needs + /* If index is one then this is the start of a new list and needs * indication to the PF so it can do it's own list management. * If it is zero then that tells the PF to just clear all of * this VF's macvlans and there is no new list. @@ -242,7 +240,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= IXGBE_VF_SET_MACVLAN; if (addr) - memcpy(msg_addr, addr, ETH_ALEN); + ether_addr_copy(msg_addr, addr); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) @@ -259,6 +257,129 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) } /** + * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. + * @hw: pointer to the HW structure + * @reta: buffer to fill with RETA contents. + * @num_rx_queues: Number of Rx queues configured for this port + * + * The "reta" buffer should be big enough to contain 32 registers. + * + * Returns: 0 on success,
+ * or -EOPNOTSUPP if the API doesn't support this operation. + */ +int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) +{ + int err, i, j; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + u32 *hw_reta = &msgbuf[1]; + u32 mask = 0; + + /* We have to use a mailbox for 82599 and x540 devices only. + * For these devices RETA has 128 entries. + * Also these VFs support up to 4 RSS queues. Therefore PF will compress + * 16 RETA entries in each DWORD giving 2 bits to each entry. + */ + int dwords = IXGBEVF_82599_RETA_SIZE / 16; + + /* We support the RSS querying for 82599 and x540 devices only. + * Thus return an error if API doesn't support RETA querying or querying + * is not supported for this device type. + */ + if (hw->api_version != ixgbe_mbox_api_12 || + hw->mac.type >= ixgbe_mac_X550_vf) + return -EOPNOTSUPP; + + msgbuf[0] = IXGBE_VF_GET_RETA; + + err = hw->mbx.ops.write_posted(hw, msgbuf, 1); + + if (err) + return err; + + err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1); + + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* If the operation has been refused by a PF return -EPERM */ + if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + /* If we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such. + */ + if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + /* ixgbevf doesn't support more than 2 queues at the moment */ + if (num_rx_queues > 1) + mask = 0x1; + + for (i = 0; i < dwords; i++) + for (j = 0; j < 16; j++) + reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask; + + return 0; +} + +/** + * ixgbevf_get_rss_key_locked - get the RSS Random Key + * @hw: pointer to the HW structure + * @rss_key: buffer to fill with RSS Hash Key contents. + * + * The "rss_key" buffer should be big enough to contain 10 registers. + * + * Returns: 0 on success, + * or -EOPNOTSUPP if the API doesn't support this operation. + */ +int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) +{ + int err; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + + /* We currently support the RSS Random Key retrieval for 82599 and x540 + * devices only. + * + * Thus return an error if API doesn't support RSS Random Key retrieval + * or if the operation is not supported for this device type. + */ + if (hw->api_version != ixgbe_mbox_api_12 || + hw->mac.type >= ixgbe_mac_X550_vf) + return -EOPNOTSUPP; + + msgbuf[0] = IXGBE_VF_GET_RSS_KEY; + err = hw->mbx.ops.write_posted(hw, msgbuf, 1); + + if (err) + return err; + + err = hw->mbx.ops.read_posted(hw, msgbuf, 11); + + if (err) + return err; + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* If the operation has been refused by a PF return -EPERM */ + if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK)) + return -EPERM; + + /* If we didn't get an ACK there must have been + * some sort of mailbox error so we should treat it + * as such.
+ */ + if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK)) + return IXGBE_ERR_MBX; + + memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE); + + return 0; +} + +/** * ixgbevf_set_rar_vf - set device MAC address * @hw: pointer to hardware structure * @index: Receive address register to write @@ -275,7 +396,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; - memcpy(msg_addr, addr, ETH_ALEN); + ether_addr_copy(msg_addr, addr); ret_val = mbx->ops.write_posted(hw, msgbuf, 3); if (!ret_val) @@ -292,7 +413,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, } static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, - u32 *msg, u16 size) + u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 retmsg[IXGBE_VFMAILBOX_SIZE]; @@ -348,7 +469,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** - * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address + * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID * @vind: unused by VF drivers @@ -462,7 +583,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, } /* if the read failed it could just be a mailbox collision, best wait - * until we are called again and don't report an error */ + * until we are called again and don't report an error + */ if (mbx->ops.read(hw, &in_msg, 1)) goto out; @@ -480,7 +602,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, } /* if we passed all the tests above then the link is up and we no - * longer need to check for link */ + * longer need to check for link + */ mac->get_link_status = false; out: @@ -545,6 +668,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, /* do nothing if API doesn't support ixgbevf_get_queues */ switch (hw->api_version) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: break; default: return 0; @@ -561,8 +685,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; - /* - * if we we didn't get an ACK there must have been + /* if we didn't get an ACK there must have been * some sort of mailbox error so we should treat it * as such */ @@ -595,17 +718,17 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, } static const struct ixgbe_mac_operations ixgbevf_mac_ops = { - .init_hw = ixgbevf_init_hw_vf, - .reset_hw = ixgbevf_reset_hw_vf, - .start_hw = ixgbevf_start_hw_vf, - .get_mac_addr = ixgbevf_get_mac_addr_vf, - .stop_adapter = ixgbevf_stop_hw_vf, - .setup_link = ixgbevf_setup_mac_link_vf, - .check_link = ixgbevf_check_mac_link_vf, - .set_rar = ixgbevf_set_rar_vf, - .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, - .set_uc_addr = ixgbevf_set_uc_addr_vf, - .set_vfta = ixgbevf_set_vfta_vf, + .init_hw = ixgbevf_init_hw_vf, + .reset_hw = ixgbevf_reset_hw_vf, + .start_hw = ixgbevf_start_hw_vf, + .get_mac_addr = ixgbevf_get_mac_addr_vf, + .stop_adapter = ixgbevf_stop_hw_vf, + .setup_link = ixgbevf_setup_mac_link_vf, + .check_link = ixgbevf_check_mac_link_vf, + .set_rar = ixgbevf_set_rar_vf, + .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, + .set_uc_addr = ixgbevf_set_uc_addr_vf, + .set_vfta = ixgbevf_set_vfta_vf, }; const struct ixgbevf_info ixgbevf_82599_vf_info = { diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 5b172427f459..d40f036b6df0 100644 ---
a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -13,8 +13,7 @@ more details. You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + this program; if not, see <http://www.gnu.org/licenses/>. The full GNU General Public License is included in this distribution in the file called "COPYING". @@ -169,7 +168,7 @@ struct ixgbevf_hw_stats { }; struct ixgbevf_info { - enum ixgbe_mac_type mac; + enum ixgbe_mac_type mac; const struct ixgbe_mac_operations *mac_ops; }; @@ -185,28 +184,32 @@ static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) return; writel(value, reg_addr + reg); } + #define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); #define IXGBE_READ_REG(h, r) ixgbevf_read_reg(h, r) static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, - u32 offset, u32 value) + u32 offset, u32 value) { ixgbe_write_reg(hw, reg + (offset << 2), value); } + #define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, - u32 offset) + u32 offset) { return ixgbevf_read_reg(hw, reg + (offset << 2)); } + #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc); +int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); +int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key); #endif /* __IXGBE_VF_H__ */
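The two prototypes added to vf.h expose the new RETA and RSS-key queries to the rest of the driver. The 2-bits-per-entry packing that ixgbevf_get_reta_locked() undoes is compact enough to model on its own; the constant names below are local to this sketch (the driver's equivalent of RETA_SIZE is IXGBEVF_82599_RETA_SIZE):

#include <stdint.h>
#include <stdio.h>

#define RETA_SIZE		128	/* 82599/x540 RETA entries */
#define ENTRIES_PER_DWORD	16	/* 32 bits / 2 bits per entry */

/* expand the PF's packed reply into one queue index per RETA entry,
 * mirroring the loop in ixgbevf_get_reta_locked() */
static void unpack_reta(const uint32_t *hw_reta, uint32_t *reta,
			int num_rx_queues)
{
	/* the VF supports at most 2 queues, so one bit is enough;
	 * with a single queue every entry collapses to queue 0 */
	uint32_t mask = num_rx_queues > 1 ? 0x1 : 0x0;
	int dwords = RETA_SIZE / ENTRIES_PER_DWORD;

	for (int i = 0; i < dwords; i++)
		for (int j = 0; j < ENTRIES_PER_DWORD; j++)
			reta[i * ENTRIES_PER_DWORD + j] =
				(hw_reta[i] >> (2 * j)) & mask;
}

int main(void)
{
	uint32_t packed[RETA_SIZE / ENTRIES_PER_DWORD];
	uint32_t reta[RETA_SIZE];

	/* two-bit entries 00,01,00,01,... alternate queues 0 and 1 */
	for (int i = 0; i < RETA_SIZE / ENTRIES_PER_DWORD; i++)
		packed[i] = 0x44444444;

	unpack_reta(packed, reta, 2);
	printf("reta[0..3] = %u %u %u %u\n", (unsigned)reta[0],
	       (unsigned)reta[1], (unsigned)reta[2], (unsigned)reta[3]);
	return 0;
}

The mask doubles as the place where the driver pins the table to its own queue count: entries the PF packed for queues 2 and 3 fold back onto 0 and 1, consistent with the in-code note that ixgbevf does not support more than two queues yet.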