author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 19:00:47 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 19:00:47 +0300
commit     6c373ca89399c5a3f7ef210ad8f63dc3437da345 (patch)
tree       74d1ec65087df1da1021b43ac51acc1ee8601809 /drivers/net/ethernet/intel/e1000e
parent     bb0fd7ab0986105765d11baa82e619c618a235aa (diff)
parent     9f9151412dd7aae0e3f51a89ae4a1f8755fdb4d0 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Add BQL support to via-rhine, from Tino Reichardt.
 2) Integrate SWITCHDEV layer support into the DSA layer, so DSA drivers can support hw switch offloading. From Florian Fainelli.
 3) Allow 'ip address' commands to initiate multicast group join/leave, from Madhu Challa.
 4) Many ipv4 FIB lookup optimizations from Alexander Duyck.
 5) Support EBPF in cls_bpf classifier and act_bpf action, from Daniel Borkmann.
 6) Remove the ugly compat support in ARP for ugly layers like ax25, rose, etc. And use this to clean up the neigh layer, then use it to implement MPLS support. All from Eric Biederman.
 7) Support L3 forwarding offloading in switches, from Scott Feldman.
 8) Collapse the LOCAL and MAIN ipv4 FIB tables when possible, to speed up route lookups even further. From Alexander Duyck.
 9) Many improvements and bug fixes to the rhashtable implementation, from Herbert Xu and Thomas Graf. In particular, in the case where an rhashtable user bulk adds a large number of items into an empty table, we expand the table much more sanely.
10) Don't make the tcp_metrics hash table per-namespace, from Eric Biederman.
11) Extend EBPF to access SKB fields, from Alexei Starovoitov.
12) Split out new connection request sockets so that they can be established in the main hash table. Much less false sharing since hash lookups go direct to the request sockets instead of having to go first to the listener then to the request socks hashed underneath. From Eric Dumazet.
13) Add async I/O support for crypto AF_ALG sockets, from Tadeusz Struk.
14) Support stable privacy address generation for RFC7217 in IPV6. From Hannes Frederic Sowa.
15) Hash network namespace into IP frag IDs, also from Hannes Frederic Sowa.
16) Convert PTP get/set methods to use 64-bit time, from Richard Cochran.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1816 commits)
  fm10k: Bump driver version to 0.15.2
  fm10k: corrected VF multicast update
  fm10k: mbx_update_max_size does not drop all oversized messages
  fm10k: reset head instead of calling update_max_size
  fm10k: renamed mbx_tx_dropped to mbx_tx_oversized
  fm10k: update xcast mode before synchronizing multicast addresses
  fm10k: start service timer on probe
  fm10k: fix function header comment
  fm10k: comment next_vf_mbx flow
  fm10k: don't handle mailbox events in iov_event path and always process mailbox
  fm10k: use separate workqueue for fm10k driver
  fm10k: Set PF queues to unlimited bandwidth during virtualization
  fm10k: expose tx_timeout_count as an ethtool stat
  fm10k: only increment tx_timeout_count in Tx hang path
  fm10k: remove extraneous "Reset interface" message
  fm10k: separate PF only stats so that VF does not display them
  fm10k: use hw->mac.max_queues for stats
  fm10k: only show actual queues, not the maximum in hardware
  fm10k: allow creation of VLAN on default vid
  fm10k: fix unused warnings
  ...
Diffstat (limited to 'drivers/net/ethernet/intel/e1000e')
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h    1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h      3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c    6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h         6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c    776
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h    9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c     81
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c        25
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h       3
9 files changed, 801 insertions, 109 deletions
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index bb7ab3c321d6..0570c668ec3d 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -141,6 +141,7 @@
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
+#define E1000_RCTL_RDMTS_HEX 0x00010000
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 9416e5a7e0c8..5d9ceb17b4cb 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -132,6 +132,7 @@ enum e1000_boards {
board_pchlan,
board_pch2lan,
board_pch_lpt,
+ board_pch_spt
};
struct e1000_ps_page {
@@ -342,6 +343,7 @@ struct e1000_adapter {
struct timecounter tc;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
+ struct pm_qos_request pm_qos_req;
u16 eee_advert;
};
@@ -501,6 +503,7 @@ extern const struct e1000_info e1000_ich10_info;
extern const struct e1000_info e1000_pch_info;
extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
+extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 865ce45f9ec3..11f486e4ff7b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -896,18 +896,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
mask |= (1 << 18);
break;
default:
break;
}
- if (mac->type == e1000_pch_lpt)
+ if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt))
wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
E1000_FWSM_WLOCK_MAC_SHIFT;
for (i = 0; i < mac->rar_entry_count; i++) {
- if (mac->type == e1000_pch_lpt) {
+ if ((mac->type == e1000_pch_lpt) ||
+ (mac->type == e1000_pch_spt)) {
/* Cannot test write-protected SHRAL[n] registers */
if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
continue;
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 72f5475c4b90..19e8c487db06 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -87,6 +87,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_I218_V2 0x15A1
#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */
#define E1000_REVISION_4 4
@@ -108,6 +112,7 @@ enum e1000_mac_type {
e1000_pchlan,
e1000_pch2lan,
e1000_pch_lpt,
+ e1000_pch_spt,
};
enum e1000_media_type {
@@ -153,6 +158,7 @@ enum e1000_bus_width {
e1000_bus_width_pcie_x1,
e1000_bus_width_pcie_x2,
e1000_bus_width_pcie_x4 = 4,
+ e1000_bus_width_pcie_x8 = 8,
e1000_bus_width_32,
e1000_bus_width_64,
e1000_bus_width_reserved
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 48b74a549155..9d81c0317433 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -123,6 +123,14 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
u8 size, u16 *data);
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data);
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 *data);
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 data);
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
@@ -229,7 +237,8 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (ret_val)
return false;
out:
- if (hw->mac.type == e1000_pch_lpt) {
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
/* Unforce SMBus mode in PHY */
e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
@@ -321,6 +330,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_pch_lpt:
+ case e1000_pch_spt:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -461,6 +471,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -590,35 +601,54 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 gfpreg, sector_base_addr, sector_end_addr;
u16 i;
-
- /* Can't read flash registers if the register set isn't mapped. */
- if (!hw->flash_address) {
- e_dbg("ERROR: Flash registers not mapped\n");
- return -E1000_ERR_CONFIG;
- }
+ u32 nvm_size;
nvm->type = e1000_nvm_flash_sw;
- gfpreg = er32flash(ICH_FLASH_GFPREG);
+ if (hw->mac.type == e1000_pch_spt) {
+ /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+ * STRAP register. This is because in SPT the GbE Flash region
+ * is no longer accessed through the flash registers. Instead,
+ * the mechanism has changed, and the Flash region access
+ * registers are now implemented in GbE memory space.
+ */
+ nvm->flash_base_addr = 0;
+ nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
+ * NVM_SIZE_MULTIPLIER;
+ nvm->flash_bank_size = nvm_size / 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ /* Set the base address for flash register access */
+ hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
+ } else {
+ /* Can't read flash registers if register set isn't mapped. */
+ if (!hw->flash_address) {
+ e_dbg("ERROR: Flash registers not mapped\n");
+ return -E1000_ERR_CONFIG;
+ }
- /* sector_X_addr is a "sector"-aligned address (4096 bytes)
- * Add 1 to sector_end_addr since this sector is included in
- * the overall size.
- */
- sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
- sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+ gfpreg = er32flash(ICH_FLASH_GFPREG);
- /* flash_base_addr is byte-aligned */
- nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+ * Add 1 to sector_end_addr since this sector is included in
+ * the overall size.
+ */
+ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+ sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
- /* find total size of the NVM, then cut in half since the total
- * size represents two separate NVM banks.
- */
- nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
- << FLASH_SECTOR_ADDR_SHIFT);
- nvm->flash_bank_size /= 2;
- /* Adjust to word count */
- nvm->flash_bank_size /= sizeof(u16);
+ /* flash_base_addr is byte-aligned */
+ nvm->flash_base_addr = sector_base_addr
+ << FLASH_SECTOR_ADDR_SHIFT;
+
+ /* find total size of the NVM, then cut in half since the total
+ * size represents two separate NVM banks.
+ */
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
+ nvm->flash_bank_size /= 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ }
nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
@@ -682,6 +712,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_pch2lan;
/* fall-through */
case e1000_pch_lpt:
+ case e1000_pch_spt:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -699,7 +730,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
break;
}
- if (mac->type == e1000_pch_lpt) {
+ if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) {
mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface =
@@ -919,8 +950,9 @@ release:
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
- if (!link || ((status & E1000_STATUS_SPEED_100) &&
- (status & E1000_STATUS_FD)))
+ if ((hw->phy.revision > 5) || !link ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
goto update_fextnvm6;
ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
@@ -1100,6 +1132,21 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
+ /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
+ * LPLU and disable Gig speed when entering ULP
+ */
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ &phy_reg);
+ if (ret_val)
+ goto release;
+ phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ phy_reg);
+ if (ret_val)
+ goto release;
+ }
+
/* Force SMBus mode in PHY */
ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
if (ret_val)
@@ -1302,7 +1349,8 @@ out:
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val;
+ s32 ret_val, tipg_reg = 0;
+ u16 emi_addr, emi_val = 0;
bool link;
u16 phy_reg;
@@ -1333,48 +1381,55 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* the IPG and reduce Rx latency in the PHY.
*/
if (((hw->mac.type == e1000_pch2lan) ||
- (hw->mac.type == e1000_pch_lpt)) && link) {
+ (hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) && link) {
u32 reg;
reg = er32(STATUS);
+ tipg_reg = er32(TIPG);
+ tipg_reg &= ~E1000_TIPG_IPGT_MASK;
+
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
- u16 emi_addr;
+ tipg_reg |= 0xFF;
+ /* Reduce Rx latency in analog PHY */
+ emi_val = 0;
+ } else {
- reg = er32(TIPG);
- reg &= ~E1000_TIPG_IPGT_MASK;
- reg |= 0xFF;
- ew32(TIPG, reg);
+ /* Roll back the default values */
+ tipg_reg |= 0x08;
+ emi_val = 1;
+ }
- /* Reduce Rx latency in analog PHY */
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
+ ew32(TIPG, tipg_reg);
- if (hw->mac.type == e1000_pch2lan)
- emi_addr = I82579_RX_CONFIG;
- else
- emi_addr = I217_RX_CONFIG;
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
- ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
- hw->phy.ops.release(hw);
+ hw->phy.ops.release(hw);
- if (ret_val)
- return ret_val;
- }
+ if (ret_val)
+ return ret_val;
}
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type == e1000_pch_spt)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
}
-
- if (hw->mac.type == e1000_pch_lpt) {
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
/* Set platform power management values for
* Latency Tolerance Reporting (LTR)
*/
@@ -1386,6 +1441,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
+ /* FEXTNVM6 K1-off workaround */
+ if (hw->mac.type == e1000_pch_spt) {
+ u32 pcieanacfg = er32(PCIEANACFG);
+ u32 fextnvm6 = er32(FEXTNVM6);
+
+ if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+ else
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+
+ ew32(FEXTNVM6, fextnvm6);
+ }
+
if (!link)
return 0; /* No link detected */
@@ -1479,6 +1547,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -1929,6 +1998,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -2961,6 +3031,20 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
s32 ret_val;
switch (hw->mac.type) {
+ /* In SPT, read from the CTRL_EXT reg instead of
+ * accessing the sector valid bits from the nvm
+ */
+ case e1000_pch_spt:
+ *bank = er32(CTRL_EXT)
+ & E1000_CTRL_EXT_NVMVS;
+ if ((*bank == 0) || (*bank == 1)) {
+ e_dbg("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
+ } else {
+ *bank = *bank - 2;
+ return 0;
+ }
+ break;
case e1000_ich8lan:
case e1000_ich9lan:
eecd = er32(EECD);
@@ -3008,6 +3092,99 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
}
/**
+ * e1000_read_nvm_spt - NVM access for SPT
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words.
+ * @data: pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM
+ **/
+static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = 0;
+ u32 bank = 0;
+ u32 dword = 0;
+ u16 offset_to_read;
+ u16 i;
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ e_dbg("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = 0;
+
+ for (i = 0; i < words; i += 2) {
+ if (words - i == 1) {
+ if (dev_spec->shadow_ram[offset + i].modified) {
+ data[i] =
+ dev_spec->shadow_ram[offset + i].value;
+ } else {
+ offset_to_read = act_offset + i -
+ ((act_offset + i) % 2);
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ if ((act_offset + i) % 2 == 0)
+ data[i] = (u16)(dword & 0xFFFF);
+ else
+ data[i] = (u16)((dword >> 16) & 0xFFFF);
+ }
+ } else {
+ offset_to_read = act_offset + i;
+ if (!(dev_spec->shadow_ram[offset + i].modified) ||
+ !(dev_spec->shadow_ram[offset + i + 1].modified)) {
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ }
+ if (dev_spec->shadow_ram[offset + i].modified)
+ data[i] =
+ dev_spec->shadow_ram[offset + i].value;
+ else
+ data[i] = (u16)(dword & 0xFFFF);
+ if (dev_spec->shadow_ram[offset + i].modified)
+ data[i + 1] =
+ dev_spec->shadow_ram[offset + i + 1].value;
+ else
+ data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ e_dbg("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
* e1000_read_nvm_ich8lan - Read word(s) from the NVM
* @hw: pointer to the HW structure
* @offset: The offset (in bytes) of the word(s) to read.
@@ -3090,8 +3267,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Clear FCERR and DAEL in hw status by writing 1 */
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
-
- ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
/* Either we should have a hardware SPI cycle in progress
* bit to check against, in order to start a new cycle or
@@ -3107,7 +3286,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* Begin by setting Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
s32 i;
@@ -3128,7 +3310,11 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
e_dbg("Flash controller busy, cannot get access\n");
}
@@ -3151,9 +3337,16 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
u32 i = 0;
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
- ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
/* wait till FDONE bit is set to 1 */
do {
@@ -3170,6 +3363,23 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
}
/**
+ * e1000_read_flash_dword_ich8lan - Read dword from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash dword at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+ return e1000_read_flash_data32_ich8lan(hw, offset, data);
+}
+
+/**
* e1000_read_flash_word_ich8lan - Read word from flash
* @hw: pointer to the HW structure
* @offset: offset to data location
@@ -3201,7 +3411,14 @@ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u16 word = 0;
- ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+ /* In SPT, only 32 bits access is supported,
+ * so this function should not be called.
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ else
+ ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+
if (ret_val)
return ret_val;
@@ -3287,6 +3504,82 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
}
/**
+ * e1000_read_flash_data32_ich8lan - Read dword from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to read.
+ * @data: Pointer to the dword to store the value read.
+ *
+ * Reads a byte or word from the NVM using the flash access registers.
+ **/
+
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+ hw->mac.type != e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+ /* In SPT, This register is in Lan memory space, not flash.
+ * Therefore, only 32 bit access is supported
+ */
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ /* In SPT, This register is in Lan memory space, not flash.
+ * Therefore, only 32 bit access is supported
+ */
+ ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1, if set to 1, clear it
+ * and try the whole sequence a few more times, else
+ * read in (shift in) the Flash Data0, the order is
+ * least significant byte first msb to lsb
+ */
+ if (!ret_val) {
+ *data = er32flash(ICH_FLASH_FDATA0);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ e_dbg("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
* e1000_write_nvm_ich8lan - Write word(s) to the NVM
* @hw: pointer to the HW structure
* @offset: The offset (in bytes) of the word(s) to write.
@@ -3321,7 +3614,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * e1000_update_nvm_checksum_spt - Update the checksum for NVM
* @hw: pointer to the HW structure
*
* The NVM checksum is updated by calling the generic update_nvm_checksum,
@@ -3331,13 +3624,13 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
* After a successful commit, the shadow ram is cleared and is ready for
* future writes.
**/
-static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
s32 ret_val;
- u16 data;
+ u32 dword = 0;
ret_val = e1000e_update_nvm_checksum_generic(hw);
if (ret_val)
@@ -3371,12 +3664,175 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
}
-
- for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
/* Determine whether to write the value stored
* in the other NVM bank or a modified value stored
* in the shadow RAM
*/
+ ret_val = e1000_read_flash_dword_ich8lan(hw,
+ i + old_bank_offset,
+ &dword);
+
+ if (dev_spec->shadow_ram[i].modified) {
+ dword &= 0xffff0000;
+ dword |= (dev_spec->shadow_ram[i].value & 0xffff);
+ }
+ if (dev_spec->shadow_ram[i + 1].modified) {
+ dword &= 0x0000ffff;
+ dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
+ << 16);
+ }
+ if (ret_val)
+ break;
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD - 1)
+ dword |= E1000_ICH_NVM_SIG_MASK << 16;
+
+ /* Convert offset to bytes. */
+ act_offset = (i + new_bank_offset) << 1;
+
+ usleep_range(100, 200);
+
+ /* Write the data to the new bank. Offset in words */
+ act_offset = i + new_bank_offset;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
+ dword);
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+ e_dbg("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally validate the new segment by setting bit 15:14
+ * to 10b in word 0x13 , this can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+
+ /*offset in words but we read dword */
+ --act_offset;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0xBFFFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase
+ */
+ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
+ /* offset in words but we read dword */
+ act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0x00FFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ usleep_range(10000, 20000);
+ }
+
+out:
+ if (ret_val)
+ e_dbg("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u16 data = 0;
+
+ ret_val = e1000e_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val) {
+ e_dbg("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+ for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
if (dev_spec->shadow_ram[i].modified) {
data = dev_spec->shadow_ram[i].value;
} else {
@@ -3498,6 +3954,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_pch_lpt:
+ case e1000_pch_spt:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
@@ -3583,9 +4040,13 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u8 count = 0;
- if (size < 1 || size > 2 || data > size * 0xff ||
- offset > ICH_FLASH_LINEAR_ADDR_MASK)
- return -E1000_ERR_NVM;
+ if (hw->mac.type == e1000_pch_spt) {
+ if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ } else {
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr);
@@ -3596,12 +4057,25 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val)
break;
+ /* In SPT, This register is in Lan memory space, not
+ * flash. Therefore, only 32 bit access is supported
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
- hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
- ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+ /* In SPT, This register is in Lan memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
@@ -3640,6 +4114,90 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
}
/**
+* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
+* @hw: pointer to the HW structure
+* @offset: The offset (in bytes) of the dwords to read.
+* @data: The 4 bytes to write to the NVM.
+*
+* Writes one/two/four bytes to the NVM using the flash access registers.
+**/
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val;
+ u8 count = 0;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+ do {
+ udelay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val)
+ break;
+
+ /* In SPT, This register is in Lan memory space, not
+ * flash. Therefore, only 32 bit access is supported
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
+ >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+ /* In SPT, This register is in Lan memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported
+ */
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+ ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+ ew32flash(ICH_FLASH_FDATA0, data);
+
+ /* check if FCERR is set to 1 , if set to 1, clear it
+ * and try the whole sequence a few more times else done
+ */
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+ if (!ret_val)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ e_dbg("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
+
+/**
* e1000_write_flash_byte_ich8lan - Write a single byte to NVM
* @hw: pointer to the HW structure
* @offset: The index of the byte to read.
@@ -3656,6 +4214,40 @@ static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
}
/**
+* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+* @hw: pointer to the HW structure
+* @offset: The offset of the word to write.
+* @dword: The dword to write to the NVM.
+*
+* Writes a single dword to the NVM using the flash access registers.
+* Goes through a retry algorithm before giving up.
+**/
+static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword)
+{
+ s32 ret_val;
+ u16 program_retries;
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+
+ if (!ret_val)
+ return ret_val;
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset);
+ usleep_range(100, 200);
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+ if (!ret_val)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return 0;
+}
+
+/**
* e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
* @hw: pointer to the HW structure
* @offset: The offset of the byte to write.
@@ -3759,9 +4351,18 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
- hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+ if (hw->mac.type == e1000_pch_spt)
+ hsflctl.regval =
+ er32flash(ICH_FLASH_HSFSTS) >> 16;
+ else
+ hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
- ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+ if (hw->mac.type == e1000_pch_spt)
+ ew32flash(ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of an index within the
* block into Flash Linear address field in Flash
@@ -4180,7 +4781,8 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
ew32(RFCTL, reg);
/* Enable ECC on Lynxpoint */
- if (hw->mac.type == e1000_pch_lpt) {
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
reg = er32(PBECCSTS);
reg |= E1000_PBECCSTS_ECC_ENABLE;
ew32(PBECCSTS, reg);
@@ -4583,7 +5185,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(device_id == E1000_DEV_ID_PCH_I218_LM3) ||
- (device_id == E1000_DEV_ID_PCH_I218_V3)) {
+ (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type == e1000_pch_spt)) {
u32 fextnvm6 = er32(FEXTNVM6);
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
@@ -5058,6 +5661,17 @@ static const struct e1000_nvm_operations ich8_nvm_ops = {
.write = e1000_write_nvm_ich8lan,
};
+static const struct e1000_nvm_operations spt_nvm_ops = {
+ .acquire = e1000_acquire_nvm_ich8lan,
+ .release = e1000_release_nvm_ich8lan,
+ .read = e1000_read_nvm_spt,
+ .update = e1000_update_nvm_checksum_spt,
+ .reload = e1000e_reload_nvm_generic,
+ .valid_led_default = e1000_valid_led_default_ich8lan,
+ .validate = e1000_validate_nvm_checksum_ich8lan,
+ .write = e1000_write_nvm_ich8lan,
+};
+
const struct e1000_info e1000_ich8_info = {
.mac = e1000_ich8lan,
.flags = FLAG_HAS_WOL
@@ -5166,3 +5780,23 @@ const struct e1000_info e1000_pch_lpt_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &ich8_nvm_ops,
};
+
+const struct e1000_info e1000_pch_spt_info = {
+ .mac = e1000_pch_spt,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9018,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
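[Editor's note: one detail of the ich8lan.c hunks above worth spelling out is that on SPT the driver stops reading the flash geometry from GFPREG and instead derives it from the STRAP register, as the new branch in e1000_init_nvm_params_ich8lan() shows. The stand-alone sketch below reproduces only that arithmetic; the sample STRAP value, the ns_to-size helper layout, and the printf harness are hypothetical illustrations, not driver code.]

/* Sketch: SPT NVM geometry from the STRAP register, mirroring the arithmetic
 * in the e1000_init_nvm_params_ich8lan() hunk above.  Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define NVM_SIZE_MULTIPLIER 4096	/* bytes per unit of the NVMS strap field */

int main(void)
{
	uint32_t strap = 0x0000000E;	/* example STRAP readout (hypothetical) */
	uint32_t nvm_size, bank_words;

	/* Bits 5:1 of STRAP encode the NVM size in 4 KiB units, minus one. */
	nvm_size = (((strap >> 1) & 0x1F) + 1) * NVM_SIZE_MULTIPLIER;

	/* The part holds two banks; the driver keeps the bank size in words. */
	bank_words = (nvm_size / 2) / (uint32_t)sizeof(uint16_t);

	printf("NVM size: %u bytes, per-bank: %u words\n",
	       (unsigned)nvm_size, (unsigned)bank_words);
	return 0;
}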
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 8066a498eaac..770a573b9eea 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -95,9 +95,18 @@
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
+#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
+/* bit for disabling packet buffer read */
+#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+#define K1_ENTRY_LATENCY 0
+#define K1_MIN_TIME 1
+#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
+#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
+#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
+
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 1e8c40fd5c3d..c509a5c900f5 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -70,6 +70,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pchlan] = &e1000_pch_info,
[board_pch2lan] = &e1000_pch2_info,
[board_pch_lpt] = &e1000_pch_lpt_info,
+ [board_pch_spt] = &e1000_pch_spt_info,
};
struct e1000_reg_info {
@@ -946,7 +947,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
if (*work_done >= work_to_do)
break;
(*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
skb = buffer_info->skb;
buffer_info->skb = NULL;
@@ -1231,7 +1232,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
(count < tx_ring->count)) {
bool cleaned = false;
- rmb(); /* read buffer_info after eop_desc */
+ dma_rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
@@ -1331,7 +1332,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
break;
(*work_done)++;
skb = buffer_info->skb;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
/* in the packet split case this is header only */
prefetch(skb->data - NET_IP_ALIGN);
@@ -1535,7 +1536,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
if (*work_done >= work_to_do)
break;
(*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
+ dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
skb = buffer_info->skb;
buffer_info->skb = NULL;
@@ -1796,7 +1797,8 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
}
/* Reset on uncorrectable ECC error */
- if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt))) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -1876,7 +1878,8 @@ static irqreturn_t e1000_intr(int __always_unused irq, void *data)
}
/* Reset on uncorrectable ECC error */
- if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt))) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -2257,7 +2260,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
- } else if (hw->mac.type == e1000_pch_lpt) {
+ } else if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
} else {
ew32(IMS, IMS_ENABLE_MASK);
@@ -3014,6 +3018,19 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TCTL, tctl);
hw->mac.ops.config_collision_dist(hw);
+
+ /* SPT Si errata workaround to avoid data corruption */
+ if (hw->mac.type == e1000_pch_spt) {
+ u32 reg_val;
+
+ reg_val = er32(IOSFPC);
+ reg_val |= E1000_RCTL_RDMTS_HEX;
+ ew32(IOSFPC, reg_val);
+
+ reg_val = er32(TARC(0));
+ reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
+ ew32(TARC(0), reg_val);
+ }
}
/**
@@ -3280,9 +3297,9 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RXDCTL(0), rxdctl | 0x3);
}
- pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
+ pm_qos_update_request(&adapter->pm_qos_req, lat);
} else {
- pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ pm_qos_update_request(&adapter->pm_qos_req,
PM_QOS_DEFAULT_VALUE);
}
@@ -3490,8 +3507,11 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
struct e1000_hw *hw = &adapter->hw;
u32 incvalue, incperiod, shift;
- /* Make sure clock is enabled on I217 before checking the frequency */
- if ((hw->mac.type == e1000_pch_lpt) &&
+ /* Make sure clock is enabled on I217/I218/I219 before checking
+ * the frequency
+ */
+ if (((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) &&
!(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
@@ -3505,10 +3525,13 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
- /* On I217, the clock frequency is 25MHz or 96MHz as
- * indicated by the System Clock Frequency Indication
+ case e1000_pch_spt:
+ /* On I217, I218 and I219, the clock frequency is 25MHz
+ * or 96MHz as indicated by the System Clock Frequency
+ * Indication
*/
- if ((hw->mac.type != e1000_pch_lpt) ||
+ if (((hw->mac.type != e1000_pch_lpt) &&
+ (hw->mac.type != e1000_pch_spt)) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
/* Stable 96MHz frequency */
incperiod = INCPERIOD_96MHz;
@@ -3875,6 +3898,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
fc->refresh_time = 0x0400;
if (adapter->netdev->mtu <= ETH_DATA_LEN) {
@@ -4060,6 +4084,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
*/
set_bit(__E1000_DOWN, &adapter->state);
+ netif_carrier_off(netdev);
+
/* disable receives in the hardware */
rctl = er32(RCTL);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
@@ -4084,8 +4110,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netif_carrier_off(netdev);
-
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
@@ -4379,7 +4403,7 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */
- pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
/* before we allocate an interrupt, we must be ready to handle it.
@@ -4490,7 +4514,7 @@ static int e1000_close(struct net_device *netdev)
!test_bit(__E1000_TESTING, &adapter->state))
e1000e_release_hw_control(adapter);
- pm_qos_remove_request(&adapter->netdev->pm_qos_req);
+ pm_qos_remove_request(&adapter->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
@@ -4759,7 +4783,8 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.mgpdc += er32(MGTPDC);
/* Correctable ECC Errors */
- if (hw->mac.type == e1000_pch_lpt) {
+ if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
u32 pbeccsts = er32(PBECCSTS);
adapter->corr_errors +=
@@ -6144,7 +6169,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
- } else if (hw->mac.type == e1000_pch_lpt) {
+ } else if ((hw->mac.type == e1000_pch_lpt) ||
+ (hw->mac.type == e1000_pch_spt)) {
if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
@@ -6807,7 +6833,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_ioremap;
if ((adapter->flags & FLAG_HAS_FLASH) &&
- (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+ (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
+ (hw->mac.type < e1000_pch_spt)) {
flash_start = pci_resource_start(pdev, 1);
flash_len = pci_resource_len(pdev, 1);
adapter->hw.flash_address = ioremap(flash_start, flash_len);
@@ -6847,7 +6874,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_hw_init;
if ((adapter->flags & FLAG_IS_ICH) &&
- (adapter->flags & FLAG_READ_ONLY_NVM))
+ (adapter->flags & FLAG_READ_ONLY_NVM) &&
+ (hw->mac.type < e1000_pch_spt))
e1000e_write_protect_nvm_ich8lan(&adapter->hw);
hw->mac.ops.get_bus_info(&adapter->hw);
@@ -7043,7 +7071,7 @@ err_hw_init:
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
- if (adapter->hw.flash_address)
+ if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
iounmap(adapter->hw.flash_address);
e1000e_reset_interrupt_capability(adapter);
err_flashmap:
@@ -7116,7 +7144,8 @@ static void e1000_remove(struct pci_dev *pdev)
kfree(adapter->rx_ring);
iounmap(adapter->hw.hw_addr);
- if (adapter->hw.flash_address)
+ if ((adapter->hw.flash_address) &&
+ (adapter->hw.mac.type < e1000_pch_spt))
iounmap(adapter->hw.flash_address);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
@@ -7213,6 +7242,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 978ef9c4a043..8d7b21dc7e19 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -106,20 +106,18 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
* Read the timecounter and return the correct value in ns after converting
* it into a struct timespec.
**/
-static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- u32 remainder;
u64 ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
ns = timecounter_read(&adapter->tc);
spin_unlock_irqrestore(&adapter->systim_lock, flags);
- ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
- ts->tv_nsec = remainder;
+ *ts = ns_to_timespec64(ns);
return 0;
}
@@ -133,14 +131,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
* wall timer value.
**/
static int e1000e_phc_settime(struct ptp_clock_info *ptp,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
u64 ns;
- ns = timespec_to_ns(ts);
+ ns = timespec64_to_ns(ts);
/* reset the timecounter */
spin_lock_irqsave(&adapter->systim_lock, flags);
@@ -171,11 +169,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
- struct timespec ts;
+ struct timespec64 ts;
- adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts);
+ adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
- e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ e_dbg("SYSTIM overflow check at %lld.%09lu\n",
+ (long long) ts.tv_sec, ts.tv_nsec);
schedule_delayed_work(&adapter->systim_overflow_work,
E1000_SYSTIM_OVERFLOW_PERIOD);
@@ -190,8 +189,8 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
- .gettime = e1000e_phc_gettime,
- .settime = e1000e_phc_settime,
+ .gettime64 = e1000e_phc_gettime,
+ .settime64 = e1000e_phc_settime,
.enable = e1000e_phc_enable,
};
@@ -221,7 +220,9 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
- if ((hw->mac.type != e1000_pch_lpt) ||
+ case e1000_pch_spt:
+ if (((hw->mac.type != e1000_pch_lpt) &&
+ (hw->mac.type != e1000_pch_spt)) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index ea235bbe50d3..85eefc4832ba 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -38,6 +38,7 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
@@ -67,6 +68,7 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_IOSFPC 0x00F28 /* TX corrupted data */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
@@ -121,6 +123,7 @@
(0x054E4 + ((_i - 16) * 8)))
#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */