author    Dave Airlie <airlied@redhat.com>    2009-03-29 04:22:18 +0400
committer Dave Airlie <airlied@redhat.com>    2009-03-29 04:22:18 +0400
commit    90f959bcb386da2c71613dcefc6a285e054a539e (patch)
tree      ee3e9dd4111d4aad12e579cb0c2c159114dff263 /drivers/net/e1000e/netdev.c
parent    41f13fe81dd1b08723ab9f3fc3c7f29cfa81f1a5 (diff)
parent    07d43ba98621f08e252a48c96b258b4d572b0257 (diff)
download  linux-90f959bcb386da2c71613dcefc6a285e054a539e.tar.xz
drm: merge Linux master into HEAD
Conflicts:
	drivers/gpu/drm/drm_info.c
	drivers/gpu/drm/drm_proc.c
	drivers/gpu/drm/i915/i915_gem_debugfs.c
Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--  drivers/net/e1000e/netdev.c  261
1 file changed, 119 insertions(+), 142 deletions(-)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 91817d0afcaf..bfb2d6c85c54 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -44,10 +44,11 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
+#include <linux/aer.h>
#include "e1000.h"
-#define DRV_VERSION "0.3.3.3-k6"
+#define DRV_VERSION "0.3.3.4-k4"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -56,6 +57,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_82572] = &e1000_82572_info,
[board_82573] = &e1000_82573_info,
[board_82574] = &e1000_82574_info,
+ [board_82583] = &e1000_82583_info,
[board_80003es2lan] = &e1000_es2_info,
[board_ich8lan] = &e1000_ich8_info,
[board_ich9lan] = &e1000_ich9_info,
@@ -99,8 +101,8 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(vlan));
+ vlan_gro_receive(&adapter->napi, adapter->vlgrp,
+ le16_to_cpu(vlan), skb);
else
napi_gro_receive(&adapter->napi, skb);
}
@@ -565,15 +567,14 @@ next_desc:
static void e1000_put_txbuf(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
- if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
- }
+ buffer_info->dma = 0;
if (buffer_info->skb) {
+ skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
+ buffer_info->time_stamp = 0;
}
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
@@ -620,15 +621,16 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
struct e1000_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
- bool cleaned = 0;
+ bool cleaned;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
- while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
- for (cleaned = 0; !cleaned; ) {
+ while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+ (count < tx_ring->count)) {
+ for (cleaned = 0; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
@@ -654,10 +656,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
-#define E1000_TX_WEIGHT 64
- /* weight of a sort for tx, to avoid endless transmit cleanup */
- if (count++ == E1000_TX_WEIGHT)
- break;
}
tx_ring->next_to_clean = i;
@@ -678,13 +676,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
if (adapter->detect_tx_hung) {
- /*
- * Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i
- */
+ /* Detect a transmit hang in hardware, this serializes the
+ * check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = 0;
- if (tx_ring->buffer_info[eop].dma &&
- time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
+ if (tx_ring->buffer_info[i].time_stamp &&
+ time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ (adapter->tx_timeout_factor * HZ))
&& !(er32(STATUS) & E1000_STATUS_TXOFF)) {
e1000_print_tx_hang(adapter);
@@ -695,7 +691,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
adapter->total_tx_packets += total_tx_packets;
adapter->net_stats.tx_bytes += total_tx_bytes;
adapter->net_stats.tx_packets += total_tx_packets;
- return cleaned;
+ return (count < tx_ring->count);
}
/**
@@ -1152,7 +1148,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
* read ICR disables interrupts using IAM
*/
- if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = 1;
/*
* ICH8 workaround-- Call gig speed drop workaround on cable
@@ -1179,12 +1175,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1218,7 +1214,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
* IMC write
*/
- if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+ if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = 1;
/*
* ICH8 workaround-- Call gig speed drop workaround on cable
@@ -1246,12 +1242,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
@@ -1265,7 +1261,8 @@ static irqreturn_t e1000_msix_other(int irq, void *data)
u32 icr = er32(ICR);
if (!(icr & E1000_ICR_INT_ASSERTED)) {
- ew32(IMS, E1000_IMS_OTHER);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_OTHER);
return IRQ_NONE;
}
@@ -1282,7 +1279,8 @@ static irqreturn_t e1000_msix_other(int irq, void *data)
}
no_link_interrupt:
- ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
return IRQ_HANDLED;
}
@@ -1320,10 +1318,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
adapter->rx_ring->set_itr = 0;
}
- if (netif_rx_schedule_prep(&adapter->napi)) {
+ if (napi_schedule_prep(&adapter->napi)) {
adapter->total_rx_bytes = 0;
adapter->total_rx_packets = 0;
- __netif_rx_schedule(&adapter->napi);
+ __napi_schedule(&adapter->napi);
}
return IRQ_HANDLED;
}
@@ -1698,7 +1696,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- spin_lock_init(&adapter->tx_queue_lock);
return 0;
err:
@@ -2007,32 +2004,25 @@ static int e1000_clean(struct napi_struct *napi, int budget)
!(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
goto clean_rx;
- /*
- * e1000_clean is called per-cpu. This lock protects
- * tx_ring from being cleaned by multiple cpus
- * simultaneously. A failure obtaining the lock means
- * tx_ring is currently being cleaned anyway.
- */
- if (spin_trylock(&adapter->tx_queue_lock)) {
- tx_cleaned = e1000_clean_tx_irq(adapter);
- spin_unlock(&adapter->tx_queue_lock);
- }
+ tx_cleaned = e1000_clean_tx_irq(adapter);
clean_rx:
adapter->clean_rx(adapter, &work_done, budget);
- if (tx_cleaned)
+ if (!tx_cleaned)
work_done = budget;
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
- netif_rx_complete(napi);
- if (adapter->msix_entries)
- ew32(IMS, adapter->rx_ring->ims_val);
- else
- e1000_irq_enable(adapter);
+ napi_complete(napi);
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ ew32(IMS, adapter->rx_ring->ims_val);
+ else
+ e1000_irq_enable(adapter);
+ }
}
return work_done;
@@ -2922,8 +2912,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
if (e1000_alloc_queues(adapter))
return -ENOMEM;
- spin_lock_init(&adapter->tx_queue_lock);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -3320,7 +3308,7 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
adapter->stats.algnerrc += er32(ALGNERRC);
adapter->stats.rxerrc += er32(RXERRC);
- if (hw->mac.type != e1000_82574)
+ if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583))
adapter->stats.tncrs += er32(TNCRS);
adapter->stats.cexterr += er32(CEXTERR);
adapter->stats.tsctc += er32(TSCTC);
@@ -3777,23 +3765,30 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
unsigned int i;
u8 css;
u32 cmd_len = E1000_TXD_CMD_DEXT;
+ __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
break;
- case __constant_htons(ETH_P_IPV6):
+ case cpu_to_be16(ETH_P_IPV6):
/* XXX not handling all IPV6 headers */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
cmd_len |= E1000_TXD_CMD_TCP;
break;
default:
if (unlikely(net_ratelimit()))
- e_warn("checksum_partial proto=%x!\n", skb->protocol);
+ e_warn("checksum_partial proto=%x!\n",
+ be16_to_cpu(protocol));
break;
}
@@ -3832,42 +3827,40 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
{
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_buffer *buffer_info;
- unsigned int len = skb->len - skb->data_len;
- unsigned int offset = 0, size, count = 0, i;
+ unsigned int len = skb_headlen(skb);
+ unsigned int offset, size, count = 0, i;
unsigned int f;
+ dma_addr_t *map;
i = tx_ring->next_to_use;
+ if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+ adapter->tx_dma_failed++;
+ return 0;
+ }
+
+ map = skb_shinfo(skb)->dma_maps;
+ offset = 0;
+
while (len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (mss && !nr_frags && size == len && size > 8)
- size -= 4;
-
buffer_info->length = size;
- /* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
- buffer_info->dma =
- pci_map_single(adapter->pdev,
- skb->data + offset,
- size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
- dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
- adapter->tx_dma_failed++;
- return -1;
- }
buffer_info->next_to_watch = i;
+ buffer_info->dma = map[0] + offset;
+ count++;
len -= size;
offset += size;
- count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
+
+ if (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
}
for (f = 0; f < nr_frags; f++) {
@@ -3875,49 +3868,27 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (mss && f == (nr_frags-1) && size == len && size > 8)
- size -= 4;
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
- buffer_info->dma =
- pci_map_page(adapter->pdev,
- frag->page,
- offset,
- size,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->pdev,
- buffer_info->dma)) {
- dev_err(&adapter->pdev->dev,
- "TX DMA page map failed\n");
- adapter->tx_dma_failed++;
- return -1;
- }
-
buffer_info->next_to_watch = i;
+ buffer_info->dma = map[f + 1] + offset;
len -= size;
offset += size;
count++;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
}
}
- if (i == 0)
- i = tx_ring->count - 1;
- else
- i--;
-
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
@@ -4069,7 +4040,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb->len - skb->data_len;
- unsigned long irq_flags;
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -4138,18 +4108,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
- if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
-
/*
* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time
*/
- if (e1000_maybe_stop_tx(netdev, count + 2)) {
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
+ if (e1000_maybe_stop_tx(netdev, count + 2))
return NETDEV_TX_BUSY;
- }
if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= E1000_TX_FLAGS_VLAN;
@@ -4161,7 +4125,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = e1000_tso(adapter, skb);
if (tso < 0) {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
return NETDEV_TX_OK;
}
@@ -4178,22 +4141,20 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
+ /* if count is 0 then mapping error has occurred */
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
- if (count < 0) {
- /* handle pci_map_single() error in e1000_tx_map */
+ if (count) {
+ e1000_tx_queue(adapter, tx_flags, count);
+ netdev->trans_start = jiffies;
+ /* Make sure there is space in the ring for the next send. */
+ e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
+
+ } else {
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
- return NETDEV_TX_OK;
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
}
- e1000_tx_queue(adapter, tx_flags, count);
-
- netdev->trans_start = jiffies;
-
- /* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
-
- spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
return NETDEV_TX_OK;
}
@@ -4543,6 +4504,14 @@ static int e1000_resume(struct pci_dev *pdev)
return err;
}
+ /* AER (Advanced Error Reporting) hooks */
+ err = pci_enable_pcie_error_reporting(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+ "0x%x\n", err);
+ /* non-fatal, continue */
+ }
+
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4637,24 +4606,29 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
int err;
+ pci_ers_result_t result;
e1000e_disable_l1aspm(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
- pci_restore_state(pdev);
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
- e1000e_reset(adapter);
- ew32(WUS, ~0);
+ e1000e_reset(adapter);
+ ew32(WUS, ~0);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
- return PCI_ERS_RESULT_RECOVERED;
+ return result;
}
/**
@@ -4922,12 +4896,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- /*
- * We should not be using LLTX anymore, but we are still Tx faster with
- * it.
- */
- netdev->features |= NETIF_F_LLTX;
-
if (e1000e_enable_mng_pass_thru(&adapter->hw))
adapter->flags |= FLAG_MNG_PT_ENABLED;
@@ -5091,6 +5059,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ int err;
/*
* flush_scheduled work may reschedule our watchdog task, so
@@ -5125,6 +5094,12 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
free_netdev(netdev);
+ /* AER disable */
+ err = pci_disable_pcie_error_reporting(pdev);
+ if (err)
+ dev_err(&pdev->dev,
+ "pci_disable_pcie_error_reporting failed 0x%x\n", err);
+
pci_disable_device(pdev);
}
@@ -5156,6 +5131,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
board_80003es2lan },