Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
-rw-r--r--  drivers/net/qlge/qlge_main.c  109
1 file changed, 79 insertions(+), 30 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 45421c8b6010..91191f761fba 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -641,7 +641,7 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
}
-static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
+static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
int status = 0;
/* wait for reg to come ready */
@@ -656,8 +656,11 @@ static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
if (status)
goto exit;
- /* get the data */
- *data = ql_read32(qdev, FLASH_DATA);
+ /* This data is stored on flash as an array of
+ * __le32. Since ql_read32() returns cpu endian
+ * we need to swap it back.
+ */
+ *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
return status;
}
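
For reference, the endianness rule this hunk encodes, modeled as a minimal userspace C program with glibc's htole32()/le32toh() standing in for the kernel's cpu_to_le32()/le32_to_cpu():

#include <endian.h>	/* htole32()/le32toh(): userspace analogues of cpu_to_le32()/le32_to_cpu() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A register read returns the value in CPU byte order... */
	uint32_t cpu_val = 0x11223344;
	/* ...but the flash image is defined as an array of little-endian
	 * words, so it must be stored back in little-endian form. */
	uint32_t le_val = htole32(cpu_val);

	printf("cpu=0x%08x decoded=0x%08x\n", cpu_val, le32toh(le_val));
	return 0;
}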
@@ -666,13 +669,20 @@ static int ql_get_flash_params(struct ql_adapter *qdev)
{
int i;
int status;
- u32 *p = (u32 *)&qdev->flash;
+ __le32 *p = (__le32 *)&qdev->flash;
+ u32 offset = 0;
+
+ /* Second function's parameters follow the first
+ * function's.
+ */
+ if (qdev->func)
+ offset = sizeof(qdev->flash) / sizeof(u32);
if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
return -ETIMEDOUT;
for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
- status = ql_read_flash_word(qdev, i, p);
+ status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
goto exit;
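
A minimal sketch of the flash layout the new offset assumes, namely that function 1's parameter block immediately follows function 0's (struct name and sizes here are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct flash_params { uint32_t words[8]; };	/* hypothetical block size */

static uint32_t flash_image[16];		/* func 0's block, then func 1's */

static uint32_t read_flash_word(unsigned int off)
{
	return flash_image[off];
}

static void read_params(unsigned int func, struct flash_params *fp)
{
	unsigned int words = sizeof(*fp) / sizeof(uint32_t);
	unsigned int off = func ? words : 0;	/* func 1 starts after func 0 */
	unsigned int i;

	for (i = 0; i < words; i++)
		fp->words[i] = read_flash_word(i + off);
}

int main(void)
{
	struct flash_params fp;

	flash_image[8] = 0xdeadbeef;		/* first word of func 1's block */
	read_params(1, &fp);
	printf("func1 word0=0x%08x\n", fp.words[0]);
	return 0;
}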
@@ -888,6 +898,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
lbq_desc->index);
lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
if (lbq_desc->p.lbq_page == NULL) {
+ rx_ring->lbq_clean_idx = clean_idx;
QPRINTK(qdev, RX_STATUS, ERR,
"Couldn't get a page.\n");
return;
@@ -897,6 +908,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
+ rx_ring->lbq_clean_idx = clean_idx;
+ put_page(lbq_desc->p.lbq_page);
+ lbq_desc->p.lbq_page = NULL;
QPRINTK(qdev, RX_STATUS, ERR,
"PCI mapping failed.\n");
return;
@@ -958,6 +972,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (pci_dma_mapping_error(qdev->pdev, map)) {
QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
+ dev_kfree_skb_any(sbq_desc->p.skb);
+ sbq_desc->p.skb = NULL;
return;
}
pci_unmap_addr_set(sbq_desc, mapaddr, map);
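
Both refill hunks above apply the same rollback pattern: on a mid-loop failure, publish the index of the last fully set-up buffer and release the half-initialized one. A standalone sketch, with malloc()/free() standing in for alloc_page()/put_page() and a stub for the PCI mapping:

#include <stdlib.h>

#define RING_SIZE 64

struct ring {
	unsigned int clean_idx;
	void *bufs[RING_SIZE];
};

static int map_buffer(void *buf)	/* stands in for pci_map_page(); 0 = ok */
{
	(void)buf;
	return 0;
}

static void refill(struct ring *r, unsigned int count)
{
	unsigned int clean_idx = r->clean_idx;

	while (count--) {
		void *buf = malloc(4096);
		if (!buf) {
			r->clean_idx = clean_idx;	/* publish progress so far */
			return;
		}
		if (map_buffer(buf)) {
			r->clean_idx = clean_idx;	/* publish progress so far... */
			free(buf);			/* ...and drop the unmapped buffer */
			return;
		}
		r->bufs[clean_idx] = buf;
		clean_idx = (clean_idx + 1) % RING_SIZE;
	}
	r->clean_idx = clean_idx;
}

int main(void)
{
	struct ring r = { 0 };

	refill(&r, 4);
	return 0;
}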
@@ -1420,18 +1436,32 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
}
- if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
- QPRINTK(qdev, RX_STATUS, ERR,
- "Bad checksum for this %s packet.\n",
- ((ib_mac_rsp->
- flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
- skb->ip_summed = CHECKSUM_NONE;
- } else if (qdev->rx_csum &&
- ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
- ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ }
+ }
}
qdev->stats.rx_packets++;
qdev->stats.rx_bytes += skb->len;
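
The unfragmented-IPv4 test in this hunk can be exercised in isolation; a userspace sketch, using glibc's IP_OFFMASK (the same 0x1fff value as the kernel's IP_OFFSET):

#include <arpa/inet.h>		/* htons() */
#include <netinet/ip.h>		/* struct iphdr, IP_MF, IP_OFFMASK */
#include <stdbool.h>
#include <stdio.h>

/* Hardware only validates the UDP checksum of unfragmented datagrams,
 * so trust it only when neither more-fragments nor an offset is set. */
static bool udp_csum_trustable(const struct iphdr *iph)
{
	return !(iph->frag_off & htons(IP_MF | IP_OFFMASK));
}

int main(void)
{
	struct iphdr iph = { 0 };

	iph.frag_off = htons(IP_MF);
	printf("fragmented: %d\n", udp_csum_trustable(&iph));	/* 0 */
	iph.frag_off = 0;
	printf("unfragmented: %d\n", udp_csum_trustable(&iph));	/* 1 */
	return 0;
}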
@@ -1439,12 +1469,12 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
QPRINTK(qdev, RX_STATUS, DEBUG,
"Passing a VLAN packet upstream.\n");
- vlan_hwaccel_rx(skb, qdev->vlgrp,
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
le16_to_cpu(ib_mac_rsp->vlan_id));
} else {
QPRINTK(qdev, RX_STATUS, DEBUG,
"Passing a normal packet upstream.\n");
- netif_rx(skb);
+ netif_receive_skb(skb);
}
}
@@ -1501,6 +1531,11 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
netif_stop_queue(qdev->ndev);
netif_carrier_off(qdev->ndev);
ql_disable_interrupts(qdev);
+ /* Clear adapter up bit to signal the recovery
+ * process that it shouldn't kill the reset worker
+ * thread
+ */
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
@@ -1906,6 +1941,9 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring = &qdev->tx_ring[tx_ring_idx];
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
QPRINTK(qdev, TX_QUEUED, INFO,
"%s: shutting down tx queue %d du to lack of resources.\n",
@@ -1917,10 +1955,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
mac_iocb_ptr = tx_ring_desc->queue_entry;
memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
- if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
- QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
- return NETDEV_TX_BUSY;
- }
mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -1946,6 +1980,12 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
ql_hw_csum_setup(skb,
(struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
}
+ if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+ NETDEV_TX_OK) {
+ QPRINTK(qdev, TX_QUEUED, ERR,
+ "Could not map the segments.\n");
+ return NETDEV_TX_BUSY;
+ }
QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
tx_ring->prod_idx++;
if (tx_ring->prod_idx == tx_ring->wq_len)
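
A sketch of what moving ql_map_send() buys: build the descriptor completely first and make the DMA mapping the last fallible step, so the failure path has nothing to unmap. The descriptor, packet, and map_fragments() names here are hypothetical stand-ins:

#include <string.h>

struct desc { int opcode; int tid; };
struct pkt  { unsigned int len; };

static int map_fragments(struct desc *d, struct pkt *p)	/* ql_map_send() stand-in */
{
	(void)d; (void)p;
	return 0;			/* nonzero would mean the mapping failed */
}

static int xmit(struct desc *desc, struct pkt *p)
{
	memset(desc, 0, sizeof(*desc));	/* note sizeof(*desc), not sizeof(desc) */
	desc->opcode = 1;		/* header fields first... */
	desc->tid = 42;

	if (map_fragments(desc, p))	/* ...fallible mapping last */
		return 1;		/* NETDEV_TX_BUSY analogue; nothing to undo */
	return 0;			/* NETDEV_TX_OK analogue */
}

int main(void)
{
	struct desc d;
	struct pkt p = { 60 };

	return xmit(&d, &p);
}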
@@ -2863,8 +2903,8 @@ static int ql_start_rss(struct ql_adapter *qdev)
/*
* Fill out the Indirection Table.
*/
- for (i = 0; i < 32; i++)
- hash_id[i] = i & 1;
+ for (i = 0; i < 256; i++)
+ hash_id[i] = i & (qdev->rss_ring_count - 1);
/*
* Random values for the IPv6 and IPv4 Hash Keys.
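
The new indirection fill spreads slots evenly across the rings, but the mask form assumes rss_ring_count is a power of two; a standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned char hash_id[256];
	unsigned int ring_count = 4;	/* hypothetical; must be a power of two */
	unsigned int i;

	for (i = 0; i < 256; i++)
		hash_id[i] = i & (ring_count - 1);	/* round-robin across rings */

	printf("slots 0..5 -> %u %u %u %u %u %u\n",
	       hash_id[0], hash_id[1], hash_id[2],
	       hash_id[3], hash_id[4], hash_id[5]);
	return 0;
}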
@@ -2947,9 +2987,9 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
mask = value << 16;
ql_write32(qdev, SYS, mask | value);
- /* Set the default queue. */
- value = NIC_RCV_CFG_DFQ;
- mask = NIC_RCV_CFG_DFQ_MASK;
+ /* Set the default queue, and VLAN behavior. */
+ value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
+ mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
ql_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
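
As I read it, NIC_RCV_CFG takes a write-enable mask in the upper 16 bits of the written word, which is why the hunk ORs NIC_RCV_CFG_RV into both halves. A hypothetical model of that convention:

#include <stdint.h>
#include <stdio.h>

/* Only bits whose enable bit (upper half) is set are modified. */
static uint16_t reg_write(uint16_t reg, uint32_t word)
{
	uint16_t mask  = word >> 16;
	uint16_t value = word & 0xffff;

	return (uint16_t)((reg & ~mask) | (value & mask));
}

int main(void)
{
	uint16_t reg = 0;

	reg = reg_write(reg, (1u << 16) | 1u);	/* enable-and-set bit 0 only */
	printf("reg=0x%04x\n", reg);		/* 0x0001 */
	return 0;
}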
@@ -3090,7 +3130,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
netif_stop_queue(ndev);
netif_carrier_off(ndev);
- cancel_delayed_work_sync(&qdev->asic_reset_work);
+ /* Don't kill the reset worker thread if we
+ * are in the process of recovery.
+ */
+ if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+ cancel_delayed_work_sync(&qdev->asic_reset_work);
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
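
Together with the ql_queue_asic_error() hunk above, this forms a small handshake around QL_ADAPTER_UP: the error path clears the bit before queueing the reset worker, and teardown only cancels that worker while the bit is still set, so recovery never cancels itself. A sketch with a C11 atomic standing in for the kernel bitops (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool adapter_up = true;

static void queue_asic_error(void)
{
	atomic_store(&adapter_up, false);	/* recovery now owns the worker */
	/* queue_delayed_work(workqueue, &asic_reset_work, 0); */
}

static void adapter_down(void)
{
	if (atomic_load(&adapter_up)) {
		/* no recovery pending: safe to cancel the reset worker */
		/* cancel_delayed_work_sync(&asic_reset_work); */
	}
}

int main(void)
{
	queue_asic_error();
	adapter_down();		/* skips the cancel, as intended */
	return 0;
}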
@@ -3122,6 +3166,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
ql_tx_ring_clean(qdev);
+ /* Call netif_napi_del() from common point.
+ */
+ for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
+ netif_napi_del(&qdev->rx_ring[i].napi);
+
spin_lock(&qdev->hw_lock);
status = ql_adapter_reset(qdev);
if (status)
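
A sketch of the common-teardown-point idea, with free() standing in for netif_napi_del() and a hypothetical ring struct; clearing the pointer keeps a repeated teardown harmless:

#include <stdlib.h>

struct rx_ring { void *napi; };

static void rings_teardown(struct rx_ring *rings,
			   unsigned int first, unsigned int count)
{
	unsigned int i;

	for (i = first; i < count; i++) {
		free(rings[i].napi);
		rings[i].napi = NULL;	/* repeated teardown stays harmless */
	}
}

int main(void)
{
	struct rx_ring rings[4] = { { 0 } };

	rings[1].napi = malloc(16);
	rings_teardown(rings, 1, 4);
	rings_teardown(rings, 1, 4);	/* safe: pointers were cleared */
	return 0;
}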
@@ -3491,7 +3540,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
static void qlge_tx_timeout(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+ ql_queue_asic_error(qdev);
}
static void ql_asic_reset_work(struct work_struct *work)