author    | Ingo Molnar <mingo@elte.hu> | 2008-12-31 10:31:57 +0300
committer | Ingo Molnar <mingo@elte.hu> | 2008-12-31 10:31:57 +0300
commit    | a9de18eb761f7c1c860964b2e5addc1a35c7e861 (patch)
tree      | 886e75fdfd09690cd262ca69cb7f5d1d42b48602 /drivers/net/qlge
parent    | b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (diff)
parent    | 6a94cb73064c952255336cc57731904174b2c58f (diff)
download  | linux-a9de18eb761f7c1c860964b2e5addc1a35c7e861.tar.xz
Merge branch 'linus' into stackprotector
Conflicts:
arch/x86/include/asm/pda.h
kernel/fork.c
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r-- | drivers/net/qlge/qlge.h         |   5
-rw-r--r-- | drivers/net/qlge/qlge_ethtool.c |   2
-rw-r--r-- | drivers/net/qlge/qlge_main.c    | 168
-rw-r--r-- | drivers/net/qlge/qlge_mpi.c     |   2
4 files changed, 89 insertions, 88 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 38116f9d4163..ba2e1c5b6bcf 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1375,7 +1375,6 @@ struct ql_adapter {
 	spinlock_t adapter_lock;
 	spinlock_t hw_lock;
 	spinlock_t stats_lock;
-	spinlock_t legacy_lock;	/* used for maintaining legacy intr sync */
 
 	/* PCI Bus Relative Register Addresses */
 	void __iomem *reg_base;
@@ -1399,8 +1398,6 @@ struct ql_adapter {
 	struct msix_entry *msi_x_entry;
 	struct intr_context intr_context[MAX_RX_RINGS];
 
-	int (*legacy_check) (struct ql_adapter *);
-
 	int tx_ring_count;	/* One per online CPU. */
 	u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
 	u32 rss_ring_count;	/* One per online CPU. */
@@ -1502,7 +1499,7 @@ void ql_mpi_work(struct work_struct *work);
 void ql_mpi_reset_work(struct work_struct *work);
 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
 void ql_queue_asic_error(struct ql_adapter *qdev);
-void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
 void ql_set_ethtool_ops(struct net_device *ndev);
 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index b62fbd4bf00f..eefb81b13758 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -97,7 +97,7 @@ exit:
 	return status;
 }
 
-void ql_update_stats(struct ql_adapter *qdev)
+static void ql_update_stats(struct ql_adapter *qdev)
 {
 	u32 i;
 	u64 data;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 4b2caa6b7ac5..718a7bd0cd1a 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -336,12 +336,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 			    (addr[5]);
 
 			QPRINTK(qdev, IFUP, INFO,
-				"Adding %s address %02x:%02x:%02x:%02x:%02x:%02x"
+				"Adding %s address %pM"
 				" at index %d in the CAM.\n",
 				((type == MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
-				 "UNICAST"), addr[0], addr[1], addr[2], addr[3],
-				addr[4], addr[5], index);
+				 "UNICAST"), addr, index);
 
 			status =
 			    ql_wait_reg_rdy(qdev,
@@ -577,41 +576,53 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
  * incremented everytime we queue a worker and decremented everytime
  * a worker finishes.  Once it hits zero we enable the interrupt.
  */
-void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
-	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
+	u32 var = 0;
+	unsigned long hw_flags = 0;
+	struct intr_context *ctx = qdev->intr_context + intr;
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
+		/* Always enable if we're MSIX multi interrupts and
+		 * it's not the default (zeroeth) interrupt.
+		 */
 		ql_write32(qdev, INTR_EN,
-			   qdev->intr_context[intr].intr_en_mask);
-	else {
-		if (qdev->legacy_check)
-			spin_lock(&qdev->legacy_lock);
-		if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
-			QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
-				intr);
-			ql_write32(qdev, INTR_EN,
-				   qdev->intr_context[intr].intr_en_mask);
-		} else {
-			QPRINTK(qdev, INTR, ERR,
-				"Skip enable, other queue(s) are active.\n");
-		}
-		if (qdev->legacy_check)
-			spin_unlock(&qdev->legacy_lock);
+			   ctx->intr_en_mask);
+		var = ql_read32(qdev, STS);
+		return var;
 	}
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (atomic_dec_and_test(&ctx->irq_cnt)) {
+		ql_write32(qdev, INTR_EN,
+			   ctx->intr_en_mask);
+		var = ql_read32(qdev, STS);
+	}
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return var;
 }
 
 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
 	u32 var = 0;
+	unsigned long hw_flags;
+	struct intr_context *ctx;
 
-	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
-		goto exit;
-	else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
+	/* HW disables for us if we're MSIX multi interrupts and
+	 * it's not the default (zeroeth) interrupt.
+	 */
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
+		return 0;
+
+	ctx = qdev->intr_context + intr;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (!atomic_read(&ctx->irq_cnt)) {
 		ql_write32(qdev, INTR_EN,
-			   qdev->intr_context[intr].intr_dis_mask);
+			   ctx->intr_dis_mask);
 		var = ql_read32(qdev, STS);
 	}
-	atomic_inc(&qdev->intr_context[intr].irq_cnt);
-exit:
+	atomic_inc(&ctx->irq_cnt);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	return var;
 }
 
@@ -623,13 +634,15 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 		 * and enables only if the result is zero.
 		 * So we precharge it here.
 		 */
-		atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
+			i == 0))
+			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 		ql_enable_completion_interrupt(qdev, i);
 	}
 
 }
 
-int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
+static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
 {
 	int status = 0;
 	/* wait for reg to come ready */
@@ -819,7 +832,7 @@ end:
 }
 
 /* Get the next large buffer. */
-struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 {
 	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
 	rx_ring->lbq_curr_idx++;
@@ -830,7 +843,7 @@ struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 }
 
 /* Get the next small buffer. */
-struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
 {
 	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
 	rx_ring->sbq_curr_idx++;
@@ -1153,7 +1166,7 @@ map_error:
 	return NETDEV_TX_BUSY;
 }
 
-void ql_realign_skb(struct sk_buff *skb, int len)
+static void ql_realign_skb(struct sk_buff *skb, int len)
 {
 	void *temp_addr = skb->data;
 
@@ -1438,7 +1451,6 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 			"Passing a normal packet upstream.\n");
 		netif_rx(skb);
 	}
-	ndev->last_rx = jiffies;
 }
 
 /* Process an outbound completion from an rx ring. */
@@ -1635,7 +1647,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
 			rx_ring->cq_id);
 
 	if (work_done < budget) {
-		__netif_rx_complete(qdev->ndev, napi);
+		__netif_rx_complete(napi);
 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
 	}
 	return work_done;
@@ -1720,24 +1732,10 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
 {
 	struct rx_ring *rx_ring = dev_id;
-	struct ql_adapter *qdev = rx_ring->qdev;
-	netif_rx_schedule(qdev->ndev, &rx_ring->napi);
+	netif_rx_schedule(&rx_ring->napi);
 	return IRQ_HANDLED;
 }
 
-/* We check here to see if we're already handling a legacy
- * interrupt.  If we are, then it must belong to another
- * chip with which we're sharing the interrupt line.
- */
-int ql_legacy_check(struct ql_adapter *qdev)
-{
-	int err;
-	spin_lock(&qdev->legacy_lock);
-	err = atomic_read(&qdev->intr_context[0].irq_cnt);
-	spin_unlock(&qdev->legacy_lock);
-	return err;
-}
-
 /* This handles a fatal error, MPI activity, and the default
  * rx_ring in an MSI-X multiple vector environment.
  * In MSI/Legacy environment it also process the rest of
@@ -1752,12 +1750,15 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	int i;
 	int work_done = 0;
 
-	if (qdev->legacy_check && qdev->legacy_check(qdev)) {
-		QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
-		return IRQ_NONE;	/* Not our interrupt */
+	spin_lock(&qdev->hw_lock);
+	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
+		QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+		spin_unlock(&qdev->hw_lock);
+		return IRQ_NONE;
 	}
+	spin_unlock(&qdev->hw_lock);
 
-	var = ql_read32(qdev, STS);
+	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
 
 	/*
 	 * Check for fatal error.
@@ -1817,12 +1818,12 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 							&rx_ring->rx_work,
 							0);
 				else
-					netif_rx_schedule(qdev->ndev,
-							  &rx_ring->napi);
+					netif_rx_schedule(&rx_ring->napi);
 				work_done++;
 			}
 		}
 	}
+	ql_enable_completion_interrupt(qdev, intr_context->intr);
 	return work_done ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -2066,7 +2067,7 @@ err:
 	return -ENOMEM;
 }
 
-void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int i;
 	struct bq_desc *lbq_desc;
@@ -2129,7 +2130,7 @@ mem_error:
 	return -ENOMEM;
 }
 
-void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int i;
 	struct bq_desc *sbq_desc;
@@ -2464,7 +2465,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
 
 	/* PCI doorbell mem area + 0x00 for consumer index register */
-	rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
+	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
 	rx_ring->cnsmr_idx = 0;
 	rx_ring->curr_entry = rx_ring->cq_base;
 
@@ -2472,10 +2473,10 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	rx_ring->valid_db_reg = doorbell_area + 0x04;
 
 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
-	rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);
+	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
 
 	/* PCI doorbell mem area + 0x1c */
-	rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);
+	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
 
 	memset((void *)cqicb, 0, sizeof(struct cqicb));
 	cqicb->msix_vect = rx_ring->irq;
@@ -2606,7 +2607,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
 	 * Assign doorbell registers for this tx_ring.
 	 */
 	/* TX PCI doorbell mem area for tx producer index */
-	tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
+	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
 	tx_ring->prod_idx = 0;
 	/* TX PCI doorbell mem area + 0x04 */
 	tx_ring->valid_db_reg = doorbell_area + 0x04;
@@ -2701,8 +2702,6 @@ msi:
 		}
 	}
 	irq_type = LEG_IRQ;
-	spin_lock_init(&qdev->legacy_lock);
-	qdev->legacy_check = ql_legacy_check;
 	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
 }
 
@@ -3124,11 +3123,7 @@ static void ql_display_dev_info(struct net_device *ndev)
 		qdev->chip_rev_id >> 4 & 0x0000000f,
 		qdev->chip_rev_id >> 8 & 0x0000000f,
 		qdev->chip_rev_id >> 12 & 0x0000000f);
-	QPRINTK(qdev, PROBE, INFO,
-		"MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
-		ndev->dev_addr[0], ndev->dev_addr[1],
-		ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
-		ndev->dev_addr[5]);
+	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
 }
 
 static int ql_adapter_down(struct ql_adapter *qdev)
@@ -3153,7 +3148,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	 * a workqueue only if it's a single interrupt
 	 * environment (MSI/Legacy).
 	 */
-	for (i = 1; i > qdev->rx_ring_count; i++) {
+	for (i = 1; i < qdev->rx_ring_count; i++) {
 		rx_ring = &qdev->rx_ring[i];
 		/* Only the RSS rings use NAPI on multi irq
 		 * environment.  Outbound completion processing
@@ -3523,6 +3518,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 	struct sockaddr *addr = p;
+	int ret = 0;
 
 	if (netif_running(ndev))
 		return -EBUSY;
@@ -3535,11 +3531,11 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 	if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
 			MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
 		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
-		return -1;
+		ret = -1;
 	}
 	spin_unlock(&qdev->hw_lock);
 
-	return 0;
+	return ret;
 }
 
 static void qlge_tx_timeout(struct net_device *ndev)
@@ -3589,7 +3585,7 @@ static void ql_release_all(struct pci_dev *pdev)
 		qdev->q_workqueue = NULL;
 	}
 	if (qdev->reg_base)
-		iounmap((void *)qdev->reg_base);
+		iounmap(qdev->reg_base);
 	if (qdev->doorbell_area)
 		iounmap(qdev->doorbell_area);
 	pci_release_regions(pdev);
@@ -3718,6 +3714,22 @@ err_out:
 	return err;
 }
 
+
+static const struct net_device_ops qlge_netdev_ops = {
+	.ndo_open		= qlge_open,
+	.ndo_stop		= qlge_close,
+	.ndo_start_xmit		= qlge_send,
+	.ndo_change_mtu		= qlge_change_mtu,
+	.ndo_get_stats		= qlge_get_stats,
+	.ndo_set_multicast_list	= qlge_set_multicast_list,
+	.ndo_set_mac_address	= qlge_set_mac_address,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_tx_timeout		= qlge_tx_timeout,
+	.ndo_vlan_rx_register	= ql_vlan_rx_register,
+	.ndo_vlan_rx_add_vid	= ql_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= ql_vlan_rx_kill_vid,
+};
+
 static int __devinit qlge_probe(struct pci_dev *pdev,
 				const struct pci_device_id *pci_entry)
 {
@@ -3755,19 +3767,11 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
 	 */
 	ndev->tx_queue_len = qdev->tx_ring_size;
 	ndev->irq = pdev->irq;
-	ndev->open = qlge_open;
-	ndev->stop = qlge_close;
-	ndev->hard_start_xmit = qlge_send;
+
+	ndev->netdev_ops = &qlge_netdev_ops;
 	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
-	ndev->change_mtu = qlge_change_mtu;
-	ndev->get_stats = qlge_get_stats;
-	ndev->set_multicast_list = qlge_set_multicast_list;
-	ndev->set_mac_address = qlge_set_mac_address;
-	ndev->tx_timeout = qlge_tx_timeout;
 	ndev->watchdog_timeo = 10 * HZ;
-	ndev->vlan_rx_register = ql_vlan_rx_register;
-	ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
-	ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
+
 	err = register_netdev(ndev);
 	if (err) {
 		dev_err(&pdev->dev, "net device registration failed.\n");
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 24fe344bcf1f..fa31891b6e62 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -19,7 +19,7 @@ exit:
 	return status;
 }
 
-int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
+static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
 {
 	int i, status;