author     Stephen Hemminger <shemminger@osdl.org>  2006-05-09 02:11:26 +0400
committer  Stephen Hemminger <shemminger@osdl.org>  2006-05-09 03:00:23 +0400
commit     d324031245abbb54e4e0321004430826052b6c37 (patch)
tree       23dda7055fce9675263fe992c3beda27f1161650
parent     6810b548b25114607e0814612d84125abccc0a4f (diff)
download   linux-d324031245abbb54e4e0321004430826052b6c37.tar.xz
sky2: backout NAPI reschedule
This is a backout of an earlier patch. The whole rescheduling hack was a bad
idea. It doesn't really solve the problem and it makes the code more
complicated for no good reason.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
-rw-r--r--  drivers/net/sky2.c         | 20
-rw-r--r--  include/linux/netdevice.h  | 18
2 files changed, 12 insertions(+), 26 deletions(-)
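
For context, the pre-2.6.24 NAPI completion pattern that this backout returns to
looks roughly like the sketch below. It is a minimal, hedged illustration: my_poll()
and my_process_rx() are made-up names, not sky2 or kernel code; only the netif_*
calls and the dev->quota / *budget bookkeeping follow the interface used in the diff.

#include <linux/netdevice.h>	/* old (pre-2.6.24) NAPI interface, assumed */

/* Sketch of the classic dev->poll() flow this backout restores.
 * my_poll() and my_process_rx() are hypothetical names. */
static int my_poll(struct net_device *dev, int *budget)
{
	int work_limit = min(dev->quota, *budget);
	int work_done;

	work_done = my_process_rx(dev, work_limit);	/* process up to the limit */

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= work_limit)
		return 1;		/* budget exhausted: stay on the poll list */

	netif_rx_complete(dev);		/* done: remove ourselves from the poll list */
	return 0;			/* any new work is rescheduled from the IRQ handler */
}
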
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 227df9876a2c..76da74fbe859 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -2105,7 +2105,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
int work_done = 0;
u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
- restart_poll:
if (unlikely(status & ~Y2_IS_STAT_BMU)) {
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
@@ -2136,7 +2135,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
}
if (status & Y2_IS_STAT_BMU) {
- work_done += sky2_status_intr(hw, work_limit - work_done);
+ work_done = sky2_status_intr(hw, work_limit);
*budget -= work_done;
dev0->quota -= work_done;
@@ -2148,22 +2147,9 @@ static int sky2_poll(struct net_device *dev0, int *budget)
mod_timer(&hw->idle_timer, jiffies + HZ);
- local_irq_disable();
- __netif_rx_complete(dev0);
+ netif_rx_complete(dev0);
status = sky2_read32(hw, B0_Y2_SP_LISR);
-
- if (unlikely(status)) {
- /* More work pending, try and keep going */
- if (__netif_rx_schedule_prep(dev0)) {
- __netif_rx_reschedule(dev0, work_done);
- status = sky2_read32(hw, B0_Y2_SP_EISR);
- local_irq_enable();
- goto restart_poll;
- }
- }
-
- local_irq_enable();
return 0;
}
@@ -2181,6 +2167,8 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
prefetch(&hw->st_le[hw->st_idx]);
if (likely(__netif_rx_schedule_prep(dev0)))
__netif_rx_schedule(dev0);
+ else
+ printk(KERN_DEBUG PFX "irq race detected\n");
return IRQ_HANDLED;
}
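
The __netif_rx_schedule_prep()/__netif_rx_schedule() pair in the interrupt handler
above is the standard scheduling side of this interface; prep fails only when the
device is already on the poll list, which is the race the new debug printk reports.
A generic, hedged sketch of that pattern follows (my_intr() and my_irq_pending()
are made-up names; only the netif_* calls are the real interface):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Sketch of the interrupt-side NAPI scheduling pattern. */
static irqreturn_t my_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;

	if (!my_irq_pending(dev))	/* hypothetical: nothing for this device */
		return IRQ_NONE;

	/* prep succeeds only if the device is not already scheduled,
	 * so the poll routine can never be queued twice */
	if (likely(__netif_rx_schedule_prep(dev)))
		__netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
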
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 309f9190a922..a461b51d6076 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -831,21 +831,19 @@ static inline void netif_rx_schedule(struct net_device *dev)
__netif_rx_schedule(dev);
}
-
-static inline void __netif_rx_reschedule(struct net_device *dev, int undo)
-{
- dev->quota += undo;
- list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-}
-
-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
+/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
+ * Do not inline this?
+ */
static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{
if (netif_rx_schedule_prep(dev)) {
unsigned long flags;
+
+ dev->quota += undo;
+
local_irq_save(flags);
- __netif_rx_reschedule(dev, undo);
+ list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
local_irq_restore(flags);
return 1;
}
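
With __netif_rx_reschedule() removed, the quota restore and poll-list insertion now
live directly in netif_rx_reschedule(). A driver that still wants the "complete,
then catch late work" behaviour might use the revised helper roughly as in this
sketch; my_poll(), my_process_rx() and my_more_work() are illustrative names only,
not sky2 code.

#include <linux/netdevice.h>

/* Hedged sketch of netif_rx_reschedule() usage after this change. */
static int my_poll(struct net_device *dev, int *budget)
{
	int work_limit, work_done;

restart_poll:
	work_limit = min(dev->quota, *budget);
	work_done = my_process_rx(dev, work_limit);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done >= work_limit)
		return 1;		/* quota used up, stay scheduled */

	netif_rx_complete(dev);

	/* A packet raced in after the last check: on success the helper
	 * gives the consumed quota back and re-adds the device to the
	 * poll list, so keep processing in this invocation. */
	if (my_more_work(dev) && netif_rx_reschedule(dev, work_done))
		goto restart_poll;

	return 0;
}
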