author		Jens Axboe <jens.axboe@oracle.com>	2010-05-21 23:27:26 +0400
committer	Jens Axboe <jens.axboe@oracle.com>	2010-05-21 23:27:26 +0400
commit		ee9a3607fb03e804ddf624544105f4e34260c380 (patch)
tree		ce41b6e0fa10982a306f6c142a92dbf3c9961284 /include/linux/netpoll.h
parent		b492e95be0ae672922f4734acf3f5d35c30be948 (diff)
parent		d515e86e639890b33a09390d062b0831664f04a2 (diff)
download	linux-ee9a3607fb03e804ddf624544105f4e34260c380.tar.xz
Merge branch 'master' into for-2.6.35
Conflicts:
	fs/ext3/fsync.c

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'include/linux/netpoll.h')
-rw-r--r--	include/linux/netpoll.h	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index a765ea898549..e9e231215865 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -14,6 +14,7 @@
 
 struct netpoll {
 	struct net_device *dev;
+	struct net_device *real_dev;
 	char dev_name[IFNAMSIZ];
 	const char *name;
 	void (*rx_hook)(struct netpoll *, int, char *, int);
@@ -36,8 +37,11 @@ struct netpoll_info {
 	struct sk_buff_head txq;
 
 	struct delayed_work tx_work;
+
+	struct netpoll *netpoll;
 };
 
+void netpoll_poll_dev(struct net_device *dev);
 void netpoll_poll(struct netpoll *np);
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
@@ -47,22 +51,23 @@ int netpoll_trap(void);
 void netpoll_set_trap(int trap);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 
 
 #ifdef CONFIG_NETPOLL
-static inline int netpoll_rx(struct sk_buff *skb)
+static inline bool netpoll_rx(struct sk_buff *skb)
 {
 	struct netpoll_info *npinfo = skb->dev->npinfo;
 	unsigned long flags;
-	int ret = 0;
+	bool ret = false;
 
 	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
-		return 0;
+		return false;
 
 	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	/* check rx_flags again with the lock held */
 	if (npinfo->rx_flags && __netpoll_rx(skb))
-		ret = 1;
+		ret = true;
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
 	return ret;
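
A minimal usage sketch, not part of this patch: it shows how a driver receive path might consult the bool-returning netpoll_rx() declared above. The handler name example_rx() is hypothetical; only netpoll_rx() from this header and netif_receive_skb() from the core stack are assumed.

/* Hypothetical driver RX handler; example_rx is an illustrative name only. */
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>

static void example_rx(struct sk_buff *skb)
{
	/*
	 * With this change netpoll_rx() returns bool: true means netpoll
	 * consumed the frame (e.g. netconsole traffic), so it must not
	 * also be handed to the regular network stack.
	 */
	if (netpoll_rx(skb))
		return;

	netif_receive_skb(skb);
}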