author		David S. Miller <davem@davemloft.net>	2016-11-16 21:40:59 +0300
committer	David S. Miller <davem@davemloft.net>	2016-11-16 21:40:59 +0300
commit		fc3f9146cec6ced8866071f711aba3f356af65f6 (patch)
tree		51efb65cae00ee88ec39c9e41569a95c0f8526f7 /include/linux/netdevice.h
parent		2874aa2e467dbc0b4f7cb0ee5dc872e98e000a47 (diff)
parent		80f1c21c53dc5906944eac52d643f63e82ad1673 (diff)
Merge branch 'busypoll-preemption-and-other-optimizations'
Eric Dumazet says:

====================
net: busy-poll: allow preemption and other optimizations

It is time to have preemption points in sk_busy_loop() and improve
its scalability.

Also napi_complete() and friends can tell drivers when it is safe to
not re-enable device interrupts, saving some overhead under high busy
polling.

mlx4 and bnx2x are changed accordingly, to show how this busy polling
status can be exploited by drivers.

Next steps will implement Zach Brown's suggestion, where NAPI polling
would be enabled all the time for some chosen queues. This is needed
for efficient epoll() support anyway.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
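The driver-side contract described above can be illustrated with a short
sketch. This is only a sketch under assumptions: foo_poll(), struct
foo_priv, foo_clean_rx() and foo_arm_irq() are hypothetical names; only
napi_complete_done() and its new boolean return come from this series.

/* Hypothetical driver poll routine. When napi_complete_done() returns
 * false, busy polling currently owns this NAPI instance, so the driver
 * leaves device interrupts masked instead of rearming them.
 */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_clean_rx(priv, budget);	/* hypothetical RX cleanup */

	if (work_done < budget) {
		/* All pending work done; complete NAPI and rearm IRQs
		 * only if busy polling is not running this instance.
		 */
		if (napi_complete_done(napi, work_done))
			foo_arm_irq(priv);		/* hypothetical IRQ rearm */
	}
	return work_done;
}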
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--	include/linux/netdevice.h | 17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 86bacf6a64f0..bcddf951ccee 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -334,6 +334,16 @@ enum {
 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
 	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
 	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
+	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
+};
+
+enum {
+	NAPIF_STATE_SCHED	 = (1UL << NAPI_STATE_SCHED),
+	NAPIF_STATE_DISABLE	 = (1UL << NAPI_STATE_DISABLE),
+	NAPIF_STATE_NPSVC	 = (1UL << NAPI_STATE_NPSVC),
+	NAPIF_STATE_HASHED	 = (1UL << NAPI_STATE_HASHED),
+	NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
+	NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
 };
 
 enum gro_result {
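The NAPIF_STATE_* masks mirror the NAPI_STATE_* bit numbers so that
napi->state can be tested and updated with ordinary atomic operations.
A minimal sketch of the kind of lock-free claim this enables; the masks
and the cmpxchg() pattern follow the series, but the helper name and
function itself are illustrative assumptions, not code from the patch:

/* Illustrative helper: atomically claim a NAPI instance for busy
 * polling. Bail out if the NAPI is being disabled or is already owned
 * by another busy poller; otherwise set both SCHED and IN_BUSY_POLL
 * in a single cmpxchg() transition.
 */
static bool foo_busy_poll_grab(struct napi_struct *napi)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(napi->state);
		if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_IN_BUSY_POLL))
			return false;
		new = val | NAPIF_STATE_SCHED | NAPIF_STATE_IN_BUSY_POLL;
	} while (cmpxchg(&napi->state, val, new) != val);

	return true;
}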
@@ -453,16 +463,17 @@ static inline bool napi_reschedule(struct napi_struct *napi)
 	return false;
 }
 
-void __napi_complete(struct napi_struct *n);
-void napi_complete_done(struct napi_struct *n, int work_done);
+bool __napi_complete(struct napi_struct *n);
+bool napi_complete_done(struct napi_struct *n, int work_done);
 /**
  *	napi_complete - NAPI processing complete
  *	@n: NAPI context
  *
  *	Mark NAPI processing as complete.
  *	Consider using napi_complete_done() instead.
+ *	Return false if device should avoid rearming interrupts.
  */
-static inline void napi_complete(struct napi_struct *n)
+static inline bool napi_complete(struct napi_struct *n)
 {
 	return napi_complete_done(n, 0);
 }