author	Jakub Kicinski <kuba@kernel.org>	2023-08-07 22:28:54 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2023-08-07 23:05:53 +0300
commit	66244337512fbe51a32e7ebc8a5b5c5dc7a5421e (patch)
tree	4661a21f18d2a3d50f072805cb724011013d8259 /include/linux
parent	96bc313783cbad7682f5f91f7483dc47296398bb (diff)
parent	4a36d0180c452c3482792e0ff14e2bcf536a9284 (diff)
download	linux-66244337512fbe51a32e7ebc8a5b5c5dc7a5421e.tar.xz
Merge branch 'page_pool-a-couple-of-assorted-optimizations'
Alexander Lobakin says:

====================
page_pool: a couple of assorted optimizations

That initially was a spin-off of the IAVF PP series[0], but has grown
(and shrunk) since then a bunch. In fact, it consists of three
semi-independent blocks:

* #1-2: Compile-time optimization. Split page_pool.h into 2 headers to
  not overbloat the consumers not needing complex inline helpers and
  then stop including it in skbuff.h at all. The first patch is also
  prereq for the whole series.
* #3: Improve cacheline locality for users of the Page Pool frag API.
* #4-6: Use direct cache recycling more aggressively, when it is safe
  obviously. In addition, make sure nobody wants to use Page Pool API
  with disabled interrupts.

Patches #1 and #5 are authored by Yunsheng and Jakub respectively, with
small modifications from my side as per ML discussions.

For the perf numbers for #3-6, please see individual commit messages.

Also available on my GH with many more Page Pool goodies[1].

[0] https://lore.kernel.org/netdev/20230530150035.1943669-1-aleksander.lobakin@intel.com
[1] https://github.com/alobakin/linux/commits/iavf-pp-frag
====================

Link: https://lore.kernel.org/r/20230804180529.2483231-1-aleksander.lobakin@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
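For patches #1-2, the effect on consumers looks roughly like the sketch
below. The header names follow patch #1 of this series, and my_rx_ring
is a purely hypothetical driver structure used for illustration only:

/* A driver header that only stores a page_pool pointer can get away
 * with the slim "types" header, so it no longer drags the inline
 * helpers into everything that includes skbuff.h.
 */
#include <net/page_pool/types.h>	/* struct page_pool, page_pool_params */

struct my_rx_ring {			/* hypothetical driver structure */
	struct page_pool	*pool;
	/* ... */
};

/* Only the .c file that actually allocates and recycles pages needs
 * the heavier inline helpers:
 */
#include <net/page_pool/helpers.h>	/* page_pool_dev_alloc_pages() etc. */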
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/lockdep.h	7
-rw-r--r--	include/linux/skbuff.h	5
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 310f85903c91..dc2844b071c2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -625,6 +625,12 @@ do { \
 	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
 } while (0)
 
+#define lockdep_assert_no_hardirq()					\
+do {									\
+	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
+					   !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
 #define lockdep_assert_preemption_enabled()				\
 do {									\
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
@@ -659,6 +665,7 @@ do { \
 # define lockdep_assert_irqs_enabled() do { } while (0)
 # define lockdep_assert_irqs_disabled() do { } while (0)
 # define lockdep_assert_in_irq() do { } while (0)
+# define lockdep_assert_no_hardirq() do { } while (0)
 # define lockdep_assert_preemption_enabled() do { } while (0)
 # define lockdep_assert_preemption_disabled() do { } while (0)
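The new lockdep_assert_no_hardirq() warns when called either from
hardirq context or with hardirqs disabled; patch #5 uses it so that
"napi_safe" direct recycling cannot silently happen where the lockless
per-pool cache is unsafe. A minimal sketch of that kind of use
(my_napi_recycle() is hypothetical, and the helpers.h header name
assumes patch #1 of this series):

#include <linux/lockdep.h>
#include <net/page_pool/helpers.h>	/* pre-split trees: <net/page_pool.h> */

/* Hypothetical fast-path helper: only legal from BH/NAPI context. */
static void my_napi_recycle(struct page_pool *pool, struct page *page)
{
	/* With lockdep enabled, warn once if we run in hardirq context
	 * or with hardirqs disabled - the lockless direct cache must
	 * not be touched from there.
	 */
	lockdep_assert_no_hardirq();

	page_pool_put_full_page(pool, page, true /* allow_direct */);
}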
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 16a49ba534e4..aa57e2eca33b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -32,7 +32,6 @@
 #include <linux/if_packet.h>
 #include <linux/llist.h>
 #include <net/flow.h>
-#include <net/page_pool.h>
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <linux/netfilter/nf_conntrack_common.h>
 #endif
@@ -3421,13 +3420,15 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
 	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
 }
 
+bool napi_pp_put_page(struct page *page, bool napi_safe);
+
 static inline void
 napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
 {
 	struct page *page = skb_frag_page(frag);
 
 #ifdef CONFIG_PAGE_POOL
-	if (recycle && page_pool_return_skb_page(page, napi_safe))
+	if (recycle && napi_pp_put_page(page, napi_safe))
 		return;
 #endif
 	put_page(page);
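With napi_pp_put_page() declared in skbuff.h, the recycling fast path
above no longer needs any page_pool header; other callers follow the
same pattern. A hedged sketch (my_put_frag_page() is hypothetical):

#include <linux/skbuff.h>

/* Hypothetical caller: drop a reference on a page that may be backed
 * by a page_pool.  Pass napi_safe == true only when running in the
 * NAPI/BH context the owning pool is bound to.
 */
static void my_put_frag_page(struct page *page, bool napi_safe)
{
#ifdef CONFIG_PAGE_POOL
	if (napi_pp_put_page(page, napi_safe))
		return;		/* recycled back into its pool */
#endif
	put_page(page);		/* plain page, or recycling declined */
}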