summaryrefslogtreecommitdiff
path: root/lib/debugobjects.c
diff options
context:
space:
mode:
authorLeon Romanovsky <leon@kernel.org>2026-03-05 13:41:16 +0300
committerLeon Romanovsky <leon@kernel.org>2026-03-05 13:41:16 +0300
commitf63f1d74e952d85ada5af95a52ca61c7dc72d5e4 (patch)
tree7c2aaea088317cbd6c7bcf1881ae22d25a29c950 /lib/debugobjects.c
parentf30bc6f9b9cc492634a333be9c6aa9755ca1bf17 (diff)
parent385a06f74ff7a03e3fb0b15fb87cfeb052d75073 (diff)
downloadlinux-f63f1d74e952d85ada5af95a52ca61c7dc72d5e4.tar.xz
Add support for TLP emulation
This series adds support for Transaction Layer Packet (TLP) emulation response gateway regions, enabling userspace device emulation software to write TLP responses directly to lower layers without kernel driver involvement. Currently, the mlx5 driver exposes VirtIO emulation access regions via the MLX5_IB_METHOD_VAR_OBJ_ALLOC ioctl. This series extends that ioctl to also support allocating TLP response gateway channels for PCI device emulation use cases. Signed-off-by: Leon Romanovsky <leon@kernel.org>
Diffstat (limited to 'lib/debugobjects.c')
-rw-r--r--lib/debugobjects.c19
1 file changed, 18 insertions, 1 deletion
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 89a1d6745dc2..12f50de85b62 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -398,9 +398,26 @@ static void fill_pool(void)
atomic_inc(&cpus_allocating);
while (pool_should_refill(&pool_global)) {
+ gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
HLIST_HEAD(head);
- if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
+ /*
+ * Allow reclaim only in preemptible context and during
+ * early boot. If not preemptible, the caller might hold
+ * locks causing a deadlock in the allocator.
+ *
+ * If the reclaim flag is not set during early boot then
+ * allocations, which happen before deferred page
+ * initialization has completed, will fail.
+ *
+ * In preemptible context the flag is harmless and not a
+ * performance issue as that's usually invoked from slow
+ * path initialization context.
+ */
+ if (preemptible() || system_state < SYSTEM_SCHEDULING)
+ gfp |= __GFP_KSWAPD_RECLAIM;
+
+ if (!kmem_alloc_batch(&head, obj_cache, gfp))
break;
guard(raw_spinlock_irqsave)(&pool_lock);