author     Alexander Halbuer <halbuer@sra.uni-hannover.de>   2023-02-01 19:25:49 +0300
committer  Andrew Morton <akpm@linux-foundation.org>         2023-03-29 02:20:06 +0300
commit     2ede3c13be889900f8f5c04b5dc6298201d835c4 (patch)
tree       488e83a1d0a8e5cb9da1de43de60b2f03b95ea78 /mm
parent     a4a4659d86ec0395164dc346c1a7f4d2d1ff4db2 (diff)
mm: reduce lock contention of pcp buffer refill
rmqueue_bulk() batches the allocation of multiple elements to refill the
per-CPU buffers into a single hold of the zone lock. Each element is
allocated and checked using check_pcp_refill(). The check touches every
related struct page, which is especially expensive for higher-order
allocations (huge pages).

This patch reduces the time the lock is held by moving the check out of
the critical section, similar to rmqueue_buddy(), which allocates a
single element. Measurements of parallel allocation-heavy workloads show
a reduction of the average huge page allocation latency of 50 percent
for two cores and nearly 90 percent for 24 cores.

Link: https://lkml.kernel.org/r/20230201162549.68384-1-halbuer@sra.uni-hannover.de
Signed-off-by: Alexander Halbuer <halbuer@sra.uni-hannover.de>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
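[Editor's illustration] The pattern generalizes beyond the page allocator:
when a batch refill validates each element, holding the lock across the
validation serializes the expensive part. Below is a minimal userspace C
sketch of that pattern under stated assumptions, not kernel code; the names
pool_lock, grab_item(), validate_item() and refill_batch() are hypothetical
stand-ins for zone->lock, __rmqueue(), check_pcp_refill() and rmqueue_bulk().

    /*
     * Minimal sketch (assumption: plain C with pthreads). Allocate a batch
     * under the lock, defer per-element validation until after unlocking.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct item {
            struct item *next;
            int payload;
    };

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Cheap allocation step, done under the lock (like __rmqueue()). */
    static struct item *grab_item(void)
    {
            struct item *it = malloc(sizeof(*it));

            if (it)
                    it->payload = rand() % 100;
            return it;
    }

    /* Expensive per-element check, deferred (like check_pcp_refill()). */
    static bool validate_item(const struct item *it)
    {
            return it->payload != 13;       /* placeholder sanity check */
    }

    /*
     * Batch-allocate up to 'count' items onto a private list while holding
     * the lock, then validate outside the critical section. Returns the
     * number of items that survived and were pushed onto *list.
     */
    static int refill_batch(struct item **list, int count)
    {
            struct item *batch = NULL, *it, *next;
            int allocated = 0;

            pthread_mutex_lock(&pool_lock);
            for (int i = 0; i < count; i++) {
                    it = grab_item();
                    if (!it)
                            break;
                    it->next = batch;       /* stash without checking */
                    batch = it;
            }
            pthread_mutex_unlock(&pool_lock);

            /* Deferred check: the lock is free for other threads now. */
            for (it = batch; it; it = next) {
                    next = it->next;
                    if (!validate_item(it)) {
                            free(it);       /* drop a bad element, keep going */
                            continue;
                    }
                    it->next = *list;       /* splice survivor onto the target */
                    *list = it;
                    allocated++;
            }
            return allocated;
    }

    int main(void)
    {
            struct item *list = NULL;

            printf("kept %d items\n", refill_batch(&list, 32));
            return 0;
    }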
Diffstat (limited to 'mm')
-rw-r--r--   mm/page_alloc.c   22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7136c36c5d01..a853f0aa5d31 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3138,6 +3138,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 {
         unsigned long flags;
         int i, allocated = 0;
+        struct list_head *prev_tail = list->prev;
+        struct page *pos, *n;
 
         spin_lock_irqsave(&zone->lock, flags);
         for (i = 0; i < count; ++i) {
@@ -3146,9 +3148,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 if (unlikely(page == NULL))
                         break;
 
-                if (unlikely(check_pcp_refill(page, order)))
-                        continue;
-
                 /*
                  * Split buddy pages returned by expand() are received here in
                  * physical page order. The page is added to the tail of
@@ -3160,7 +3159,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                  * pages are ordered properly.
                  */
                 list_add_tail(&page->pcp_list, list);
-                allocated++;
                 if (is_migrate_cma(get_pcppage_migratetype(page)))
                         __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
                                               -(1 << order));
@@ -3174,6 +3172,22 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
          */
         __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
         spin_unlock_irqrestore(&zone->lock, flags);
+
+        /*
+         * Pages are appended to the pcp list without checking to reduce the
+         * time holding the zone lock. Checking the appended pages happens right
+         * after the critical section while still holding the pcp lock.
+         */
+        pos = list_first_entry(prev_tail, struct page, pcp_list);
+        list_for_each_entry_safe_from(pos, n, list, pcp_list) {
+                if (unlikely(check_pcp_refill(pos, order))) {
+                        list_del(&pos->pcp_list);
+                        continue;
+                }
+
+                allocated++;
+        }
+
         return allocated;
 }
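A note on the deferred-check iteration in this hunk: prev_tail is captured
before any pages are added, so list_first_entry(prev_tail, struct page,
pcp_list) yields the first newly refilled page and the walk covers only the
fresh entries, not the whole pcp list. The _safe_from variant of the iterator
is needed because list_del() removes failed pages while the loop is still
traversing, and allocated is now incremented only after a page passes
check_pcp_refill().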