author     Mark Brown <broonie@kernel.org>  2023-07-17 08:12:31 +0300
committer  Mark Brown <broonie@kernel.org>  2023-07-17 08:12:31 +0300
commit     0791faebfe750292a8a842b64795a390ca4a3b51 (patch)
tree       0e6095a5a0130398b0693bddfdc421c41eebda7c /mm/swap.c
parent     e8bf1741c14eb8e4a4e1364d45aeeab66660ab9b (diff)
parent     fdf0eaf11452d72945af31804e2a1048ee1b574c (diff)
ASoC: Merge v6.5-rc2
Get a baseline similar to my other branches, and pick up fixes for
people using the branch.
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c   20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 423199ee8478..cd8f0150ba3a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -76,7 +76,7 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
 
 /*
  * This path almost never happens for VM activity - pages are normally freed
- * via pagevecs. But it gets used by networking - and for compound pages.
+ * in batches. But it gets used by networking - and for compound pages.
  */
 static void __page_cache_release(struct folio *folio)
 {
@@ -1044,25 +1044,25 @@ void release_pages(release_pages_arg arg, int nr)
 EXPORT_SYMBOL(release_pages);
 
 /*
- * The pages which we're about to release may be in the deferred lru-addition
+ * The folios which we're about to release may be in the deferred lru-addition
  * queues. That would prevent them from really being freed right now. That's
- * OK from a correctness point of view but is inefficient - those pages may be
+ * OK from a correctness point of view but is inefficient - those folios may be
  * cache-warm and we want to give them back to the page allocator ASAP.
  *
- * So __pagevec_release() will drain those queues here.
+ * So __folio_batch_release() will drain those queues here.
  * folio_batch_move_lru() calls folios_put() directly to avoid
  * mutual recursion.
  */
-void __pagevec_release(struct pagevec *pvec)
+void __folio_batch_release(struct folio_batch *fbatch)
 {
-        if (!pvec->percpu_pvec_drained) {
+        if (!fbatch->percpu_pvec_drained) {
                 lru_add_drain();
-                pvec->percpu_pvec_drained = true;
+                fbatch->percpu_pvec_drained = true;
         }
-        release_pages(pvec->pages, pagevec_count(pvec));
-        pagevec_reinit(pvec);
+        release_pages(fbatch->folios, folio_batch_count(fbatch));
+        folio_batch_reinit(fbatch);
 }
-EXPORT_SYMBOL(__pagevec_release);
+EXPORT_SYMBOL(__folio_batch_release);
 
 /**
  * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
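For context (not part of this commit): a minimal sketch of how a caller typically drives the renamed API, assuming the folio_batch helpers declared in include/linux/pagevec.h (folio_batch_init(), folio_batch_add(), folio_batch_release()). The function name release_collected_folios() and its parameters are hypothetical, used only for illustration.

/*
 * Illustrative sketch only - not from this commit. Folios are collected
 * into a folio_batch and handed back in bulk; folio_batch_release() is
 * the wrapper that calls the __folio_batch_release() shown above when
 * the batch is non-empty.
 */
#include <linux/mm.h>           /* struct folio */
#include <linux/pagevec.h>      /* struct folio_batch, folio_batch_*() */

static void release_collected_folios(struct folio **folios, unsigned int nr)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        for (i = 0; i < nr; i++) {
                /* folio_batch_add() returns remaining space; 0 means full */
                if (!folio_batch_add(&fbatch, folios[i]))
                        folio_batch_release(&fbatch);   /* drain, then release_pages() */
        }
        folio_batch_release(&fbatch);   /* flush any leftover folios */
}

As the comment in the hunk notes, folios still parked in the deferred lru-addition queues would not be freed immediately, so the drain in __folio_batch_release() lets cache-warm folios go back to the page allocator promptly.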