Diffstat (limited to 'mm/damon/paddr.c')
 mm/damon/paddr.c | 34 ++++++++++++++++++++++++++--------
 1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 69afe1933e29..4102a8c5f992 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * DAMON Primitives for The Physical Address Space
+ * DAMON Code for The Physical Address Space
  *
  * Author: SeongJae Park <sj@kernel.org>
  */
@@ -92,12 +92,20 @@ static bool damon_folio_young_one(struct folio *folio,
 {
        bool *accessed = arg;
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
+       pte_t pte;
 
        *accessed = false;
        while (page_vma_mapped_walk(&pvmw)) {
                addr = pvmw.address;
                if (pvmw.pte) {
-                       *accessed = pte_young(ptep_get(pvmw.pte)) ||
+                       pte = ptep_get(pvmw.pte);
+
+                       /*
+                        * PFN swap PTEs, such as device-exclusive ones, that
+                        * actually map pages are "old" from a CPU perspective.
+                        * The MMU notifier takes care of any device aspects.
+                        */
+                       *accessed = (pte_present(pte) && pte_young(pte)) ||
                                !folio_test_idle(folio) ||
                                mmu_notifier_test_young(vma->vm_mm, addr);
                } else {
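The hunk above narrows the CPU young-bit check to present PTEs, so PFN swap PTEs (e.g. device-exclusive entries) no longer feed their swap-entry bits into pte_young(). As a standalone illustration of that ordering, here is a userspace C sketch; fake_pte, fake_folio, and folio_accessed are made-up stand-ins, not the kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for kernel state; not the real pte_t/folio. */
    struct fake_pte { bool present; bool young; };
    struct fake_folio { bool idle; };

    /*
     * Mirrors the patched expression: the hardware young bit only
     * counts for present PTEs; the folio idle flag and MMU-notifier
     * state are still consulted for every mapping.
     */
    static bool folio_accessed(struct fake_pte pte, struct fake_folio folio,
                               bool notifier_young)
    {
            return (pte.present && pte.young) || !folio.idle || notifier_young;
    }

    int main(void)
    {
            /* A device-exclusive (non-present) PTE must read as "old". */
            struct fake_pte nonpresent = { .present = false, .young = true };
            struct fake_folio idle = { .idle = true };

            printf("%d\n", folio_accessed(nonpresent, idle, false)); /* 0 */
            return 0;
    }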
@@ -203,11 +211,15 @@ static bool damos_pa_filter_match(struct damos_filter *filter,
 {
        bool matched = false;
        struct mem_cgroup *memcg;
+       size_t folio_sz;
 
        switch (filter->type) {
        case DAMOS_FILTER_TYPE_ANON:
                matched = folio_test_anon(folio);
                break;
+       case DAMOS_FILTER_TYPE_ACTIVE:
+               matched = folio_test_active(folio);
+               break;
        case DAMOS_FILTER_TYPE_MEMCG:
                rcu_read_lock();
                memcg = folio_memcg_check(folio);
@@ -222,6 +234,14 @@ static bool damos_pa_filter_match(struct damos_filter *filter,
                if (matched)
                        damon_folio_mkold(folio);
                break;
+       case DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:
+               folio_sz = folio_size(folio);
+               matched = filter->sz_range.min <= folio_sz &&
+                       folio_sz <= filter->sz_range.max;
+               break;
+       case DAMOS_FILTER_TYPE_UNMAPPED:
+               matched = !folio_mapped(folio) || !folio_raw_mapping(folio);
+               break;
        default:
                break;
        }
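The two new filter cases above are simple predicates: HUGEPAGE_SIZE matches folios whose size falls inside an inclusive [min, max] byte range, and UNMAPPED matches folios with no (raw) mapping. A self-contained sketch of the size check, with a hypothetical size_range struct standing in for the filter's sz_range:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the filter's sz_range member. */
    struct size_range { size_t min, max; };

    /* Inclusive on both ends, matching the hunk's comparison. */
    static bool size_matches(size_t folio_sz, struct size_range r)
    {
            return r.min <= folio_sz && folio_sz <= r.max;
    }

    int main(void)
    {
            struct size_range pmd_and_up = { .min = 2UL << 20, .max = 1UL << 30 };

            printf("%d\n", size_matches(4096, pmd_and_up));      /* 0: 4KiB base page */
            printf("%d\n", size_matches(2UL << 20, pmd_and_up)); /* 1: 2MiB folio */
            return 0;
    }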
@@ -239,11 +259,11 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
        if (scheme->core_filters_allowed)
                return false;
 
-       damos_for_each_filter(filter, scheme) {
+       damos_for_each_ops_filter(filter, scheme) {
                if (damos_pa_filter_match(filter, folio))
                        return !filter->allow;
        }
-       return false;
+       return scheme->ops_filters_default_reject;
 }
 
 static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
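In the filter-out hunk, the first matching ops filter decides the folio's fate (an "allow" match keeps it, a "deny" match rejects it), and the no-match fallback changes from a hard-coded false to a per-scheme default. A sketch of that walk over a singly linked list of hypothetical filters; only the allow flag and the default-reject fallback follow the diff, the rest is illustrative:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative filter node; not the real damos_filter. */
    struct toy_filter {
            bool allow;
            bool (*match)(const void *folio);
            struct toy_filter *next;
    };

    /*
     * First match wins: return true ("filter out") iff the matching
     * filter is a deny filter. With no match at all, fall back to the
     * scheme-wide default (ops_filters_default_reject in the diff).
     */
    static bool filter_out(const struct toy_filter *head, const void *folio,
                           bool default_reject)
    {
            for (const struct toy_filter *f = head; f; f = f->next)
                    if (f->match(folio))
                            return !f->allow;
            return default_reject;
    }

    static bool is_anon(const void *folio) { (void)folio; return true; }

    int main(void)
    {
            struct toy_filter deny_anon = {
                    .allow = false, .match = is_anon, .next = NULL,
            };

            /* A matching deny filter rejects; an empty list uses the default. */
            printf("%d %d\n", filter_out(&deny_anon, NULL, false),  /* 1 */
                              filter_out(NULL, NULL, true));        /* 1 */
            return 0;
    }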
@@ -267,7 +287,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
        struct folio *folio;
 
        /* check access in page level again by default */
-       damos_for_each_filter(filter, s) {
+       damos_for_each_ops_filter(filter, s) {
                if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
                        install_young_filter = false;
                        break;
@@ -519,7 +539,7 @@ static bool damon_pa_scheme_has_filter(struct damos *s)
 {
        struct damos_filter *f;
 
-       damos_for_each_filter(f, s)
+       damos_for_each_ops_filter(f, s)
                return true;
        return false;
 }
@@ -528,7 +548,6 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
                unsigned long *sz_filter_passed)
 {
        unsigned long addr;
-       LIST_HEAD(folio_list);
        struct folio *folio;
 
        if (!damon_pa_scheme_has_filter(s))
@@ -604,7 +623,6 @@ static int __init damon_pa_initcall(void)
        .update = NULL,
        .prepare_access_checks = damon_pa_prepare_access_checks,
        .check_accesses = damon_pa_check_accesses,
-       .reset_aggregated = NULL,
        .target_valid = NULL,
        .cleanup = NULL,
        .apply_scheme = damon_pa_apply_scheme,