author     Mark Brown <broonie@kernel.org>  2023-07-17 08:12:31 +0300
committer  Mark Brown <broonie@kernel.org>  2023-07-17 08:12:31 +0300
commit     0791faebfe750292a8a842b64795a390ca4a3b51 (patch)
tree       0e6095a5a0130398b0693bddfdc421c41eebda7c /mm/vmstat.c
parent     e8bf1741c14eb8e4a4e1364d45aeeab66660ab9b (diff)
parent     fdf0eaf11452d72945af31804e2a1048ee1b574c (diff)
download   linux-0791faebfe750292a8a842b64795a390ca4a3b51.tar.xz
ASoC: Merge v6.5-rc2
Get a similar baseline to my other branches, and fixes for people using
the branch.
Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--  mm/vmstat.c | 18
1 file changed, 18 insertions, 0 deletions
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c28046371b45..b731d57996c5 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -28,6 +28,7 @@
 #include <linux/mm_inline.h>
 #include <linux/page_ext.h>
 #include <linux/page_owner.h>
+#include <linux/sched/isolation.h>
 
 #include "internal.h"
 
@@ -1180,6 +1181,9 @@ const char * const vmstat_text[] = {
 	"nr_zspages",
 #endif
 	"nr_free_cma",
+#ifdef CONFIG_UNACCEPTED_MEMORY
+	"nr_unaccepted",
+#endif
 
 	/* enum numa_stat_item counters */
 #ifdef CONFIG_NUMA
@@ -2022,6 +2026,20 @@ static void vmstat_shepherd(struct work_struct *w)
 	for_each_online_cpu(cpu) {
 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
+		/*
+		 * In kernel users of vmstat counters either require the precise value and
+		 * they are using zone_page_state_snapshot interface or they can live with
+		 * an imprecision as the regular flushing can happen at arbitrary time and
+		 * cumulative error can grow (see calculate_normal_threshold).
+		 *
+		 * From that POV the regular flushing can be postponed for
+		 * CPUs that have been isolated from the kernel interference
+		 * without critical infrastructure ever noticing. Skip
+		 * regular flushing from vmstat_shepherd for all isolated
+		 * CPUs to avoid interference with the isolated workload.
+		 */
+		if (cpu_is_isolated(cpu))
+			continue;
+
 		if (!delayed_work_pending(dw) && need_update(cpu))
 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
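
The new comment above states the contract that makes this change safe: readers that need an exact count fold the per-CPU deltas in themselves (via zone_page_state_snapshot), while everyone else tolerates the drift left by deferred flushing, so the shepherd can simply skip isolated CPUs. Below is a minimal userspace sketch of that counter pattern, not the kernel implementation; all names here (counter_add, counter_read, counter_snapshot, shepherd, cpu_isolated) are hypothetical and chosen only for illustration.

/*
 * Illustrative model of a "global counter plus per-CPU deltas" scheme.
 * Hypothetical names; this is a sketch of the idea, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static long global_counter;                 /* authoritative value, may lag  */
static long cpu_delta[NR_CPUS];             /* per-CPU unflushed increments  */
static bool cpu_isolated[NR_CPUS] = { false, false, true, false };

/* Fast path: bump only the local delta (no shared-cacheline traffic). */
static void counter_add(int cpu, long v)
{
	cpu_delta[cpu] += v;
}

/* Imprecise read: cheap, tolerates whatever has not been flushed yet. */
static long counter_read(void)
{
	return global_counter;
}

/* Precise read: fold in every CPU's delta, the snapshot-style interface. */
static long counter_snapshot(void)
{
	long x = global_counter;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		x += cpu_delta[cpu];
	return x;
}

/* Periodic flusher, analogous to the shepherd: skip isolated CPUs. */
static void shepherd(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_isolated[cpu])
			continue;	/* leave the isolated workload alone */
		global_counter += cpu_delta[cpu];
		cpu_delta[cpu] = 0;
	}
}

int main(void)
{
	counter_add(0, 5);
	counter_add(2, 7);	/* CPU 2 is isolated */
	shepherd();

	printf("imprecise read:   %ld\n", counter_read());     /* 5  */
	printf("precise snapshot: %ld\n", counter_snapshot()); /* 12 */
	return 0;
}

Running the sketch prints an imprecise read of 5 (CPU 2's delta was never flushed) and a precise snapshot of 12, which mirrors why deferring the flush on isolated CPUs only affects readers that were already content with an approximate value.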