author	Mel Gorman <mgorman@techsingularity.net>	2016-07-29 01:45:34 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-29 02:07:41 +0300
commit	0f66114893997f781029c109b0974b7f61130df7 (patch)
tree	ba197844fbdb8af581e4dc91d01d907408671528
parent	599d0c954f91d0689c9bb421b5bc04ea02437a41 (diff)
download	linux-0f66114893997f781029c109b0974b7f61130df7.tar.xz
mm, mmzone: clarify the usage of zone padding
Zone padding separates write-intensive fields used by page allocation,
compaction and vmstats but the comments are a little misleading and need
clarification.

Link: http://lkml.kernel.org/r/1467970510-21195-5-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
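For reference, ZONE_PADDING is defined near the top of include/linux/mmzone.h itself; the sketch below reflects its definition in kernels of this era. On SMP it expands to a zero-size struct member whose alignment pushes the following fields onto a new internode cache line, which is what lets the comments above describe each padded group as a separate set of cache lines; on !SMP builds it compiles away entirely.

#if defined(CONFIG_SMP)
/*
 * Zero-size member; its alignment forces the next field in the
 * enclosing struct onto a fresh internode cache line.
 */
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif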
-rw-r--r--	include/linux/mmzone.h	7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d4f5cac0a8c3..edafdaf62e90 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -477,20 +477,21 @@ struct zone {
 	unsigned long		wait_table_hash_nr_entries;
 	unsigned long		wait_table_bits;
 
+	/* Write-intensive fields used from the page allocator */
 	ZONE_PADDING(_pad1_)
+
 	/* free areas of different sizes */
 	struct free_area	free_area[MAX_ORDER];
 
 	/* zone flags, see below */
 	unsigned long		flags;
 
-	/* Write-intensive fields used from the page allocator */
+	/* Primarily protects free_area */
 	spinlock_t		lock;
 
+	/* Write-intensive fields used by compaction and vmstats. */
 	ZONE_PADDING(_pad2_)
-	/* Write-intensive fields used by page reclaim */
-
 	/*
 	 * When free pages are below this point, additional steps are taken
 	 * when reading the number of free pages to avoid per-cpu counter
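Applied, the patch leaves the padded region of struct zone grouped roughly as below (a condensed sketch assembled from the hunk above; the read-mostly fields before _pad1_ and the fields after _pad2_ are elided):

struct zone {
	/* ... read-mostly fields ... */

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)
	/* ... */
};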