| author | Tony Lindgren <tony@atomide.com> | 2016-01-25 21:46:21 +0300 |
| --- | --- | --- |
| committer | Tony Lindgren <tony@atomide.com> | 2016-01-25 21:46:21 +0300 |
| commit | 7e3b1207705c10ada363bbc7d0235730ce1f5b79 (patch) | |
| tree | cd1eb567acb4e72c1bdfbe8b59746c73b8627ee5 /include/linux/mmzone.h | |
| parent | 143c6fe3a415edf2dde3f507b3a00998b4c4001e (diff) | |
| parent | 20437f79f6627a31752f422688a6047c25cefcf1 (diff) | |
| download | linux-7e3b1207705c10ada363bbc7d0235730ce1f5b79.tar.xz | |
Merge branch 'enable-devices' into omap-for-v4.5/fixes
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r-- | include/linux/mmzone.h | 37 |
1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e23a9e704536..33bb1b19273e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -195,11 +195,6 @@ static inline int is_active_lru(enum lru_list lru)
 	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_unevictable_lru(enum lru_list lru)
-{
-	return (lru == LRU_UNEVICTABLE);
-}
-
 struct zone_reclaim_stat {
 	/*
 	 * The pageout code in vmscan.c keeps track of how many of the
@@ -361,10 +356,10 @@ struct zone {
 	struct per_cpu_pageset __percpu *pageset;
 
 	/*
-	 * This is a per-zone reserve of pages that should not be
-	 * considered dirtyable memory.
+	 * This is a per-zone reserve of pages that are not available
+	 * to userspace allocations.
 	 */
-	unsigned long		dirty_balance_reserve;
+	unsigned long		totalreserve_pages;
 
 #ifndef CONFIG_SPARSEMEM
 	/*
@@ -576,19 +571,17 @@ static inline bool zone_is_empty(struct zone *zone)
 /* Maximum number of zones on a zonelist */
 #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
 
+enum {
+	ZONELIST_FALLBACK,	/* zonelist with fallback */
 #ifdef CONFIG_NUMA
-
-/*
- * The NUMA zonelists are doubled because we need zonelists that restrict the
- * allocations to a single node for __GFP_THISNODE.
- *
- * [0]	: Zonelist with fallback
- * [1]	: No fallback (__GFP_THISNODE)
- */
-#define MAX_ZONELISTS 2
-#else
-#define MAX_ZONELISTS 1
+	/*
+	 * The NUMA zonelists are doubled because we need zonelists that
+	 * restrict the allocations to a single node for __GFP_THISNODE.
+	 */
+	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
 #endif
+	MAX_ZONELISTS
+};
 
 /*
  * This struct contains information about a zone in a zonelist. It is stored
@@ -1207,13 +1200,13 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  * the zone and PFN linkages are still valid. This is expensive, but walkers
  * of the full memmap are extremely rare.
  */
-int memmap_valid_within(unsigned long pfn,
+bool memmap_valid_within(unsigned long pfn,
 			struct page *page, struct zone *zone);
 #else
-static inline int memmap_valid_within(unsigned long pfn,
+static inline bool memmap_valid_within(unsigned long pfn,
 			struct page *page, struct zone *zone)
 {
-	return 1;
+	return true;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
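The replacement of the bare #define MAX_ZONELISTS values with named enum entries is consumed where the page allocator picks a zonelist from a gfp mask. The sketch below is not part of this diff; it is a minimal illustration, modeled on gfp_zonelist() in include/linux/gfp.h, of how ZONELIST_FALLBACK and ZONELIST_NOFALLBACK are typically selected in a kernel build context:

```c
/*
 * Sketch only, not part of this commit: how the named zonelist
 * indices introduced above are typically chosen from a gfp mask,
 * modeled on gfp_zonelist() in include/linux/gfp.h.
 */
static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	/* __GFP_THISNODE forbids falling back to other nodes */
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * A caller then indexes the per-node zonelist array, e.g.
 *	zonelist = NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
 */
```

Using enum names instead of the literal [0]/[1] indices keeps the NUMA-only ZONELIST_NOFALLBACK entry compiled out on !CONFIG_NUMA builds, while MAX_ZONELISTS still sizes the node_zonelists[] array correctly.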