Diffstat (limited to 'include')
-rw-r--r--   include/asm-s390/pgtable.h     7
-rw-r--r--   include/linux/mm.h             6
-rw-r--r--   include/linux/page-flags.h   141
-rw-r--r--   include/linux/vmstat.h       138
4 files changed, 151 insertions, 141 deletions
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 859b5e969826..24312387fa24 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -657,13 +657,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
         __pte; \
 })
 
-#define SetPageUptodate(_page) \
-        do { \
-                struct page *__page = (_page); \
-                if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
-                        page_test_and_clear_dirty(_page); \
-        } while (0)
-
 #ifdef __s390x__
 
 #define pfn_pmd(pfn, pgprot) \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c41a1299b8cf..75179529e399 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,7 +36,6 @@ extern int sysctl_legacy_va_layout;
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/atomic.h>
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
@@ -515,6 +514,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
         set_page_section(page, pfn_to_section_nr(pfn));
 }
 
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
 extern struct page *mem_map;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0c076d58c676..ff235c4b79ea 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -5,12 +5,8 @@
 #ifndef PAGE_FLAGS_H
 #define PAGE_FLAGS_H
 
-#include <linux/percpu.h>
-#include <linux/cache.h>
 #include <linux/types.h>
 
-#include <asm/pgtable.h>
-
 /*
  * Various page->flags bits:
  *
@@ -103,134 +99,6 @@
 #endif
 
 /*
- * Global page accounting. One instance per CPU. Only unsigned longs are
- * allowed.
- *
- * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
- *   any time safely (which protects the instance from modification by
- *   interrupt.
- * - The __xxx_page_state variants can be used safely when interrupts are
- *   disabled.
- * - The __xxx_page_state variants can be used if the field is only
- *   modified from process context and protected from preemption, or only
- *   modified from interrupt context. In this case, the field should be
- *   commented here.
- */
-struct page_state {
-        unsigned long nr_dirty;         /* Dirty writeable pages */
-        unsigned long nr_writeback;     /* Pages under writeback */
-        unsigned long nr_unstable;      /* NFS unstable pages */
-        unsigned long nr_page_table_pages;/* Pages used for pagetables */
-        unsigned long nr_mapped;        /* mapped into pagetables.
-                                         * only modified from process context */
-        unsigned long nr_slab;          /* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
-
-        /*
-         * The below are zeroed by get_page_state(). Use get_full_page_state()
-         * to add up all these.
-         */
-        unsigned long pgpgin;           /* Disk reads */
-        unsigned long pgpgout;          /* Disk writes */
-        unsigned long pswpin;           /* swap reads */
-        unsigned long pswpout;          /* swap writes */
-
-        unsigned long pgalloc_high;     /* page allocations */
-        unsigned long pgalloc_normal;
-        unsigned long pgalloc_dma32;
-        unsigned long pgalloc_dma;
-
-        unsigned long pgfree;           /* page freeings */
-        unsigned long pgactivate;       /* pages moved inactive->active */
-        unsigned long pgdeactivate;     /* pages moved active->inactive */
-
-        unsigned long pgfault;          /* faults (major+minor) */
-        unsigned long pgmajfault;       /* faults (major only) */
-
-        unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
-        unsigned long pgrefill_normal;
-        unsigned long pgrefill_dma32;
-        unsigned long pgrefill_dma;
-
-        unsigned long pgsteal_high;     /* total highmem pages reclaimed */
-        unsigned long pgsteal_normal;
-        unsigned long pgsteal_dma32;
-        unsigned long pgsteal_dma;
-
-        unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
-        unsigned long pgscan_kswapd_normal;
-        unsigned long pgscan_kswapd_dma32;
-        unsigned long pgscan_kswapd_dma;
-
-        unsigned long pgscan_direct_high;/* total highmem pages scanned */
-        unsigned long pgscan_direct_normal;
-        unsigned long pgscan_direct_dma32;
-        unsigned long pgscan_direct_dma;
-
-        unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
-        unsigned long slabs_scanned;    /* slab objects scanned */
-        unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
-        unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
-        unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
-        unsigned long allocstall;       /* direct reclaim calls */
-
-        unsigned long pgrotated;        /* pages rotated to tail of the LRU */
-        unsigned long nr_bounce;        /* pages for bounce buffers */
-};
-
-extern void get_page_state(struct page_state *ret);
-extern void get_page_state_node(struct page_state *ret, int node);
-extern void get_full_page_state(struct page_state *ret);
-extern unsigned long read_page_state_offset(unsigned long offset);
-extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
-extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
-
-#define read_page_state(member) \
-        read_page_state_offset(offsetof(struct page_state, member))
-
-#define mod_page_state(member, delta) \
-        mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define __mod_page_state(member, delta) \
-        __mod_page_state_offset(offsetof(struct page_state, member), (delta))
-
-#define inc_page_state(member) mod_page_state(member, 1UL)
-#define dec_page_state(member) mod_page_state(member, 0UL - 1)
-#define add_page_state(member,delta) mod_page_state(member, (delta))
-#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
-
-#define __inc_page_state(member) __mod_page_state(member, 1UL)
-#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
-#define __add_page_state(member,delta) __mod_page_state(member, (delta))
-#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
-
-#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
-
-#define state_zone_offset(zone, member) \
-({ \
-        unsigned offset; \
-        if (is_highmem(zone)) \
-                offset = offsetof(struct page_state, member##_high); \
-        else if (is_normal(zone)) \
-                offset = offsetof(struct page_state, member##_normal); \
-        else if (is_dma32(zone)) \
-                offset = offsetof(struct page_state, member##_dma32); \
-        else \
-                offset = offsetof(struct page_state, member##_dma); \
-        offset; \
-})
-
-#define __mod_page_state_zone(zone, member, delta) \
-        do { \
-                __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-        } while (0)
-
-#define mod_page_state_zone(zone, member, delta) \
-        do { \
-                mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
-        } while (0)
-
-/*
  * Manipulation of page state flags
  */
 #define PageLocked(page) \
@@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 #define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
 
 #define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
-#ifndef SetPageUptodate
+#ifdef CONFIG_S390
+#define SetPageUptodate(_page) \
+        do { \
+                struct page *__page = (_page); \
+                if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
+                        page_test_and_clear_dirty(_page); \
+        } while (0)
+#else
 #define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
 #endif
 #define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
new file mode 100644
index 000000000000..3ca0c1989fc2
--- /dev/null
+++ b/include/linux/vmstat.h
@@ -0,0 +1,138 @@
+#ifndef _LINUX_VMSTAT_H
+#define _LINUX_VMSTAT_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+/*
+ * Global page accounting. One instance per CPU. Only unsigned longs are
+ * allowed.
+ *
+ * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
+ *   any time safely (which protects the instance from modification by
+ *   interrupt.
+ * - The __xxx_page_state variants can be used safely when interrupts are
+ *   disabled.
+ * - The __xxx_page_state variants can be used if the field is only
+ *   modified from process context and protected from preemption, or only
+ *   modified from interrupt context. In this case, the field should be
+ *   commented here.
+ */
+struct page_state {
+        unsigned long nr_dirty;         /* Dirty writeable pages */
+        unsigned long nr_writeback;     /* Pages under writeback */
+        unsigned long nr_unstable;      /* NFS unstable pages */
+        unsigned long nr_page_table_pages;/* Pages used for pagetables */
+        unsigned long nr_mapped;        /* mapped into pagetables.
+                                         * only modified from process context */
+        unsigned long nr_slab;          /* In slab */
+#define GET_PAGE_STATE_LAST nr_slab
+
+        /*
+         * The below are zeroed by get_page_state(). Use get_full_page_state()
+         * to add up all these.
+         */
+        unsigned long pgpgin;           /* Disk reads */
+        unsigned long pgpgout;          /* Disk writes */
+        unsigned long pswpin;           /* swap reads */
+        unsigned long pswpout;          /* swap writes */
+
+        unsigned long pgalloc_high;     /* page allocations */
+        unsigned long pgalloc_normal;
+        unsigned long pgalloc_dma32;
+        unsigned long pgalloc_dma;
+
+        unsigned long pgfree;           /* page freeings */
+        unsigned long pgactivate;       /* pages moved inactive->active */
+        unsigned long pgdeactivate;     /* pages moved active->inactive */
+
+        unsigned long pgfault;          /* faults (major+minor) */
+        unsigned long pgmajfault;       /* faults (major only) */
+
+        unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
+        unsigned long pgrefill_normal;
+        unsigned long pgrefill_dma32;
+        unsigned long pgrefill_dma;
+
+        unsigned long pgsteal_high;     /* total highmem pages reclaimed */
+        unsigned long pgsteal_normal;
+        unsigned long pgsteal_dma32;
+        unsigned long pgsteal_dma;
+
+        unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
+        unsigned long pgscan_kswapd_normal;
+        unsigned long pgscan_kswapd_dma32;
+        unsigned long pgscan_kswapd_dma;
+
+        unsigned long pgscan_direct_high;/* total highmem pages scanned */
+        unsigned long pgscan_direct_normal;
+        unsigned long pgscan_direct_dma32;
+        unsigned long pgscan_direct_dma;
+
+        unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
+        unsigned long slabs_scanned;    /* slab objects scanned */
+        unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
+        unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
+        unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
+        unsigned long allocstall;       /* direct reclaim calls */
+
+        unsigned long pgrotated;        /* pages rotated to tail of the LRU */
+        unsigned long nr_bounce;        /* pages for bounce buffers */
+};
+
+extern void get_page_state(struct page_state *ret);
+extern void get_page_state_node(struct page_state *ret, int node);
+extern void get_full_page_state(struct page_state *ret);
+extern unsigned long read_page_state_offset(unsigned long offset);
+extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
+extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
+
+#define read_page_state(member) \
+        read_page_state_offset(offsetof(struct page_state, member))
+
+#define mod_page_state(member, delta) \
+        mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define __mod_page_state(member, delta) \
+        __mod_page_state_offset(offsetof(struct page_state, member), (delta))
+
+#define inc_page_state(member) mod_page_state(member, 1UL)
+#define dec_page_state(member) mod_page_state(member, 0UL - 1)
+#define add_page_state(member,delta) mod_page_state(member, (delta))
+#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
+
+#define __inc_page_state(member) __mod_page_state(member, 1UL)
+#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
+#define __add_page_state(member,delta) __mod_page_state(member, (delta))
+#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
+
+#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
+
+#define state_zone_offset(zone, member) \
+({ \
+        unsigned offset; \
+        if (is_highmem(zone)) \
+                offset = offsetof(struct page_state, member##_high); \
+        else if (is_normal(zone)) \
+                offset = offsetof(struct page_state, member##_normal); \
+        else if (is_dma32(zone)) \
+                offset = offsetof(struct page_state, member##_dma32); \
+        else \
+                offset = offsetof(struct page_state, member##_dma); \
+        offset; \
+})
+
+#define __mod_page_state_zone(zone, member, delta) \
+        do { \
+                __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+        } while (0)
+
+#define mod_page_state_zone(zone, member, delta) \
+        do { \
+                mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
+        } while (0)
+
+DECLARE_PER_CPU(struct page_state, page_states);
+
+#endif /* _LINUX_VMSTAT_H */
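
The move is mechanical, so the accounting API keeps its old calling conventions: the plain xxx_page_state macros are usable from any context, the __xxx_page_state variants assume interrupts are already disabled (or one of the other guarantees spelled out in the header comment), and the _zone forms select the _dma/_dma32/_normal/_high field for the zone passed in. As a rough sketch only, not part of this commit (the helper name below is made up; the macros, the pgsteal and kswapd_steal fields, and page_zone() all come from the headers shown above), a caller accounting a reclaimed page might look like this:

/*
 * Illustrative sketch, not from this commit: the helper is hypothetical,
 * everything it calls is declared in <linux/mm.h> / <linux/vmstat.h> above.
 */
#include <linux/mm.h>   /* page_zone(); now also pulls in <linux/vmstat.h> */

static void account_steal_example(struct page *page)
{
        /* zone-aware, IRQ-safe: bumps pgsteal_dma/_dma32/_normal/_high */
        mod_page_state_zone(page_zone(page), pgsteal, 1UL);

        /* plain per-CPU counter, also IRQ-safe */
        inc_page_state(kswapd_steal);
}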