author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-27 04:15:20 +0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-27 04:15:20 +0400
commit    | 31453a9764f7e2a72a6e2c502ace586e2663a68c (patch)
tree      | 5d4db63de5b4b85d1ffdab4e95a75175a784a10a /include
parent    | f9ba5375a8aae4aeea6be15df77e24707a429812 (diff)
parent    | 93ed0e2d07b25aff4db1d61bfbcd1e82074c0ad5 (diff)
download  | linux-31453a9764f7e2a72a6e2c502ace586e2663a68c.tar.xz
Merge branch 'akpm-incoming-1'
* akpm-incoming-1: (176 commits)
scripts/checkpatch.pl: add check for declaration of pci_device_id
scripts/checkpatch.pl: add warnings for static char that could be static const char
checkpatch: version 0.31
checkpatch: statement/block context analyser should look at sanitised lines
checkpatch: handle EXPORT_SYMBOL for DEVICE_ATTR and similar
checkpatch: clean up structure definition macro handling
checkpatch: update copyright dates
checkpatch: Add additional attribute #defines
checkpatch: check for incorrect permissions
checkpatch: ensure kconfig help checks only apply when we are adding help
checkpatch: simplify and consolidate "missing space after" checks
checkpatch: add check for space after struct, union, and enum
checkpatch: returning errno typically should be negative
checkpatch: handle casts better fixing false categorisation of : as binary
checkpatch: ensure we do not collapse bracketed sections into constants
checkpatch: suggest cleanpatch and cleanfile when appropriate
checkpatch: types may sit on a line on their own
checkpatch: fix regressions in "fix handling of leading spaces"
div64_u64(): improve precision on 32bit platforms
lib/parser: cleanup match_number()
...
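Several of the checkpatch additions above encode long-standing kernel conventions. For example, the "returning errno typically should be negative" check targets code that returns the positive errno constant; a minimal illustrative snippet (hypothetical caller, not taken from this merge) of what the check warns about and the preferred form:

	/* checkpatch would warn here: errno values are returned negated */
	if (!buf)
		return EINVAL;

	/* preferred form: return the negative errno */
	if (!buf)
		return -EINVAL;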
Diffstat (limited to 'include')
29 files changed, 483 insertions, 119 deletions
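The largest API change in the include/ part of this merge is the kmap_atomic() rework visible in highmem.h and io-mapping.h below: the explicit km_type slot argument is dropped, the per-CPU slot index is managed internally by kmap_atomic_idx_push()/kmap_atomic_idx_pop(), and variadic compatibility macros keep old two-argument callers building during the transition. A hedged before/after sketch of a caller (names are illustrative, not taken from the diff):

	void *vaddr;

	/* old calling convention: the caller picks a KM_* slot */
	vaddr = kmap_atomic(page, KM_USER0);
	memcpy(vaddr, src, len);
	kunmap_atomic(vaddr, KM_USER0);

	/* new calling convention: slots form a per-CPU stack, so maps and
	 * unmaps must nest LIFO (hence the reordered kunmap_atomic() calls
	 * in copy_user_highpage()/copy_highpage() below) */
	vaddr = kmap_atomic(page);
	memcpy(vaddr, src, len);
	kunmap_atomic(vaddr);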
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f4229fb315e1..2c0fc10956ba 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -150,6 +150,7 @@ #define DATA_DATA \ *(.data) \ *(.ref.data) \ + *(.data..shared_aligned) /* percpu related */ \ DEV_KEEP(init.data) \ DEV_KEEP(exit.data) \ CPU_KEEP(init.data) \ @@ -636,7 +637,7 @@ #ifdef CONFIG_BLK_DEV_INITRD #define INIT_RAM_FS \ - . = ALIGN(PAGE_SIZE); \ + . = ALIGN(4); \ VMLINUX_SYMBOL(__initramfs_start) = .; \ *(.init.ramfs) \ VMLINUX_SYMBOL(__initramfs_end) = .; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 35b00746c712..4ce34fa937d4 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -111,6 +111,7 @@ void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); extern spinlock_t bdi_lock; extern struct list_head bdi_list; +extern struct list_head bdi_pending_list; static inline int wb_has_dirty_io(struct bdi_writeback *wb) { @@ -285,7 +286,7 @@ enum { void clear_bdi_congested(struct backing_dev_info *bdi, int sync); void set_bdi_congested(struct backing_dev_info *bdi, int sync); long congestion_wait(int sync, long timeout); - +long wait_iff_congested(struct zone *zone, int sync, long timeout); static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) { diff --git a/include/linux/fs.h b/include/linux/fs.h index bb20373d0b46..4658777b41cc 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -34,9 +34,9 @@ /* And dynamically-tunable limits and defaults: */ struct files_stat_struct { - int nr_files; /* read only */ - int nr_free_files; /* read only */ - int max_files; /* tunable */ + unsigned long nr_files; /* read only */ + unsigned long nr_free_files; /* read only */ + unsigned long max_files; /* tunable */ }; struct inodes_stat_t { @@ -402,7 +402,7 @@ extern void __init inode_init_early(void); extern void __init files_init(unsigned long); extern struct files_stat_struct files_stat; -extern int get_max_files(void); +extern unsigned long get_max_files(void); extern int sysctl_nr_open; extern struct inodes_stat_t inodes_stat; extern int leases_enable, lease_break_time; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 975609cb8548..e8713d55360a 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -9,6 +9,32 @@ struct vm_area_struct; +/* Plain integer GFP bitmasks. Do not use this directly. */ +#define ___GFP_DMA 0x01u +#define ___GFP_HIGHMEM 0x02u +#define ___GFP_DMA32 0x04u +#define ___GFP_MOVABLE 0x08u +#define ___GFP_WAIT 0x10u +#define ___GFP_HIGH 0x20u +#define ___GFP_IO 0x40u +#define ___GFP_FS 0x80u +#define ___GFP_COLD 0x100u +#define ___GFP_NOWARN 0x200u +#define ___GFP_REPEAT 0x400u +#define ___GFP_NOFAIL 0x800u +#define ___GFP_NORETRY 0x1000u +#define ___GFP_COMP 0x4000u +#define ___GFP_ZERO 0x8000u +#define ___GFP_NOMEMALLOC 0x10000u +#define ___GFP_HARDWALL 0x20000u +#define ___GFP_THISNODE 0x40000u +#define ___GFP_RECLAIMABLE 0x80000u +#ifdef CONFIG_KMEMCHECK +#define ___GFP_NOTRACK 0x200000u +#else +#define ___GFP_NOTRACK 0 +#endif + /* * GFP bitmasks.. * @@ -18,10 +44,10 @@ struct vm_area_struct; * without the underscores and use them consistently. The definitions here may * be used in bit comparisons. 
*/ -#define __GFP_DMA ((__force gfp_t)0x01u) -#define __GFP_HIGHMEM ((__force gfp_t)0x02u) -#define __GFP_DMA32 ((__force gfp_t)0x04u) -#define __GFP_MOVABLE ((__force gfp_t)0x08u) /* Page is movable */ +#define __GFP_DMA ((__force gfp_t)___GFP_DMA) +#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) +#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) +#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) /* * Action modifiers - doesn't change the zoning @@ -38,27 +64,22 @@ struct vm_area_struct; * __GFP_MOVABLE: Flag that this page will be movable by the page migration * mechanism or reclaimed */ -#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */ -#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */ -#define __GFP_IO ((__force gfp_t)0x40u) /* Can start physical IO? */ -#define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */ -#define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */ -#define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */ -#define __GFP_REPEAT ((__force gfp_t)0x400u) /* See above */ -#define __GFP_NOFAIL ((__force gfp_t)0x800u) /* See above */ -#define __GFP_NORETRY ((__force gfp_t)0x1000u)/* See above */ -#define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ -#define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ -#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ -#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ -#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ -#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */ - -#ifdef CONFIG_KMEMCHECK -#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */ -#else -#define __GFP_NOTRACK ((__force gfp_t)0) -#endif +#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ +#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ +#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ +#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ +#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ +#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ +#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ +#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ +#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ +#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ +#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ +#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */ +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ +#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ /* * This may seem redundant, but it's a way of annotating false positives vs. 
@@ -186,14 +207,14 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) #endif #define GFP_ZONE_TABLE ( \ - (ZONE_NORMAL << 0 * ZONES_SHIFT) \ - | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \ - | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT) \ - | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT) \ - | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT) \ - | (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT) \ - | (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\ - | (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\ + (ZONE_NORMAL << 0 * ZONES_SHIFT) \ + | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \ + | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \ + | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \ + | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \ + | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \ + | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \ + | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \ ) /* @@ -203,20 +224,20 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) * allowed. */ #define GFP_ZONE_BAD ( \ - 1 << (__GFP_DMA | __GFP_HIGHMEM) \ - | 1 << (__GFP_DMA | __GFP_DMA32) \ - | 1 << (__GFP_DMA32 | __GFP_HIGHMEM) \ - | 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM) \ - | 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA) \ - | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA) \ - | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM) \ - | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\ + 1 << (___GFP_DMA | ___GFP_HIGHMEM) \ + | 1 << (___GFP_DMA | ___GFP_DMA32) \ + | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \ + | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \ ) static inline enum zone_type gfp_zone(gfp_t flags) { enum zone_type z; - int bit = flags & GFP_ZONEMASK; + int bit = (__force int) (flags & GFP_ZONEMASK); z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1); diff --git a/include/linux/highmem.h b/include/linux/highmem.h index e3060ef85b6d..8a85ec109a3a 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -28,18 +28,6 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size) #include <asm/kmap_types.h> -#ifdef CONFIG_DEBUG_HIGHMEM - -void debug_kmap_atomic(enum km_type type); - -#else - -static inline void debug_kmap_atomic(enum km_type type) -{ -} - -#endif - #ifdef CONFIG_HIGHMEM #include <asm/highmem.h> @@ -49,6 +37,27 @@ extern unsigned long totalhigh_pages; void kmap_flush_unused(void); +DECLARE_PER_CPU(int, __kmap_atomic_idx); + +static inline int kmap_atomic_idx_push(void) +{ + int idx = __get_cpu_var(__kmap_atomic_idx)++; +#ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx > KM_TYPE_NR); +#endif + return idx; +} + +static inline int kmap_atomic_idx_pop(void) +{ + int idx = --__get_cpu_var(__kmap_atomic_idx); +#ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(idx < 0); +#endif + return idx; +} + #else /* CONFIG_HIGHMEM */ static inline unsigned int nr_free_highpages(void) { return 0; } @@ -66,19 +75,19 @@ static inline void kunmap(struct page *page) { } -static inline void *kmap_atomic(struct page *page, enum km_type idx) +static inline void *__kmap_atomic(struct page *page) { pagefault_disable(); return 
page_address(page); } -#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx) +#define kmap_atomic_prot(page, prot) __kmap_atomic(page) -static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx) +static inline void __kunmap_atomic(void *addr) { pagefault_enable(); } -#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) +#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) #define kmap_atomic_to_page(ptr) virt_to_page(ptr) #define kmap_flush_unused() do {} while(0) @@ -86,12 +95,20 @@ static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx) #endif /* CONFIG_HIGHMEM */ -/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */ -/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */ -#define kunmap_atomic(addr, idx) do { \ - BUILD_BUG_ON(__same_type((addr), struct page *)); \ - kunmap_atomic_notypecheck((addr), (idx)); \ - } while (0) +/* + * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work. + */ +#define kmap_atomic(page, args...) __kmap_atomic(page) + +/* + * Prevent people trying to call kunmap_atomic() as if it were kunmap() + * kunmap_atomic() should get the return value of kmap_atomic, not the page. + */ +#define kunmap_atomic(addr, args...) \ +do { \ + BUILD_BUG_ON(__same_type((addr), struct page *)); \ + __kunmap_atomic(addr); \ +} while (0) /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ #ifndef clear_user_highpage @@ -201,8 +218,8 @@ static inline void copy_user_highpage(struct page *to, struct page *from, vfrom = kmap_atomic(from, KM_USER0); vto = kmap_atomic(to, KM_USER1); copy_user_page(vto, vfrom, vaddr, to); - kunmap_atomic(vfrom, KM_USER0); kunmap_atomic(vto, KM_USER1); + kunmap_atomic(vfrom, KM_USER0); } #endif @@ -214,8 +231,8 @@ static inline void copy_highpage(struct page *to, struct page *from) vfrom = kmap_atomic(from, KM_USER0); vto = kmap_atomic(to, KM_USER1); copy_page(vto, vfrom); - kunmap_atomic(vfrom, KM_USER0); kunmap_atomic(vto, KM_USER1); + kunmap_atomic(vfrom, KM_USER0); } #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/i2c/apds990x.h b/include/linux/i2c/apds990x.h new file mode 100644 index 000000000000..d186fcc5d257 --- /dev/null +++ b/include/linux/i2c/apds990x.h @@ -0,0 +1,79 @@ +/* + * This file is part of the APDS990x sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __APDS990X_H__ +#define __APDS990X_H__ + + +#define APDS_IRLED_CURR_12mA 0x3 +#define APDS_IRLED_CURR_25mA 0x2 +#define APDS_IRLED_CURR_50mA 0x1 +#define APDS_IRLED_CURR_100mA 0x0 + +/** + * struct apds990x_chip_factors - defines effect of the cover window + * @ga: Total glass attenuation + * @cf1: clear channel factor 1 for raw to lux conversion + * @irf1: IR channel factor 1 for raw to lux conversion + * @cf2: clear channel factor 2 for raw to lux conversion + * @irf2: IR channel factor 2 for raw to lux conversion + * @df: device factor for conversion formulas + * + * Structure for tuning ALS calculation to match with environment. + * Values depend on the material above the sensor and the sensor + * itself. If the GA is zero, driver will use uncovered sensor default values + * format: decimal value * APDS_PARAM_SCALE except df which is plain integer. + */ +#define APDS_PARAM_SCALE 4096 +struct apds990x_chip_factors { + int ga; + int cf1; + int irf1; + int cf2; + int irf2; + int df; +}; + +/** + * struct apds990x_platform_data - platform data for apsd990x.c driver + * @cf: chip factor data + * @pddrive: IR-led driving current + * @ppcount: number of IR pulses used for proximity estimation + * @setup_resources: interrupt line setup call back function + * @release_resources: interrupt line release call back function + * + * Proximity detection result depends heavily on correct ppcount, pdrive + * and cover window. + * + */ + +struct apds990x_platform_data { + struct apds990x_chip_factors cf; + u8 pdrive; + u8 ppcount; + int (*setup_resources)(void); + int (*release_resources)(void); +}; + +#endif diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/i2c/bh1770glc.h new file mode 100644 index 000000000000..8b5e2df36c72 --- /dev/null +++ b/include/linux/i2c/bh1770glc.h @@ -0,0 +1,53 @@ +/* + * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver. + * Chip is combined proximity and ambient light sensor. + * + * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies). + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef __BH1770_H__ +#define __BH1770_H__ + +/** + * struct bh1770_platform_data - platform data for bh1770glc driver + * @led_def_curr: IR led driving current. + * @glass_attenuation: Attenuation factor for covering window. + * @setup_resources: Call back for interrupt line setup function + * @release_resources: Call back for interrupte line release function + * + * Example of glass attenuation: 16384 * 385 / 100 means attenuation factor + * of 3.85. i.e. 
light_above_sensor = light_above_cover_window / 3.85 + */ + +struct bh1770_platform_data { +#define BH1770_LED_5mA 0 +#define BH1770_LED_10mA 1 +#define BH1770_LED_20mA 2 +#define BH1770_LED_50mA 3 +#define BH1770_LED_100mA 4 +#define BH1770_LED_150mA 5 +#define BH1770_LED_200mA 6 + __u8 led_def_curr; +#define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */ + __u32 glass_attenuation; + int (*setup_resources)(void); + int (*release_resources)(void); +}; +#endif diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 7fb592793738..8cdcc2a199ad 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -81,8 +81,7 @@ io_mapping_free(struct io_mapping *mapping) /* Atomic map/unmap */ static inline void __iomem * io_mapping_map_atomic_wc(struct io_mapping *mapping, - unsigned long offset, - int slot) + unsigned long offset) { resource_size_t phys_addr; unsigned long pfn; @@ -90,13 +89,13 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); - return iomap_atomic_prot_pfn(pfn, slot, mapping->prot); + return iomap_atomic_prot_pfn(pfn, mapping->prot); } static inline void -io_mapping_unmap_atomic(void __iomem *vaddr, int slot) +io_mapping_unmap_atomic(void __iomem *vaddr) { - iounmap_atomic(vaddr, slot); + iounmap_atomic(vaddr); } static inline void __iomem * @@ -137,14 +136,13 @@ io_mapping_free(struct io_mapping *mapping) /* Atomic map/unmap */ static inline void __iomem * io_mapping_map_atomic_wc(struct io_mapping *mapping, - unsigned long offset, - int slot) + unsigned long offset) { return ((char __force __iomem *) mapping) + offset; } static inline void -io_mapping_unmap_atomic(void __iomem *vaddr, int slot) +io_mapping_unmap_atomic(void __iomem *vaddr) { } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index edef168a0406..450092c1e35f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -173,6 +173,11 @@ extern int _cond_resched(void); (__x < 0) ? -__x : __x; \ }) +#define abs64(x) ({ \ + s64 __x = (x); \ + (__x < 0) ? -__x : __x; \ + }) + #ifdef CONFIG_PROVE_LOCKING void might_fault(void); #else @@ -203,10 +208,10 @@ extern unsigned long simple_strtoul(const char *,char **,unsigned int); extern long simple_strtol(const char *,char **,unsigned int); extern unsigned long long simple_strtoull(const char *,char **,unsigned int); extern long long simple_strtoll(const char *,char **,unsigned int); -extern int strict_strtoul(const char *, unsigned int, unsigned long *); -extern int strict_strtol(const char *, unsigned int, long *); -extern int strict_strtoull(const char *, unsigned int, unsigned long long *); -extern int strict_strtoll(const char *, unsigned int, long long *); +extern int __must_check strict_strtoul(const char *, unsigned int, unsigned long *); +extern int __must_check strict_strtol(const char *, unsigned int, long *); +extern int __must_check strict_strtoull(const char *, unsigned int, unsigned long long *); +extern int __must_check strict_strtoll(const char *, unsigned int, long long *); extern int sprintf(char * buf, const char * fmt, ...) __attribute__ ((format (printf, 2, 3))); extern int vsprintf(char *buf, const char *, va_list) @@ -277,6 +282,11 @@ asmlinkage int vprintk(const char *fmt, va_list args) asmlinkage int printk(const char * fmt, ...) 
__attribute__ ((format (printf, 1, 2))) __cold; +/* + * Please don't use printk_ratelimit(), because it shares ratelimiting state + * with all other unrelated printk_ratelimit() callsites. Instead use + * printk_ratelimited() or plain old __ratelimit(). + */ extern int __printk_ratelimit(const char *func); #define printk_ratelimit() __printk_ratelimit(__func__) extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, @@ -651,6 +661,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } (void) (&_max1 == &_max2); \ _max1 > _max2 ? _max1 : _max2; }) +#define min3(x, y, z) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + typeof(z) _min3 = (z); \ + (void) (&_min1 == &_min2); \ + (void) (&_min1 == &_min3); \ + _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ + (_min2 < _min3 ? _min2 : _min3); }) + +#define max3(x, y, z) ({ \ + typeof(x) _max1 = (x); \ + typeof(y) _max2 = (y); \ + typeof(z) _max3 = (z); \ + (void) (&_max1 == &_max2); \ + (void) (&_max1 == &_max3); \ + _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \ + (_max2 > _max3 ? _max2 : _max3); }) + /** * min_not_zero - return the minimum that is _not_ zero, unless both are zero * @x: value1 diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 62dbee554f60..c238ad2f82ea 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -171,11 +171,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void); } -static inline unsigned int __must_check -__kfifo_must_check_helper(unsigned int val) -{ - return val; -} +/* __kfifo_must_check_helper() is temporarily disabled because it was faulty */ +#define __kfifo_must_check_helper(x) (x) /** * kfifo_initialized - Check if the fifo is initialized diff --git a/include/linux/math64.h b/include/linux/math64.h index c87f1528703a..23fcdfcba81b 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -35,6 +35,14 @@ static inline u64 div64_u64(u64 dividend, u64 divisor) return dividend / divisor; } +/** + * div64_s64 - signed 64bit divide with 64bit divisor + */ +static inline s64 div64_s64(s64 dividend, s64 divisor) +{ + return dividend / divisor; +} + #elif BITS_PER_LONG == 32 #ifndef div_u64_rem @@ -53,6 +61,10 @@ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); extern u64 div64_u64(u64 dividend, u64 divisor); #endif +#ifndef div64_s64 +extern s64 div64_s64(s64 dividend, s64 divisor); +#endif + #endif /* BITS_PER_LONG */ /** diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 864035fb8f8a..4307231bd22f 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -70,6 +70,10 @@ extern void online_page(struct page *page); extern int online_pages(unsigned long, unsigned long); extern void __offline_isolated_pages(unsigned long, unsigned long); +#ifdef CONFIG_MEMORY_HOTREMOVE +extern bool is_pageblock_removable_nolock(struct page *page); +#endif /* CONFIG_MEMORY_HOTREMOVE */ + /* reasonably generic interface to expand the physical pages in a zone */ extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); diff --git a/include/linux/mm.h b/include/linux/mm.h index a4c66846fb8f..721f451c3029 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -144,6 +144,7 @@ extern pgprot_t protection_map[16]; #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ #define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ 
+#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ /* * This interface is used by x86 PAT code to identify a pfn mapping that is @@ -497,8 +498,8 @@ static inline void set_compound_order(struct page *page, unsigned long order) #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) -/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */ -#ifdef NODE_NOT_IN_PAGEFLAGS +/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ +#ifdef NODE_NOT_IN_PAGE_FLAGS #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ SECTIONS_PGOFF : ZONES_PGOFF) @@ -723,6 +724,7 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ +#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ @@ -868,6 +870,7 @@ int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); void account_page_dirtied(struct page *page, struct address_space *mapping); +void account_page_writeback(struct page *page); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); @@ -1031,7 +1034,15 @@ extern void unregister_shrinker(struct shrinker *); int vma_wants_writenotify(struct vm_area_struct *vma); -extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); +extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl); +static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl) +{ + pte_t *ptep; + __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); + return ptep; +} #ifdef __PAGETABLE_PUD_FOLDED static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index cb57d657ce4d..bb7288a782fd 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -310,6 +310,8 @@ struct mm_struct { #ifdef CONFIG_MMU_NOTIFIER struct mmu_notifier_mm *mmu_notifier_mm; #endif + /* How many tasks sharing this mm are OOM_DISABLE */ + atomic_t oom_disable_count; }; /* Future-safe accessor for struct mm_struct's cpu_vm_mask. 
*/ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3984c4eb41fd..39c24ebe9cfd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -104,6 +104,8 @@ enum zone_stat_item { NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ + NR_DIRTIED, /* page dirtyings since bootup */ + NR_WRITTEN, /* page writings since bootup */ #ifdef CONFIG_NUMA NUMA_HIT, /* allocated in intended node */ NUMA_MISS, /* allocated in non intended node */ @@ -421,6 +423,9 @@ struct zone { typedef enum { ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ + ZONE_CONGESTED, /* zone has many dirty pages backed by + * a congested BDI + */ } zone_flags_t; static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) @@ -438,6 +443,11 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) clear_bit(flag, &zone->flags); } +static inline int zone_is_reclaim_congested(const struct zone *zone) +{ + return test_bit(ZONE_CONGESTED, &zone->flags); +} + static inline int zone_is_reclaim_locked(const struct zone *zone) { return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 9d2f1837b3d8..112adf8bd47d 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -21,8 +21,8 @@ #define __module_cat(a,b) ___module_cat(a,b) #define __MODULE_INFO(tag, name, info) \ static const char __module_cat(name,__LINE__)[] \ - __used \ - __attribute__((section(".modinfo"),unused)) = __stringify(tag) "=" info + __used __attribute__((section(".modinfo"), unused, aligned(1))) \ + = __stringify(tag) "=" info #else /* !MODULE */ #define __MODULE_INFO(tag, name, info) #endif diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index e8c06122be36..19ef95d293ae 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -67,7 +67,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, #define get_pageblock_flags(page) \ get_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1) -#define set_pageblock_flags(page) \ - set_pageblock_flags_group(page, 0, NR_PAGEBLOCK_BITS-1) +#define set_pageblock_flags(page, flags) \ + set_pageblock_flags_group(page, flags, \ + 0, NR_PAGEBLOCK_BITS-1) #endif /* PAGEBLOCK_FLAGS_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e12cdc6d79ee..2d1ffe3cf1ee 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -299,6 +299,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); extern void __lock_page_nosync(struct page *page); +extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags); extern void unlock_page(struct page *page); static inline void __set_page_locked(struct page *page) @@ -351,6 +353,17 @@ static inline void lock_page_nosync(struct page *page) } /* + * lock_page_or_retry - Lock the page, unless this would block and the + * caller indicated that it can handle a retry. 
+ */ +static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags) +{ + might_sleep(); + return trylock_page(page) || __lock_page_or_retry(page, mm, flags); +} + +/* * This is exported only for wait_on_page_locked/wait_on_page_writeback. * Never use this directly! */ diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 8f69d09a41a5..03ff67b0cdf5 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -36,6 +36,8 @@ static inline void ratelimit_state_init(struct ratelimit_state *rs, rs->begin = 0; } +extern struct ratelimit_state printk_ratelimit_state; + extern int ___ratelimit(struct ratelimit_state *rs, const char *func); #define __ratelimit(state) ___ratelimit(state, __func__) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 31b2fd75dcba..bb83c0da2071 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -25,8 +25,8 @@ * pointing to this anon_vma once its vma list is empty. */ struct anon_vma { - spinlock_t lock; /* Serialize access to vma list */ struct anon_vma *root; /* Root of this anon_vma tree */ + spinlock_t lock; /* Serialize access to vma list */ #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION) /* @@ -205,9 +205,20 @@ int try_to_unmap_one(struct page *, struct vm_area_struct *, /* * Called from mm/filemap_xip.c to unmap empty zero page */ -pte_t *page_check_address(struct page *, struct mm_struct *, +pte_t *__page_check_address(struct page *, struct mm_struct *, unsigned long, spinlock_t **, int); +static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm, + unsigned long address, + spinlock_t **ptlp, int sync) +{ + pte_t *ptep; + + __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address, + ptlp, sync)); + return ptep; +} + /* * Used by swapoff to help locate where page is expected in vma. */ @@ -230,7 +241,20 @@ int try_to_munlock(struct page *); /* * Called by memory-failure.c to kill processes. */ -struct anon_vma *page_lock_anon_vma(struct page *page); +struct anon_vma *__page_lock_anon_vma(struct page *page); + +static inline struct anon_vma *page_lock_anon_vma(struct page *page) +{ + struct anon_vma *anon_vma; + + __cond_lock(RCU, anon_vma = __page_lock_anon_vma(page)); + + /* (void) is needed to make gcc happy */ + (void) __cond_lock(&anon_vma->root->lock, anon_vma); + + return anon_vma; +} + void page_unlock_anon_vma(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); diff --git a/include/linux/sched.h b/include/linux/sched.h index 56154bbb8da9..393ce94e54b7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1706,7 +1706,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_DUMPCORE 0x00000200 /* dumped core */ #define PF_SIGNALED 0x00000400 /* killed by a signal */ #define PF_MEMALLOC 0x00000800 /* Allocating memory */ -#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */ #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ #define PF_FREEZING 0x00004000 /* freeze in progress. 
do not account to load */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 7cdd63366f88..eba53e71d2cc 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -271,8 +271,18 @@ extern void scan_mapping_unevictable_pages(struct address_space *); extern unsigned long scan_unevictable_pages; extern int scan_unevictable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +#ifdef CONFIG_NUMA extern int scan_unevictable_register_node(struct node *node); extern void scan_unevictable_unregister_node(struct node *node); +#else +static inline int scan_unevictable_register_node(struct node *node) +{ + return 0; +} +static inline void scan_unevictable_unregister_node(struct node *node) +{ +} +#endif extern int kswapd_run(int nid); extern void kswapd_stop(int nid); diff --git a/include/linux/types.h b/include/linux/types.h index 357dbc19606f..c2a9eb44f2fa 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -121,15 +121,7 @@ typedef __u64 u_int64_t; typedef __s64 int64_t; #endif -/* - * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid - * common 32/64-bit compat problems. - * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other - * architectures) and to 8-byte boundaries on 64-bit architetures. The new - * aligned_64 type enforces 8-byte alignment so that structs containing - * aligned_64 values have the same alignment on 32-bit and 64-bit architectures. - * No conversions are necessary between 32-bit user-space and a 64-bit kernel. - */ +/* this is a special 64bit data type that is 8-byte aligned */ #define aligned_u64 __u64 __attribute__((aligned(8))) #define aligned_be64 __be64 __attribute__((aligned(8))) #define aligned_le64 __le64 __attribute__((aligned(8))) @@ -186,7 +178,15 @@ typedef __u64 __bitwise __be64; typedef __u16 __bitwise __sum16; typedef __u32 __bitwise __wsum; -/* this is a special 64bit data type that is 8-byte aligned */ +/* + * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid + * common 32/64-bit compat problems. + * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other + * architectures) and to 8-byte boundaries on 64-bit architetures. The new + * aligned_64 type enforces 8-byte alignment so that structs containing + * aligned_64 values have the same alignment on 32-bit and 64-bit architectures. + * No conversions are necessary between 32-bit user-space and a 64-bit kernel. 
+ */ #define __aligned_u64 __u64 __attribute__((aligned(8))) #define __aligned_be64 __be64 __attribute__((aligned(8))) #define __aligned_le64 __le64 __attribute__((aligned(8))) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 63a4fe6d51bd..a03dcf62ca9d 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -53,8 +53,10 @@ static inline void vmalloc_init(void) #endif extern void *vmalloc(unsigned long size); +extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); +extern void *vzalloc_node(unsigned long size, int node); extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 070bb7a88936..0c0771f06bfa 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -190,7 +190,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } __INIT_WORK((_work), (_func), 0); \ } while (0) -#define INIT_WORK_ON_STACK(_work, _func) \ +#define INIT_WORK_ONSTACK(_work, _func) \ do { \ __INIT_WORK((_work), (_func), 1); \ } while (0) @@ -201,9 +201,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } init_timer(&(_work)->timer); \ } while (0) -#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ +#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \ do { \ - INIT_WORK_ON_STACK(&(_work)->work, (_func)); \ + INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ init_timer_on_stack(&(_work)->timer); \ } while (0) diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 72a5d647a5f2..c7299d2ace6b 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -149,6 +149,8 @@ int write_cache_pages(struct address_space *mapping, int do_writepages(struct address_space *mapping, struct writeback_control *wbc); void set_page_dirty_balance(struct page *page, int page_mkwrite); void writeback_set_ratelimit(void); +void tag_pages_for_writeback(struct address_space *mapping, + pgoff_t start, pgoff_t end); /* pdflush.c */ extern int nr_pdflush_threads; /* Global so it can be exported to sysctl diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 01e9e0076a92..6bcb00645de4 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -242,18 +242,20 @@ TRACE_EVENT(ext4_da_writepages, __entry->pages_skipped = wbc->pages_skipped; __entry->range_start = wbc->range_start; __entry->range_end = wbc->range_end; - __entry->nonblocking = wbc->nonblocking; __entry->for_kupdate = wbc->for_kupdate; __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; __entry->writeback_index = inode->i_mapping->writeback_index; ), - TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu", + TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld " + "range_start %llu range_end %llu " + "for_kupdate %d for_reclaim %d " + "range_cyclic %d writeback_index %lu", jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino, __entry->nr_to_write, __entry->pages_skipped, __entry->range_start, - __entry->range_end, __entry->nonblocking, + __entry->range_end, __entry->for_kupdate, __entry->for_reclaim, __entry->range_cyclic, (unsigned long) __entry->writeback_index) diff --git a/include/trace/events/vmscan.h 
b/include/trace/events/vmscan.h index 370aa5a87322..c255fcc587bf 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -10,6 +10,7 @@ #define RECLAIM_WB_ANON 0x0001u #define RECLAIM_WB_FILE 0x0002u +#define RECLAIM_WB_MIXED 0x0010u #define RECLAIM_WB_SYNC 0x0004u #define RECLAIM_WB_ASYNC 0x0008u @@ -17,13 +18,20 @@ (flags) ? __print_flags(flags, "|", \ {RECLAIM_WB_ANON, "RECLAIM_WB_ANON"}, \ {RECLAIM_WB_FILE, "RECLAIM_WB_FILE"}, \ + {RECLAIM_WB_MIXED, "RECLAIM_WB_MIXED"}, \ {RECLAIM_WB_SYNC, "RECLAIM_WB_SYNC"}, \ {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \ ) : "RECLAIM_WB_NONE" #define trace_reclaim_flags(page, sync) ( \ (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ - (sync == PAGEOUT_IO_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ + (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ + ) + +#define trace_shrink_flags(file, sync) ( \ + (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_MIXED : \ + (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \ + (sync == LUMPY_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ ) TRACE_EVENT(mm_vmscan_kswapd_sleep, @@ -269,6 +277,40 @@ TRACE_EVENT(mm_vmscan_writepage, show_reclaim_flags(__entry->reclaim_flags)) ); +TRACE_EVENT(mm_vmscan_lru_shrink_inactive, + + TP_PROTO(int nid, int zid, + unsigned long nr_scanned, unsigned long nr_reclaimed, + int priority, int reclaim_flags), + + TP_ARGS(nid, zid, nr_scanned, nr_reclaimed, priority, reclaim_flags), + + TP_STRUCT__entry( + __field(int, nid) + __field(int, zid) + __field(unsigned long, nr_scanned) + __field(unsigned long, nr_reclaimed) + __field(int, priority) + __field(int, reclaim_flags) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->zid = zid; + __entry->nr_scanned = nr_scanned; + __entry->nr_reclaimed = nr_reclaimed; + __entry->priority = priority; + __entry->reclaim_flags = reclaim_flags; + ), + + TP_printk("nid=%d zid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s", + __entry->nid, __entry->zid, + __entry->nr_scanned, __entry->nr_reclaimed, + __entry->priority, + show_reclaim_flags(__entry->reclaim_flags)) +); + + #endif /* _TRACE_VMSCAN_H */ /* This part must be outside protection */ diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index f345f66ae9d1..89a2b2db4375 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -96,8 +96,6 @@ DECLARE_EVENT_CLASS(wbc_class, __field(long, nr_to_write) __field(long, pages_skipped) __field(int, sync_mode) - __field(int, nonblocking) - __field(int, encountered_congestion) __field(int, for_kupdate) __field(int, for_background) __field(int, for_reclaim) @@ -153,6 +151,41 @@ DEFINE_WBC_EVENT(wbc_balance_dirty_written); DEFINE_WBC_EVENT(wbc_balance_dirty_wait); DEFINE_WBC_EVENT(wbc_writepage); +DECLARE_EVENT_CLASS(writeback_congest_waited_template, + + TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed), + + TP_STRUCT__entry( + __field( unsigned int, usec_timeout ) + __field( unsigned int, usec_delayed ) + ), + + TP_fast_assign( + __entry->usec_timeout = usec_timeout; + __entry->usec_delayed = usec_delayed; + ), + + TP_printk("usec_timeout=%u usec_delayed=%u", + __entry->usec_timeout, + __entry->usec_delayed) +); + +DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait, + + TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed) +); + +DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested, + + 
TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), + + TP_ARGS(usec_timeout, usec_delayed) +); + #endif /* _TRACE_WRITEBACK_H */ /* This part must be outside protection */
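Among the smaller helpers added above, vmalloc.h gains vzalloc() and vzalloc_node(), which return already-zeroed virtually contiguous memory. A hedged sketch of the usual caller-side conversion (hypothetical driver fragment, not part of this merge):

	struct foo_table *tbl;	/* foo_table and nentries are illustrative */

	/* before: explicit zeroing after vmalloc() */
	tbl = vmalloc(nentries * sizeof(*tbl));
	if (!tbl)
		return -ENOMEM;
	memset(tbl, 0, nentries * sizeof(*tbl));

	/* after: vzalloc() allocates and zeroes in one call */
	tbl = vzalloc(nentries * sizeof(*tbl));
	if (!tbl)
		return -ENOMEM;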