author     Linus Torvalds <torvalds@linux-foundation.org>  2020-01-31 23:16:36 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-01-31 23:16:36 +0300
commit     7eec11d3a784a283f916590e5aa30b855c2ccfd7 (patch)
tree       e1bafb0d159b787684e392ae613933f9211c7d7a /include
parent     ddaefe8947b48b638f726cf89730ecc1000ebcc3 (diff)
parent     43e76af85fa7e75ac9b71fc2fcc250abb1889bff (diff)
Merge branch 'akpm' (patches from Andrew)
Pull updates from Andrew Morton:
"Most of -mm and quite a number of other subsystems: hotfixes, scripts,
ocfs2, misc, lib, binfmt, init, reiserfs, exec, dma-mapping, kcov.
MM is fairly quiet this time. Holidays, I assume"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (118 commits)
kcov: ignore fault-inject and stacktrace
include/linux/io-mapping.h-mapping: use PHYS_PFN() macro in io_mapping_map_atomic_wc()
execve: warn if process starts with executable stack
reiserfs: prevent NULL pointer dereference in reiserfs_insert_item()
init/main.c: fix misleading "This architecture does not have kernel memory protection" message
init/main.c: fix quoted value handling in unknown_bootoption
init/main.c: remove unnecessary repair_env_string in do_initcall_level
init/main.c: log arguments and environment passed to init
fs/binfmt_elf.c: coredump: allow process with empty address space to coredump
fs/binfmt_elf.c: coredump: delete duplicated overflow check
fs/binfmt_elf.c: coredump: allocate core ELF header on stack
fs/binfmt_elf.c: make BAD_ADDR() unlikely
fs/binfmt_elf.c: better codegen around current->mm
fs/binfmt_elf.c: don't copy ELF header around
fs/binfmt_elf.c: fix ->start_code calculation
fs/binfmt_elf.c: smaller code generation around auxv vector fill
lib/find_bit.c: uninline helper _find_next_bit()
lib/find_bit.c: join _find_next_bit{_le}
uapi: rename ext2_swab() to swab() and share globally in swab.h
lib/scatterlist.c: adjust indentation in __sg_alloc_table
...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/backing-dev.h      |  10
-rw-r--r--  include/linux/bitops.h           |   1
-rw-r--r--  include/linux/fs.h               |   6
-rw-r--r--  include/linux/io-mapping.h       |   5
-rw-r--r--  include/linux/memblock.h         |   7
-rw-r--r--  include/linux/memory.h           |  29
-rw-r--r--  include/linux/memory_hotplug.h   |   3
-rw-r--r--  include/linux/mm.h               | 104
-rw-r--r--  include/linux/mmzone.h           |   2
-rw-r--r--  include/linux/page-isolation.h   |   4
-rw-r--r--  include/linux/swab.h             |   1
-rw-r--r--  include/linux/thermal.h          |  11
-rw-r--r--  include/linux/units.h            |  84
-rw-r--r--  include/linux/zlib.h             |   6
-rw-r--r--  include/trace/events/kmem.h      |   4
-rw-r--r--  include/trace/events/writeback.h |  37
-rw-r--r--  include/uapi/linux/swab.h        |  10
-rw-r--r--  include/uapi/linux/sysctl.h      |   2
18 files changed, 217 insertions(+), 109 deletions(-)
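The largest single change in the diff below is to include/linux/mm.h, which adds the FOLL_PIN flag and the pin_user_pages*()/unpin_user_page*() family and documents how they pair up. As a rough sketch of the acquire/release pairing that documentation describes (hypothetical caller code using the prototypes added below, not code from this merge; example_pin_and_dirty() is an invented name):

/*
 * Hypothetical sketch only: illustrates the FOLL_PIN pairing rule described
 * in the mm.h documentation below. Pages acquired with pin_user_pages*()
 * must be released with unpin_user_page*(), never with put_page().
 */
static int example_pin_and_dirty(unsigned long uaddr, int nr_pages,
				 struct page **pages)
{
	int pinned;

	/* FOLL_WRITE: the pinned pages' data will be modified (e.g. by DMA). */
	pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;		/* error code from gup */

	/* ... hand the pinned pages to hardware or Direct IO here ... */

	/* Mark dirty and release; this matches the pin acquired above. */
	unpin_user_pages_dirty_lock(pages, pinned, true);
	return 0;
}

Pages obtained with get_user_pages_fast(), by contrast, would still be released with put_page(); mixing the two release paths is exactly the mismatch the new FOLL_PIN comment block warns against.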
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 97967ce06de3..f88197c1ffc2 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,7 @@
 #include <linux/fs.h>
 #include <linux/sched.h>
 #include <linux/blkdev.h>
+#include <linux/device.h>
 #include <linux/writeback.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
@@ -504,4 +505,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 				  (1 << WB_async_congested));
 }
 
+extern const char *bdi_unknown_name;
+
+static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
+{
+	if (!bdi || !bdi->dev)
+		return bdi_unknown_name;
+	return dev_name(bdi->dev);
+}
+
 #endif	/* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index e479067c202c..6c7c4133c25c 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -13,6 +13,7 @@
 
 #define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
 
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 40be2ccb87f3..41584f50af0d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2737,7 +2737,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
 
 extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
 				   loff_t lend);
-extern int filemap_write_and_wait(struct address_space *mapping);
 extern int filemap_write_and_wait_range(struct address_space *mapping,
 					loff_t lstart, loff_t lend);
 extern int __filemap_fdatawrite_range(struct address_space *mapping,
@@ -2747,6 +2746,11 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
 extern int filemap_check_errors(struct address_space *mapping);
 extern void __filemap_set_wb_err(struct address_space *mapping, int err);
 
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
 extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
 					     loff_t lend);
 extern int __must_check file_check_and_advance_wb_err(struct file *file);
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 6e125e9b4187..837058bc1c9f 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -28,6 +28,7 @@ struct io_mapping {
 
 #ifdef CONFIG_HAVE_ATOMIC_IOMAP
 
+#include <linux/pfn.h>
 #include <asm/iomap.h>
 /*
  * For small address space machines, mapping large objects
@@ -64,12 +65,10 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 			 unsigned long offset)
 {
 	resource_size_t phys_addr;
-	unsigned long pfn;
 
 	BUG_ON(offset >= mapping->size);
 	phys_addr = mapping->base + offset;
-	pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
-	return iomap_atomic_prot_pfn(pfn, mapping->prot);
+	return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
 }
 
 static inline void
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index b38bbefabfab..079d17d96410 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -113,6 +113,9 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
 int memblock_remove(phys_addr_t base, phys_addr_t size);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+#endif
 void memblock_trim_memory(phys_addr_t align);
 bool memblock_overlaps_region(struct memblock_type *type,
 			      phys_addr_t base, phys_addr_t size);
@@ -127,10 +130,6 @@ void reset_node_managed_pages(pg_data_t *pgdat);
 void reset_all_zones_managed_pages(void);
 
 /* Low level functions */
-int memblock_add_range(struct memblock_type *type,
-		       phys_addr_t base, phys_addr_t size,
-		       int nid, enum memblock_flags flags);
-
 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
 		      struct memblock_type *type_a,
 		      struct memblock_type *type_b, phys_addr_t *out_start,
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 4c75dae8dd29..0b8d791b6669 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -29,8 +29,6 @@ struct memory_block {
 	int section_count;	/* serialized by mem_sysfs_mutex */
 	int online_type;	/* for passing data to online routine */
 	int phys_device;	/* to which fru does this belong? */
-	void *hw;		/* optional pointer to fw/hw data */
-	int (*phys_callback)(struct memory_block *);
 	struct device dev;
 	int nid;		/* NID for this memory block */
 };
@@ -55,19 +53,6 @@ struct memory_notify {
 	int status_change_nid;
 };
 
-/*
- * During pageblock isolation, count the number of pages within the
- * range [start_pfn, start_pfn + nr_pages) which are owned by code
- * in the notifier chain.
- */
-#define MEM_ISOLATE_COUNT	(1<<0)
-
-struct memory_isolate_notify {
-	unsigned long start_pfn;	/* Start of range to check */
-	unsigned int nr_pages;		/* # pages in range to check */
-	unsigned int pages_found;	/* # pages owned found by callbacks */
-};
-
 struct notifier_block;
 struct mem_section;
 
@@ -94,27 +79,13 @@ static inline int memory_notify(unsigned long val, void *v)
 {
 	return 0;
 }
-static inline int register_memory_isolate_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
-}
-static inline int memory_isolate_notify(unsigned long val, void *v)
-{
-	return 0;
-}
 #else
 extern int register_memory_notifier(struct notifier_block *nb);
 extern void unregister_memory_notifier(struct notifier_block *nb);
-extern int register_memory_isolate_notifier(struct notifier_block *nb);
-extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
 int create_memory_block_devices(unsigned long start, unsigned long size);
 void remove_memory_block_devices(unsigned long start, unsigned long size);
 extern void memory_dev_init(void);
 extern int memory_notify(unsigned long val, void *v);
-extern int memory_isolate_notify(unsigned long val, void *v);
 extern struct memory_block *find_memory_block(struct mem_section *);
 typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
 extern int walk_memory_blocks(unsigned long start, unsigned long size,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index ba0dca6aac6e..ffa6ad12d84a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -94,7 +94,8 @@ extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
 extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
-extern int online_pages(unsigned long, unsigned long, int);
+extern int online_pages(unsigned long pfn, unsigned long nr_pages,
+			int online_type, int nid);
 extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 				unsigned long *valid_start, unsigned long *valid_end);
 extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1233bf45164d..73a044ed6981 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -70,11 +70,6 @@ static inline void totalram_pages_add(long count)
 	atomic_long_add(count, &_totalram_pages);
 }
 
-static inline void totalram_pages_set(long val)
-{
-	atomic_long_set(&_totalram_pages, val);
-}
-
 extern void * high_memory;
 extern int page_cluster;
 
@@ -916,10 +911,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 
 #define ZONEID_PGSHIFT	(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#endif
-
 #define ZONES_MASK	((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK	((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK	((1UL << SECTIONS_WIDTH) - 1)
@@ -947,9 +938,10 @@ static inline bool is_zone_device_page(const struct page *page)
 #endif
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
-void __put_devmap_managed_page(struct page *page);
+void free_devmap_managed_page(struct page *page);
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-static inline bool put_devmap_managed_page(struct page *page)
+
+static inline bool page_is_devmap_managed(struct page *page)
 {
 	if (!static_branch_unlikely(&devmap_managed_key))
 		return false;
@@ -958,7 +950,6 @@ static inline bool put_devmap_managed_page(struct page *page)
 	switch (page->pgmap->type) {
 	case MEMORY_DEVICE_PRIVATE:
 	case MEMORY_DEVICE_FS_DAX:
-		__put_devmap_managed_page(page);
 		return true;
 	default:
 		break;
@@ -966,11 +957,17 @@ static inline bool put_devmap_managed_page(struct page *page)
 	return false;
 }
 
+void put_devmap_managed_page(struct page *page);
+
 #else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool page_is_devmap_managed(struct page *page)
 {
 	return false;
 }
+
+static inline void put_devmap_managed_page(struct page *page)
+{
+}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static inline bool is_device_private_page(const struct page *page)
@@ -1023,37 +1020,37 @@ static inline void put_page(struct page *page)
 	 * need to inform the device driver through callback. See
 	 * include/linux/memremap.h and HMM for details.
 	 */
-	if (put_devmap_managed_page(page))
+	if (page_is_devmap_managed(page)) {
+		put_devmap_managed_page(page);
 		return;
+	}
 
 	if (put_page_testzero(page))
 		__put_page(page);
 }
 
 /**
- * put_user_page() - release a gup-pinned page
+ * unpin_user_page() - release a gup-pinned page
  * @page:            pointer to page to be released
  *
- * Pages that were pinned via get_user_pages*() must be released via
- * either put_user_page(), or one of the put_user_pages*() routines
- * below. This is so that eventually, pages that are pinned via
- * get_user_pages*() can be separately tracked and uniquely handled. In
- * particular, interactions with RDMA and filesystems need special
- * handling.
+ * Pages that were pinned via pin_user_pages*() must be released via either
+ * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
+ * that eventually such pages can be separately tracked and uniquely handled. In
+ * particular, interactions with RDMA and filesystems need special handling.
  *
- * put_user_page() and put_page() are not interchangeable, despite this early
- * implementation that makes them look the same. put_user_page() calls must
- * be perfectly matched up with get_user_page() calls.
+ * unpin_user_page() and put_page() are not interchangeable, despite this early
+ * implementation that makes them look the same. unpin_user_page() calls must
+ * be perfectly matched up with pin*() calls.
  */
-static inline void put_user_page(struct page *page)
+static inline void unpin_user_page(struct page *page)
 {
 	put_page(page);
 }
 
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
-			       bool make_dirty);
+void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+				 bool make_dirty);
 
-void put_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_pages(struct page **pages, unsigned long npages);
 
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define SECTION_IN_PAGE_FLAGS
@@ -1501,9 +1498,16 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			   unsigned long start, unsigned long nr_pages,
 			   unsigned int gup_flags, struct page **pages,
 			   struct vm_area_struct **vmas, int *locked);
+long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+			   unsigned long start, unsigned long nr_pages,
+			   unsigned int gup_flags, struct page **pages,
+			   struct vm_area_struct **vmas, int *locked);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages,
 		    struct vm_area_struct **vmas);
+long pin_user_pages(unsigned long start, unsigned long nr_pages,
+		    unsigned int gup_flags, struct page **pages,
+		    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -1511,6 +1515,8 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 
 int get_user_pages_fast(unsigned long start, int nr_pages,
 			unsigned int gup_flags, struct page **pages);
+int pin_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages);
 
 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
@@ -2575,13 +2581,15 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_ANON	0x8000	/* don't do file mappings */
 #define FOLL_LONGTERM	0x10000	/* mapping lifetime is indefinite: see below */
 #define FOLL_SPLIT_PMD	0x20000	/* split huge pmd before returning */
+#define FOLL_PIN	0x40000	/* pages must be released via unpin_user_page */
 
 /*
- * NOTE on FOLL_LONGTERM:
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
  *
  * FOLL_LONGTERM indicates that the page will be held for an indefinite time
- * period _often_ under userspace control. This is contrasted with
- * iov_iter_get_pages() where usages which are transient.
+ * period _often_ under userspace control. This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
  *
  * FIXME: For pages which are part of a filesystem, mappings are subject to the
  * lifetime enforced by the filesystem and we need guarantees that longterm
@@ -2596,11 +2604,39 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
  * Currently only get_user_pages() and get_user_pages_fast() support this flag
  * and calls to get_user_pages_[un]locked are specifically not allowed. This
  * is due to an incompatibility with the FS DAX check and
- * FAULT_FLAG_ALLOW_RETRY
+ * FAULT_FLAG_ALLOW_RETRY.
  *
- * In the CMA case: longterm pins in a CMA region would unnecessarily fragment
- * that region. And so CMA attempts to migrate the page before pinning when
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region. And so, CMA attempts to migrate the page before pinning, when
  * FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/vm/pin_user_pages.rst for more information.
  */
 static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5334ad8fc7bd..c2bc309d1634 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -758,7 +758,7 @@ typedef struct pglist_data {
 
 #ifdef CONFIG_NUMA
 	/*
-	 * zone reclaim becomes active if more unmapped pages exist.
+	 * node reclaim becomes active if more unmapped pages exist.
 	 */
 	unsigned long min_unmapped_pages;
 	unsigned long min_slab_pages;
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 6861df759fad..572458016331 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,8 +33,8 @@ static inline bool is_migrate_isolate(int migratetype)
 #define MEMORY_OFFLINE	0x1
 #define REPORT_FAILURE	0x2
 
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
-			 int migratetype, int flags);
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+				 int migratetype, int flags);
 void set_pageblock_migratetype(struct page *page, int migratetype);
 int move_freepages_block(struct zone *zone, struct page *page,
 			 int migratetype, int *num_movable);
diff --git a/include/linux/swab.h b/include/linux/swab.h
index e466fd159c85..bcff5149861a 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -7,6 +7,7 @@
 # define swab16 __swab16
 # define swab32 __swab32
 # define swab64 __swab64
+# define swab __swab
 # define swahw32 __swahw32
 # define swahb32 __swahb32
 # define swab16p __swab16p
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index d9111aebb97d..126913c6a53b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -32,17 +32,6 @@
 /* use value, which < 0K, to indicate an invalid/uninitialized temperature */
 #define THERMAL_TEMP_INVALID	-274000
 
-/* Unit conversion macros */
-#define DECI_KELVIN_TO_CELSIUS(t)	({			\
-	long _t = (t);						\
-	((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10);	\
-})
-#define CELSIUS_TO_DECI_KELVIN(t)	((t)*10+2732)
-#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
-#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
-#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
-#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
-
 /* Default Thermal Governor */
 #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
 #define DEFAULT_THERMAL_GOVERNOR	"step_wise"
diff --git a/include/linux/units.h b/include/linux/units.h
new file mode 100644
index 000000000000..aaf716364ec3
--- /dev/null
+++ b/include/linux/units.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNITS_H
+#define _LINUX_UNITS_H
+
+#include <linux/kernel.h>
+
+#define ABSOLUTE_ZERO_MILLICELSIUS	-273150
+
+static inline long milli_kelvin_to_millicelsius(long t)
+{
+	return t + ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+static inline long millicelsius_to_milli_kelvin(long t)
+{
+	return t - ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+#define MILLIDEGREE_PER_DEGREE		1000
+#define MILLIDEGREE_PER_DECIDEGREE	100
+
+static inline long kelvin_to_millicelsius(long t)
+{
+	return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long millicelsius_to_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long deci_kelvin_to_celsius(long t)
+{
+	t = milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_deci_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t * MILLIDEGREE_PER_DEGREE);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+/**
+ * deci_kelvin_to_millicelsius_with_offset - convert Kelvin to Celsius
+ * @t: temperature value in decidegrees Kelvin
+ * @offset: difference between Kelvin and Celsius in millidegrees
+ *
+ * Return: temperature value in millidegrees Celsius
+ */
+static inline long deci_kelvin_to_millicelsius_with_offset(long t, long offset)
+{
+	return t * MILLIDEGREE_PER_DECIDEGREE - offset;
+}
+
+static inline long deci_kelvin_to_millicelsius(long t)
+{
+	return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long millicelsius_to_deci_kelvin(long t)
+{
+	t = millicelsius_to_milli_kelvin(t);
+
+	return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long kelvin_to_celsius(long t)
+{
+	return t + DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+				     MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_kelvin(long t)
+{
+	return t - DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+				     MILLIDEGREE_PER_DEGREE);
+}
+
+#endif /* _LINUX_UNITS_H */
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index 92dbbd3f6c75..c757d848a758 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -191,6 +191,12 @@ extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
    exceed those passed here.
 */
 
+extern int zlib_deflate_dfltcc_enabled (void);
+/*
+   Returns 1 if Deflate-Conversion facility is installed and enabled,
+   otherwise 0.
+*/
+
 /*
 extern int deflateInit (z_streamp strm, int level);
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ad7e642bd497..f65b1f6db22d 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -88,8 +88,8 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->node = node;
 	),
 
-	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
-		__entry->call_site,
+	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+		(void *)__entry->call_site,
 		__entry->ptr,
 		__entry->bytes_req,
 		__entry->bytes_alloc,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index ef50be4e5e6c..d94def25e4dc 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -67,8 +67,8 @@ DECLARE_EVENT_CLASS(writeback_page_template,
 
 	TP_fast_assign(
 		strscpy_pad(__entry->name,
-			    mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
-			    32);
+			    bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
+					 NULL), 32);
 		__entry->ino = mapping ? mapping->host->i_ino : 0;
 		__entry->index = page->index;
 	),
@@ -111,8 +111,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 
 		/* may be called for files on pseudo FSes w/ unregistered bdi */
-		strscpy_pad(__entry->name,
-			    bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
 		__entry->ino = inode->i_ino;
 		__entry->state = inode->i_state;
 		__entry->flags = flags;
@@ -193,7 +192,7 @@ TRACE_EVENT(inode_foreign_history,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
+		strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
 		__entry->ino = inode->i_ino;
 		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
 		__entry->history = history;
@@ -222,7 +221,7 @@ TRACE_EVENT(inode_switch_wbs,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->name, dev_name(old_wb->bdi->dev), 32);
+		strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
 		__entry->ino = inode->i_ino;
 		__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
 		__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
@@ -255,7 +254,7 @@ TRACE_EVENT(track_foreign_dirty,
 		struct address_space *mapping = page_mapping(page);
 		struct inode *inode = mapping ? mapping->host : NULL;
 
-		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+		strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
 		__entry->bdi_id = wb->bdi->id;
 		__entry->ino = inode ? inode->i_ino : 0;
 		__entry->memcg_id = wb->memcg_css->id;
@@ -288,7 +287,7 @@ TRACE_EVENT(flush_foreign,
 	),
 
 	TP_fast_assign(
-		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+		strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
 		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
 		__entry->frn_bdi_id = frn_bdi_id;
 		__entry->frn_memcg_id = frn_memcg_id;
@@ -318,7 +317,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
 
 	TP_fast_assign(
 		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
 		__entry->ino = inode->i_ino;
 		__entry->sync_mode = wbc->sync_mode;
 		__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
@@ -361,9 +360,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__field(ino_t, cgroup_ino)
 	),
 	TP_fast_assign(
-		strscpy_pad(__entry->name,
-			    wb->bdi->dev ? dev_name(wb->bdi->dev) :
-			    "(unknown)", 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
 		__entry->nr_pages = work->nr_pages;
 		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
 		__entry->sync_mode = work->sync_mode;
@@ -416,7 +413,7 @@ DECLARE_EVENT_CLASS(writeback_class,
 		__field(ino_t, cgroup_ino)
 	),
 	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
 		__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
 	),
 	TP_printk("bdi %s: cgroup_ino=%lu",
@@ -438,7 +435,7 @@ TRACE_EVENT(writeback_bdi_register,
 		__array(char, name, 32)
 	),
 	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
 	),
 	TP_printk("bdi %s",
 		__entry->name
@@ -463,7 +460,7 @@ DECLARE_EVENT_CLASS(wbc_class,
 	),
 
 	TP_fast_assign(
-		strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
 		__entry->nr_to_write = wbc->nr_to_write;
 		__entry->pages_skipped = wbc->pages_skipped;
 		__entry->sync_mode = wbc->sync_mode;
@@ -514,7 +511,7 @@ TRACE_EVENT(writeback_queue_io,
 	),
 	TP_fast_assign(
 		unsigned long *older_than_this = work->older_than_this;
-		strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
 		__entry->older = older_than_this ? *older_than_this : 0;
 		__entry->age = older_than_this ?
 			       (jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -600,7 +597,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
 	),
 
 	TP_fast_assign(
-		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
 		__entry->write_bw = KBps(wb->write_bandwidth);
 		__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
 		__entry->dirty_rate = KBps(dirty_rate);
@@ -665,7 +662,7 @@ TRACE_EVENT(balance_dirty_pages,
 
 	TP_fast_assign(
 		unsigned long freerun = (thresh + bg_thresh) / 2;
-		strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+		strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
 
 		__entry->limit = global_wb_domain.dirty_limit;
 		__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -726,7 +723,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
 
 	TP_fast_assign(
 		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
 		__entry->ino = inode->i_ino;
 		__entry->state = inode->i_state;
 		__entry->dirtied_when = inode->dirtied_when;
@@ -800,7 +797,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
 
 	TP_fast_assign(
 		strscpy_pad(__entry->name,
-			    dev_name(inode_to_bdi(inode)->dev), 32);
+			    bdi_dev_name(inode_to_bdi(inode)), 32);
 		__entry->ino = inode->i_ino;
 		__entry->state = inode->i_state;
 		__entry->dirtied_when = inode->dirtied_when;
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 23cd84868cc3..fa7f97da5b76 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -4,6 +4,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <asm/bitsperlong.h>
 #include <asm/swab.h>
 
 /*
@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
 			__fswab64(x))
 #endif
 
+static __always_inline unsigned long __swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+	return __swab64(y);
+#else /* BITS_PER_LONG == 32 */
+	return __swab32(y);
+#endif
+}
+
 /**
  * __swahw32 - return a word-swapped 32-bit value
  * @x: value to wordswap
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 87aa2a6d9125..27c1ed2822e6 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -195,7 +195,7 @@ enum
 	VM_MIN_UNMAPPED=32,	/* Set min percent of unmapped pages */
 	VM_PANIC_ON_OOM=33,	/* panic at out-of-memory */
 	VM_VDSO_ENABLED=34,	/* map VDSO into new processes? */
-	VM_MIN_SLAB=35,		/* Percent pages ignored by zone reclaim */
+	VM_MIN_SLAB=35,		/* Percent pages ignored by node reclaim */
 };
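For reference, the conversion helpers in the new include/linux/units.h replace the DECI_KELVIN_TO_CELSIUS()-style macros removed from thermal.h above. A standalone sketch of the arithmetic they perform (a userspace restatement for illustration only; div_round_closest() below is an assumed stand-in for the kernel's DIV_ROUND_CLOSEST()):

#include <assert.h>

#define ABSOLUTE_ZERO_MILLICELSIUS -273150
#define MILLIDEGREE_PER_DEGREE     1000
#define MILLIDEGREE_PER_DECIDEGREE 100

/* Round-to-nearest integer division, as DIV_ROUND_CLOSEST() does in the kernel. */
static long div_round_closest(long x, long divisor)
{
	return (x > 0) ? (x + divisor / 2) / divisor
		       : (x - divisor / 2) / divisor;
}

static long deci_kelvin_to_celsius(long t)
{
	/* Decidegrees Kelvin -> millidegrees Celsius, then round to whole degrees. */
	t = t * MILLIDEGREE_PER_DECIDEGREE + ABSOLUTE_ZERO_MILLICELSIUS;
	return div_round_closest(t, MILLIDEGREE_PER_DEGREE);
}

int main(void)
{
	/* 3000 dK = 300.0 K = 26.85 degC, which rounds to 27 degC. */
	assert(deci_kelvin_to_celsius(3000) == 27);
	/* The removed DECI_KELVIN_TO_CELSIUS(3000) macro also evaluated to 27. */
	return 0;
}

Because the replacements are inline functions rather than macros, the argument is evaluated exactly once and gets normal type checking, which is presumably why thermal.h's macro block could simply be deleted.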