author     | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-03 21:26:41 +0400
committer  | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-03 21:26:41 +0400
commit     | 8f5759aeb88a47448cd92ab55a016d013b154a98 (patch)
tree       | a9c0536e10300a95292b99332171837675af1e16 /include
parent     | e5c4ecdc55b6d824365ba7964bcd3185223f9688 (diff)
parent     | 63aef00b55d37e9fad837a8b38a2c261f0d32041 (diff)
download   | linux-8f5759aeb88a47448cd92ab55a016d013b154a98.tar.xz
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux into next
Pull first set of s390 updates from Martin Schwidefsky:
"The biggest change in this patchset is conversion from the bootmem
bitmaps to the memblock code. This conversion requires two common
code patches to introduce the 'physmem' memblock list.
We experimented with ticket spinlocks but in the end decided against
them as they perform poorly on virtualized systems. But the spinlock
cleanup and some small improvements are included.
The uaccess code got another optimization, the get_user/put_user calls
are now inline again for kernel compiles targeted at z10 or newer
machines. This makes the text segment shorter and the code gets a
little bit faster.
And as always some bug fixes"
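
As an aside on the uaccess paragraph in the message above: the change concerns ordinary get_user()/put_user() call sites like the sketch below, which kernels built for z10 or newer now expand inline instead of branching into out-of-line uaccess helpers. This is only an illustrative caller written under that assumption; demo_set_flag() is a made-up name, not s390 code from this merge.

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper: read a word from user space, set a flag bit,
     * and write it back.  On z10+ builds, both accessor calls below are
     * now emitted inline rather than as calls to uaccess library routines. */
    static long demo_set_flag(unsigned int __user *uptr)
    {
            unsigned int val;

            if (get_user(val, uptr))
                    return -EFAULT;
            val |= 0x1;
            return put_user(val, uptr);
    }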
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (31 commits)
s390/lowcore: replace lowcore irb array with a per-cpu variable
s390/lowcore: reserve 96 bytes for IRB in lowcore
s390/facilities: remove extract-cpu-time facility check
s390: require mvcos facility for z10 and newer machines
s390/boot: fix boot of compressed kernel built with gcc 4.9
s390/cio: remove weird assignment during argument evaluation
s390/time: cast tv_nsec to u64 prior to shift in update_vsyscall
s390/oprofile: make return of 0 explicit
s390/spinlock: refactor arch_spin_lock_wait[_flags]
s390/rwlock: add missing local_irq_restore calls
s390/spinlock,rwlock: always to a load-and-test first
s390/cio: fix multiple structure definitions
s390/spinlock: fix system hang with spin_retry <= 0
s390/appldata: add slab.h for kzalloc/kfree
s390/uaccess: provide inline variants of get_user/put_user
s390/pci: add some new arch specific pci attributes
s390/pci: use pdev->dev.groups for attribute creation
s390/pci: use macro for attribute creation
s390/pci: improve state check when processing hotplug events
s390: split TIF bits into CIF, PIF and TIF bits
...
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/memblock.h | 79
1 file changed, 65 insertions(+), 14 deletions(-)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 8a20a51ed42d..73dc382e72d8 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 
 #define INIT_MEMBLOCK_REGIONS	128
+#define INIT_PHYSMEM_REGIONS	4
 
 /* Definition of memblock flags. */
 #define MEMBLOCK_HOTPLUG	0x1	/* hotpluggable region */
@@ -43,6 +44,9 @@ struct memblock {
 	phys_addr_t current_limit;
 	struct memblock_type memory;
 	struct memblock_type reserved;
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+	struct memblock_type physmem;
+#endif
 };
 
 extern struct memblock memblock;
@@ -71,6 +75,63 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
 void memblock_trim_memory(phys_addr_t align);
 int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
+
+/* Low level functions */
+int memblock_add_range(struct memblock_type *type,
+		       phys_addr_t base, phys_addr_t size,
+		       int nid, unsigned long flags);
+
+int memblock_remove_range(struct memblock_type *type,
+			  phys_addr_t base,
+			  phys_addr_t size);
+
+void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
+		      struct memblock_type *type_b, phys_addr_t *out_start,
+		      phys_addr_t *out_end, int *out_nid);
+
+void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
+			  struct memblock_type *type_b, phys_addr_t *out_start,
+			  phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_mem_range - iterate through memblock areas from type_a and not
+ * included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @type_a: ptr to memblock_type to iterate
+ * @type_b: ptr to memblock_type which excludes from the iteration
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ */
+#define for_each_mem_range(i, type_a, type_b, nid,		\
+			   p_start, p_end, p_nid)		\
+	for (i = 0, __next_mem_range(&i, nid, type_a, type_b,	\
+				     p_start, p_end, p_nid);	\
+	     i != (u64)ULLONG_MAX;				\
+	     __next_mem_range(&i, nid, type_a, type_b,		\
+			      p_start, p_end, p_nid))
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * type_a and not included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @type_a: ptr to memblock_type to iterate
+ * @type_b: ptr to memblock_type which excludes from the iteration
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, type_a, type_b, nid,		\
+			       p_start, p_end, p_nid)		\
+	for (i = (u64)ULLONG_MAX,				\
+		     __next_mem_range_rev(&i, nid, type_a, type_b,\
+					  p_start, p_end, p_nid); \
+	     i != (u64)ULLONG_MAX;				\
+	     __next_mem_range_rev(&i, nid, type_a, type_b,	\
+				  p_start, p_end, p_nid))
+
 #ifdef CONFIG_MOVABLE_NODE
 static inline bool memblock_is_hotpluggable(struct memblock_region *m)
 {
@@ -113,9 +174,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
 	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
-			   phys_addr_t *out_end, int *out_nid);
-
 /**
  * for_each_free_mem_range - iterate through free memblock areas
  * @i: u64 used as loop variable
@@ -128,13 +186,8 @@ void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
  * soon as memblock is initialized.
  */
 #define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
-	for (i = 0,							\
-	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid);	\
-	     i != (u64)ULLONG_MAX;					\
-	     __next_free_mem_range(&i, nid, p_start, p_end, p_nid))
-
-void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
-			       phys_addr_t *out_end, int *out_nid);
+	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
+			   nid, p_start, p_end, p_nid)
 
 /**
  * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -148,10 +201,8 @@ void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
  * order. Available as soon as memblock is initialized.
  */
 #define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
-	for (i = (u64)ULLONG_MAX,					\
-	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid);	\
-	     i != (u64)ULLONG_MAX;					\
-	     __next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
+	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
+			       nid, p_start, p_end, p_nid)
 
 static inline void memblock_set_region_flags(struct memblock_region *r,
 					     unsigned long flags)
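
To make the new interfaces above concrete, here is a minimal sketch of how they fit together: an architecture that selects CONFIG_HAVE_MEMBLOCK_PHYS_MAP (as s390 does with this series) can record detected memory in both memblock.memory and the new memblock.physmem list, and later walk either list with the iterators added above. The helpers record_detected_range() and dump_memblock_ranges() are made-up names for illustration; only memblock_add(), memblock_add_range(), for_each_free_mem_range() and for_each_mem_range() come from the header itself, and the MAX_NUMNODES/0 arguments simply mean "no node info, no flags".

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/memblock.h>
    #include <linux/numa.h>

    /* Record one detected range of physical memory (illustrative helper).
     * memblock.memory may later be trimmed (e.g. by mem=), while
     * memblock.physmem keeps the full physical map. */
    static void __init record_detected_range(phys_addr_t start, phys_addr_t size)
    {
            memblock_add(start, size);      /* usable memory list */
    #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
            /* no node info, no flags: physmem carries no such annotations */
            memblock_add_range(&memblock.physmem, start, size, MAX_NUMNODES, 0);
    #endif
    }

    /* Walk the lists with the new iterators (illustrative debug helper). */
    static void __init dump_memblock_ranges(void)
    {
            phys_addr_t start, end;
            u64 i;

            /* Free memory: memblock.memory minus memblock.reserved,
             * via the for_each_free_mem_range() wrapper above. */
            for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
                    pr_info("free    : %pa - %pa\n", &start, &end);

    #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
            /* The physmem list on its own: a NULL type_b means "just type_a",
             * per the for_each_mem_range() kerneldoc above. */
            for_each_mem_range(i, &memblock.physmem, NULL, NUMA_NO_NODE,
                               &start, &end, NULL)
                    pr_info("physmem : %pa - %pa\n", &start, &end);
    #endif
    }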