author    | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-30 03:29:11 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-30 03:29:11 +0300
commit    | 65090f30ab791810a3dc840317e57df05018559c (patch)
tree      | f417526656da37109777e89613e140ffc59228bc /arch
parent    | 349a2d52ffe59b7a0c5876fa7ee9f3eaf188b830 (diff)
parent    | 0ed950d1f28142ccd9a9453c60df87853530d778 (diff)
download  | linux-65090f30ab791810a3dc840317e57df05018559c.tar.xz
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
"191 patches.
Subsystems affected by this patch series: kthread, ia64, scripts,
ntfs, squashfs, ocfs2, kernel/watchdog, and mm (gup, pagealloc, slab,
slub, kmemleak, dax, debug, pagecache, gup, swap, memcg, pagemap,
mprotect, bootmem, dma, tracing, vmalloc, kasan, initialization,
pagealloc, and memory-failure)"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (191 commits)
mm,hwpoison: make get_hwpoison_page() call get_any_page()
mm,hwpoison: send SIGBUS with error virutal address
mm/page_alloc: split pcp->high across all online CPUs for cpuless nodes
mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
mm: replace CONFIG_FLAT_NODE_MEM_MAP with CONFIG_FLATMEM
mm: replace CONFIG_NEED_MULTIPLE_NODES with CONFIG_NUMA
docs: remove description of DISCONTIGMEM
arch, mm: remove stale mentions of DISCONIGMEM
mm: remove CONFIG_DISCONTIGMEM
m68k: remove support for DISCONTIGMEM
arc: remove support for DISCONTIGMEM
arc: update comment about HIGHMEM implementation
alpha: remove DISCONTIGMEM and NUMA
mm/page_alloc: move free_the_page
mm/page_alloc: fix counting of managed_pages
mm/page_alloc: improve memmap_pages dbg msg
mm: drop SECTION_SHIFT in code comments
mm/page_alloc: introduce vm.percpu_pagelist_high_fraction
mm/page_alloc: limit the number of pages on PCP lists when reclaim is active
mm/page_alloc: scale the number of pages that are batch freed
...
Diffstat (limited to 'arch')
75 files changed, 94 insertions, 794 deletions
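Many of the arch hunks below replace open-coded find_vma() calls plus a "vma->vm_start <= addr" check with the vma_lookup() helper added earlier in this series (see the arc troubleshoot, arm64 KVM, m68k sys_cacheflush, mips traps, powerpc KVM and x86 SGX hunks). A minimal sketch of the semantic difference — this mirrors the helper's logic, it is not the exact mm code:

```c
#include <linux/mm.h>

/*
 * Sketch only: vma_lookup() behaves like find_vma() but additionally
 * requires that the address actually lies inside the returned VMA.
 */
static struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
						unsigned long addr)
{
	/* find_vma(): first VMA with vm_end > addr, which may start above addr */
	struct vm_area_struct *vma = find_vma(mm, addr);

	/* reject a VMA that begins above addr, i.e. addr falls in a hole */
	if (vma && addr < vma->vm_start)
		vma = NULL;

	return vma;
}
```

That built-in containment check is why callers such as arc's show_faulting_vma() and x86's sgx_encl_find() can drop their explicit vm_start comparisons in the hunks that follow.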
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 5998106faa60..8954216b9956 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -549,29 +549,12 @@ config NR_CPUS MARVEL support can handle a maximum of 32 CPUs, all the others with working support have a maximum of 4 CPUs. -config ARCH_DISCONTIGMEM_ENABLE - bool "Discontiguous Memory Support" - depends on BROKEN - help - Say Y to support efficient handling of discontiguous physical memory, - for architectures which are either NUMA (Non-Uniform Memory Access) - or have huge holes in the physical address space for other reasons. - See <file:Documentation/vm/numa.rst> for more. - config ARCH_SPARSEMEM_ENABLE bool "Sparse Memory Support" help Say Y to support efficient handling of discontiguous physical memory, for systems that have huge holes in the physical address space. -config NUMA - bool "NUMA Support (EXPERIMENTAL)" - depends on DISCONTIGMEM && BROKEN - help - Say Y to compile the kernel to support NUMA (Non-Uniform Memory - Access). This option is for configuring high-end multiprocessor - server machines. If in doubt, say N. - config ALPHA_WTINT bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC default y if ALPHA_QEMU @@ -596,11 +579,6 @@ config ALPHA_WTINT If unsure, say N. -config NODES_SHIFT - int - default "7" - depends on NEED_MULTIPLE_NODES - # LARGE_VMALLOC is racy, if you *really* need it then fix it first config ALPHA_LARGE_VMALLOC bool diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h index a4e96e2bec74..e49fabce7b33 100644 --- a/arch/alpha/include/asm/machvec.h +++ b/arch/alpha/include/asm/machvec.h @@ -99,12 +99,6 @@ struct alpha_machine_vector const char *vector_name; - /* NUMA information */ - int (*pa_to_nid)(unsigned long); - int (*cpuid_to_nid)(int); - unsigned long (*node_mem_start)(int); - unsigned long (*node_mem_size)(int); - /* System specific parameters. */ union { struct { diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h deleted file mode 100644 index 86644604d977..000000000000 --- a/arch/alpha/include/asm/mmzone.h +++ /dev/null @@ -1,100 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99 - * Adapted for the alpha wildfire architecture Jan 2001. - */ -#ifndef _ASM_MMZONE_H_ -#define _ASM_MMZONE_H_ - -#ifdef CONFIG_DISCONTIGMEM - -#include <asm/smp.h> - -/* - * Following are macros that are specific to this numa platform. - */ - -extern pg_data_t node_data[]; - -#define alpha_pa_to_nid(pa) \ - (alpha_mv.pa_to_nid \ - ? alpha_mv.pa_to_nid(pa) \ - : (0)) -#define node_mem_start(nid) \ - (alpha_mv.node_mem_start \ - ? alpha_mv.node_mem_start(nid) \ - : (0UL)) -#define node_mem_size(nid) \ - (alpha_mv.node_mem_size \ - ? alpha_mv.node_mem_size(nid) \ - : ((nid) ? (0UL) : (~0UL))) - -#define pa_to_nid(pa) alpha_pa_to_nid(pa) -#define NODE_DATA(nid) (&node_data[(nid)]) - -#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn) - -#if 1 -#define PLAT_NODE_DATA_LOCALNR(p, n) \ - (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn) -#else -static inline unsigned long -PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) -{ - unsigned long temp; - temp = p >> PAGE_SHIFT; - return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn; -} -#endif - -/* - * Following are macros that each numa implementation must define. - */ - -/* - * Given a kernel address, find the home node of the underlying memory. 
- */ -#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) - -/* - * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory - * and returns the kaddr corresponding to first physical page in the - * node's mem_map. - */ -#define LOCAL_BASE_ADDR(kaddr) \ - ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \ - << PAGE_SHIFT)) - -/* XXX: FIXME -- nyc */ -#define kern_addr_valid(kaddr) (0) - -#define mk_pte(page, pgprot) \ -({ \ - pte_t pte; \ - unsigned long pfn; \ - \ - pfn = page_to_pfn(page) << 32; \ - pte_val(pte) = pfn | pgprot_val(pgprot); \ - \ - pte; \ -}) - -#define pte_page(x) \ -({ \ - unsigned long kvirt; \ - struct page * __xx; \ - \ - kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \ - __xx = virt_to_page(kvirt); \ - \ - __xx; \ -}) - -#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) -#define pfn_valid(pfn) \ - (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \ - node_spanned_pages(pfn_to_nid(pfn))) \ - -#endif /* CONFIG_DISCONTIGMEM */ - -#endif /* _ASM_MMZONE_H_ */ diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 8d856c62e22a..e1757b7cfe3d 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -206,7 +206,6 @@ extern unsigned long __zero_page(void); #define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT) #define pte_pfn(pte) (pte_val(pte) >> 32) -#ifndef CONFIG_DISCONTIGMEM #define pte_page(pte) pfn_to_page(pte_pfn(pte)) #define mk_pte(page, pgprot) \ ({ \ @@ -215,7 +214,6 @@ extern unsigned long __zero_page(void); pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \ pte; \ }) -#endif extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot) { pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; } @@ -330,9 +328,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#ifndef CONFIG_DISCONTIGMEM #define kern_addr_valid(addr) (1) -#endif #define pte_ERROR(e) \ printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h index 5a77a40567fa..7d393036aa8f 100644 --- a/arch/alpha/include/asm/topology.h +++ b/arch/alpha/include/asm/topology.h @@ -7,45 +7,6 @@ #include <linux/numa.h> #include <asm/machvec.h> -#ifdef CONFIG_NUMA -static inline int cpu_to_node(int cpu) -{ - int node; - - if (!alpha_mv.cpuid_to_nid) - return 0; - - node = alpha_mv.cpuid_to_nid(cpu); - -#ifdef DEBUG_NUMA - BUG_ON(node < 0); -#endif - - return node; -} - -extern struct cpumask node_to_cpumask_map[]; -/* FIXME: This is dumb, recalculating every time. But simple. 
*/ -static const struct cpumask *cpumask_of_node(int node) -{ - int cpu; - - if (node == NUMA_NO_NODE) - return cpu_all_mask; - - cpumask_clear(&node_to_cpumask_map[node]); - - for_each_online_cpu(cpu) { - if (cpu_to_node(cpu) == node) - cpumask_set_cpu(cpu, node_to_cpumask_map[node]); - } - - return &node_to_cpumask_map[node]; -} - -#define cpumask_of_pcibus(bus) (cpu_online_mask) - -#endif /* !CONFIG_NUMA */ # include <asm-generic/topology.h> #endif /* _ASM_ALPHA_TOPOLOGY_H */ diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index 4485b77f8658..1efca79ac83c 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -287,8 +287,7 @@ io7_init_hose(struct io7 *io7, int port) /* * Set up window 0 for scatter-gather 8MB at 8MB. */ - hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe), - hose, 0x00800000, 0x00800000, 0); + hose->sg_isa = iommu_arena_new_node(0, hose, 0x00800000, 0x00800000, 0); hose->sg_isa->align_entry = 8; /* cache line boundary */ csrs->POx_WBASE[0].csr = hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg; @@ -305,8 +304,7 @@ io7_init_hose(struct io7 *io7, int port) /* * Set up window 2 for scatter-gather (up-to) 1GB at 3GB. */ - hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe), - hose, 0xc0000000, 0x40000000, 0); + hose->sg_pci = iommu_arena_new_node(0, hose, 0xc0000000, 0x40000000, 0); hose->sg_pci->align_entry = 8; /* cache line boundary */ csrs->POx_WBASE[2].csr = hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg; @@ -843,53 +841,8 @@ EXPORT_SYMBOL(marvel_ioportmap); EXPORT_SYMBOL(marvel_ioread8); EXPORT_SYMBOL(marvel_iowrite8); #endif - -/* - * NUMA Support - */ -/********** - * FIXME - for now each cpu is a node by itself - * -- no real support for striped mode - ********** - */ -int -marvel_pa_to_nid(unsigned long pa) -{ - int cpuid; - if ((pa >> 43) & 1) /* I/O */ - cpuid = (~(pa >> 35) & 0xff); - else /* mem */ - cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2)); - - return marvel_cpuid_to_nid(cpuid); -} - -int -marvel_cpuid_to_nid(int cpuid) -{ - return cpuid; -} - -unsigned long -marvel_node_mem_start(int nid) -{ - unsigned long pa; - - pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1); - pa <<= 34; - - return pa; -} - -unsigned long -marvel_node_mem_size(int nid) -{ - return 16UL * 1024 * 1024 * 1024; /* 16GB */ -} - - -/* +/* * AGP GART Support. 
*/ #include <linux/agp_backend.h> diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c index e8d3b033018d..3a804b67f9da 100644 --- a/arch/alpha/kernel/core_wildfire.c +++ b/arch/alpha/kernel/core_wildfire.c @@ -434,39 +434,12 @@ wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where, return PCIBIOS_SUCCESSFUL; } -struct pci_ops wildfire_pci_ops = +struct pci_ops wildfire_pci_ops = { .read = wildfire_read_config, .write = wildfire_write_config, }; - -/* - * NUMA Support - */ -int wildfire_pa_to_nid(unsigned long pa) -{ - return pa >> 36; -} - -int wildfire_cpuid_to_nid(int cpuid) -{ - /* assume 4 CPUs per node */ - return cpuid >> 2; -} - -unsigned long wildfire_node_mem_start(int nid) -{ - /* 64GB per node */ - return (unsigned long)nid * (64UL * 1024 * 1024 * 1024); -} - -unsigned long wildfire_node_mem_size(int nid) -{ - /* 64GB per node */ - return 64UL * 1024 * 1024 * 1024; -} - #if DEBUG_DUMP_REGS static void __init diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index d84b19aa8e9d..35d7b3096d6e 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -71,33 +71,6 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, if (align < mem_size) align = mem_size; - -#ifdef CONFIG_DISCONTIGMEM - - arena = memblock_alloc_node(sizeof(*arena), align, nid); - if (!NODE_DATA(nid) || !arena) { - printk("%s: couldn't allocate arena from node %d\n" - " falling back to system-wide allocation\n", - __func__, nid); - arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); - if (!arena) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*arena)); - } - - arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid); - if (!NODE_DATA(nid) || !arena->ptes) { - printk("%s: couldn't allocate arena ptes from node %d\n" - " falling back to system-wide allocation\n", - __func__, nid); - arena->ptes = memblock_alloc(mem_size, align); - if (!arena->ptes) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, mem_size, align); - } - -#else /* CONFIG_DISCONTIGMEM */ - arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); if (!arena) panic("%s: Failed to allocate %zu bytes\n", __func__, @@ -107,8 +80,6 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, mem_size, align); -#endif /* CONFIG_DISCONTIGMEM */ - spin_lock_init(&arena->lock); arena->hose = hose; arena->dma_base = base; diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h index 701a05090141..5816a31c1b38 100644 --- a/arch/alpha/kernel/proto.h +++ b/arch/alpha/kernel/proto.h @@ -49,10 +49,6 @@ extern void marvel_init_arch(void); extern void marvel_kill_arch(int); extern void marvel_machine_check(unsigned long, unsigned long); extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); -extern int marvel_pa_to_nid(unsigned long); -extern int marvel_cpuid_to_nid(int); -extern unsigned long marvel_node_mem_start(int); -extern unsigned long marvel_node_mem_size(int); extern struct _alpha_agp_info *marvel_agp_info(void); struct io7 *marvel_find_io7(int pe); struct io7 *marvel_next_io7(struct io7 *prev); @@ -101,10 +97,6 @@ extern void wildfire_init_arch(void); extern void wildfire_kill_arch(int); extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr); extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); -extern int 
wildfire_pa_to_nid(unsigned long); -extern int wildfire_cpuid_to_nid(int); -extern unsigned long wildfire_node_mem_start(int); -extern unsigned long wildfire_node_mem_size(int); /* console.c */ #ifdef CONFIG_VGA_HOSE diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 03dda3beb3bd..5f6858e9dc28 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -79,11 +79,6 @@ int alpha_l3_cacheshape; unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON; #endif -#ifdef CONFIG_NUMA -struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL(node_to_cpumask_map); -#endif - /* Which processor we booted from. */ int boot_cpuid; @@ -305,7 +300,6 @@ move_initrd(unsigned long mem_limit) } #endif -#ifndef CONFIG_DISCONTIGMEM static void __init setup_memory(void *kernel_end) { @@ -389,9 +383,6 @@ setup_memory(void *kernel_end) } #endif /* CONFIG_BLK_DEV_INITRD */ } -#else -extern void setup_memory(void *); -#endif /* !CONFIG_DISCONTIGMEM */ int __init page_is_ram(unsigned long pfn) @@ -618,13 +609,6 @@ setup_arch(char **cmdline_p) "VERBOSE_MCHECK " #endif -#ifdef CONFIG_DISCONTIGMEM - "DISCONTIGMEM " -#ifdef CONFIG_NUMA - "NUMA " -#endif -#endif - #ifdef CONFIG_DEBUG_SPINLOCK "DEBUG_SPINLOCK " #endif diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 83d6c53d6d4d..1f99b03effc2 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -461,10 +461,5 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = { .kill_arch = marvel_kill_arch, .pci_map_irq = marvel_map_irq, .pci_swizzle = common_swizzle, - - .pa_to_nid = marvel_pa_to_nid, - .cpuid_to_nid = marvel_cpuid_to_nid, - .node_mem_start = marvel_node_mem_start, - .node_mem_size = marvel_node_mem_size, }; ALIAS_MV(marvel_ev7) diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index 2c54d707142a..3cee05443f07 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c @@ -337,10 +337,5 @@ struct alpha_machine_vector wildfire_mv __initmv = { .kill_arch = wildfire_kill_arch, .pci_map_irq = wildfire_map_irq, .pci_swizzle = common_swizzle, - - .pa_to_nid = wildfire_pa_to_nid, - .cpuid_to_nid = wildfire_cpuid_to_nid, - .node_mem_start = wildfire_node_mem_start, - .node_mem_size = wildfire_node_mem_size, }; ALIAS_MV(wildfire) diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile index 08ac6612edad..bd770302eb82 100644 --- a/arch/alpha/mm/Makefile +++ b/arch/alpha/mm/Makefile @@ -6,5 +6,3 @@ ccflags-y := -Werror obj-y := init.o fault.o - -obj-$(CONFIG_DISCONTIGMEM) += numa.o diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index a97650a618f1..f6114d03357c 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -235,8 +235,6 @@ callback_init(void * kernel_end) return kernel_end; } - -#ifndef CONFIG_DISCONTIGMEM /* * paging_init() sets up the memory map. */ @@ -257,7 +255,6 @@ void __init paging_init(void) /* Initialize the kernel's ZERO_PGE. */ memset((void *)ZERO_PGE, 0, PAGE_SIZE); } -#endif /* CONFIG_DISCONTIGMEM */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM) void diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c deleted file mode 100644 index 0636e254a22f..000000000000 --- a/arch/alpha/mm/numa.c +++ /dev/null @@ -1,223 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/alpha/mm/numa.c - * - * DISCONTIGMEM NUMA alpha support. 
- * - * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE - */ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/memblock.h> -#include <linux/swap.h> -#include <linux/initrd.h> -#include <linux/pfn.h> -#include <linux/module.h> - -#include <asm/hwrpb.h> -#include <asm/sections.h> - -pg_data_t node_data[MAX_NUMNODES]; -EXPORT_SYMBOL(node_data); - -#undef DEBUG_DISCONTIG -#ifdef DEBUG_DISCONTIG -#define DBGDCONT(args...) printk(args) -#else -#define DBGDCONT(args...) -#endif - -#define for_each_mem_cluster(memdesc, _cluster, i) \ - for ((_cluster) = (memdesc)->cluster, (i) = 0; \ - (i) < (memdesc)->numclusters; (i)++, (_cluster)++) - -static void __init show_mem_layout(void) -{ - struct memclust_struct * cluster; - struct memdesc_struct * memdesc; - int i; - - /* Find free clusters, and init and free the bootmem accordingly. */ - memdesc = (struct memdesc_struct *) - (hwrpb->mddt_offset + (unsigned long) hwrpb); - - printk("Raw memory layout:\n"); - for_each_mem_cluster(memdesc, cluster, i) { - printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n", - i, cluster->usage, cluster->start_pfn, - cluster->start_pfn + cluster->numpages); - } -} - -static void __init -setup_memory_node(int nid, void *kernel_end) -{ - extern unsigned long mem_size_limit; - struct memclust_struct * cluster; - struct memdesc_struct * memdesc; - unsigned long start_kernel_pfn, end_kernel_pfn; - unsigned long start, end; - unsigned long node_pfn_start, node_pfn_end; - unsigned long node_min_pfn, node_max_pfn; - int i; - int show_init = 0; - - /* Find the bounds of current node */ - node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT; - node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT); - - /* Find free clusters, and init and free the bootmem accordingly. */ - memdesc = (struct memdesc_struct *) - (hwrpb->mddt_offset + (unsigned long) hwrpb); - - /* find the bounds of this node (node_min_pfn/node_max_pfn) */ - node_min_pfn = ~0UL; - node_max_pfn = 0UL; - for_each_mem_cluster(memdesc, cluster, i) { - /* Bit 0 is console/PALcode reserved. Bit 1 is - non-volatile memory -- we might want to mark - this for later. */ - if (cluster->usage & 3) - continue; - - start = cluster->start_pfn; - end = start + cluster->numpages; - - if (start >= node_pfn_end || end <= node_pfn_start) - continue; - - if (!show_init) { - show_init = 1; - printk("Initializing bootmem allocator on Node ID %d\n", nid); - } - printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n", - i, cluster->usage, cluster->start_pfn, - cluster->start_pfn + cluster->numpages); - - if (start < node_pfn_start) - start = node_pfn_start; - if (end > node_pfn_end) - end = node_pfn_end; - - if (start < node_min_pfn) - node_min_pfn = start; - if (end > node_max_pfn) - node_max_pfn = end; - } - - if (mem_size_limit && node_max_pfn > mem_size_limit) { - static int msg_shown = 0; - if (!msg_shown) { - msg_shown = 1; - printk("setup: forcing memory size to %ldK (from %ldK).\n", - mem_size_limit << (PAGE_SHIFT - 10), - node_max_pfn << (PAGE_SHIFT - 10)); - } - node_max_pfn = mem_size_limit; - } - - if (node_min_pfn >= node_max_pfn) - return; - - /* Update global {min,max}_low_pfn from node information. 
*/ - if (node_min_pfn < min_low_pfn) - min_low_pfn = node_min_pfn; - if (node_max_pfn > max_low_pfn) - max_pfn = max_low_pfn = node_max_pfn; - -#if 0 /* we'll try this one again in a little while */ - /* Cute trick to make sure our local node data is on local memory */ - node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT)); -#endif - printk(" Detected node memory: start %8lu, end %8lu\n", - node_min_pfn, node_max_pfn); - - DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid)); - - /* Find the bounds of kernel memory. */ - start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS); - end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end)); - - if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn)) - panic("kernel loaded out of ram"); - - memblock_add_node(PFN_PHYS(node_min_pfn), - (node_max_pfn - node_min_pfn) << PAGE_SHIFT, nid); - - /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned. - Note that we round this down, not up - node memory - has much larger alignment than 8Mb, so it's safe. */ - node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1); - - NODE_DATA(nid)->node_start_pfn = node_min_pfn; - NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn; - - node_set_online(nid); -} - -void __init -setup_memory(void *kernel_end) -{ - unsigned long kernel_size; - int nid; - - show_mem_layout(); - - nodes_clear(node_online_map); - - min_low_pfn = ~0UL; - max_low_pfn = 0UL; - for (nid = 0; nid < MAX_NUMNODES; nid++) - setup_memory_node(nid, kernel_end); - - kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS; - memblock_reserve(KERNEL_START_PHYS, kernel_size); - -#ifdef CONFIG_BLK_DEV_INITRD - initrd_start = INITRD_START; - if (initrd_start) { - extern void *move_initrd(unsigned long); - - initrd_end = initrd_start+INITRD_SIZE; - printk("Initial ramdisk at: 0x%p (%lu bytes)\n", - (void *) initrd_start, INITRD_SIZE); - - if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) { - if (!move_initrd(PFN_PHYS(max_low_pfn))) - printk("initrd extends beyond end of memory " - "(0x%08lx > 0x%p)\ndisabling initrd\n", - initrd_end, - phys_to_virt(PFN_PHYS(max_low_pfn))); - } else { - nid = kvaddr_to_nid(initrd_start); - memblock_reserve(virt_to_phys((void *)initrd_start), - INITRD_SIZE); - } - } -#endif /* CONFIG_BLK_DEV_INITRD */ -} - -void __init paging_init(void) -{ - unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, }; - unsigned long dma_local_pfn; - - /* - * The old global MAX_DMA_ADDRESS per-arch API doesn't fit - * in the NUMA model, for now we convert it to a pfn and - * we interpret this pfn as a local per-node information. - * This issue isn't very important since none of these machines - * have legacy ISA slots anyways. - */ - dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; - - max_zone_pfn[ZONE_DMA] = dma_local_pfn; - max_zone_pfn[ZONE_NORMAL] = max_pfn; - - free_area_init(max_zone_pfn); - - /* Initialize the kernel's ZERO_PGE. 
*/ - memset((void *)ZERO_PGE, 0, PAGE_SIZE); -} diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 2d98501c0897..d8f51eb8963b 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -62,10 +62,6 @@ config SCHED_OMIT_FRAME_POINTER config GENERIC_CSUM def_bool y -config ARCH_DISCONTIGMEM_ENABLE - def_bool n - depends on BROKEN - config ARCH_FLATMEM_ENABLE def_bool y @@ -344,15 +340,6 @@ config ARC_HUGEPAGE_16M endchoice -config NODES_SHIFT - int "Maximum NUMA Nodes (as a power of 2)" - default "0" if !DISCONTIGMEM - default "1" if DISCONTIGMEM - depends on NEED_MULTIPLE_NODES - help - Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory - zones. - config ARC_COMPACT_IRQ_LEVELS depends on ISA_ARCOMPACT bool "Setup Timer IRQ as high Priority" diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h deleted file mode 100644 index b86b9d1e54dc..000000000000 --- a/arch/arc/include/asm/mmzone.h +++ /dev/null @@ -1,40 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) - */ - -#ifndef _ASM_ARC_MMZONE_H -#define _ASM_ARC_MMZONE_H - -#ifdef CONFIG_DISCONTIGMEM - -extern struct pglist_data node_data[]; -#define NODE_DATA(nid) (&node_data[nid]) - -static inline int pfn_to_nid(unsigned long pfn) -{ - int is_end_low = 1; - - if (IS_ENABLED(CONFIG_ARC_HAS_PAE40)) - is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL); - - /* - * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF - * node 1: HIGHMEM w/o PAE40: 0x0 to 0x7FFF_FFFF - * HIGHMEM with PAE40: 0x1_0000_0000 to ... - */ - if (pfn >= ARCH_PFN_OFFSET && is_end_low) - return 0; - - return 1; -} - -static inline int pfn_valid(unsigned long pfn) -{ - int nid = pfn_to_nid(pfn); - - return (pfn <= node_end_pfn(nid)); -} -#endif /* CONFIG_DISCONTIGMEM */ - -#endif diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index a331bb5d8319..7654c2e42dc0 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -83,12 +83,12 @@ static void show_faulting_vma(unsigned long address) * non-inclusive vma */ mmap_read_lock(active_mm); - vma = find_vma(active_mm, address); + vma = vma_lookup(active_mm, address); - /* check against the find_vma( ) behaviour which returns the next VMA - * if the container VMA is not found + /* Lookup the vma at the address and report if the container VMA is not + * found */ - if (vma && (vma->vm_start <= address)) { + if (vma) { char buf[ARC_PATH_MAX]; char *nm = "?"; diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index e2ed355438c9..abfeef7bf6f8 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -32,11 +32,6 @@ unsigned long arch_pfn_offset; EXPORT_SYMBOL(arch_pfn_offset); #endif -#ifdef CONFIG_DISCONTIGMEM -struct pglist_data node_data[MAX_NUMNODES] __read_mostly; -EXPORT_SYMBOL(node_data); -#endif - long __init arc_get_mem_sz(void) { return low_mem_sz; @@ -139,20 +134,14 @@ void __init setup_arch_memory(void) #ifdef CONFIG_HIGHMEM /* - * Populate a new node with highmem - * * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based) - * than addresses in normal ala low memory (0x8000_0000 based). + * than addresses in normal aka low memory (0x8000_0000 based). * Even with PAE, the huge peripheral space hole would waste a lot of - * mem with single mem_map[]. This warrants a mem_map per region design. - * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM. - * - * DISCONTIGMEM in turns requires multiple nodes. 
node 0 above is - * populated with normal memory zone while node 1 only has highmem + * mem with single contiguous mem_map[]. + * Thus when HIGHMEM on ARC is enabled the memory map corresponding + * to the hole is freed and ARC specific version of pfn_valid() + * handles the hole in the memory map. */ -#ifdef CONFIG_DISCONTIGMEM - node_set_online(1); -#endif min_high_pfn = PFN_DOWN(high_mem_start); max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz); diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 24cbfc112dfa..0ccc985b90af 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -253,7 +253,7 @@ extern struct cpu_tlb_fns cpu_tlb; * space. * - mm - mm_struct describing address space * - * flush_tlb_range(mm,start,end) + * flush_tlb_range(vma,start,end) * * Invalidate a range of TLB entries in the specified * address space. @@ -261,18 +261,11 @@ extern struct cpu_tlb_fns cpu_tlb; * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) * - * flush_tlb_page(vaddr,vma) + * flush_tlb_page(vma, uaddr) * * Invalidate the specified page in the specified address range. + * - vma - vm_area_struct describing address range * - vaddr - virtual address (may not be aligned) - * - vma - vma_struct describing address range - * - * flush_kern_tlb_page(kaddr) - * - * Invalidate the TLB entry for the specified page. The address - * will be in the kernels virtual memory space. Current uses - * only require the D-TLB to be invalidated. - * - kaddr - Kernel virtual memory address */ /* diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S index 5335b9687297..74f4b383afe3 100644 --- a/arch/arm/mm/tlb-v6.S +++ b/arch/arm/mm/tlb-v6.S @@ -24,7 +24,7 @@ * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) - * - vma - vma_struct describing address range + * - vma - vm_area_struct describing address range * * It is assumed that: * - the "Invalidate single entry" instruction will invalidate diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 1bb28d7db567..87bf4ab17721 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -23,7 +23,7 @@ * * - start - start address (may not be aligned) * - end - end address (exclusive, may not be aligned) - * - vma - vma_struct describing address range + * - vma - vm_area_struct describing address range * * It is assumed that: * - the "Invalidate single entry" instruction will invalidate diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index dabe9b81012f..a6a09cb95cc7 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1035,7 +1035,7 @@ config NODES_SHIFT int "Maximum NUMA Nodes (as a power of 2)" range 1 10 default "4" - depends on NEED_MULTIPLE_NODES + depends on NUMA help Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accommodate various tables. diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 57292dc5ce35..f23dfa06433b 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -929,7 +929,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * get block mapping for device MMIO region. 
*/ mmap_read_lock(current->mm); - vma = find_vma_intersection(current->mm, hva, hva + 1); + vma = vma_lookup(current->mm, hva); if (unlikely(!vma)) { kvm_err("Failed to find VMA for hva 0x%lx\n", hva); mmap_read_unlock(current->mm); diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c index 0281f92eea3d..c3590b2e9592 100644 --- a/arch/h8300/kernel/setup.c +++ b/arch/h8300/kernel/setup.c @@ -69,8 +69,6 @@ void __init h8300_fdt_init(void *fdt, char *bootargs) static void __init bootmem_init(void) { - struct memblock_region *region; - memory_end = memory_start = 0; /* Find main memory where is the kernel */ diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 279252e3e0f7..da22a35e6f03 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -302,7 +302,7 @@ config NODES_SHIFT int "Max num nodes shift(3-10)" range 3 10 default "10" - depends on NEED_MULTIPLE_NODES + depends on NUMA help This option specifies the maximum number of nodes in your SSI system. MAX_NUMNODES will be 2^(This value). diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h index b1d87955e8cc..5c51fceedaf9 100644 --- a/arch/ia64/include/asm/pal.h +++ b/arch/ia64/include/asm/pal.h @@ -1086,7 +1086,7 @@ static inline long ia64_pal_freq_base(unsigned long *platform_base_freq) /* * Get the ratios for processor frequency, bus frequency and interval timer to - * to base frequency of the platform + * the base frequency of the platform */ static inline s64 ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio, diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 5f620e66384e..864775970c50 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -26,7 +26,7 @@ * the queue, and the other indicating the current tail. The lock is acquired * by atomically noting the tail and incrementing it by one (thus adding * ourself to the queue and noting our position), then waiting until the head - * becomes equal to the the initial value of the tail. + * becomes equal to the initial value of the tail. * The pad bits in the middle are used to prevent the next_ticket number * overflowing into the now_serving number. * diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h index 2a88c7204e52..809ddb6896db 100644 --- a/arch/ia64/include/asm/uv/uv_hub.h +++ b/arch/ia64/include/asm/uv/uv_hub.h @@ -257,7 +257,7 @@ static inline int uv_numa_blade_id(void) return 0; } -/* Convert a cpu number to the the UV blade number */ +/* Convert a cpu number to the UV blade number */ static inline int uv_cpu_to_blade_id(int cpu) { return 0; diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S index 58233bb7976d..1fd61b78fb29 100644 --- a/arch/ia64/kernel/efi_stub.S +++ b/arch/ia64/kernel/efi_stub.S @@ -7,7 +7,7 @@ * * This stub allows us to make EFI calls in physical mode with interrupts * turned off. We need this because we can't call SetVirtualMap() until - * the kernel has booted far enough to allow allocation of struct vma_struct + * the kernel has booted far enough to allow allocation of struct vm_area_struct * entries (which we would need to map stuff with memory attributes other * than uncached or writeback...). 
Since the GetTime() service gets called * earlier than that, we need to be able to make physical mode EFI calls from diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 36a69b4e6169..5bfc79be4cef 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -343,7 +343,7 @@ init_record_index_pools(void) /* - 2 - */ sect_min_size = sal_log_sect_min_sizes[0]; - for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++) + for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++) if (sect_min_size > sal_log_sect_min_sizes[i]) sect_min_size = sal_log_sect_min_sizes[i]; diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 09fc385c2acd..3639e0a7cb3b 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -3,9 +3,8 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * This file contains NUMA specific variables and functions which can - * be split away from DISCONTIGMEM and are used on NUMA machines with - * contiguous memory. + * This file contains NUMA specific variables and functions which are used on + * NUMA machines with contiguous memory. * 2002/08/07 Erich Focht <efocht@ess.nec.de> * Populate cpu entries in sysfs for non-numa systems as well * Intel Corporation - Ashok Raj diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index 46b6e5f3a40f..d6579ec3ea32 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -3,9 +3,8 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * This file contains NUMA specific variables and functions which can - * be split away from DISCONTIGMEM and are used on NUMA machines with - * contiguous memory. + * This file contains NUMA specific variables and functions which are used on + * NUMA machines with contiguous memory. * * 2002/08/07 Erich Focht <efocht@ess.nec.de> */ diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu index f4d23977d2a5..29e946394fdb 100644 --- a/arch/m68k/Kconfig.cpu +++ b/arch/m68k/Kconfig.cpu @@ -408,10 +408,6 @@ config SINGLE_MEMORY_CHUNK order" to save memory that could be wasted for unused memory map. Say N if not sure. -config ARCH_DISCONTIGMEM_ENABLE - depends on BROKEN - def_bool MMU && !SINGLE_MEMORY_CHUNK - config FORCE_MAX_ZONEORDER int "Maximum zone order" if ADVANCED depends on !SINGLE_MEMORY_CHUNK @@ -451,11 +447,6 @@ config M68K_L2_CACHE depends on MAC default y -config NODES_SHIFT - int - default "3" - depends on DISCONTIGMEM - config CPU_HAS_NO_BITFIELDS bool @@ -553,4 +544,3 @@ config CACHE_COPYBACK The ColdFire CPU cache is set into Copy-back mode. 
endchoice endif - diff --git a/arch/m68k/include/asm/mmzone.h b/arch/m68k/include/asm/mmzone.h deleted file mode 100644 index 64573fe8e60d..000000000000 --- a/arch/m68k/include/asm/mmzone.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_M68K_MMZONE_H_ -#define _ASM_M68K_MMZONE_H_ - -extern pg_data_t pg_data_map[]; - -#define NODE_DATA(nid) (&pg_data_map[nid]) -#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map) - -#endif /* _ASM_M68K_MMZONE_H_ */ diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h index 97087dd3ca6d..2f1c54e4725d 100644 --- a/arch/m68k/include/asm/page.h +++ b/arch/m68k/include/asm/page.h @@ -62,7 +62,7 @@ extern unsigned long _ramend; #include <asm/page_no.h> #endif -#if !defined(CONFIG_MMU) || defined(CONFIG_DISCONTIGMEM) +#ifndef CONFIG_MMU #define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) #define __pfn_to_phys(pfn) PFN_PHYS(pfn) #endif diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h index 2411ea9ef578..a5b459bcb7d8 100644 --- a/arch/m68k/include/asm/page_mm.h +++ b/arch/m68k/include/asm/page_mm.h @@ -126,26 +126,6 @@ static inline void *__va(unsigned long x) extern int m68k_virt_to_node_shift; -#ifndef CONFIG_DISCONTIGMEM -#define __virt_to_node(addr) (&pg_data_map[0]) -#else -extern struct pglist_data *pg_data_table[]; - -static inline __attribute_const__ int __virt_to_node_shift(void) -{ - int shift; - - asm ( - "1: moveq #0,%0\n" - m68k_fixup(%c1, 1b) - : "=d" (shift) - : "i" (m68k_fixup_vnode_shift)); - return shift; -} - -#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()]) -#endif - #define virt_to_page(addr) ({ \ pfn_to_page(virt_to_pfn(addr)); \ }) @@ -153,23 +133,8 @@ static inline __attribute_const__ int __virt_to_node_shift(void) pfn_to_virt(page_to_pfn(page)); \ }) -#ifdef CONFIG_DISCONTIGMEM -#define pfn_to_page(pfn) ({ \ - unsigned long __pfn = (pfn); \ - struct pglist_data *pgdat; \ - pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \ - pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ -}) -#define page_to_pfn(_page) ({ \ - const struct page *__p = (_page); \ - struct pglist_data *pgdat; \ - pgdat = &pg_data_map[page_to_nid(__p)]; \ - ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ -}) -#else #define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT) #include <asm-generic/memory_model.h> -#endif #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn)) diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h index 5337bc2c262f..a6318ccd308f 100644 --- a/arch/m68k/include/asm/tlbflush.h +++ b/arch/m68k/include/asm/tlbflush.h @@ -263,7 +263,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr BUG(); } -static inline void flush_tlb_range(struct mm_struct *mm, +static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { BUG(); diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c index f55bdcb8e4f1..bd0274c7592e 100644 --- a/arch/m68k/kernel/sys_m68k.c +++ b/arch/m68k/kernel/sys_m68k.c @@ -402,8 +402,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) * to this process. 
*/ mmap_read_lock(current->mm); - vma = find_vma(current->mm, addr); - if (!vma || addr < vma->vm_start || addr + len > vma->vm_end) + vma = vma_lookup(current->mm, addr); + if (!vma || addr + len > vma->vm_end) goto out_unlock; } diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 1759ab875d47..5d749e188246 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -44,28 +44,8 @@ EXPORT_SYMBOL(empty_zero_page); int m68k_virt_to_node_shift; -#ifdef CONFIG_DISCONTIGMEM -pg_data_t pg_data_map[MAX_NUMNODES]; -EXPORT_SYMBOL(pg_data_map); - -pg_data_t *pg_data_table[65]; -EXPORT_SYMBOL(pg_data_table); -#endif - void __init m68k_setup_node(int node) { -#ifdef CONFIG_DISCONTIGMEM - struct m68k_mem_info *info = m68k_memory + node; - int i, end; - - i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift(); - end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift(); - for (; i <= end; i++) { - if (pg_data_table[i]) - pr_warn("overlap at %u for chunk %u\n", i, node); - pg_data_table[i] = pg_data_map + node; - } -#endif node_set_online(node); } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index ed51970c08e7..4704a16c2e44 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2867,7 +2867,7 @@ config RANDOMIZE_BASE_MAX_OFFSET config NODES_SHIFT int default "6" - depends on NEED_MULTIPLE_NODES + depends on NUMA config HW_PERF_EVENTS bool "Enable hardware performance counter support for perf events" diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h index b826b8473e95..602a21aee9d4 100644 --- a/arch/mips/include/asm/mmzone.h +++ b/arch/mips/include/asm/mmzone.h @@ -8,7 +8,7 @@ #include <asm/page.h> -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA # include <mmzone.h> #endif @@ -20,10 +20,4 @@ #define nid_to_addrbase(nid) 0 #endif -#ifdef CONFIG_DISCONTIGMEM - -#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT) - -#endif /* CONFIG_DISCONTIGMEM */ - #endif /* _ASM_MMZONE_H_ */ diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 195ff4e9771f..96bc798c1ec1 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -239,7 +239,7 @@ static inline int pfn_valid(unsigned long pfn) /* pfn_valid is defined in linux/mmzone.h */ -#elif defined(CONFIG_NEED_MULTIPLE_NODES) +#elif defined(CONFIG_NUMA) #define pfn_valid(pfn) \ ({ \ diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 0b4e06303c55..6f07362de5ce 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -784,7 +784,6 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) { int si_code; - struct vm_area_struct *vma; switch (sig) { case 0: @@ -800,8 +799,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) case SIGSEGV: mmap_read_lock(current->mm); - vma = find_vma(current->mm, (unsigned long)fault_addr); - if (vma && (vma->vm_start <= (unsigned long)fault_addr)) + if (vma_lookup(current->mm, (unsigned long)fault_addr)) si_code = SEGV_ACCERR; else si_code = SEGV_MAPERR; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index c36358758969..19347dc6bbf8 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -394,7 +394,7 @@ void maar_init(void) } } -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; @@ -454,9 +454,6 @@ void __init mem_init(void) 
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT)); #ifdef CONFIG_HIGHMEM -#ifdef CONFIG_DISCONTIGMEM -#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet" -#endif max_mapnr = highend_pfn ? highend_pfn : max_low_pfn; #else max_mapnr = max_low_pfn; @@ -476,7 +473,7 @@ void __init mem_init(void) 0x80000000 - 4, KCORE_TEXT); #endif } -#endif /* !CONFIG_NEED_MULTIPLE_NODES */ +#endif /* !CONFIG_NUMA */ void free_init_pages(const char *what, unsigned long begin, unsigned long end) { diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h index 940d32842793..62faafbc28e4 100644 --- a/arch/nds32/include/asm/memory.h +++ b/arch/nds32/include/asm/memory.h @@ -76,18 +76,12 @@ * virt_to_page(k) convert a _valid_ virtual address to struct page * * virt_addr_valid(k) indicates whether a virtual address is valid */ -#ifndef CONFIG_DISCONTIGMEM - #define ARCH_PFN_OFFSET PHYS_PFN_OFFSET #define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) #define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) #define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) -#else /* CONFIG_DISCONTIGMEM */ -#error CONFIG_DISCONTIGMEM is not supported yet. -#endif /* !CONFIG_DISCONTIGMEM */ - #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #endif diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h index 185dcd3731ed..dbf030365ab4 100644 --- a/arch/openrisc/include/asm/tlbflush.h +++ b/arch/openrisc/include/asm/tlbflush.h @@ -25,7 +25,7 @@ * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(mm, start, end) flushes a range of pages + * - flush_tlb_range(vma, start, end) flushes a range of pages */ extern void local_flush_tlb_all(void); extern void local_flush_tlb_mm(struct mm_struct *mm); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 088dd2afcfe4..14b132cf95e2 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -671,7 +671,7 @@ config NODES_SHIFT int default "8" if PPC64 default "4" - depends on NEED_MULTIPLE_NODES + depends on NUMA config USE_PERCPU_NUMA_NODE_ID def_bool y diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h index 6cda76b57c5d..4c6c6dbd182f 100644 --- a/arch/powerpc/include/asm/mmzone.h +++ b/arch/powerpc/include/asm/mmzone.h @@ -18,7 +18,7 @@ * flags field of the struct page */ -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA extern struct pglist_data *node_data[]; /* @@ -41,7 +41,7 @@ u64 memory_hotplug_max(void); #else #define memory_hotplug_max() memblock_end_of_DRAM() -#endif /* CONFIG_NEED_MULTIPLE_NODES */ +#endif /* CONFIG_NUMA */ #ifdef CONFIG_FA_DUMP #define __HAVE_ARCH_RESERVED_KERNEL_PAGES #endif diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e42b85e4f1aa..a35fbf4d0bce 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -788,7 +788,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size, size_t align) { const unsigned long goal = __pa(MAX_DMA_ADDRESS); -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA int node = early_cpu_to_node(cpu); void *ptr; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 6c6e4d934d86..7ddc2d32c39e 100644 --- a/arch/powerpc/kernel/smp.c +++ 
b/arch/powerpc/kernel/smp.c @@ -1047,7 +1047,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu), GFP_KERNEL, cpu_to_node(cpu)); -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA /* * numa_node_id() works after this. */ diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index 56da5eb2b923..48525e8b5730 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -68,11 +68,11 @@ void machine_kexec_cleanup(struct kimage *image) void arch_crash_save_vmcoreinfo(void) { -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA VMCOREINFO_SYMBOL(node_data); VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); #endif -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA VMCOREINFO_SYMBOL(contig_page_data); #endif #if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index cd544a46183e..260e860d53a2 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4924,8 +4924,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) /* Look up the VMA for the start of this memory slot */ hva = memslot->userspace_addr; mmap_read_lock(kvm->mm); - vma = find_vma(kvm->mm, hva); - if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) + vma = vma_lookup(kvm->mm, hva); + if (!vma || (vma->vm_flags & VM_IO)) goto up_out; psize = vma_kernel_pagesize(vma); diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index b898a596db42..a7061ee3b157 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -615,7 +615,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot, /* Fetch the VMA if addr is not in the latest fetched one */ if (!vma || addr >= vma->vm_end) { - vma = find_vma_intersection(kvm->mm, addr, addr+1); + vma = vma_lookup(kvm->mm, addr); if (!vma) { pr_err("Can't find VMA for gfn:0x%lx\n", gfn); break; diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index c3df3a8501d4..2ffcf540f08b 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -13,7 +13,7 @@ obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \ obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/ obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/ obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/ -obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o +obj-$(CONFIG_NUMA) += numa.o obj-$(CONFIG_PPC_MM_SLICES) += slice.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index a6b36a40897a..c5e520c6f13b 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -127,7 +127,7 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, } #endif -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA void __init mem_topology_setup(void) { max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; @@ -162,7 +162,7 @@ static int __init mark_nonram_nosave(void) return 0; } -#else /* CONFIG_NEED_MULTIPLE_NODES */ +#else /* CONFIG_NUMA */ static int __init mark_nonram_nosave(void) { return 0; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 18ec0f9bb8d5..15f9490a7aad 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -332,7 +332,7 @@ config NODES_SHIFT int "Maximum NUMA Nodes (as a power of 2)" range 1 10 default "2" - depends on NEED_MULTIPLE_NODES + depends on NUMA help Specify the maximum number of NUMA Nodes available on the target system. 
Increases memory reserved to accommodate various tables. diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b4c7c34069f8..707afbcd81c2 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -475,7 +475,7 @@ config NUMA config NODES_SHIFT int - depends on NEED_MULTIPLE_NODES + depends on NUMA default "1" config SCHED_SMT diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 29c7ecd5ad1d..b38f7b781564 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -344,8 +344,6 @@ static inline int is_module_addr(void *addr) #define PTRS_PER_P4D _CRST_ENTRIES #define PTRS_PER_PGD _CRST_ENTRIES -#define MAX_PTRS_PER_P4D PTRS_PER_P4D - /* * Segment table and region3 table entry encoding * (R = read-only, I = invalid, y = young bit): diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h index 6552a088dc97..7b8dead2723d 100644 --- a/arch/sh/include/asm/mmzone.h +++ b/arch/sh/include/asm/mmzone.h @@ -2,7 +2,7 @@ #ifndef __ASM_SH_MMZONE_H #define __ASM_SH_MMZONE_H -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA #include <linux/numa.h> extern struct pglist_data *node_data[]; @@ -31,7 +31,7 @@ static inline void setup_bootmem_node(int nid, unsigned long start, unsigned long end) { } -#endif /* CONFIG_NEED_MULTIPLE_NODES */ +#endif /* CONFIG_NUMA */ /* Platform specific mem init */ void __init plat_mem_setup(void); diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c index 7a989eed3b18..76af6db9daa2 100644 --- a/arch/sh/kernel/topology.c +++ b/arch/sh/kernel/topology.c @@ -46,7 +46,7 @@ static int __init topology_init(void) { int i, ret; -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA for_each_online_node(i) register_one_node(i); #endif diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index d551a9cac41e..ba569cfb4368 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -120,7 +120,7 @@ config NODES_SHIFT int default "3" if CPU_SUBTYPE_SHX3 default "1" - depends on NEED_MULTIPLE_NODES + depends on NUMA config ARCH_FLATMEM_ENABLE def_bool y diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 168d7d4dd735..ce26c7f8950a 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -211,7 +211,7 @@ void __init allocate_pgdat(unsigned int nid) get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA NODE_DATA(nid) = memblock_alloc_try_nid( sizeof(struct pglist_data), SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 164a5254c91c..c72f52c704cd 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -265,7 +265,7 @@ config NODES_SHIFT int "Maximum NUMA Nodes (as a power of 2)" range 4 5 if SPARC64 default "5" - depends on NEED_MULTIPLE_NODES + depends on NUMA help Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accommodate various tables. 
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h index 6543fb97a849..a236d8aa893a 100644 --- a/arch/sparc/include/asm/mmzone.h +++ b/arch/sparc/include/asm/mmzone.h @@ -2,7 +2,7 @@ #ifndef _SPARC64_MMZONE_H #define _SPARC64_MMZONE_H -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA #include <linux/cpumask.h> @@ -13,6 +13,6 @@ extern struct pglist_data *node_data[]; extern int numa_cpu_lookup_table[]; extern cpumask_t numa_cpumask_lookup_table[]; -#endif /* CONFIG_NEED_MULTIPLE_NODES */ +#endif /* CONFIG_NUMA */ #endif /* _SPARC64_MMZONE_H */ diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index ae5faa1d989d..0224d8f19ed6 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1543,7 +1543,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size, size_t align) { const unsigned long goal = __pa(MAX_DMA_ADDRESS); -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA int node = cpu_to_node(cpu); void *ptr; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index e454f179cf5d..06e938d03f3b 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -903,7 +903,7 @@ struct node_mem_mask { static struct node_mem_mask node_masks[MAX_NUMNODES]; static int num_node_masks; -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA struct mdesc_mlgroup { u64 node; @@ -1059,7 +1059,7 @@ static void __init allocate_node_data(int nid) { struct pglist_data *p; unsigned long start_pfn, end_pfn; -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); @@ -1080,7 +1080,7 @@ static void __init allocate_node_data(int nid) static void init_node_masks_nonnuma(void) { -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA int i; #endif @@ -1090,7 +1090,7 @@ static void init_node_masks_nonnuma(void) node_masks[0].match = 0; num_node_masks = 1; -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA for (i = 0; i < NR_CPUS; i++) numa_cpu_lookup_table[i] = 0; @@ -1098,7 +1098,7 @@ static void init_node_masks_nonnuma(void) #endif } -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(numa_cpu_lookup_table); @@ -2487,7 +2487,7 @@ int page_in_phys_avail(unsigned long paddr) static void __init register_page_bootmem_info(void) { -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA int i; for_each_online_node(i) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 86dae426798b..49ffb69e34dd 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1597,7 +1597,7 @@ config NODES_SHIFT default "10" if MAXSMP default "6" if X86_64 default "3" - depends on NEED_MULTIPLE_NODES + depends on NUMA help Specify the maximum number of NUMA Nodes available on the target system. Increases memory reserved to accommodate various tables. 
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index a09fc37ead9d..5e5b9fc2747f 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -203,7 +203,7 @@ static int load_aout_binary(struct linux_binprm *bprm) error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | - MAP_EXECUTABLE | MAP_32BIT, + MAP_32BIT, fd_offset); if (error != N_TXTADDR(ex)) @@ -212,7 +212,7 @@ static int load_aout_binary(struct linux_binprm *bprm) error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | - MAP_EXECUTABLE | MAP_32BIT, + MAP_32BIT, fd_offset + ex.a_text); if (error != N_DATADDR(ex)) return error; diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index bf7fe87a7e88..22791aadc085 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1257,19 +1257,28 @@ static void kill_me_maybe(struct callback_head *cb) { struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me); int flags = MF_ACTION_REQUIRED; + int ret; pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr); if (!p->mce_ripv) flags |= MF_MUST_KILL; - if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) && - !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) { + ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags); + if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) { set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page); sync_core(); return; } + /* + * -EHWPOISON from memory_failure() means that it already sent SIGBUS + * to the current process with the proper error info, so no need to + * send SIGBUS here again. + */ + if (ret == -EHWPOISON) + return; + if (p->mce_vaddr != (void __user *)-1l) { force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT); } else { diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h index 6e74f85b6264..fec43ca65065 100644 --- a/arch/x86/kernel/cpu/sgx/encl.h +++ b/arch/x86/kernel/cpu/sgx/encl.h @@ -91,8 +91,8 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr, { struct vm_area_struct *result; - result = find_vma(mm, addr); - if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start) + result = vma_lookup(mm, addr); + if (!result || result->vm_ops != &sgx_vm_ops) return -EINVAL; *vma = result; diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 0941d2f44f2a..78a32b956e81 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -66,7 +66,7 @@ EXPORT_SYMBOL(__per_cpu_offset); */ static bool __init pcpu_need_numa(void) { -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA pg_data_t *last = NULL; unsigned int cpu; @@ -101,7 +101,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, unsigned long align) { const unsigned long goal = __pa(MAX_DMA_ADDRESS); -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA int node = early_cpu_to_node(cpu); void *ptr; @@ -140,7 +140,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size) static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) { -#ifdef CONFIG_NEED_MULTIPLE_NODES +#ifdef CONFIG_NUMA if (early_cpu_to_node(from) == early_cpu_to_node(to)) return LOCAL_DISTANCE; else diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 21ffb03f6c72..74b78840182d 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -651,7 +651,7 @@ 
void __init find_low_pfn_range(void) highmem_pfn_init(); } -#ifndef CONFIG_NEED_MULTIPLE_NODES +#ifndef CONFIG_NUMA void __init initmem_init(void) { #ifdef CONFIG_HIGHMEM @@ -677,7 +677,7 @@ void __init initmem_init(void) setup_bootmem_allocator(); } -#endif /* !CONFIG_NEED_MULTIPLE_NODES */ +#endif /* !CONFIG_NUMA */ void __init setup_bootmem_allocator(void) { diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 37ce25ef92d6..493eb7083b1a 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -192,10 +192,6 @@ static inline unsigned long ___pa(unsigned long va) #define pfn_valid(pfn) \ ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) -#ifdef CONFIG_DISCONTIGMEM -# error CONFIG_DISCONTIGMEM not supported -#endif - #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h index 856e2da2e397..573df8cea200 100644 --- a/arch/xtensa/include/asm/tlbflush.h +++ b/arch/xtensa/include/asm/tlbflush.h @@ -26,8 +26,8 @@ * * - flush_tlb_all() flushes all processes TLB entries * - flush_tlb_mm(mm) flushes the specified mm context TLB entries - * - flush_tlb_page(mm, vmaddr) flushes a single page - * - flush_tlb_range(mm, start, end) flushes a range of pages + * - flush_tlb_page(vma, page) flushes a single page + * - flush_tlb_range(vma, vmaddr, end) flushes a range of pages */ void local_flush_tlb_all(void); |
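With DISCONTIGMEM gone, alpha, arc and m68k no longer carry private per-node pfn/page lookups; on a flat configuration the generic memory model supplies those conversions from a single contiguous mem_map[]. A rough sketch of what the generic code provides, simplified from asm-generic/memory_model.h (ARCH_PFN_OFFSET is the pfn of the first mapped page):

```c
/* Flat memory model, simplified: one contiguous mem_map[] covers all RAM. */
#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)

/*
 * Without multiple nodes, pfn_valid() reduces to a simple range check,
 * the same form the nds32 and xtensa headers above already use.
 */
#define pfn_valid(pfn) \
	((pfn) >= ARCH_PFN_OFFSET && (pfn) - ARCH_PFN_OFFSET < max_mapnr)
```

Architectures with large holes in their physical address space keep SPARSEMEM instead, which is why the alpha Kconfig hunk retains ARCH_SPARSEMEM_ENABLE while deleting the DISCONTIGMEM and NUMA options.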