Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--	arch/x86_64/mm/fault.c      |  34
-rw-r--r--	arch/x86_64/mm/init.c       | 167
-rw-r--r--	arch/x86_64/mm/ioremap.c    | 111
-rw-r--r--	arch/x86_64/mm/k8topology.c |   6
-rw-r--r--	arch/x86_64/mm/numa.c       |  32
-rw-r--r--	arch/x86_64/mm/pageattr.c   |  24
-rw-r--r--	arch/x86_64/mm/srat.c       |  83
7 files changed, 170 insertions, 287 deletions
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index ac8ea66ccb94..3751b4788e28 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -40,8 +40,7 @@
 #define PF_RSVD	(1<<3)
 #define PF_INSTR	(1<<4)
 
-#ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
 
 /* Hook to register for page fault notifications */
 int register_page_fault_notifier(struct notifier_block *nb)
@@ -49,11 +48,13 @@ int register_page_fault_notifier(struct notifier_block *nb)
 	vmalloc_sync_all();
 	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
 }
+EXPORT_SYMBOL_GPL(register_page_fault_notifier);
 
 int unregister_page_fault_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
 }
+EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
 
 static inline int notify_page_fault(enum die_val val, const char *str,
 			struct pt_regs *regs, long err, int trap, int sig)
@@ -67,13 +68,6 @@ static inline int notify_page_fault(enum die_val val, const char *str,
 	};
 	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
 }
-#else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	return NOTIFY_DONE;
-}
-#endif
 
 void bust_spinlocks(int yes)
 {
@@ -102,7 +96,7 @@ void bust_spinlocks(int yes)
 static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 				unsigned long error_code)
 {
-	unsigned char *instr;
+	unsigned char __user *instr;
 	int scan_more = 1;
 	int prefetch = 0;
 	unsigned char *max_instr;
@@ -111,7 +105,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 	if (error_code & PF_INSTR)
 		return 0;
 
-	instr = (unsigned char *)convert_rip_to_linear(current, regs);
+	instr = (unsigned char __user *)convert_rip_to_linear(current, regs);
 	max_instr = instr + 15;
 
 	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
@@ -122,7 +116,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 		unsigned char instr_hi;
 		unsigned char instr_lo;
 
-		if (__get_user(opcode, instr))
+		if (__get_user(opcode, (char __user *)instr))
 			break;
 
 		instr_hi = opcode & 0xf0;
@@ -160,7 +154,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 		case 0x00:
 			/* Prefetch instruction is 0x0F0D or 0x0F18 */
 			scan_more = 0;
-			if (__get_user(opcode, instr))
+			if (__get_user(opcode, (char __user *)instr))
 				break;
 			prefetch = (instr_lo == 0xF) &&
 				(opcode == 0x0D || opcode == 0x18);
@@ -176,7 +170,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
 static int bad_address(void *p)
 {
 	unsigned long dummy;
-	return __get_user(dummy, (unsigned long *)p);
+	return __get_user(dummy, (unsigned long __user *)p);
 }
 
 void dump_pagetable(unsigned long address)
@@ -250,7 +244,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
 
 int unhandled_signal(struct task_struct *tsk, int sig)
 {
-	if (tsk->pid == 1)
+	if (is_init(tsk))
 		return 1;
 	if (tsk->ptrace & PT_PTRACED)
 		return 0;
@@ -299,7 +293,7 @@ static int vmalloc_fault(unsigned long address)
 	if (pgd_none(*pgd))
 		set_pgd(pgd, *pgd_ref);
 	else
-		BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
+		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 
 	/* Below here mismatches are bugs because these lower tables
 	   are shared */
@@ -308,7 +302,7 @@ static int vmalloc_fault(unsigned long address)
 	pud_ref = pud_offset(pgd_ref, address);
 	if (pud_none(*pud_ref))
 		return -1;
-	if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
+	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
 		BUG();
 	pmd = pmd_offset(pud, address);
 	pmd_ref = pmd_offset(pud_ref, address);
@@ -470,7 +464,7 @@ good_area:
 		case PF_PROT:		/* read, present */
 			goto bad_area;
 		case 0:			/* read, not present */
-			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 				goto bad_area;
 	}
 
@@ -586,7 +580,7 @@ no_context:
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (current->pid == 1) {
+	if (is_init(current)) {
 		yield();
 		goto again;
 	}
@@ -641,7 +635,7 @@ void vmalloc_sync_all(void)
 			if (pgd_none(*pgd))
 				set_pgd(pgd, *pgd_ref);
 			else
-				BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 		}
 		spin_unlock(&pgd_lock);
 		set_bit(pgd_index(address), insync);
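The fault.c hunks above un-conditionalize the page-fault notifier chain (the CONFIG_KPROBES guard and its dummy fallback are gone), make the chain static, and export the register/unregister pair to modules. A minimal sketch of a module hooking the chain — hypothetical code, assuming the 2.6.18-era x86-64 headers where enum die_val's DIE_PAGE_FAULT and struct die_args live in <asm/kdebug.h>:

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* runs on every fault; must not sleep (atomic notifier chain) */
static int sample_fault_event(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		printk(KERN_DEBUG "page fault, error code %ld\n", args->err);
	return NOTIFY_DONE;		/* never consume the event here */
}

static struct notifier_block sample_fault_nb = {
	.notifier_call = sample_fault_event,
};

static int __init sample_init(void)
{
	/* vmalloc_sync_all() runs inside register_page_fault_notifier() */
	return register_page_fault_notifier(&sample_fault_nb);
}

static void __exit sample_exit(void)
{
	unregister_page_fault_notifier(&sample_fault_nb);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");			/* both exports are _GPL */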
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index d14fb2dfbfc4..19c72520a868 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -229,7 +229,6 @@ __init void *early_ioremap(unsigned long addr, unsigned long size)
 
 	/* actually usually some more */
 	if (size >= LARGE_PAGE_SIZE) {
-		printk("SMBIOS area too long %lu\n", size);
 		return NULL;
 	}
 	set_pmd(temp_mappings[0].pmd,  __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
@@ -250,12 +249,13 @@ __init void early_iounmap(void *addr, unsigned long size)
 }
 
 static void __meminit
-phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
-	int i;
+	int i = pmd_index(address);
 
-	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
 		unsigned long entry;
+		pmd_t *pmd = pmd_page + pmd_index(address);
 
 		if (address >= end) {
 			if (!after_bootmem)
@@ -263,6 +263,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 			set_pmd(pmd, __pmd(0));
 			break;
 		}
+
+		if (pmd_val(*pmd))
+			continue;
+
 		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
 		entry &= __supported_pte_mask;
 		set_pmd(pmd, __pmd(entry));
@@ -272,45 +276,41 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 static void __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
-	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-
-	if (pmd_none(*pmd)) {
-		spin_lock(&init_mm.page_table_lock);
-		phys_pmd_init(pmd, address, end);
-		spin_unlock(&init_mm.page_table_lock);
-		__flush_tlb_all();
-	}
+	pmd_t *pmd = pmd_offset(pud,0);
+	spin_lock(&init_mm.page_table_lock);
+	phys_pmd_init(pmd, address, end);
+	spin_unlock(&init_mm.page_table_lock);
+	__flush_tlb_all();
 }
 
-static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
-	long i = pud_index(address);
+	int i = pud_index(addr);
 
-	pud = pud + i;
-	if (after_bootmem && pud_val(*pud)) {
-		phys_pmd_update(pud, address, end);
-		return;
-	}
-
-	for (; i < PTRS_PER_PUD; pud++, i++) {
+	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
 		int map;
-		unsigned long paddr, pmd_phys;
+		unsigned long pmd_phys;
+		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
 
-		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
-		if (paddr >= end)
+		if (addr >= end)
 			break;
 
-		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
+		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
 			set_pud(pud, __pud(0));
 			continue;
 		}
 
+		if (pud_val(*pud)) {
+			phys_pmd_update(pud, addr, end);
+			continue;
+		}
+
 		pmd = alloc_low_page(&map, &pmd_phys);
 		spin_lock(&init_mm.page_table_lock);
 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-		phys_pmd_init(pmd, paddr, end);
+		phys_pmd_init(pmd, addr, end);
 		spin_unlock(&init_mm.page_table_lock);
 		unmap_low_page(map);
 	}
@@ -403,69 +403,15 @@ void __cpuinit zap_low_mappings(int cpu)
 	__flush_tlb_all();
 }
 
-/* Compute zone sizes for the DMA and DMA32 zones in a node. */
-__init void
-size_zones(unsigned long *z, unsigned long *h,
-	   unsigned long start_pfn, unsigned long end_pfn)
-{
-	int i;
-	unsigned long w;
-
-	for (i = 0; i < MAX_NR_ZONES; i++)
-		z[i] = 0;
-
-	if (start_pfn < MAX_DMA_PFN)
-		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
-	if (start_pfn < MAX_DMA32_PFN) {
-		unsigned long dma32_pfn = MAX_DMA32_PFN;
-		if (dma32_pfn > end_pfn)
-			dma32_pfn = end_pfn;
-		z[ZONE_DMA32] = dma32_pfn - start_pfn;
-	}
-	z[ZONE_NORMAL] = end_pfn - start_pfn;
-
-	/* Remove lower zones from higher ones. */
-	w = 0;
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		if (z[i])
-			z[i] -= w;
-		w += z[i];
-	}
-
-	/* Compute holes */
-	w = start_pfn;
-	for (i = 0; i < MAX_NR_ZONES; i++) {
-		unsigned long s = w;
-		w += z[i];
-		h[i] = e820_hole_size(s, w);
-	}
-
-	/* Add the space pace needed for mem_map to the holes too. */
-	for (i = 0; i < MAX_NR_ZONES; i++)
-		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
-
-	/* The 16MB DMA zone has the kernel and other misc mappings.
-	   Account them too */
-	if (h[ZONE_DMA]) {
-		h[ZONE_DMA] += dma_reserve;
-		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
-			printk(KERN_WARNING
-				"Kernel too large and filling up ZONE_DMA?\n");
-			h[ZONE_DMA] = z[ZONE_DMA];
-		}
-	}
-}
-
 #ifndef CONFIG_NUMA
 void __init paging_init(void)
 {
-	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
-
+	unsigned long max_zone_pfns[MAX_NR_ZONES] = {MAX_DMA_PFN,
+							MAX_DMA32_PFN,
+							end_pfn};
 	memory_present(0, 0, end_pfn);
 	sparse_init();
-	size_zones(zones, holes, 0, end_pfn);
-	free_area_init_node(0, NODE_DATA(0), zones,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
+	free_area_init_nodes(max_zone_pfns);
 }
 #endif
 
@@ -517,36 +463,23 @@ void online_page(struct page *page)
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
- * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
- *	via probe interface of sysfs. If acpi notifies hot-add event, then it
- *	can tell node id by searching dsdt. But, probe interface doesn't have
- *	node id. So, return 0 as node id at this time.
- */
-#ifdef CONFIG_NUMA
-int memory_add_physaddr_to_nid(u64 start)
-{
-	return 0;
-}
-#endif
-
-/*
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
  */
 int arch_add_memory(int nid, u64 start, u64 size)
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
-	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
+	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
+	init_memory_mapping(start, (start + size -1));
+
 	ret = __add_pages(zone, start_pfn, nr_pages);
 	if (ret)
 		goto error;
 
-	init_memory_mapping(start, (start + size -1));
-
 	return ret;
 error:
 	printk("%s: Problem encountered in __add_pages!\n", __func__);
@@ -560,7 +493,24 @@ int remove_memory(u64 start, u64 size)
 }
 EXPORT_SYMBOL_GPL(remove_memory);
 
-#else /* CONFIG_MEMORY_HOTPLUG */
+#ifndef CONFIG_ACPI_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#ifndef CONFIG_ACPI_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+	return 0;
+}
+#endif
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 /*
  * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
  * just online the pages.
@@ -586,7 +536,7 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
 	}
 	return err;
 }
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif
 
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
 			 kcore_vsyscall;
@@ -597,12 +547,6 @@ void __init mem_init(void)
 
 	pci_iommu_alloc();
 
-	/* How many end-of-memory variables you have, grandma! */
-	max_low_pfn = end_pfn;
-	max_pfn = end_pfn;
-	num_physpages = end_pfn;
-	high_memory = (void *) __va(end_pfn * PAGE_SIZE);
-
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 
@@ -614,7 +558,8 @@ void __init mem_init(void)
 #else
 	totalram_pages = free_all_bootmem();
 #endif
-	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
+	reservedpages = end_pfn - totalram_pages -
+					absent_pages_in_range(0, end_pfn);
 
 	after_bootmem = 1;
 
@@ -714,8 +659,10 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 #else
 	reserve_bootmem(phys, len);
 #endif
-	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
+	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
 		dma_reserve += len / PAGE_SIZE;
+		set_dma_reserve(dma_reserve);
+	}
 }
 
 int kern_addr_valid(unsigned long addr)
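The init.c changes above retire the per-arch size_zones()/e820_hole_size() arithmetic: paging_init() now only reports the highest PFN each zone may reach, and the generic free_area_init_nodes() derives per-node spans and holes from the e820 active ranges registered elsewhere in this patch. A hypothetical illustration of the max_zone_pfns convention (the 8GB machine is made up; the two constants are the real x86-64 values):

	/* one slot per zone, each holding that zone's last-possible PFN */
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {
		MAX_DMA_PFN,	/* 0x1000:   ZONE_DMA   ends at 16MB */
		MAX_DMA32_PFN,	/* 0x100000: ZONE_DMA32 ends at 4GB  */
		0x200000,	/* end_pfn:  ZONE_NORMAL ends at 8GB here */
	};
	free_area_init_nodes(max_zone_pfns);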
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 45d7d823c3b8..c6e5e8d401a4 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -12,117 +12,16 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
-#include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 #include <asm/proto.h>
 
 #define ISA_START_ADDRESS      0xa0000
 #define ISA_END_ADDRESS                0x100000
 
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-	unsigned long pfn;
-
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	if (address >= end)
-		BUG();
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
-					_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-
-	address &= ~PUD_MASK;
-	end = address + size;
-	if (end > PUD_SIZE)
-		end = PUD_SIZE;
-	phys_addr -= address;
-	if (address >= end)
-		BUG();
-	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static inline int remap_area_pud(pud_t * pud, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	phys_addr -= address;
-	if (address >= end)
-		BUG();
-	do {
-		pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			return -ENOMEM;
-		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
-		address = (address + PUD_SIZE) & PUD_MASK;
-		pud++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-			    unsigned long size, unsigned long flags)
-{
-	int error;
-	pgd_t *pgd;
-	unsigned long end = address + size;
-
-	phys_addr -= address;
-	pgd = pgd_offset_k(address);
-	flush_cache_all();
-	if (address >= end)
-		BUG();
-	do {
-		pud_t *pud;
-		pud = pud_alloc(&init_mm, pgd, address);
-		error = -ENOMEM;
-		if (!pud)
-			break;
-		if (remap_area_pud(pud, address, end - address,
-				   phys_addr + address, flags))
-			break;
-		error = 0;
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		pgd++;
-	} while (address && (address < end));
-	flush_tlb_all();
-	return error;
-}
-
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
@@ -165,6 +64,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 	void * addr;
 	struct vm_struct * area;
 	unsigned long offset, last_addr;
+	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -194,6 +94,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 	}
 #endif
 
+	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
+			  | _PAGE_DIRTY | _PAGE_ACCESSED | flags);
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -209,7 +111,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 		return NULL;
 	area->phys_addr = phys_addr;
 	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+			       phys_addr, pgprot)) {
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
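With remap_area_pte/pmd/pud/pages() deleted, __ioremap() above just assembles a pgprot_t and hands the page-table population to the generic ioremap_page_range() from lib/ioremap.c. Callers see no difference; a sketch of typical driver-side usage (the PCI device, BAR 0 and register offsets are hypothetical):

	void __iomem *regs;
	u32 status;

	regs = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x10);		/* hypothetical control register */
	status = readl(regs + 0x14);	/* hypothetical status register */

	iounmap(regs);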
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index 7c45c2d2b8b2..b5b8dba28b4e 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -54,6 +54,9 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 
 	nodes_clear(nodes_parsed);
 
+	if (!early_pci_allowed())
+		return -1;
+
 	nb = find_northbridge();
 	if (nb < 0)
 		return nb;
@@ -146,6 +149,9 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 
 	nodes[nodeid].start = base;
 	nodes[nodeid].end = limit;
+	e820_register_active_regions(nodeid,
+			nodes[nodeid].start >> PAGE_SHIFT,
+			nodes[nodeid].end >> PAGE_SHIFT);
 
 	prevbase = base;
 
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index b2fac14baac0..829a008bd39b 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -161,7 +161,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 					 bootmap_start >> PAGE_SHIFT,
 					 start_pfn, end_pfn);
 
-	e820_bootmem_free(NODE_DATA(nodeid), start, end);
+	free_bootmem_with_active_regions(nodeid, end);
 
 	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
 	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
@@ -175,13 +175,11 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
 void __init setup_node_zones(int nodeid)
 {
 	unsigned long start_pfn, end_pfn, memmapsize, limit;
-	unsigned long zones[MAX_NR_ZONES];
-	unsigned long holes[MAX_NR_ZONES];
 
 	start_pfn = node_start_pfn(nodeid);
 	end_pfn = node_end_pfn(nodeid);
 
-	Dprintk(KERN_INFO "Setting up node %d %lx-%lx\n",
+	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
 		nodeid, start_pfn, end_pfn);
 
 	/* Try to allocate mem_map at end to not fill up precious <4GB
@@ -195,10 +193,6 @@ void __init setup_node_zones(int nodeid)
 				round_down(limit - memmapsize, PAGE_SIZE),
 				limit);
 #endif
-
-	size_zones(zones, holes, start_pfn, end_pfn);
-	free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
-			    start_pfn, holes);
 }
 
 void __init numa_init_array(void)
@@ -225,7 +219,7 @@ void __init numa_init_array(void)
 int numa_fake __initdata = 0;
 
 /* Numa emulation */
-static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
+static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 {
 	int i;
 	struct bootnode nodes[MAX_NUMNODES];
@@ -259,8 +253,11 @@ static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
 		printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
 		return -1;
 	}
-	for_each_online_node(i)
+	for_each_online_node(i) {
+		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
+						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+	}
 	numa_init_array();
 	return 0;
 }
@@ -299,6 +296,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 	for (i = 0; i < NR_CPUS; i++)
 		numa_set_node(i, 0);
 	node_to_cpumask[0] = cpumask_of_cpu(0);
+	e820_register_active_regions(0, start_pfn, end_pfn);
 	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
 }
 
@@ -340,17 +338,23 @@ static void __init arch_sparse_init(void)
 void __init paging_init(void)
 {
 	int i;
+	unsigned long max_zone_pfns[MAX_NR_ZONES] = { MAX_DMA_PFN,
+		MAX_DMA32_PFN,
+		end_pfn};
 
 	arch_sparse_init();
 
 	for_each_online_node(i) {
 		setup_node_zones(i);
 	}
+
+	free_area_init_nodes(max_zone_pfns);
 }
 
-/* [numa=off] */
-__init int numa_setup(char *opt)
+static __init int numa_setup(char *opt)
 {
+	if (!opt)
+		return -EINVAL;
 	if (!strncmp(opt,"off",3))
 		numa_off = 1;
 #ifdef CONFIG_NUMA_EMU
@@ -366,9 +370,11 @@ __init int numa_setup(char *opt)
 	if (!strncmp(opt,"hotadd=", 7))
 		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
 #endif
-	return 1;
+	return 0;
 }
 
+early_param("numa", numa_setup);
+
 /*
  * Setup early cpu_to_node.
  *
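numa_setup() above switches from the old __setup() convention (non-static handler returning 1 to mark the option consumed) to early_param(), which runs much earlier in boot and expects 0 on success or a negative errno. A sketch of the same pattern for a hypothetical "myopt=" boot parameter:

	static int myopt_disabled __initdata;

	static int __init myopt_setup(char *opt)
	{
		if (!opt)			/* "myopt" with no '=value' */
			return -EINVAL;
		if (!strncmp(opt, "off", 3))
			myopt_disabled = 1;
		return 0;
	}
	early_param("myopt", myopt_setup);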
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 2685b1f3671c..3e231d762aaa 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -108,8 +108,8 @@ static void revert_page(unsigned long address, pgprot_t ref_prot)
 	BUG_ON(pud_none(*pud));
 	pmd = pmd_offset(pud, address);
 	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-	pgprot_val(ref_prot) |= _PAGE_PSE;
 	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
+	large_pte = pte_mkhuge(large_pte);
 	set_pte((pte_t *)pmd, large_pte);
 }
 
@@ -119,32 +119,28 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 {
 	pte_t *kpte;
 	struct page *kpte_page;
-	unsigned kpte_flags;
 	pgprot_t ref_prot2;
 	kpte = lookup_address(address);
 	if (!kpte) return 0;
 	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-	kpte_flags = pte_val(*kpte);
 	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
-		if ((kpte_flags & _PAGE_PSE) == 0) {
+		if (!pte_huge(*kpte)) {
 			set_pte(kpte, pfn_pte(pfn, prot));
 		} else {
 			/*
 			 * split_large_page will take the reference for this
 			 * change_page_attr on the split page.
 			 */
-
 			struct page *split;
-			ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
-
+			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
 			split = split_large_page(address, prot, ref_prot2);
 			if (!split)
 				return -ENOMEM;
-			set_pte(kpte,mk_pte(split, ref_prot2));
+			set_pte(kpte, mk_pte(split, ref_prot2));
 			kpte_page = split;
-		}
+		}
 		page_private(kpte_page)++;
-	} else if ((kpte_flags & _PAGE_PSE) == 0) {
+	} else if (!pte_huge(*kpte)) {
 		set_pte(kpte, pfn_pte(pfn, ref_prot));
 		BUG_ON(page_private(kpte_page) == 0);
 		page_private(kpte_page)--;
@@ -190,10 +186,12 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
 	 * lowmem */
 	if (__pa(address) < KERNEL_TEXT_SIZE) {
 		unsigned long addr2;
-		pgprot_t prot2 = prot;
+		pgprot_t prot2;
 		addr2 = __START_KERNEL_map + __pa(address);
-		pgprot_val(prot2) &= ~_PAGE_NX;
-		err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
+		/* Make sure the kernel mappings stay executable */
+		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+		err = __change_page_attr(addr2, pfn, prot2,
+					 PAGE_KERNEL_EXEC);
 	}
 	}
 	up_write(&init_mm.mmap_sem);
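The pageattr.c hunks above replace open-coded _PAGE_PSE twiddling with the pte_huge()/pte_clrhuge()/pte_mkhuge()/pte_mkexec() accessors; the external interface is unchanged. For context, a sketch of a typical caller of that interface in this era (the struct page pointer is hypothetical, and PAGE_KERNEL_RO is assumed available, as CONFIG_DEBUG_RODATA uses it):

	static void sample_protect(struct page *page)
	{
		change_page_attr(page, 1, PAGE_KERNEL_RO);	/* write-protect */
		global_flush_tlb();

		/* ... the page is now read-only in the kernel mapping ... */

		change_page_attr(page, 1, PAGE_KERNEL);		/* restore */
		global_flush_tlb();
	}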
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 502fce65e96a..3cc0544e25f5 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -21,22 +21,15 @@
 #include <asm/numa.h>
 #include <asm/e820.h>
 
-#if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
-	defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \
-		&& !defined(CONFIG_MEMORY_HOTPLUG)
-#define RESERVE_HOTADD 1
-#endif
+int acpi_numa __initdata;
 
 static struct acpi_table_slit *acpi_slit;
 
 static nodemask_t nodes_parsed __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
+static struct bootnode nodes_add[MAX_NUMNODES];
 static int found_add_area __initdata;
 int hotadd_percent __initdata = 0;
-#ifndef RESERVE_HOTADD
-#define hotadd_percent 0	/* Ignore all settings */
-#endif
 
 /* Too small nodes confuse the VM badly. Usually they result
    from BIOS bugs. */
@@ -91,6 +84,7 @@ static __init void bad_srat(void)
 		apicid_to_node[i] = NUMA_NO_NODE;
 	for (i = 0; i < MAX_NUMNODES; i++)
 		nodes_add[i].start = nodes[i].end = 0;
+	remove_all_active_ranges();
 }
 
 static __init inline int srat_disabled(void)
@@ -157,7 +151,7 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 	       pxm, pa->apic_id, node);
 }
 
-#ifdef RESERVE_HOTADD
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 /*
  * Protect against too large hotadd areas that would fill up memory.
  */
@@ -173,7 +167,7 @@ static int hotadd_enough_memory(struct bootnode *nd)
 	if (mem < 0)
 		return 0;
 
-	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
+	allowed = (end_pfn - absent_pages_in_range(0, end_pfn)) * PAGE_SIZE;
 	allowed = (allowed / 100) * hotadd_percent;
 	if (allocated + mem > allowed) {
 		unsigned long range;
@@ -200,15 +194,37 @@ static int hotadd_enough_memory(struct bootnode *nd)
 	return 1;
 }
 
+static int update_end_of_memory(unsigned long end)
+{
+	found_add_area = 1;
+	if ((end >> PAGE_SHIFT) > end_pfn)
+		end_pfn = end >> PAGE_SHIFT;
+	return 1;
+}
+
+static inline int save_add_info(void)
+{
+	return hotadd_percent > 0;
+}
+#else
+int update_end_of_memory(unsigned long end) {return 0;}
+static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+static inline int save_add_info(void) {return 1;}
+#else
+static inline int save_add_info(void) {return 0;}
+#endif
+#endif
 /*
- * It is fine to add this area to the nodes data it will be used later
+ * Update nodes_add and decide if to include add are in the zone.
+ * Both SPARSE and RESERVE need nodes_add infomation.
  * This code supports one contigious hot add area per node.
  */
 static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 {
 	unsigned long s_pfn = start >> PAGE_SHIFT;
 	unsigned long e_pfn = end >> PAGE_SHIFT;
-	int changed = 0;
+	int ret = 0, changed = 0;
 	struct bootnode *nd = &nodes_add[node];
 
 	/* I had some trouble with strange memory hotadd regions breaking
@@ -223,8 +239,10 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 	}
 
 	/* This check might be a bit too strict, but I'm keeping it for now. */
-	if (e820_hole_size(s_pfn, e_pfn) != e_pfn - s_pfn) {
-		printk(KERN_ERR "SRAT: Hotplug area has existing memory\n");
+	if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
+		printk(KERN_ERR
+			"SRAT: Hotplug area %lu -> %lu has existing memory\n",
+			s_pfn, e_pfn);
 		return -1;
 	}
 
@@ -235,7 +253,6 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 
 	/* Looks good */
 
-	found_add_area = 1;
 	if (nd->start == nd->end) {
 		nd->start = start;
 		nd->end = end;
@@ -253,14 +270,12 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 		printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
 	}
 
-	if ((nd->end >> PAGE_SHIFT) > end_pfn)
-		end_pfn = nd->end >> PAGE_SHIFT;
+	ret = update_end_of_memory(nd->end);
 
 	if (changed)
 		printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
-	return 0;
+	return ret;
 }
-#endif
 
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 void __init
@@ -279,7 +294,7 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 	}
 	if (ma->flags.enabled == 0)
 		return;
-	if (ma->flags.hot_pluggable && hotadd_percent == 0)
+	if (ma->flags.hot_pluggable && !save_add_info())
 		return;
 	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
 	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
@@ -317,16 +332,18 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
 	       nd->start, nd->end);
+	e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
+						nd->end >> PAGE_SHIFT);
+	push_node_boundaries(node, nd->start >> PAGE_SHIFT,
+				nd->end >> PAGE_SHIFT);
 
-#ifdef RESERVE_HOTADD
-	if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) {
+	if (ma->flags.hot_pluggable && !reserve_hotadd(node, start, end) < 0) {
 		/* Ignore hotadd region. Undo damage */
 		printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
 		*nd = oldnode;
 		if ((nd->start | nd->end) == 0)
 			node_clear(node, nodes_parsed);
 	}
-#endif
 }
 
 /* Sanity check to catch more bad SRATs (they are amazingly common).
    Make sure the PXMs cover all memory. */
@@ -341,13 +358,12 @@ static int nodes_cover_memory(void)
 		unsigned long s = nodes[i].start >> PAGE_SHIFT;
 		unsigned long e = nodes[i].end >> PAGE_SHIFT;
 		pxmram += e - s;
-		pxmram -= e820_hole_size(s, e);
-		pxmram -= nodes_add[i].end - nodes_add[i].start;
+		pxmram -= absent_pages_in_range(s, e);
 		if ((long)pxmram < 0)
 			pxmram = 0;
 	}
 
-	e820ram = end_pfn - e820_hole_size(0, end_pfn);
+	e820ram = end_pfn - absent_pages_in_range(0, end_pfn);
 	/* We seem to lose 3 pages somewhere. Allow a bit of slack. */
 	if ((long)(e820ram - pxmram) >= 1*1024*1024) {
 		printk(KERN_ERR
@@ -450,3 +466,16 @@ int __node_distance(int a, int b)
 }
 
 EXPORT_SYMBOL(__node_distance);
+
+int memory_add_physaddr_to_nid(u64 start)
+{
+	int i, ret = 0;
+
+	for_each_node(i)
+		if (nodes_add[i].start <= start && nodes_add[i].end > start)
+			ret = i;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+
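The memory_add_physaddr_to_nid() added at the end of srat.c resolves a physical address against the SRAT hotplug ranges kept in nodes_add[], so hot-add code can pick the right node. A sketch of how a hot-add path might consume it (the address and size are hypothetical; add_memory(nid, start, size) is the hotplug interface of this era):

	u64 start = 0x100000000ULL;	/* hypothetical hot-added range at 4GB */
	u64 size  = 0x10000000ULL;	/* 256MB */
	int nid, ret;

	nid = memory_add_physaddr_to_nid(start);
	ret = add_memory(nid, start, size);
	if (ret)
		printk(KERN_ERR "hot-add on node %d failed: %d\n", nid, ret);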