Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/boot_ioremap_32.c |  2
-rw-r--r--   arch/x86/mm/discontig_32.c    |  2
-rw-r--r--   arch/x86/mm/fault_32.c        | 13
-rw-r--r--   arch/x86/mm/fault_64.c        | 22
-rw-r--r--   arch/x86/mm/numa_64.c         |  2
-rw-r--r--   arch/x86/mm/pageattr_64.c     | 15
-rw-r--r--   arch/x86/mm/srat_64.c         |  2
7 files changed, 26 insertions(+), 32 deletions(-)
diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
index 4de95a17a7d4..f14da2a53ece 100644
--- a/arch/x86/mm/boot_ioremap_32.c
+++ b/arch/x86/mm/boot_ioremap_32.c
@@ -10,7 +10,7 @@
/*
* We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
- * keeps that from happenning. If anyone has a better way, I'm listening.
+ * keeps that from happening. If anyone has a better way, I'm listening.
*
* boot_pte_t is defined only if this all works correctly
*/
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 13893772cc48..fe608a45ffb6 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -273,7 +273,7 @@ unsigned long __init setup_memory(void)
* When mapping a NUMA machine we allocate the node_mem_map arrays
* from node local memory. They are then mapped directly into KVA
* between zone normal and vmalloc space. Calculate the size of
- * this space and use it to adjust the boundry between ZONE_NORMAL
+ * this space and use it to adjust the boundary between ZONE_NORMAL
* and ZONE_HIGHMEM.
*/
find_max_pfn();
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 6555c3d14371..503dfc05111b 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -354,7 +354,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
@@ -362,7 +362,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
@@ -471,8 +471,8 @@ bad_area_nosemaphore:
printk_ratelimit()) {
printk("%s%s[%d]: segfault at %08lx eip %08lx "
"esp %08lx error %lx\n",
- tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
- tsk->comm, tsk->pid, address, regs->eip,
+ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+ tsk->comm, task_pid_nr(tsk), address, regs->eip,
regs->esp, error_code);
}
tsk->thread.cr2 = address;
@@ -564,7 +564,8 @@ no_context:
* it's allocated already.
*/
if ((page >> PAGE_SHIFT) < max_low_pfn
- && (page & _PAGE_PRESENT)) {
+ && (page & _PAGE_PRESENT)
+ && !(page & _PAGE_PSE)) {
page &= PAGE_MASK;
page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
& (PTRS_PER_PTE - 1)];
@@ -587,7 +588,7 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_init(tsk)) {
+ if (is_global_init(tsk)) {
yield();
down_read(&mm->mmap_sem);
goto survive;
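
The switch from tsk->pid / is_init() to task_pid_nr() / is_global_init() matters once PID namespaces exist: a task can be pid 1 inside its own namespace while carrying an ordinary global pid, and only the global numbering identifies the real init. A rough user-space illustration of that distinction (hypothetical demo code, needs CAP_SYS_ADMIN, not part of this patch):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

/* Child runs in a fresh PID namespace and therefore sees itself as pid 1,
 * while the parent sees its ordinary global pid. */
static int child(void *arg)
{
	(void)arg;
	printf("inside namespace: getpid() = %d\n", (int)getpid());
	return 0;
}

int main(void)
{
	char *stack = malloc(1024 * 1024);
	pid_t pid;

	if (!stack)
		return 1;

	/* stack grows down on x86, so pass the top of the allocation */
	pid = clone(child, stack + 1024 * 1024, CLONE_NEWPID | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");	/* typically EPERM without privilege */
		return 1;
	}
	printf("from parent: child's global pid = %d\n", (int)pid);
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}

Run as root, the child reports pid 1 while the parent prints a normal pid, which is exactly the ambiguity the "> 1" checks above must not trip over.
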
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 5e0e54906c48..644b4f7ece10 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -169,7 +169,7 @@ void dump_pagetable(unsigned long address)
pmd = pmd_offset(pud, address);
if (bad_address(pmd)) goto bad;
printk("PMD %lx ", pmd_val(*pmd));
- if (!pmd_present(*pmd)) goto ret;
+ if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
pte = pte_offset_kernel(pmd, address);
if (bad_address(pte)) goto bad;
@@ -285,7 +285,6 @@ static int vmalloc_fault(unsigned long address)
return 0;
}
-static int page_fault_trace;
int show_unhandled_signals = 1;
/*
@@ -354,10 +353,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if (likely(regs->eflags & X86_EFLAGS_IF))
local_irq_enable();
- if (unlikely(page_fault_trace))
- printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
- regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
-
if (unlikely(error_code & PF_RSVD))
pgtable_bad(address, regs, error_code);
@@ -378,7 +373,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
again:
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunatly, in the case of an
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
@@ -386,7 +381,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibilty of a deadlock.
+ * the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
@@ -488,7 +483,7 @@ bad_area_nosemaphore:
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) {
printk(
- "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
+ "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
tsk->comm, tsk->pid, address, regs->rip,
regs->rsp, error_code);
@@ -554,7 +549,7 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- if (is_init(current)) {
+ if (is_global_init(current)) {
yield();
goto again;
}
@@ -621,10 +616,3 @@ void vmalloc_sync_all(void)
BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
(__START_KERNEL & PGDIR_MASK)));
}
-
-static int __init enable_pagefaulttrace(char *str)
-{
- page_fault_trace = 1;
- return 1;
-}
-__setup("pagefaulttrace", enable_pagefaulttrace);
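
Both dump paths above gain the same guard: once a directory-level entry has the large-page bit set (_PAGE_PSE on 32-bit, pmd_large() on 64-bit), the entry maps the page itself and there is no lower-level table to index, so descending further would treat a huge-page frame as if it were a PTE array. A self-contained toy model of that check (plain user-space C with assumed names, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define ENT_PRESENT (1u << 0)
#define ENT_LARGE   (1u << 7)	/* mirrors _PAGE_PSE, bit 7 on x86 */

struct toy_mm {
	uint32_t pde;			/* one directory entry */
	const uint32_t *pte_table;	/* lower table, only valid for small pages */
};

/* Two-level walk that stops at a large-page entry instead of descending. */
static void dump_toy_entry(const struct toy_mm *mm, unsigned int pte_idx)
{
	printf("*pde = %08x ", mm->pde);
	if (!(mm->pde & ENT_PRESENT) || (mm->pde & ENT_LARGE)) {
		printf("\n");
		return;			/* nothing below this level */
	}
	printf("*pte = %08x\n", mm->pte_table[pte_idx]);
}

int main(void)
{
	const uint32_t ptes[1] = { 0x12345067 };
	struct toy_mm small = { .pde = 0x00001067, .pte_table = ptes };
	struct toy_mm large = { .pde = 0x00400000 | ENT_PRESENT | ENT_LARGE,
				.pte_table = NULL };

	dump_toy_entry(&small, 0);	/* descends into the PTE table */
	dump_toy_entry(&large, 0);	/* stops: large page, no PTE level */
	return 0;
}
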
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 5eec5e56d07f..3d6926ba8995 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -612,7 +612,7 @@ void __init init_cpu_to_node(void)
{
int i;
for (i = 0; i < NR_CPUS; i++) {
- u8 apicid = x86_cpu_to_apicid[i];
+ u8 apicid = x86_cpu_to_apicid_init[i];
if (apicid == BAD_APICID)
continue;
if (apicid_to_node[apicid] == NUMA_NO_NODE)
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 8a4f65bf956e..c40afbaaf93d 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -61,10 +61,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
return base;
}
-static void cache_flush_page(void *adr)
+void clflush_cache_range(void *adr, int size)
{
int i;
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
clflush(adr+i);
}
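
The rename from cache_flush_page() to clflush_cache_range() generalises the helper from one page to an arbitrary byte range, stepping through it one cache line at a time. A user-space sketch of the same pattern, with a hard-coded 64-byte line size standing in for the CPUID-reported boot_cpu_data.x86_clflush_size (hypothetical helper name, not the kernel function):

#include <emmintrin.h>	/* _mm_clflush(), SSE2 */
#include <stddef.h>

#define CACHE_LINE 64	/* assumed; the kernel reads the real stride from CPUID */

/* Flush one cache line per step across [adr, adr + size); clflush evicts
 * the whole line containing the given address. */
void flush_range(void *adr, size_t size)
{
	size_t i;

	for (i = 0; i < size; i += CACHE_LINE)
		_mm_clflush((char *)adr + i);
}

int main(void)
{
	static char buf[4096];

	flush_range(buf, sizeof(buf));	/* e.g. flush one page worth */
	return 0;
}
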
@@ -80,7 +80,7 @@ static void flush_kernel_map(void *arg)
asm volatile("wbinvd" ::: "memory");
else list_for_each_entry(pg, l, lru) {
void *adr = page_address(pg);
- cache_flush_page(adr);
+ clflush_cache_range(adr, PAGE_SIZE);
}
__flush_tlb_all();
}
@@ -230,9 +230,14 @@ void global_flush_tlb(void)
struct page *pg, *next;
struct list_head l;
- down_read(&init_mm.mmap_sem);
+ /*
+ * Write-protect the semaphore, to exclude two contexts
+ * doing a list_replace_init() call in parallel and to
+ * exclude new additions to the deferred_pages list:
+ */
+ down_write(&init_mm.mmap_sem);
list_replace_init(&deferred_pages, &l);
- up_read(&init_mm.mmap_sem);
+ up_write(&init_mm.mmap_sem);
flush_map(&l);
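
global_flush_tlb() detaches the whole deferred_pages list in one step with list_replace_init(), which rewrites the links of both heads and of the boundary nodes; the new comment explains why that must run under the write side of init_mm.mmap_sem. A minimal user-space model of what list_replace_init() does (simplified list_head stand-ins assumed here, not the kernel headers):

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

/* Move every node from 'old' onto 'new_head' and leave 'old' empty.
 * Several pointers are rewritten, so concurrent splicers or adders
 * must be excluded while this runs. */
static void list_replace_init(struct list_head *old, struct list_head *new_head)
{
	new_head->next = old->next;
	new_head->next->prev = new_head;
	new_head->prev = old->prev;
	new_head->prev->next = new_head;
	INIT_LIST_HEAD(old);
}

int main(void)
{
	struct list_head deferred, local, a, b;

	/* build deferred -> a -> b -> deferred by hand for the demo */
	INIT_LIST_HEAD(&deferred);
	a.prev = &deferred;  a.next = &b;
	b.prev = &a;         b.next = &deferred;
	deferred.next = &a;  deferred.prev = &b;

	list_replace_init(&deferred, &local);

	printf("deferred now empty: %s\n",
	       deferred.next == &deferred ? "yes" : "no");
	printf("local picked up the nodes: %s\n",
	       local.next == &a && local.prev == &b ? "yes" : "no");
	return 0;
}

Because the splice is not atomic, two contexts doing it in parallel, or one adding while another splices, could corrupt the list; that is the race the switch from the read side to the write side closes.
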
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 56089ccc3949..ea85172fc0cc 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -218,7 +218,7 @@ static inline int save_add_info(void) {return 0;}
/*
* Update nodes_add and decide if to include add are in the zone.
* Both SPARSE and RESERVE need nodes_add infomation.
- * This code supports one contigious hot add area per node.
+ * This code supports one contiguous hot add area per node.
*/
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{