author		Ingo Molnar <mingo@kernel.org>	2017-07-18 12:36:53 +0300
committer	Ingo Molnar <mingo@kernel.org>	2017-07-18 12:36:53 +0300
commit		1ed7d32763857fbdc8e406352404538e41050d22 (patch)
tree		f97c5d8f548141bf7cdec0a9d7f22adb61da6efc /arch/x86/mm
parent		660da7c9228f685b2ebe664f9fd69aaddcc420b5 (diff)
parent		27aac20574110abfd594175a668dc58b23b2b14a (diff)
download	linux-1ed7d32763857fbdc8e406352404538e41050d22.tar.xz
Merge branch 'x86/boot' into x86/mm, to pick up interacting changes
The SME patches we are about to apply add some E820 logic, so merge in pending E820 code changes first, to have a single code base.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/hugetlbpage.c	 2
-rw-r--r--	arch/x86/mm/init_32.c		 7
-rw-r--r--	arch/x86/mm/init_64.c		11
-rw-r--r--	arch/x86/mm/kasan_init_64.c	 7
-rw-r--r--	arch/x86/mm/mmap.c		 7
-rw-r--r--	arch/x86/mm/pageattr.c		 6
-rw-r--r--	arch/x86/mm/pat.c		28
7 files changed, 30 insertions(+), 38 deletions(-)
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index adad702b39cd..2824607df108 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -33,7 +33,7 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
if (!vma || !is_vm_hugetlb_page(vma))
return ERR_PTR(-EINVAL);
- pte = huge_pte_offset(mm, address);
+ pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
/* hugetlb should be locked, and hence, prefaulted */
WARN_ON(!pte || pte_none(*pte));
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 99fb83819a5f..8a64a6f2848d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -823,15 +823,12 @@ void __init mem_init(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- struct pglist_data *pgdata = NODE_DATA(nid);
- struct zone *zone = pgdata->node_zones +
- zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- return __add_pages(nid, zone, start_pfn, nr_pages);
+ return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dae6a5e5ad4a..136422d7d539 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -772,22 +772,15 @@ static void update_end_of_memory_vars(u64 start, u64 size)
}
}
-/*
- * Memory is added always to NORMAL zone. This means you will never get
- * additional DMA/DMA32 memory.
- */
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- struct pglist_data *pgdat = NODE_DATA(nid);
- struct zone *zone = pgdat->node_zones +
- zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
init_memory_mapping(start, start + size);
- ret = __add_pages(nid, zone, start_pfn, nr_pages);
+ ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
WARN_ON_ONCE(ret);
/* update max_pfn, max_low_pfn and high_memory */
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 88215ac16b24..02c9d7553409 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -23,12 +23,7 @@ static int __init map_range(struct range *range)
start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
- /*
- * end + 1 here is intentional. We check several shadow bytes in advance
- * to slightly speed up fastpath. In some rare cases we could cross
- * boundary of mapped shadow, so we just map some more here.
- */
- return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
+ return vmemmap_populate(start, end, NUMA_NO_NODE);
}
static void __init clear_pgds(unsigned long start,
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 797295e792b2..229d04a83f85 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -92,13 +92,18 @@ unsigned long arch_mmap_rnd(void)
static unsigned long mmap_base(unsigned long rnd, unsigned long task_size)
{
unsigned long gap = rlimit(RLIMIT_STACK);
+ unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
unsigned long gap_min, gap_max;
+ /* Values close to RLIM_INFINITY can overflow. */
+ if (gap + pad > gap)
+ gap += pad;
+
/*
* Top of mmap area (just below the process stack).
* Leave an at least ~128 MB hole with possible stack randomization.
*/
- gap_min = SIZE_128M + stack_maxrandom_size(task_size);
+ gap_min = SIZE_128M;
gap_max = (task_size / 6) * 5;
if (gap < gap_min)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index c8520b2c62d2..757b0bcdf712 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -150,6 +150,12 @@ void clflush_cache_range(void *vaddr, unsigned int size)
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
+void arch_invalidate_pmem(void *addr, size_t size)
+{
+ clflush_cache_range(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+
static void __cpa_flush_all(void *arg)
{
unsigned long cache = (unsigned long)arg;
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 9b78685b66e6..45979502f64b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -37,14 +37,14 @@
#undef pr_fmt
#define pr_fmt(fmt) "" fmt
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
void pat_disable(const char *reason)
{
- if (!__pat_enabled)
+ if (pat_disabled)
return;
if (boot_cpu_done) {
@@ -52,10 +52,8 @@ void pat_disable(const char *reason)
return;
}
- __pat_enabled = 0;
+ pat_disabled = true;
pr_info("x86/PAT: %s\n", reason);
-
- init_cache_modes();
}
static int __init nopat(char *str)
@@ -67,7 +65,7 @@ early_param("nopat", nopat);
bool pat_enabled(void)
{
- return !!__pat_enabled;
+ return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);
@@ -205,6 +203,8 @@ static void __init_cache_modes(u64 pat)
update_cache_mode_entry(i, cache);
}
pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+ init_cm_done = true;
}
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -225,6 +225,7 @@ static void pat_bsp_init(u64 pat)
}
wrmsrl(MSR_IA32_CR_PAT, pat);
+ pat_initialized = true;
__init_cache_modes(pat);
}
@@ -242,10 +243,9 @@ static void pat_ap_init(u64 pat)
wrmsrl(MSR_IA32_CR_PAT, pat);
}
-static void init_cache_modes(void)
+void init_cache_modes(void)
{
u64 pat = 0;
- static int init_cm_done;
if (init_cm_done)
return;
@@ -287,8 +287,6 @@ static void init_cache_modes(void)
}
__init_cache_modes(pat);
-
- init_cm_done = 1;
}
/**
@@ -306,10 +304,8 @@ void pat_init(void)
u64 pat;
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (!pat_enabled()) {
- init_cache_modes();
+ if (pat_disabled)
return;
- }
if ((c->x86_vendor == X86_VENDOR_INTEL) &&
(((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||