path: root/arch/s390/mm
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 24
-rw-r--r--  arch/s390/mm/extmem.c          | 14
-rw-r--r--  arch/s390/mm/fault.c           | 36
-rw-r--r--  arch/s390/mm/gup.c             |  4
-rw-r--r--  arch/s390/mm/init.c            |  5
-rw-r--r--  arch/s390/mm/maccess.c         | 70
-rw-r--r--  arch/s390/mm/mem_detect.c      |  4
-rw-r--r--  arch/s390/mm/mmap.c            | 59
-rw-r--r--  arch/s390/mm/pageattr.c        |  2
-rw-r--r--  arch/s390/mm/pgtable.c         |  8
-rw-r--r--  arch/s390/mm/vmem.c            | 10
11 files changed, 55 insertions(+), 181 deletions(-)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d46cadeda204..8556d6be9b54 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,9 +18,7 @@ enum address_markers_idx {
KERNEL_END_NR,
VMEMMAP_NR,
VMALLOC_NR,
-#ifdef CONFIG_64BIT
MODULES_NR,
-#endif
};
static struct addr_marker address_markers[] = {
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = {
[KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
[VMEMMAP_NR] = {0, "vmemmap Area"},
[VMALLOC_NR] = {0, "vmalloc Area"},
-#ifdef CONFIG_64BIT
[MODULES_NR] = {0, "Modules Area"},
-#endif
{ -1, NULL }
};
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
}
}
-#ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
-#else
-#define _PMD_PROT_MASK 0
-#endif
-
static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pud_t *pud, unsigned long addr)
{
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (pmd_large(*pmd)) {
- prot = pmd_val(*pmd) & _PMD_PROT_MASK;
+ prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
note_page(m, st, prot, 3);
} else
walk_pte_level(m, st, pmd, addr);
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
}
}
-#ifdef CONFIG_64BIT
-#define _PUD_PROT_MASK _REGION3_ENTRY_RO
-#else
-#define _PUD_PROT_MASK 0
-#endif
-
static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pgd_t *pgd, unsigned long addr)
{
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
- prot = pud_val(*pud) & _PUD_PROT_MASK;
+ prot = pud_val(*pud) & _REGION3_ENTRY_RO;
note_page(m, st, prot, 2);
} else
walk_pmd_level(m, st, pud, addr);
@@ -230,13 +214,9 @@ static int pt_dump_init(void)
* kernel ASCE. We need this to keep the page table walker functions
* from accessing non-existent entries.
*/
-#ifdef CONFIG_32BIT
- max_addr = 1UL << 31;
-#else
max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[MODULES_NR].start_address = MODULES_VADDR;
-#endif
address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
address_markers[VMALLOC_NR].start_address = VMALLOC_START;
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
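
A worked example may help with the max_addr computation kept above: the designation-type bits of the kernel ASCE select the top-level table, and each additional region-table level covers 11 more address bits on top of the 2 GB reachable through a segment table. The following is an illustration only, not part of the patch; the DT-to-table mapping and the sizes are my reading of the z/Architecture table types.

/* Illustration only: max_addr = 1UL << (dt * 11 + 31) for the four
 * ASCE designation types (assumed mapping, not taken from the patch). */
#include <stdio.h>

int main(void)
{
	static const char *const type[] = {
		"segment table", "region-third", "region-second", "region-first"
	};
	unsigned long dt;

	for (dt = 0; dt < 4; dt++)
		printf("DT=%lu (%s): max_addr = 2^%lu\n",
		       dt, type[dt], dt * 11 + 31);
	return 0;
}
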
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 519bba716cc3..23c496957c22 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -51,7 +51,6 @@ struct qout64 {
struct qrange range[6];
};
-#ifdef CONFIG_64BIT
struct qrange_old {
unsigned int start; /* last byte type */
unsigned int end; /* last byte reserved */
@@ -65,7 +64,6 @@ struct qout64_old {
int segrcnt;
struct qrange_old range[6];
};
-#endif
struct qin64 {
char qopcode;
@@ -103,7 +101,6 @@ static int scode_set;
static int
dcss_set_subcodes(void)
{
-#ifdef CONFIG_64BIT
char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
unsigned long rx, ry;
int rc;
@@ -135,7 +132,6 @@ dcss_set_subcodes(void)
segext_scode = DCSS_SEGEXTX;
return 0;
}
-#endif
/* Diag x'64' new subcodes are not supported, set to old subcodes */
loadshr_scode = DCSS_LOADNOLY;
loadnsr_scode = DCSS_LOADNSR;
@@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter,
rx = (unsigned long) parameter;
ry = (unsigned long) *func;
-#ifdef CONFIG_64BIT
/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
if (*func > DCSS_SEGEXT)
asm volatile(
@@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter,
" ipm %2\n"
" srl %2,28\n"
: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#else
- asm volatile(
- " diag %0,%1,0x64\n"
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#endif
*ret1 = rx;
*ret2 = ry;
return rc;
@@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg)
goto out_free;
}
-#ifdef CONFIG_64BIT
/* Only old format of output area of Diagnose x'64' is supported,
copy data for the new format. */
if (segext_scode == DCSS_SEGEXT) {
@@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg)
}
kfree(qout_old);
}
-#endif
if (qout->segcnt > 6) {
rc = -EOPNOTSUPP;
goto out_free;
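
The removed #ifdef around the qout64_old handling above is the only structural change; the old-to-new conversion itself stays. As a rough, hypothetical sketch of what that conversion looks like (the helper name and the count parameter are invented, and it assumes the struct definitions local to this file, with struct qrange holding unsigned long members, which this hunk does not show):

/* Hypothetical sketch: widen the 31-bit qrange_old entries into the
 * 64-bit qout64 layout. */
static void copy_qranges_sketch(struct qout64 *qout,
				const struct qout64_old *qout_old, int count)
{
	int i;

	for (i = 0; i < count && i < 6; i++) {
		qout->range[i].start = qout_old->range[i].start;
		qout->range[i].end   = qout_old->range[i].end;
	}
}
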
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3ff86533f7db..76515bcea2f1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -36,15 +36,9 @@
#include <asm/facility.h>
#include "../kernel/entry.h"
-#ifndef CONFIG_64BIT
-#define __FAIL_ADDR_MASK 0x7ffff000
-#define __SUBCODE_MASK 0x0200
-#define __PF_RES_FIELD 0ULL
-#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
-#endif /* CONFIG_64BIT */
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
@@ -54,7 +48,6 @@
static unsigned long store_indication __read_mostly;
-#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
if (test_facility(75))
@@ -62,7 +55,6 @@ static int __init fault_init(void)
return 0;
}
early_initcall(fault_init);
-#endif
static inline int notify_page_fault(struct pt_regs *regs)
{
@@ -133,7 +125,6 @@ static int bad_address(void *p)
return probe_kernel_address((unsigned long *)p, dummy);
}
-#ifdef CONFIG_64BIT
static void dump_pagetable(unsigned long asce, unsigned long address)
{
unsigned long *table = __va(asce & PAGE_MASK);
@@ -187,33 +178,6 @@ bad:
pr_cont("BAD\n");
}
-#else /* CONFIG_64BIT */
-
-static void dump_pagetable(unsigned long asce, unsigned long address)
-{
- unsigned long *table = __va(asce & PAGE_MASK);
-
- pr_alert("AS:%08lx ", asce);
- table = table + ((address >> 20) & 0x7ff);
- if (bad_address(table))
- goto bad;
- pr_cont("S:%08lx ", *table);
- if (*table & _SEGMENT_ENTRY_INVALID)
- goto out;
- table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
- table = table + ((address >> 12) & 0xff);
- if (bad_address(table))
- goto bad;
- pr_cont("P:%08lx ", *table);
-out:
- pr_cont("\n");
- return;
-bad:
- pr_cont("BAD\n");
-}
-
-#endif /* CONFIG_64BIT */
-
static void dump_fault_info(struct pt_regs *regs)
{
unsigned long asce;
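
For the constants kept above, a small worked example (my illustration, not from the patch): the low bits of the translation-exception identification carry status flags, so masking with __FAIL_ADDR_MASK (-4096L, i.e. ~0xfffUL) leaves the page-aligned fault address.

/* Illustration only: -4096L == ~0xfffUL, so the mask clears the low
 * 12 status bits and keeps the page-aligned address. */
#include <stdio.h>

int main(void)
{
	unsigned long trans_exc_code = 0x000003ffdd000b04UL;	/* made-up value */
	unsigned long addr = trans_exc_code & -4096L;

	printf("fault address: 0x%016lx\n", addr);	/* 0x000003ffdd000000 */
	return 0;
}
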
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5c586c78ca8d..1eb41bb3010c 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
pmd_t *pmdp, pmd;
pmdp = (pmd_t *) pudp;
-#ifdef CONFIG_64BIT
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pmdp = (pmd_t *) pud_deref(pud);
pmdp += pmd_index(addr);
-#endif
do {
pmd = *pmdp;
barrier();
@@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
pud_t *pudp, pud;
pudp = (pud_t *) pgdp;
-#ifdef CONFIG_64BIT
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pudp = (pud_t *) pgd_deref(pgd);
pudp += pud_index(addr);
-#endif
do {
pud = *pudp;
barrier();
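
The lines above are s390's dynamic page-table folding in the lockless walker: the higher-level entry is reused for the next level unless its type bits say a real lower region table is attached, in which case the walker dereferences and indexes it. A stripped-down sketch of the pud-to-pmd step, assuming the kernel context of this file; the helper name is invented:

/* Hypothetical helper mirroring gup_pmd_range(): pick the pmd to walk,
 * either the folded pud slot itself or the attached region-third table. */
static pmd_t *pmd_table_for(pud_t *pudp, pud_t pud, unsigned long addr)
{
	pmd_t *pmdp = (pmd_t *) pudp;		/* folded: reuse the pud slot */

	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);	/* real lower table attached */
	return pmdp + pmd_index(addr);
}
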
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d35b15113b17..80875c43a4a4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -105,7 +105,6 @@ void __init paging_init(void)
unsigned long pgd_type, asce_bits;
init_mm.pgd = swapper_pg_dir;
-#ifdef CONFIG_64BIT
if (VMALLOC_END > (1UL << 42)) {
asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION2_ENTRY_EMPTY;
@@ -113,10 +112,6 @@ void __init paging_init(void)
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
-#else
- asce_bits = _ASCE_TABLE_LENGTH;
- pgd_type = _SEGMENT_ENTRY_EMPTY;
-#endif
S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
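
The VMALLOC_END check above picks the top-level table type. As a rough illustration (the sizes are my assumption from the 11-bits-per-level layout, not stated in the patch), a region-third table reaches 4 TB and a region-second table 8 PB, so the larger table is only paid for when the kernel address space actually needs it.

#include <stdbool.h>

/* Hypothetical helper mirroring the check in paging_init(): anything
 * above the 4 TB covered by a region-third table needs a region-second
 * table as the top level. */
static bool needs_region2_asce(unsigned long vmalloc_end)
{
	return vmalloc_end > (1UL << 42);	/* 4 TB */
}
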
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 2eb34bdfc613..8a993a53fcd6 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -1,7 +1,7 @@
/*
* Access kernel memory without faulting -- s390 specific implementation.
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2015
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
@@ -16,51 +16,55 @@
#include <asm/ctl_reg.h>
#include <asm/io.h>
-/*
- * This function writes to kernel memory bypassing DAT and possible
- * write protection. It copies one to four bytes from src to dst
- * using the stura instruction.
- * Returns the number of bytes copied or -EFAULT.
- */
-static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
+static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
- unsigned long count, aligned;
- int offset, mask;
- int rc = -EFAULT;
+ unsigned long aligned, offset, count;
+ char tmp[8];
- aligned = (unsigned long) dst & ~3UL;
- offset = (unsigned long) dst & 3;
- count = min_t(unsigned long, 4 - offset, size);
- mask = (0xf << (4 - count)) & 0xf;
- mask >>= offset;
+ aligned = (unsigned long) dst & ~7UL;
+ offset = (unsigned long) dst & 7UL;
+ size = min(8UL - offset, size);
+ count = size - 1;
asm volatile(
" bras 1,0f\n"
- " icm 0,0,0(%3)\n"
- "0: l 0,0(%1)\n"
- " lra %1,0(%1)\n"
- "1: ex %2,0(1)\n"
- "2: stura 0,%1\n"
- " la %0,0\n"
- "3:\n"
- EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
- : "+d" (rc), "+a" (aligned)
- : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
- return rc ? rc : count;
+ " mvc 0(1,%4),0(%5)\n"
+ "0: mvc 0(8,%3),0(%0)\n"
+ " ex %1,0(1)\n"
+ " lg %1,0(%3)\n"
+ " lra %0,0(%0)\n"
+ " sturg %1,%0\n"
+ : "+&a" (aligned), "+&a" (count), "=m" (tmp)
+ : "a" (&tmp), "a" (&tmp[offset]), "a" (src)
+ : "cc", "memory", "1");
+ return size;
}
-long probe_kernel_write(void *dst, const void *src, size_t size)
+/*
+ * s390_kernel_write - write to kernel memory bypassing DAT
+ * @dst: destination address
+ * @src: source address
+ * @size: number of bytes to copy
+ *
+ * This function writes to kernel memory bypassing DAT and possible page table
+ * write protection. It writes to the destination using the sturg instruction.
+ * Therefore we have a read-modify-write sequence: the function reads eight
+ * bytes from destination at an eight byte boundary, modifies the bytes
+ * requested and writes the result back in a loop.
+ *
+ * Note: this means that this function may not be called concurrently on
+ * several cpus with overlapping words, since this may potentially
+ * cause data corruption.
+ */
+void notrace s390_kernel_write(void *dst, const void *src, size_t size)
{
- long copied = 0;
+ long copied;
while (size) {
- copied = probe_kernel_write_odd(dst, src, size);
- if (copied < 0)
- break;
+ copied = s390_kernel_write_odd(dst, src, size);
dst += copied;
src += copied;
size -= copied;
}
- return copied < 0 ? -EFAULT : 0;
}
static int __memcpy_real(void *dest, void *src, size_t count)
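
The new comment block above describes a read-modify-write on an aligned doubleword. Here is a portable sketch of just that logic (my illustration: the helper name is invented and it leaves out the sturg store that actually bypasses DAT and write protection):

#include <string.h>

/* Sketch of one read-modify-write step, assuming the 8-byte aligned
 * doubleword can simply be loaded and stored. */
static size_t kernel_write_odd_sketch(void *dst, const void *src, size_t size)
{
	unsigned long aligned = (unsigned long) dst & ~7UL;
	unsigned long offset = (unsigned long) dst & 7UL;
	unsigned char tmp[8];

	if (size > 8 - offset)
		size = 8 - offset;			/* stay inside one doubleword */
	memcpy(tmp, (void *) aligned, 8);		/* read the aligned doubleword */
	memcpy(tmp + offset, src, size);		/* patch the requested bytes */
	memcpy((void *) aligned, tmp, 8);		/* write the whole doubleword back */
	return size;					/* bytes consumed by this step */
}
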
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 5535cfe0ee11..0f3604395805 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -36,10 +36,6 @@ void __init detect_memory_memblock(void)
memsize = rzm * rnmax;
if (!rzm)
rzm = 1ULL << 17;
- if (IS_ENABLED(CONFIG_32BIT)) {
- rzm = min(ADDR2G, rzm);
- memsize = min(ADDR2G, memsize);
- }
max_physmem_end = memsize;
addr = 0;
/* keep memblock lists close to the kernel */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 179a2c20b01f..6e552af08c76 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -32,7 +32,7 @@
#include <asm/pgalloc.h>
unsigned long mmap_rnd_mask;
-unsigned long mmap_align_mask;
+static unsigned long mmap_align_mask;
static unsigned long stack_maxrandom_size(void)
{
@@ -60,22 +60,20 @@ static inline int mmap_is_legacy(void)
return sysctl_legacy_va_layout;
}
-static unsigned long mmap_rnd(void)
+unsigned long arch_mmap_rnd(void)
{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
if (is_32bit_task())
return (get_random_int() & 0x7ff) << PAGE_SHIFT;
else
return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}
-static unsigned long mmap_base_legacy(void)
+static unsigned long mmap_base_legacy(unsigned long rnd)
{
- return TASK_UNMAPPED_BASE + mmap_rnd();
+ return TASK_UNMAPPED_BASE + rnd;
}
-static inline unsigned long mmap_base(void)
+static inline unsigned long mmap_base(unsigned long rnd)
{
unsigned long gap = rlimit(RLIMIT_STACK);
@@ -84,7 +82,7 @@ static inline unsigned long mmap_base(void)
else if (gap > MAX_GAP)
gap = MAX_GAP;
gap &= PAGE_MASK;
- return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
+ return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}
unsigned long
@@ -179,40 +177,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
-unsigned long randomize_et_dyn(void)
-{
- unsigned long base;
-
- base = STACK_TOP / 3 * 2;
- if (!is_32bit_task())
- /* Align to 4GB */
- base &= ~((1UL << 32) - 1);
- return base + mmap_rnd();
-}
-
-#ifndef CONFIG_64BIT
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
-
-#else
-
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
@@ -273,15 +237,20 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ unsigned long random_factor = 0UL;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
/*
* Fall back to the standard layout if the personality
* bit is set, or if the expected stack growth is unlimited:
*/
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
+ mm->mmap_base = mmap_base_legacy(random_factor);
mm->get_unmapped_area = s390_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
+ mm->mmap_base = mmap_base(random_factor);
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
}
}
@@ -317,5 +286,3 @@ static int __init setup_mmap_rnd(void)
return 0;
}
early_initcall(setup_mmap_rnd);
-
-#endif
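
With the refactoring above, the random factor is computed once per exec and threaded into both layouts. A worked example under the assumption of 4 KB pages: for a 31-bit task arch_mmap_rnd() returns (get_random_int() & 0x7ff) << PAGE_SHIFT, one of 2048 page-aligned offsets covering just under 8 MB; mmap_base_legacy() adds that offset to TASK_UNMAPPED_BASE, while mmap_base() subtracts it, together with the clamped stack gap, from STACK_TOP.

#include <stdio.h>

/* Illustration only (assumes PAGE_SHIFT == 12): range of the random
 * offset applied to the mmap base of a 31-bit task. */
int main(void)
{
	unsigned long page_shift = 12;
	unsigned long max_rnd = 0x7ffUL << page_shift;

	printf("random offset: 0 .. 0x%lx (%lu KB, page aligned)\n",
	       max_rnd, max_rnd >> 10);
	return 0;
}
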
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 426c9d462d1c..749c98407b41 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
int i;
- if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
+ if (test_facility(13)) {
__ptep_ipte_range(address, nr - 1, pte);
return;
}
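
test_facility(13) checks for the IPTE-range facility (my reading; the patch itself only drops the CONFIG_64BIT half of the condition), which invalidates a whole run of page-table entries with one instruction instead of looping per page. A hedged sketch of the two paths, assuming the kernel context of this file; the per-page fallback loop is my assumption and is not shown in the hunk:

/* Hypothetical sketch: range invalidation when available, otherwise
 * an assumed per-entry fallback. */
static void ipte_range_sketch(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {		/* IPTE-range available */
		__ptep_ipte_range(address, nr - 1, pte);
		return;
	}
	for (i = 0; i < nr; i++) {		/* assumed per-page fallback */
		__ptep_ipte(address, pte);
		address += PAGE_SIZE;
		pte++;
	}
}
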
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542f2ba2..33f589459113 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -27,14 +27,8 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
-#ifndef CONFIG_64BIT
-#define ALLOC_ORDER 1
-#define FRAG_MASK 0x0f
-#else
#define ALLOC_ORDER 2
#define FRAG_MASK 0x03
-#endif
-
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
@@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
free_pages((unsigned long) table, ALLOC_ORDER);
}
-#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
@@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
if (current->active_mm == mm)
set_user_asce(mm);
}
-#endif
#ifdef CONFIG_PGSTE
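
The surviving values are the 64-bit ones. As a worked illustration of where they come from (my arithmetic, not stated in the patch): a region or segment table has 2048 entries of 8 bytes, i.e. 16 KB or four pages, hence ALLOC_ORDER 2; a page table has 256 entries of 8 bytes, i.e. 2 KB, so two of them share one 4 KB page and FRAG_MASK 0x03 keeps one bit per half.

#include <stdio.h>

/* Illustration only: the table sizes behind ALLOC_ORDER and FRAG_MASK. */
int main(void)
{
	unsigned long crst_bytes = 2048 * 8;	/* region/segment table */
	unsigned long pte_bytes = 256 * 8;	/* page table */

	printf("crst table: %lu bytes = %lu pages\n", crst_bytes, crst_bytes / 4096);
	printf("pte table:  %lu bytes -> %lu per 4 KB page\n", pte_bytes, 4096 / pte_bytes);
	return 0;
}
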
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b1593c2f751a..ef7d6c8fea66 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void)
{
pud_t *pud = NULL;
-#ifdef CONFIG_64BIT
pud = vmem_alloc_pages(2);
if (!pud)
return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
return pud;
}
@@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd = NULL;
-#ifdef CONFIG_64BIT
pmd = vmem_alloc_pages(2);
if (!pmd)
return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
return pmd;
}
@@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pgd_populate(&init_mm, pg_dir, pu_dir);
}
pu_dir = pud_offset(pg_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
pud_val(*pu_dir) = __pa(address) |
@@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
pud_populate(&init_mm, pu_dir, pm_dir);
}
pm_dir = pmd_offset(pu_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
pmd_val(*pm_dir) = __pa(address) |
@@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
-#ifdef CONFIG_64BIT
/* Use 1MB frames for vmemmap if available. We always
* use large frames even if they are only partially
* used.
@@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
-#endif
pt_dir = vmem_pte_alloc(address);
if (!pt_dir)
goto out;
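
The EDAT1/EDAT2 branches kept above map whole 1 MB segments or 2 GB region-third frames when the range allows it (those sizes are my assumption for s390's PMD_SIZE and PUD_SIZE, not stated in the patch). The alignment test is the interesting part; below is a small standalone sketch of it, with a local constant standing in for PMD_SIZE.

#include <stdbool.h>

/* Sketch of the "can this become one large mapping?" test from
 * vmem_add_mem(), assuming the 1 MB s390 segment size for PMD_SIZE. */
#define SKETCH_PMD_SIZE	(1UL << 20)
#define SKETCH_PMD_MASK	(~(SKETCH_PMD_SIZE - 1))

static bool can_map_segment(unsigned long address, unsigned long end)
{
	/* start must be non-zero, sit on a segment boundary,
	 * and a full segment must fit before end */
	return address && !(address & ~SKETCH_PMD_MASK) &&
	       address + SKETCH_PMD_SIZE <= end;
}
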