Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/include/asm/pgtable.h    | 23
-rw-r--r--  arch/s390/include/asm/sparsemem.h  |  4
-rw-r--r--  arch/s390/kernel/setup.c           | 67
-rw-r--r--  arch/s390/mm/init.c                | 16
-rw-r--r--  arch/s390/mm/pgtable.c             | 11
5 files changed, 68 insertions(+), 53 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4f289ff0b7fe..011358c1b18e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -128,28 +128,11 @@ static inline int is_zero_pfn(unsigned long pfn)
* effect, this also makes sure that 64 bit module code cannot be used
* as system call address.
*/
-
extern unsigned long VMALLOC_START;
+extern unsigned long VMALLOC_END;
+extern struct page *vmemmap;
-#ifndef __s390x__
-#define VMALLOC_SIZE (96UL << 20)
-#define VMALLOC_END 0x7e000000UL
-#define VMEM_MAP_END 0x80000000UL
-#else /* __s390x__ */
-#define VMALLOC_SIZE (128UL << 30)
-#define VMALLOC_END 0x3e000000000UL
-#define VMEM_MAP_END 0x40000000000UL
-#endif /* __s390x__ */
-
-/*
- * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
- * mapping. This needs to be calculated at compile time since the size of the
- * VMEM_MAP is static but the size of struct page can change.
- */
-#define VMEM_MAX_PAGES ((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
-#define VMEM_MAX_PFN min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
-#define VMEM_MAX_PHYS ((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
-#define vmemmap ((struct page *) VMALLOC_END)
+#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
/*
* A 31 bit pagetable entry of S390 has following format:
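
Note: before this patch, VMEM_MAX_PHYS was a compile-time constant derived from the fixed VMALLOC_END/VMEM_MAP_END window removed above. With 4 KiB pages and, say, a 64-byte struct page (an illustrative value, not something the patch fixes), the removed 64-bit macros work out to a ceiling of roughly 3.75 TiB for the 1:1 mapping. A minimal sketch of that arithmetic, under those assumptions:

/* Sketch of the removed compile-time limit (64-bit case only).
 * Assumes PAGE_SHIFT = 12 and sizeof(struct page) = 64 bytes; both are
 * assumptions made for illustration, not values taken from the patch. */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12, struct_page = 64;
	unsigned long vmalloc_size = 128UL << 30;		/* old VMALLOC_SIZE */
	unsigned long vmalloc_end  = 0x3e000000000UL;		/* old VMALLOC_END */
	unsigned long vmem_map_end = 0x40000000000UL;		/* old VMEM_MAP_END */
	unsigned long vmalloc_start = vmalloc_end - vmalloc_size;
	unsigned long max_pages = (vmem_map_end - vmalloc_end) / struct_page;
	unsigned long max_pfn = vmalloc_start >> page_shift;

	if (max_pages < max_pfn)
		max_pfn = max_pages;
	/* old VMEM_MAX_PHYS, rounded down to 16 MiB; prints "3840 GiB" */
	printf("%lu GiB\n", ((max_pfn << page_shift) & ~((16UL << 20) - 1)) >> 30);
	return 0;
}

With the dynamic layout, VMEM_MAX_PHYS is simply the runtime address where the vmemmap array starts, so the limit grows with the page table depth chosen in setup_memory_end() below.
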
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index 545d219e6a2d..0fb34027d3f6 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -4,8 +4,8 @@
#ifdef CONFIG_64BIT
#define SECTION_SIZE_BITS 28
-#define MAX_PHYSADDR_BITS 42
-#define MAX_PHYSMEM_BITS 42
+#define MAX_PHYSADDR_BITS 46
+#define MAX_PHYSMEM_BITS 46
#else
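
Note: raising MAX_PHYSMEM_BITS from 42 to 46 lifts the maximum supported physical address from 4 TiB to 64 TiB and, with 256 MiB sections (SECTION_SIZE_BITS = 28), multiplies the sparsemem section space by 16, from 2^14 to 2^18 sections. A quick check of that arithmetic, using the generic SECTIONS_SHIFT / NR_MEM_SECTIONS formulas from include/linux/mmzone.h:

/* Section-count arithmetic for the old and new MAX_PHYSMEM_BITS values. */
#include <stdio.h>

int main(void)
{
	unsigned int section_size_bits = 28;	/* 256 MiB per section */
	unsigned int old_bits = 42, new_bits = 46;

	printf("old: %lu sections\n", 1UL << (old_bits - section_size_bits));	/* 16384 */
	printf("new: %lu sections\n", 1UL << (new_bits - section_size_bits));	/* 262144 */
	return 0;
}
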
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 26b601c2b137..66903eed36e6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -94,6 +94,15 @@ struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
int __initdata memory_end_set;
unsigned long __initdata memory_end;
+unsigned long VMALLOC_START;
+EXPORT_SYMBOL(VMALLOC_START);
+
+unsigned long VMALLOC_END;
+EXPORT_SYMBOL(VMALLOC_END);
+
+struct page *vmemmap;
+EXPORT_SYMBOL(vmemmap);
+
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
@@ -277,6 +286,15 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
+static int __init parse_vmalloc(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+ VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
+ return 0;
+}
+early_param("vmalloc", parse_vmalloc);
+
unsigned int user_mode = HOME_SPACE_MODE;
EXPORT_SYMBOL_GPL(user_mode);
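
Note: the "vmalloc=" parameter now specifies the desired size of the vmalloc area instead of lowering VMALLOC_START beneath a fixed VMALLOC_END; the parsed size is rounded up to page granularity and parked in VMALLOC_END until setup_memory_end() turns it into an actual address range. A minimal sketch of that rounding, assuming 4 KiB pages (the example size is made up):

/* Round a requested vmalloc size up to page granularity, as parse_vmalloc()
 * now does.  PAGE_SIZE/PAGE_MASK here assume 4 KiB pages. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long requested = (1UL << 30) + 123;	/* a non-page-aligned "vmalloc=" value */
	unsigned long rounded = (requested + PAGE_SIZE - 1) & PAGE_MASK;

	printf("%lu -> %lu\n", requested, rounded);	/* 1073741947 -> 1073745920 */
	return 0;
}
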
@@ -478,8 +496,7 @@ EXPORT_SYMBOL_GPL(real_memory_size);
static void __init setup_memory_end(void)
{
- unsigned long memory_size;
- unsigned long max_mem;
+ unsigned long vmax, vmalloc_size, tmp;
int i;
@@ -489,12 +506,9 @@ static void __init setup_memory_end(void)
memory_end_set = 1;
}
#endif
- memory_size = 0;
+ real_memory_size = 0;
memory_end &= PAGE_MASK;
- max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
- memory_end = min(max_mem, memory_end);
-
/*
* Make sure all chunks are MAX_ORDER aligned so we don't need the
* extra checks that HOLES_IN_ZONE would require.
@@ -514,23 +528,48 @@ static void __init setup_memory_end(void)
chunk->addr = start;
chunk->size = end - start;
}
+ real_memory_size = max(real_memory_size,
+ chunk->addr + chunk->size);
}
+ /* Choose kernel address space layout: 2, 3, or 4 levels. */
+#ifdef CONFIG_64BIT
+ vmalloc_size = VMALLOC_END ?: 128UL << 30;
+ tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
+ tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
+ if (tmp <= (1UL << 42))
+ vmax = 1UL << 42; /* 3-level kernel page table */
+ else
+ vmax = 1UL << 53; /* 4-level kernel page table */
+#else
+ vmalloc_size = VMALLOC_END ?: 96UL << 20;
+ vmax = 1UL << 31; /* 2-level kernel page table */
+#endif
+ /* vmalloc area is at the end of the kernel address space. */
+ VMALLOC_END = vmax;
+ VMALLOC_START = vmax - vmalloc_size;
+
+ /* Split remaining virtual space between 1:1 mapping & vmemmap array */
+ tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+ tmp = VMALLOC_START - tmp * sizeof(struct page);
+ tmp &= ~((vmax >> 11) - 1); /* align to page table level */
+ tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
+ vmemmap = (struct page *) tmp;
+
+ /* Take care that memory_end is set and <= vmemmap */
+ memory_end = min(memory_end ?: real_memory_size, tmp);
+
+ /* Fixup memory chunk array to fit into 0..memory_end */
for (i = 0; i < MEMORY_CHUNKS; i++) {
struct mem_chunk *chunk = &memory_chunk[i];
- real_memory_size = max(real_memory_size,
- chunk->addr + chunk->size);
- if (chunk->addr >= max_mem) {
+ if (chunk->addr >= memory_end) {
memset(chunk, 0, sizeof(*chunk));
continue;
}
- if (chunk->addr + chunk->size > max_mem)
- chunk->size = max_mem - chunk->addr;
- memory_size = max(memory_size, chunk->addr + chunk->size);
+ if (chunk->addr + chunk->size > memory_end)
+ chunk->size = memory_end - chunk->addr;
}
- if (!memory_end)
- memory_end = memory_size;
}
void *restart_stack __attribute__((__section__(".data")));
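
Note: the new setup_memory_end() logic reads as follows: estimate the virtual space needed (one struct page plus one page of 1:1 mapping per page frame, plus the vmalloc area), pick a 3-level (2^42) or 4-level (2^53) kernel address space accordingly, place the vmalloc area at the very top, and split what remains below VMALLOC_START between the 1:1 mapping and the vmemmap array, aligned to one top-level page table entry. A standalone model of that computation, assuming 4 KiB pages and a 64-byte struct page (both purely illustrative stand-ins for the real, configuration-dependent values):

/* Standalone model of the new address space layout decision. */
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed */
#define STRUCT_PAGE	64UL	/* assumed sizeof(struct page) */

int main(void)
{
	unsigned long real_memory_size = 1UL << 40;	/* e.g. 1 TiB installed */
	unsigned long vmalloc_size = 128UL << 30;	/* default vmalloc size */
	unsigned long vmax, tmp, vmalloc_start, vmemmap_start, memory_end;

	/* Needed virtual space: 1:1 mapping + vmemmap array + vmalloc area. */
	tmp = real_memory_size / PAGE_SIZE;
	tmp = tmp * (STRUCT_PAGE + PAGE_SIZE) + vmalloc_size;
	vmax = (tmp <= (1UL << 42)) ? (1UL << 42) : (1UL << 53);

	/* vmalloc area goes at the top of the kernel address space. */
	vmalloc_start = vmax - vmalloc_size;

	/* Split the rest between the 1:1 mapping and the vmemmap array. */
	tmp = vmalloc_start / (PAGE_SIZE + STRUCT_PAGE);
	tmp = vmalloc_start - tmp * STRUCT_PAGE;
	tmp &= ~((vmax >> 11) - 1);	/* align to one top-level table entry */
	if (tmp > (1UL << 46))		/* cap at MAX_PHYSMEM_BITS */
		tmp = 1UL << 46;
	vmemmap_start = tmp;
	memory_end = (real_memory_size < tmp) ? real_memory_size : tmp;

	printf("page table levels: %d\n", vmax == (1UL << 42) ? 3 : 4);
	printf("vmalloc area:      [%#lx, %#lx)\n", vmalloc_start, vmax);
	printf("vmemmap start:     %#lx\n", vmemmap_start);
	printf("memory_end:        %#lx\n", memory_end);
	return 0;
}

For the 1 TiB example the 3-level layout is chosen; memory_end ends up capped at min(installed memory, vmemmap start), which is exactly what the chunk fixup loop in the hunk above then enforces per memory chunk.
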
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d4b9fb4d0042..5d633019d8f3 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -93,18 +93,22 @@ static unsigned long setup_zero_pages(void)
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long pgd_type;
+ unsigned long pgd_type, asce_bits;
init_mm.pgd = swapper_pg_dir;
- S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
- /* A three level page table (4TB) is enough for the kernel space. */
- S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
- pgd_type = _REGION3_ENTRY_EMPTY;
+ if (VMALLOC_END > (1UL << 42)) {
+ asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION2_ENTRY_EMPTY;
+ } else {
+ asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ pgd_type = _REGION3_ENTRY_EMPTY;
+ }
#else
- S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+ asce_bits = _ASCE_TABLE_LENGTH;
pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
+ S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
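
Note: paging_init() now derives the kernel ASCE from the layout chosen in setup_memory_end(): a kernel address space that ends above 2^42 needs a region-second table as the top level (a 4-level walk), otherwise a region-third table (3 levels) suffices. A small sketch of just that selection; the enum below is a placeholder, not the real _ASCE_TYPE_* / _REGION*_ENTRY_* definitions:

#include <stdio.h>

enum kernel_table_type { REGION_THIRD_3LVL, REGION_SECOND_4LVL };

/* Same test as the patch: a kernel address space above 2^42 needs 4 levels. */
static enum kernel_table_type kernel_table(unsigned long vmalloc_end)
{
	return vmalloc_end > (1UL << 42) ? REGION_SECOND_4LVL : REGION_THIRD_3LVL;
}

int main(void)
{
	printf("vmax 2^42 -> %d levels\n", kernel_table(1UL << 42) == REGION_THIRD_3LVL ? 3 : 4);
	printf("vmax 2^53 -> %d levels\n", kernel_table(1UL << 53) == REGION_THIRD_3LVL ? 3 : 4);
	return 0;
}

This matches the two vmax values chosen in setup_memory_end(), so the ASCE type and the pgd entry type used to clear swapper_pg_dir always agree with the selected address space size.
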
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index dc2269f1821c..9a4d02f64f16 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -33,17 +33,6 @@
#define FRAG_MASK 0x03
#endif
-unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
-EXPORT_SYMBOL(VMALLOC_START);
-
-static int __init parse_vmalloc(char *arg)
-{
- if (!arg)
- return -EINVAL;
- VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
- return 0;
-}
-early_param("vmalloc", parse_vmalloc);
unsigned long *crst_table_alloc(struct mm_struct *mm)
{