author		Heiko Carstens <heiko.carstens@de.ibm.com>	2015-02-12 15:08:27 +0300
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2015-03-25 13:49:33 +0300
commit		5a79859ae0f35d25c67a03e82bf0c80592f16a39 (patch)
tree		37264d49f069812f19ced94e6ae171814fb7e498 /arch/s390/mm
parent		1833c9f647e9bda1cd24653ff8f9c207b5f5b911 (diff)
download	linux-5a79859ae0f35d25c67a03e82bf0c80592f16a39.tar.xz
s390: remove 31 bit support
Remove the 31 bit support in order to reduce maintenance cost and
effectively remove dead code. For a couple of years now there has been
no distribution left that ships a 31 bit kernel.

The 31 bit kernel had also been broken for more than a year before
anybody noticed. In addition I added a removal warning to the kernel,
shown at IPL for 5 minutes: commit a960062e5826 ("s390: add 31 bit
warning message"), which let everybody know about the plan to remove
the 31 bit code. We didn't get any response.

Given that the last 31 bit only machine was introduced in 1999, let's
remove the code.

Anybody with 31 bit user space code can still use the compat mode.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/dump_pagetables.c  | 24
-rw-r--r--  arch/s390/mm/extmem.c           | 14
-rw-r--r--  arch/s390/mm/fault.c            | 36
-rw-r--r--  arch/s390/mm/gup.c              |  4
-rw-r--r--  arch/s390/mm/init.c             |  5
-rw-r--r--  arch/s390/mm/mem_detect.c       |  4
-rw-r--r--  arch/s390/mm/mmap.c             | 25
-rw-r--r--  arch/s390/mm/pageattr.c         |  2
-rw-r--r--  arch/s390/mm/pgtable.c          |  8
-rw-r--r--  arch/s390/mm/vmem.c             | 10
10 files changed, 5 insertions, 127 deletions
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d46cadeda204..8556d6be9b54 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,9 +18,7 @@ enum address_markers_idx {
 	KERNEL_END_NR,
 	VMEMMAP_NR,
 	VMALLOC_NR,
-#ifdef CONFIG_64BIT
 	MODULES_NR,
-#endif
 };
 
 static struct addr_marker address_markers[] = {
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = {
 	[KERNEL_END_NR]	= {(unsigned long)&_end, "Kernel Image End"},
 	[VMEMMAP_NR]	= {0, "vmemmap Area"},
 	[VMALLOC_NR]	= {0, "vmalloc Area"},
-#ifdef CONFIG_64BIT
 	[MODULES_NR]	= {0, "Modules Area"},
-#endif
 	{ -1, NULL }
 };
 
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 	}
 }
 
-#ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
-#else
-#define _PMD_PROT_MASK 0
-#endif
-
 static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 			   pud_t *pud, unsigned long addr)
 {
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 		pmd = pmd_offset(pud, addr);
 		if (!pmd_none(*pmd)) {
 			if (pmd_large(*pmd)) {
-				prot = pmd_val(*pmd) & _PMD_PROT_MASK;
+				prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
 				note_page(m, st, prot, 3);
 			} else
 				walk_pte_level(m, st, pmd, addr);
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 	}
 }
 
-#ifdef CONFIG_64BIT
-#define _PUD_PROT_MASK _REGION3_ENTRY_RO
-#else
-#define _PUD_PROT_MASK 0
-#endif
-
 static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 			   pgd_t *pgd, unsigned long addr)
 {
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 		pud = pud_offset(pgd, addr);
 		if (!pud_none(*pud))
 			if (pud_large(*pud)) {
-				prot = pud_val(*pud) & _PUD_PROT_MASK;
+				prot = pud_val(*pud) & _REGION3_ENTRY_RO;
 				note_page(m, st, prot, 2);
 			} else
 				walk_pmd_level(m, st, pud, addr);
@@ -230,13 +214,9 @@ static int pt_dump_init(void)
 	 * kernel ASCE. We need this to keep the page table walker functions
 	 * from accessing non-existent entries.
 	 */
-#ifdef CONFIG_32BIT
-	max_addr = 1UL << 31;
-#else
 	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
 	max_addr = 1UL << (max_addr * 11 + 31);
 	address_markers[MODULES_NR].start_address = MODULES_VADDR;
-#endif
 	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
 	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
 	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 519bba716cc3..23c496957c22 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -51,7 +51,6 @@ struct qout64 {
 	struct qrange range[6];
 };
 
-#ifdef CONFIG_64BIT
 struct qrange_old {
 	unsigned int start; /* last byte type */
 	unsigned int end; /* last byte reserved */
@@ -65,7 +64,6 @@ struct qout64_old {
 	int segrcnt;
 	struct qrange_old range[6];
 };
-#endif
 
 struct qin64 {
 	char qopcode;
@@ -103,7 +101,6 @@ static int scode_set;
 static int
 dcss_set_subcodes(void)
 {
-#ifdef CONFIG_64BIT
 	char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
 	unsigned long rx, ry;
 	int rc;
@@ -135,7 +132,6 @@ dcss_set_subcodes(void)
 		segext_scode = DCSS_SEGEXTX;
 		return 0;
 	}
-#endif
 	/* Diag x'64' new subcodes are not supported, set to old subcodes */
 	loadshr_scode = DCSS_LOADNOLY;
 	loadnsr_scode = DCSS_LOADNSR;
@@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter,
 	rx = (unsigned long) parameter;
 	ry = (unsigned long) *func;
 
-#ifdef CONFIG_64BIT
 	/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
 	if (*func > DCSS_SEGEXT)
 		asm volatile(
@@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter,
 			"	ipm	%2\n"
 			"	srl	%2,28\n"
 			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#else
-	asm volatile(
-		"	diag	%0,%1,0x64\n"
-		"	ipm	%2\n"
-		"	srl	%2,28\n"
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#endif
 	*ret1 = rx;
 	*ret2 = ry;
 	return rc;
@@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg)
 		goto out_free;
 	}
 
-#ifdef CONFIG_64BIT
 	/* Only old format of output area of Diagnose x'64' is supported,
 	   copy data for the new format. */
 	if (segext_scode == DCSS_SEGEXT) {
@@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg)
 		}
 		kfree(qout_old);
 	}
-#endif
 	if (qout->segcnt > 6) {
 		rc = -EOPNOTSUPP;
 		goto out_free;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3ff86533f7db..76515bcea2f1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -36,15 +36,9 @@
 #include <asm/facility.h>
 #include "../kernel/entry.h"
 
-#ifndef CONFIG_64BIT
-#define __FAIL_ADDR_MASK 0x7ffff000
-#define __SUBCODE_MASK 0x0200
-#define __PF_RES_FIELD 0ULL
-#else /* CONFIG_64BIT */
 #define __FAIL_ADDR_MASK -4096L
 #define __SUBCODE_MASK 0x0600
 #define __PF_RES_FIELD 0x8000000000000000ULL
-#endif /* CONFIG_64BIT */
 
 #define VM_FAULT_BADCONTEXT	0x010000
 #define VM_FAULT_BADMAP		0x020000
@@ -54,7 +48,6 @@
 
 static unsigned long store_indication __read_mostly;
 
-#ifdef CONFIG_64BIT
 static int __init fault_init(void)
 {
 	if (test_facility(75))
@@ -62,7 +55,6 @@ static int __init fault_init(void)
 	return 0;
 }
 early_initcall(fault_init);
-#endif
 
 static inline int notify_page_fault(struct pt_regs *regs)
 {
@@ -133,7 +125,6 @@ static int bad_address(void *p)
 	return probe_kernel_address((unsigned long *)p, dummy);
 }
 
-#ifdef CONFIG_64BIT
 static void dump_pagetable(unsigned long asce, unsigned long address)
 {
 	unsigned long *table = __va(asce & PAGE_MASK);
@@ -187,33 +178,6 @@ bad:
 	pr_cont("BAD\n");
 }
 
-#else /* CONFIG_64BIT */
-
-static void dump_pagetable(unsigned long asce, unsigned long address)
-{
-	unsigned long *table = __va(asce & PAGE_MASK);
-
-	pr_alert("AS:%08lx ", asce);
-	table = table + ((address >> 20) & 0x7ff);
-	if (bad_address(table))
-		goto bad;
-	pr_cont("S:%08lx ", *table);
-	if (*table & _SEGMENT_ENTRY_INVALID)
-		goto out;
-	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
-	table = table + ((address >> 12) & 0xff);
-	if (bad_address(table))
-		goto bad;
-	pr_cont("P:%08lx ", *table);
-out:
-	pr_cont("\n");
-	return;
-bad:
-	pr_cont("BAD\n");
-}
-
-#endif /* CONFIG_64BIT */
-
 static void dump_fault_info(struct pt_regs *regs)
 {
 	unsigned long asce;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5c586c78ca8d..1eb41bb3010c 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 	pmd_t *pmdp, pmd;
 
 	pmdp = (pmd_t *) pudp;
-#ifdef CONFIG_64BIT
 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
 		pmdp = (pmd_t *) pud_deref(pud);
 	pmdp += pmd_index(addr);
-#endif
 	do {
 		pmd = *pmdp;
 		barrier();
@@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 	pud_t *pudp, pud;
 
 	pudp = (pud_t *) pgdp;
-#ifdef CONFIG_64BIT
 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
 		pudp = (pud_t *) pgd_deref(pgd);
 	pudp += pud_index(addr);
-#endif
 	do {
 		pud = *pudp;
 		barrier();
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d35b15113b17..80875c43a4a4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -105,7 +105,6 @@ void __init paging_init(void)
 	unsigned long pgd_type, asce_bits;
 
 	init_mm.pgd = swapper_pg_dir;
-#ifdef CONFIG_64BIT
 	if (VMALLOC_END > (1UL << 42)) {
 		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION2_ENTRY_EMPTY;
@@ -113,10 +112,6 @@ void __init paging_init(void)
 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION3_ENTRY_EMPTY;
 	}
-#else
-	asce_bits = _ASCE_TABLE_LENGTH;
-	pgd_type = _SEGMENT_ENTRY_EMPTY;
-#endif
 	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
 	clear_table((unsigned long *) init_mm.pgd, pgd_type,
 		    sizeof(unsigned long)*2048);
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 5535cfe0ee11..0f3604395805 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -36,10 +36,6 @@ void __init detect_memory_memblock(void)
 	memsize = rzm * rnmax;
 	if (!rzm)
 		rzm = 1ULL << 17;
-	if (IS_ENABLED(CONFIG_32BIT)) {
-		rzm = min(ADDR2G, rzm);
-		memsize = min(ADDR2G, memsize);
-	}
 	max_physmem_end = memsize;
 	addr = 0;
 	/* keep memblock lists close to the kernel */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 179a2c20b01f..2e8378796e87 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -190,29 +190,6 @@ unsigned long randomize_et_dyn(void)
 	return base + mmap_rnd();
 }
 
-#ifndef CONFIG_64BIT
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
-	/*
-	 * Fall back to the standard layout if the personality
-	 * bit is set, or if the expected stack growth is unlimited:
-	 */
-	if (mmap_is_legacy()) {
-		mm->mmap_base = mmap_base_legacy();
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base();
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
-#else
-
 int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 {
 	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
@@ -317,5 +294,3 @@ static int __init setup_mmap_rnd(void)
 	return 0;
 }
 early_initcall(setup_mmap_rnd);
-
-#endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 426c9d462d1c..749c98407b41 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
 {
 	int i;
 
-	if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
+	if (test_facility(13)) {
 		__ptep_ipte_range(address, nr - 1, pte);
 		return;
 	}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542f2ba2..33f589459113 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -27,14 +27,8 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-#ifndef CONFIG_64BIT
-#define ALLOC_ORDER 1
-#define FRAG_MASK 0x0f
-#else
 #define ALLOC_ORDER 2
 #define FRAG_MASK 0x03
-#endif
-
 
 unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
@@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
 	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
 {
 	struct mm_struct *mm = arg;
@@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 	if (current->active_mm == mm)
 		set_user_asce(mm);
 }
-#endif
 
 #ifdef CONFIG_PGSTE
 
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b1593c2f751a..ef7d6c8fea66 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
-#ifdef CONFIG_64BIT
 	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
 	return pud;
 }
 
@@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
-#ifdef CONFIG_64BIT
 	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
 	return pmd;
 }
 
@@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pgd_populate(&init_mm, pg_dir, pu_dir);
 		}
 		pu_dir = pud_offset(pg_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
 		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
 			pud_val(*pu_dir) = __pa(address) |
@@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pud_populate(&init_mm, pu_dir, pm_dir);
 		}
 		pm_dir = pmd_offset(pu_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
 		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
 			pmd_val(*pm_dir) = __pa(address) |
@@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir)) {
-#ifdef CONFIG_64BIT
 			/* Use 1MB frames for vmemmap if available. We always
 			 * use large frames even if they are only partially
 			 * used.
@@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 				address = (address + PMD_SIZE) & PMD_MASK;
 				continue;
 			}
-#endif
 			pt_dir = vmem_pte_alloc(address);
 			if (!pt_dir)
 				goto out;
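
A note on the max_addr computation that pt_dump_init() now performs
unconditionally: the table-type bits of the kernel ASCE encode which
top-level table is in use, and each region level above the 2 GiB (2^31)
segment-table baseline adds 11 address bits. The standalone sketch
below reproduces that arithmetic; asce_max_addr() is a hypothetical
name, the mask value is quoted from the s390 headers, and a 64-bit
unsigned long is assumed, so treat this as an illustration rather than
kernel code.

#include <stdio.h>

/* Table-type bits of an s390 ASCE (value as in the s390 headers). */
#define _REGION_ENTRY_TYPE_MASK 0x0cUL

/* Hypothetical helper mirroring pt_dump_init(): type 1 = region-third
 * table -> 1UL << 42 (4 TiB), type 2 = region-second -> 1UL << 53 (8 PiB).
 * Assumes a 64-bit unsigned long, as on s390. */
static unsigned long asce_max_addr(unsigned long asce)
{
	unsigned long type = (asce & _REGION_ENTRY_TYPE_MASK) >> 2;

	return 1UL << (type * 11 + 31);
}

int main(void)
{
	printf("%#lx\n", asce_max_addr(0x04UL)); /* region-third:  0x40000000000 */
	printf("%#lx\n", asce_max_addr(0x08UL)); /* region-second: 0x20000000000000 */
	return 0;
}

The same boundary explains the paging_init() hunk above: once
VMALLOC_END exceeds 1UL << 42, a region-third table can no longer cover
the address space, so the kernel switches to a region-second table.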
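
Likewise for the vmem_add_mem() hunks: with the CONFIG_64BIT guard
gone, the remaining condition for installing a large EDAT1/EDAT2 frame
is that the address is non-zero, naturally aligned, and the whole frame
fits before the end of the range being mapped. A minimal sketch of that
predicate, assuming s390's 1 MiB segment (PMD) and 2 GiB region-third
(PUD) frame sizes and a 64-bit unsigned long; can_map_large() is a
hypothetical name, not a kernel function.

#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE (1UL << 20)		/* s390 segment entry maps 1 MiB */
#define PMD_MASK (~(PMD_SIZE - 1))
#define PUD_SIZE (1UL << 31)		/* s390 region-third entry maps 2 GiB */
#define PUD_MASK (~(PUD_SIZE - 1))

/* Mirrors the test vmem_add_mem() applies before using a large entry:
 * non-zero, aligned to the frame size, and the frame fits below end. */
static bool can_map_large(unsigned long addr, unsigned long end,
			  unsigned long size, unsigned long mask)
{
	return addr && !(addr & ~mask) && addr + size <= end;
}

int main(void)
{
	unsigned long end = 1UL << 33;	/* mapping the first 8 GiB */

	printf("%d\n", can_map_large(1UL << 31, end, PUD_SIZE, PUD_MASK)); /* 1 */
	printf("%d\n", can_map_large((1UL << 31) + PMD_SIZE, end,
				     PUD_SIZE, PUD_MASK)); /* 0: not 2 GiB aligned */
	printf("%d\n", can_map_large((1UL << 31) + PMD_SIZE, end,
				     PMD_SIZE, PMD_MASK)); /* 1: fine as 1 MiB */
	return 0;
}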