Diffstat (limited to 'arch/s390/mm')
-rw-r--r--   arch/s390/mm/Makefile       |   2
-rw-r--r--   arch/s390/mm/fault.c        |   3
-rw-r--r--   arch/s390/mm/hugetlbpage.c  | 134
-rw-r--r--   arch/s390/mm/init.c         |  23
-rw-r--r--   arch/s390/mm/vmem.c         |  55
5 files changed, 186 insertions, 31 deletions
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 66401930f83e..fb988a48a754 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -4,4 +4,4 @@
 
 obj-y    := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
 obj-$(CONFIG_CMM)       += cmm.o
-
+obj-$(CONFIG_HUGETLB_PAGE)      += hugetlbpage.o
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2650f46001d0..4d537205e83c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -28,6 +28,7 @@
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
+#include <linux/hugetlb.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/s390_ext.h>
@@ -367,6 +368,8 @@ good_area:
        }
 
 survive:
+       if (is_vm_hugetlb_page(vma))
+               address &= HPAGE_MASK;
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
new file mode 100644
index 000000000000..f4b6124fdb75
--- /dev/null
+++ b/arch/s390/mm/hugetlbpage.c
@@ -0,0 +1,134 @@
+/*
+ *  IBM System z Huge TLB Page Support for Kernel.
+ *
+ *    Copyright 2007 IBM Corp.
+ *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                     pte_t *pteptr, pte_t pteval)
+{
+       pmd_t *pmdp = (pmd_t *) pteptr;
+       pte_t shadow_pteval = pteval;
+       unsigned long mask;
+
+       if (!MACHINE_HAS_HPAGE) {
+               pteptr = (pte_t *) pte_page(pteval)[1].index;
+               mask = pte_val(pteval) &
+                               (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
+               pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
+               if (mm->context.noexec) {
+                       pteptr += PTRS_PER_PTE;
+                       pte_val(shadow_pteval) =
+                               (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
+               }
+       }
+
+       pmd_val(*pmdp) = pte_val(pteval);
+       if (mm->context.noexec) {
+               pmdp = get_shadow_table(pmdp);
+               pmd_val(*pmdp) = pte_val(shadow_pteval);
+       }
+}
+
+int arch_prepare_hugepage(struct page *page)
+{
+       unsigned long addr = page_to_phys(page);
+       pte_t pte;
+       pte_t *ptep;
+       int i;
+
+       if (MACHINE_HAS_HPAGE)
+               return 0;
+
+       ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
+       if (!ptep)
+               return -ENOMEM;
+
+       pte = mk_pte(page, PAGE_RW);
+       for (i = 0; i < PTRS_PER_PTE; i++) {
+               set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
+               pte_val(pte) += PAGE_SIZE;
+       }
+       page[1].index = (unsigned long) ptep;
+       return 0;
+}
+
+void arch_release_hugepage(struct page *page)
+{
+       pte_t *ptep;
+
+       if (MACHINE_HAS_HPAGE)
+               return;
+
+       ptep = (pte_t *) page[1].index;
+       if (!ptep)
+               return;
+       pte_free(&init_mm, ptep);
+       page[1].index = 0;
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp = NULL;
+
+       pgdp = pgd_offset(mm, addr);
+       pudp = pud_alloc(mm, pgdp, addr);
+       if (pudp)
+               pmdp = pmd_alloc(mm, pudp, addr);
+       return (pte_t *) pmdp;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp = NULL;
+
+       pgdp = pgd_offset(mm, addr);
+       if (pgd_present(*pgdp)) {
+               pudp = pud_offset(pgdp, addr);
+               if (pud_present(*pudp))
+                       pmdp = pmd_offset(pudp, addr);
+       }
+       return (pte_t *) pmdp;
+}
+
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+                             int write)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+       if (!MACHINE_HAS_HPAGE)
+               return 0;
+
+       return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+}
+
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+                            pmd_t *pmdp, int write)
+{
+       struct page *page;
+
+       if (!MACHINE_HAS_HPAGE)
+               return NULL;
+
+       page = pmd_page(*pmdp);
+       if (page)
+               page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+       return page;
+}
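
A note for orientation, not part of the patch: when the machine lacks hardware large-page support (!MACHINE_HAS_HPAGE), arch_prepare_hugepage() pre-builds a page table spanning the whole huge page and stashes it in page[1].index; set_huge_pte_at() then points the segment entry at that table instead of setting _SEGMENT_ENTRY_LARGE, so hugetlbfs still works, just without the TLB benefit. A minimal userspace sketch that would exercise these paths follows — the /mnt/huge mount point and file name are assumptions (any hugetlbfs mount with a pre-allocated pool, e.g. via /proc/sys/vm/nr_hugepages, would do), and HPAGE_LEN matches the 1 MB s390 segment size:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_LEN      (1UL << 20)     /* s390 segment (huge page) size: 1 MB */

int main(void)
{
        int fd;
        char *p;

        /* Hypothetical hugetlbfs mount; adjust to your setup. */
        fd = open("/mnt/huge/testfile", O_CREAT | O_RDWR, 0600);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* mmap of a hugetlbfs file ends up in huge_pte_alloc() above. */
        p = mmap(NULL, HPAGE_LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }
        memset(p, 0xab, HPAGE_LEN);     /* fault the huge page in */
        munmap(p, HPAGE_LEN);
        close(fd);
        unlink("/mnt/huge/testfile");
        return 0;
}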
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 202c952a29b4..acc92f46a096 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -77,28 +77,6 @@ void show_mem(void)
        printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
 }
 
-static void __init setup_ro_region(void)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       pte_t new_pte;
-       unsigned long address, end;
-
-       address = ((unsigned long)&_stext) & PAGE_MASK;
-       end = PFN_ALIGN((unsigned long)&_eshared);
-
-       for (; address < end; address += PAGE_SIZE) {
-               pgd = pgd_offset_k(address);
-               pud = pud_offset(pgd, address);
-               pmd = pmd_offset(pud, address);
-               pte = pte_offset_kernel(pmd, address);
-               new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
-               *pte = new_pte;
-       }
-}
-
 /*
  * paging_init() sets up the page tables
  */
@@ -121,7 +99,6 @@ void __init paging_init(void)
        clear_table((unsigned long *) init_mm.pgd, pgd_type,
                    sizeof(unsigned long)*2048);
        vmem_map_init();
-       setup_ro_region();
 
        /* enable virtual mapping in kernel mode */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
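
The vmem.c diff below threads a read-only flag through vmem_add_mem()/vmem_add_range() in place of the deleted setup_ro_region(), and adds a large-page fast path to the 1:1 mapping. As a reading aid, here is a standalone restatement of the three checks that gate that fast path; the helper name and macros are illustrative rather than from the patch, and the rationale for the third check (keeping the first megabyte, including the lowcore, on 4K pages) is my reading, not something the patch states:

#include <stdio.h>

#define HPAGE_SIZE_1M  (1UL << 20)             /* s390 segment size */
#define HPAGE_MASK_1M  (~(HPAGE_SIZE_1M - 1))

/* Same predicate as the #ifdef __s390x__ block in vmem_add_range() below. */
static int can_map_large(unsigned long address,
                         unsigned long start, unsigned long size)
{
        return (address & ~HPAGE_MASK_1M) == 0 &&         /* segment aligned */
               address + HPAGE_SIZE_1M <= start + size && /* fits in range   */
               address >= HPAGE_SIZE_1M;                  /* not segment 0   */
}

int main(void)
{
        /* Mapping 0..16 MB: segment 0 and unaligned addresses stay on 4K pages. */
        printf("%d %d %d\n",
               can_map_large(0x000000, 0, 16UL << 20),   /* 0: first segment */
               can_map_large(0x100000, 0, 16UL << 20),   /* 1: 1 MB aligned  */
               can_map_large(0x180000, 0, 16UL << 20));  /* 0: unaligned     */
        return 0;
}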
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 3ffc0211dc85..97bce6c97574 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -10,10 +10,12 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/list.h>
+#include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 static DEFINE_MUTEX(vmem_mutex);
@@ -113,7 +115,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size)
+static int vmem_add_range(unsigned long start, unsigned long size, int ro)
 {
        unsigned long address;
        pgd_t *pg_dir;
@@ -140,7 +142,19 @@ static int vmem_add_range(unsigned long start, unsigned long size)
                        pud_populate_kernel(&init_mm, pu_dir, pm_dir);
                }
 
+               pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
                pm_dir = pmd_offset(pu_dir, address);
+
+#ifdef __s390x__
+               if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
+                   (address + HPAGE_SIZE <= start + size) &&
+                   (address >= HPAGE_SIZE)) {
+                       pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+                       pmd_val(*pm_dir) = pte_val(pte);
+                       address += HPAGE_SIZE - PAGE_SIZE;
+                       continue;
+               }
+#endif
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
@@ -149,7 +163,6 @@ static int vmem_add_range(unsigned long start, unsigned long size)
                }
 
                pt_dir = pte_offset_kernel(pm_dir, address);
-               pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
                *pt_dir = pte;
        }
        ret = 0;
@@ -180,6 +193,13 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir))
                        continue;
+
+               if (pmd_huge(*pm_dir)) {
+                       pmd_clear_kernel(pm_dir);
+                       address += HPAGE_SIZE - PAGE_SIZE;
+                       continue;
+               }
+
                pt_dir = pte_offset_kernel(pm_dir, address);
                *pt_dir = pte;
        }
@@ -248,14 +268,14 @@ out:
        return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
        int ret;
 
        ret = vmem_add_mem_map(start, size);
        if (ret)
                return ret;
-       return vmem_add_range(start, size);
+       return vmem_add_range(start, size, ro);
 }
 
 /*
@@ -338,7 +358,7 @@ int add_shared_memory(unsigned long start, unsigned long size)
        if (ret)
                goto out_free;
 
-       ret = vmem_add_mem(start, size);
+       ret = vmem_add_mem(start, size, 0);
        if (ret)
                goto out_remove;
@@ -374,14 +394,35 @@ out:
  */
 void __init vmem_map_init(void)
 {
+       unsigned long ro_start, ro_end;
+       unsigned long start, end;
        int i;
 
        INIT_LIST_HEAD(&init_mm.context.crst_list);
        INIT_LIST_HEAD(&init_mm.context.pgtable_list);
        init_mm.context.noexec = 0;
        NODE_DATA(0)->node_mem_map = VMEM_MAP;
-       for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
-               vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+       ro_start = ((unsigned long)&_stext) & PAGE_MASK;
+       ro_end = PFN_ALIGN((unsigned long)&_eshared);
+       for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+               start = memory_chunk[i].addr;
+               end = memory_chunk[i].addr + memory_chunk[i].size;
+               if (start >= ro_end || end <= ro_start)
+                       vmem_add_mem(start, end - start, 0);
+               else if (start >= ro_start && end <= ro_end)
+                       vmem_add_mem(start, end - start, 1);
+               else if (start >= ro_start) {
+                       vmem_add_mem(start, ro_end - start, 1);
+                       vmem_add_mem(ro_end, end - ro_end, 0);
+               } else if (end < ro_end) {
+                       vmem_add_mem(start, ro_start - start, 0);
+                       vmem_add_mem(ro_start, end - ro_start, 1);
+               } else {
+                       vmem_add_mem(start, ro_start - start, 0);
+                       vmem_add_mem(ro_start, ro_end - ro_start, 1);
+                       vmem_add_mem(ro_end, end - ro_end, 0);
+               }
+       }
 }
 
 /*
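
The rewritten vmem_map_init() above splits every memory chunk against [ro_start, ro_end), the page-aligned interval from _stext to _eshared, so kernel text and read-only data are mapped RO while everything else stays RW. A self-contained sketch of the same case analysis, with made-up sample addresses:

#include <stdio.h>

/* Stand-in for vmem_add_mem(): just report each sub-range and its protection. */
static void add(unsigned long start, unsigned long size, int ro)
{
        printf("map %#lx-%#lx %s\n", start, start + size, ro ? "RO" : "RW");
}

/* Same case analysis as the loop body in vmem_map_init() above. */
static void split(unsigned long start, unsigned long end,
                  unsigned long ro_start, unsigned long ro_end)
{
        if (start >= ro_end || end <= ro_start)
                add(start, end - start, 0);             /* no overlap        */
        else if (start >= ro_start && end <= ro_end)
                add(start, end - start, 1);             /* fully inside RO   */
        else if (start >= ro_start) {
                add(start, ro_end - start, 1);          /* RO head, RW tail  */
                add(ro_end, end - ro_end, 0);
        } else if (end < ro_end) {
                add(start, ro_start - start, 0);        /* RW head, RO tail  */
                add(ro_start, end - ro_start, 1);
        } else {
                add(start, ro_start - start, 0);        /* RO in the middle  */
                add(ro_start, ro_end - ro_start, 1);
                add(ro_end, end - ro_end, 0);
        }
}

int main(void)
{
        /* Chunk 0-256 MB with an RO region at 1-4 MB (illustrative values). */
        split(0x0, 0x10000000, 0x100000, 0x400000);
        return 0;
}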