Diffstat (limited to 'arch/s390/mm')
 arch/s390/mm/Makefile          |   3
 arch/s390/mm/dump_pagetables.c | 226
 arch/s390/mm/extable.c         |  81
 arch/s390/mm/fault.c           |   8
 arch/s390/mm/gup.c             |  48
 arch/s390/mm/init.c            |   4
 arch/s390/mm/pageattr.c        |  40
 arch/s390/mm/pgtable.c         | 114
 arch/s390/mm/vmem.c            |  47
 9 files changed, 526 insertions(+), 45 deletions(-)
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index d98fe9004a52..1bea6d1f55ab 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -3,7 +3,8 @@
 #
 
 obj-y		:= init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
-		   page-states.o gup.o
+		   page-states.o gup.o extable.o
 obj-$(CONFIG_CMM)		+= cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
+obj-$(CONFIG_S390_PTDUMP)	+= dump_pagetables.o
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
new file mode 100644
index 000000000000..cbc6668acb85
--- /dev/null
+++ b/arch/s390/mm/dump_pagetables.c
@@ -0,0 +1,226 @@
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+
+static unsigned long max_addr;
+
+struct addr_marker {
+	unsigned long start_address;
+	const char *name;
+};
+
+enum address_markers_idx {
+	IDENTITY_NR = 0,
+	KERNEL_START_NR,
+	KERNEL_END_NR,
+	VMEMMAP_NR,
+	VMALLOC_NR,
+#ifdef CONFIG_64BIT
+	MODULES_NR,
+#endif
+};
+
+static struct addr_marker address_markers[] = {
+	[IDENTITY_NR]	  = {0, "Identity Mapping"},
+	[KERNEL_START_NR] = {(unsigned long)&_stext, "Kernel Image Start"},
+	[KERNEL_END_NR]	  = {(unsigned long)&_end, "Kernel Image End"},
+	[VMEMMAP_NR]	  = {0, "vmemmap Area"},
+	[VMALLOC_NR]	  = {0, "vmalloc Area"},
+#ifdef CONFIG_64BIT
+	[MODULES_NR]	  = {0, "Modules Area"},
+#endif
+	{ -1, NULL }
+};
+
+struct pg_state {
+	int level;
+	unsigned int current_prot;
+	unsigned long start_address;
+	unsigned long current_address;
+	const struct addr_marker *marker;
+};
+
+static void print_prot(struct seq_file *m, unsigned int pr, int level)
+{
+	static const char * const level_name[] =
+		{ "ASCE", "PGD", "PUD", "PMD", "PTE" };
+
+	seq_printf(m, "%s ", level_name[level]);
+	if (pr & _PAGE_INVALID)
+		seq_printf(m, "I\n");
+	else
+		seq_printf(m, "%s\n", pr & _PAGE_RO ? "RO" : "RW");
+}
+
+static void note_page(struct seq_file *m, struct pg_state *st,
+		      unsigned int new_prot, int level)
+{
+	static const char units[] = "KMGTPE";
+	int width = sizeof(unsigned long) * 2;
+	const char *unit = units;
+	unsigned int prot, cur;
+	unsigned long delta;
+
+	/*
+	 * If we have a "break" in the series, we need to flush the state
+	 * that we have now. "break" is either changing perms, levels or
+	 * address space marker.
+	 */
+	prot = new_prot;
+	cur = st->current_prot;
+
+	if (!st->level) {
+		/* First entry */
+		st->current_prot = new_prot;
+		st->level = level;
+		st->marker = address_markers;
+		seq_printf(m, "---[ %s ]---\n", st->marker->name);
+	} else if (prot != cur || level != st->level ||
+		   st->current_address >= st->marker[1].start_address) {
+		/* Print the actual finished series */
+		seq_printf(m, "0x%0*lx-0x%0*lx",
+			   width, st->start_address,
+			   width, st->current_address);
+		delta = (st->current_address - st->start_address) >> 10;
+		while (!(delta & 0x3ff) && unit[1]) {
+			delta >>= 10;
+			unit++;
+		}
+		seq_printf(m, "%9lu%c ", delta, *unit);
+		print_prot(m, st->current_prot, st->level);
+		if (st->current_address >= st->marker[1].start_address) {
+			st->marker++;
+			seq_printf(m, "---[ %s ]---\n", st->marker->name);
+		}
+		st->start_address = st->current_address;
+		st->current_prot = new_prot;
+		st->level = level;
+	}
+}
+
+/*
+ * The actual page table walker functions. In order to keep the implementation
+ * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
+ * flags to note_page() if a region, segment or page table entry is invalid or
+ * read-only.
+ * After all it's just a hint that the current level being walked contains an
+ * invalid or read-only entry.
+ */
+static void walk_pte_level(struct seq_file *m, struct pg_state *st,
+			   pmd_t *pmd, unsigned long addr)
+{
+	unsigned int prot;
+	pte_t *pte;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
+		st->current_address = addr;
+		pte = pte_offset_kernel(pmd, addr);
+		prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
+		note_page(m, st, prot, 4);
+		addr += PAGE_SIZE;
+	}
+}
+
+static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
+			   pud_t *pud, unsigned long addr)
+{
+	unsigned int prot;
+	pmd_t *pmd;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
+		st->current_address = addr;
+		pmd = pmd_offset(pud, addr);
+		if (!pmd_none(*pmd)) {
+			if (pmd_large(*pmd)) {
+				prot = pmd_val(*pmd) & _SEGMENT_ENTRY_RO;
+				note_page(m, st, prot, 3);
+			} else
+				walk_pte_level(m, st, pmd, addr);
+		} else
+			note_page(m, st, _PAGE_INVALID, 3);
+		addr += PMD_SIZE;
+	}
+}
+
+static void walk_pud_level(struct seq_file *m, struct pg_state *st,
+			   pgd_t *pgd, unsigned long addr)
+{
+	pud_t *pud;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
+		st->current_address = addr;
+		pud = pud_offset(pgd, addr);
+		if (!pud_none(*pud))
+			walk_pmd_level(m, st, pud, addr);
+		else
+			note_page(m, st, _PAGE_INVALID, 2);
+		addr += PUD_SIZE;
+	}
+}
+
+static void walk_pgd_level(struct seq_file *m)
+{
+	unsigned long addr = 0;
+	struct pg_state st;
+	pgd_t *pgd;
+	int i;
+
+	memset(&st, 0, sizeof(st));
+	for (i = 0; i < PTRS_PER_PGD && addr < max_addr; i++) {
+		st.current_address = addr;
+		pgd = pgd_offset_k(addr);
+		if (!pgd_none(*pgd))
+			walk_pud_level(m, &st, pgd, addr);
+		else
+			note_page(m, &st, _PAGE_INVALID, 1);
+		addr += PGDIR_SIZE;
+	}
+	/* Flush out the last page */
+	st.current_address = max_addr;
+	note_page(m, &st, 0, 0);
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+	walk_pgd_level(m);
+	return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+	.open		= ptdump_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int pt_dump_init(void)
+{
+	/*
+	 * Figure out the maximum virtual address being accessible with the
+	 * kernel ASCE. We need this to keep the page table walker functions
+	 * from accessing non-existent entries.
+	 */
+#ifdef CONFIG_32BIT
+	max_addr = 1UL << 31;
+#else
+	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
+	max_addr = 1UL << (max_addr * 11 + 31);
+	address_markers[MODULES_NR].start_address = MODULES_VADDR;
+#endif
+	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
+	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
+	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
+	return 0;
+}
+device_initcall(pt_dump_init);
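Editor's note: note_page() above batches identical mappings into one output line and scales the range size into the largest unit that divides it exactly. The following standalone C sketch isolates that scaling idiom; print_range_size() and the sample values are ours, not part of the patch.

```c
/* Standalone sketch of note_page()'s size-scaling idiom: shift the
 * byte count into KiB, then keep dividing by 1024 while the value
 * stays a whole multiple and a larger unit ("KMGTPE") is available.
 * print_range_size() is a hypothetical name, not from the patch. */
#include <stdio.h>

static void print_range_size(unsigned long start, unsigned long end)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta = (end - start) >> 10;	/* size in KiB */

	while (!(delta & 0x3ff) && unit[1]) {	/* still divisible by 1024? */
		delta >>= 10;
		unit++;
	}
	printf("0x%016lx-0x%016lx %9lu%c\n", start, end, delta, *unit);
}

int main(void)
{
	print_range_size(0x0, 0x100000);	/* prints "...        1M" */
	print_range_size(0x0, 0x40000000);	/* prints "...        1G" */
	print_range_size(0x1000, 0x3000);	/* prints "...        8K" */
	return 0;
}
```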
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
new file mode 100644
index 000000000000..4d1ee88864e8
--- /dev/null
+++ b/arch/s390/mm/extable.c
@@ -0,0 +1,81 @@
+#include <linux/module.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+
+/*
+ * Search one exception table for an entry corresponding to the
+ * given instruction address, and return the address of the entry,
+ * or NULL if none is found.
+ * We use a binary search, and thus we assume that the table is
+ * already sorted.
+ */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+	       const struct exception_table_entry *last,
+	       unsigned long value)
+{
+	const struct exception_table_entry *mid;
+	unsigned long addr;
+
+	while (first <= last) {
+		mid = ((last - first) >> 1) + first;
+		addr = extable_insn(mid);
+		if (addr < value)
+			first = mid + 1;
+		else if (addr > value)
+			last = mid - 1;
+		else
+			return mid;
+	}
+	return NULL;
+}
+
+/*
+ * The exception table needs to be sorted so that the binary
+ * search that we use to find entries in it works properly.
+ * This is used both for the kernel exception table and for
+ * the exception tables of modules that get loaded.
+ *
+ */
+static int cmp_ex(const void *a, const void *b)
+{
+	const struct exception_table_entry *x = a, *y = b;
+
+	/* This compare is only valid after normalization. */
+	return x->insn - y->insn;
+}
+
+void sort_extable(struct exception_table_entry *start,
+		  struct exception_table_entry *finish)
+{
+	struct exception_table_entry *p;
+	int i;
+
+	/* Normalize entries to being relative to the start of the section */
+	for (p = start, i = 0; p < finish; p++, i += 8)
+		p->insn += i;
+	sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+	/* Denormalize all entries */
+	for (p = start, i = 0; p < finish; p++, i += 8)
+		p->insn -= i;
+}
+
+#ifdef CONFIG_MODULES
+/*
+ * If the exception table is sorted, any referring to the module init
+ * will be at the beginning or the end.
+ */
+void trim_init_extable(struct module *m)
+{
+	/* Trim the beginning */
+	while (m->num_exentries &&
+	       within_module_init(extable_insn(&m->extable[0]), m)) {
+		m->extable++;
+		m->num_exentries--;
+	}
+	/* Trim the end */
+	while (m->num_exentries &&
+	       within_module_init(extable_insn(&m->extable[m->num_exentries-1]), m))
+		m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6c013f544146..04ad4001a289 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -111,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
 	if (trans_exc_code == 2)
 		/* Access via secondary space, set_fs setting decides */
 		return current->thread.mm_segment.ar4;
-	if (addressing_mode == HOME_SPACE_MODE)
+	if (s390_user_mode == HOME_SPACE_MODE)
 		/* User space if the access has been done via home space. */
 		return trans_exc_code == 3;
 	/*
@@ -163,7 +163,7 @@ static noinline void do_no_context(struct pt_regs *regs)
 	/* Are we prepared to handle this kernel fault? */
 	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
 	if (fixup) {
-		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+		regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
 		return;
 	}
 
@@ -367,6 +367,7 @@ retry:
 			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
 			 * of starvation. */
 			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
 			down_read(&mm->mmap_sem);
 			goto retry;
 		}
@@ -628,9 +629,8 @@ static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
 	struct thread_struct *thread, *next;
 	struct task_struct *tsk;
 
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
 		spin_lock_irq(&pfault_lock);
 		list_for_each_entry_safe(thread, next, &pfault_list, list) {
 			thread->pfault_wait = 0;
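Editor's note: sort_extable() works because each insn field holds an offset relative to the field's own address. Adding the entry's byte offset within the table (8 bytes per entry) turns every field into "absolute address minus table start", which sorts in the same order as the absolute addresses; subtracting each entry's *new* offset afterwards restores valid self-relative values. A user-space model of that round trip, with made-up names and qsort() standing in for the kernel's sort():

```c
/* User-space model of sort_extable()'s normalize/sort/denormalize
 * trick. struct entry, sort_table() and the test data are made up;
 * the 8-byte entry size and cmp_ex() mirror the patch. */
#include <stdio.h>
#include <stdlib.h>

struct entry { int insn; int fixup; };	/* self-relative offsets, 8 bytes */

static int cmp_ex(const void *a, const void *b)
{
	const struct entry *x = a, *y = b;

	/* This compare is only valid after normalization. */
	return x->insn - y->insn;
}

static void sort_table(struct entry *start, int n)
{
	int i;

	for (i = 0; i < n; i++)	/* normalize: relative -> table-absolute */
		start[i].insn += i * sizeof(struct entry);
	qsort(start, n, sizeof(*start), cmp_ex);
	for (i = 0; i < n; i++)	/* denormalize against the *new* slot */
		start[i].insn -= i * sizeof(struct entry);
}

int main(void)
{
	struct entry e[] = { {300, 0}, {100, 0}, {200, 0} };
	int i;

	sort_table(e, 3);
	for (i = 0; i < 3; i++)	/* prints the re-relative offsets: 108 208 284 */
		printf("%d\n", e[i].insn);
	return 0;
}
```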
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 65cb06e2af4e..60acb93a4680 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -115,7 +115,16 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 		pmd = *pmdp;
 		barrier();
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
+		/*
+		 * The pmd_trans_splitting() check below explains why
+		 * pmdp_splitting_flush() has to serialize with
+		 * smp_call_function() against our disabled IRQs, to stop
+		 * this gup-fast code from running while we set the
+		 * splitting bit in the pmd. Returning zero will take
+		 * the slow path that will call wait_split_huge_page()
+		 * if the pmd is still in splitting state.
+		 */
+		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (unlikely(pmd_huge(pmd))) {
 			if (!gup_huge_pmd(pmdp, pmd, addr, next,
@@ -154,6 +163,43 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 	return 1;
 }
 
+/*
+ * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
+ * back to the regular GUP.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			  struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next, flags;
+	pgd_t *pgdp, pgd;
+	int nr = 0;
+
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+				(void __user *)start, len)))
+		return 0;
+
+	local_irq_save(flags);
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd = *pgdp;
+		barrier();
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			break;
+		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
+			break;
+	} while (pgdp++, addr = next, addr != end);
+	local_irq_restore(flags);
+
+	return nr;
+}
+
 /**
  * get_user_pages_fast() - pin user pages in memory
  * @start:	starting user address
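Editor's note: __get_user_pages_fast() returns how many pages it could pin with IRQs disabled and without mmap_sem; callers are expected to fall back to the sleeping path for the rest. A hedged, kernel-context sketch of that calling pattern follows. pin_one_page() is hypothetical, get_user_pages() is shown with its 3.7-era signature, and error handling is elided; this is not a standalone program.

```c
/* Hypothetical caller: try the IRQ-safe fast path first, then fall
 * back to the regular, sleeping path, similar to what the futex code
 * does. Kernel-context sketch only. */
static int pin_one_page(unsigned long uaddr, int write, struct page **page)
{
	int ret;

	ret = __get_user_pages_fast(uaddr, 1, write, page);
	if (ret == 1)
		return 0;	/* pinned without taking mmap_sem */

	/* Slow path: may sleep and takes mmap_sem. */
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, 1,
			     write, 0, page, NULL);
	up_read(&current->mm->mmap_sem);
	return ret == 1 ? 0 : -EFAULT;
}
```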
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6adbc082618a..81e596c65dee 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,7 +42,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
 unsigned long empty_zero_page, zero_page_mask;
 EXPORT_SYMBOL(empty_zero_page);
 
-static unsigned long setup_zero_pages(void)
+static unsigned long __init setup_zero_pages(void)
 {
 	struct cpuid cpu_id;
 	unsigned int order;
@@ -212,7 +212,7 @@ void free_initmem(void)
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 	free_init_pages("initrd memory", start, end);
 }
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index b36537a5f43e..00be01c4b4f3 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -8,25 +8,38 @@
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 
+static pte_t *walk_page_table(unsigned long addr)
+{
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	pgdp = pgd_offset_k(addr);
+	if (pgd_none(*pgdp))
+		return NULL;
+	pudp = pud_offset(pgdp, addr);
+	if (pud_none(*pudp))
+		return NULL;
+	pmdp = pmd_offset(pudp, addr);
+	if (pmd_none(*pmdp) || pmd_large(*pmdp))
+		return NULL;
+	ptep = pte_offset_kernel(pmdp, addr);
+	if (pte_none(*ptep))
+		return NULL;
+	return ptep;
+}
+
 static void change_page_attr(unsigned long addr, int numpages,
 			     pte_t (*set) (pte_t))
 {
 	pte_t *ptep, pte;
-	pmd_t *pmdp;
-	pud_t *pudp;
-	pgd_t *pgdp;
 	int i;
 
 	for (i = 0; i < numpages; i++) {
-		pgdp = pgd_offset(&init_mm, addr);
-		pudp = pud_offset(pgdp, addr);
-		pmdp = pmd_offset(pudp, addr);
-		if (pmd_huge(*pmdp)) {
-			WARN_ON_ONCE(1);
-			continue;
-		}
-		ptep = pte_offset_kernel(pmdp, addr);
-
+		ptep = walk_page_table(addr);
+		if (WARN_ON_ONCE(!ptep))
+			break;
 		pte = *ptep;
 		pte = set(pte);
 		__ptep_ipte(addr, ptep);
@@ -40,21 +53,18 @@ int set_memory_ro(unsigned long addr, int numpages)
 	change_page_attr(addr, numpages, pte_wrprotect);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(set_memory_ro);
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
 	change_page_attr(addr, numpages, pte_mkwrite);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(set_memory_rw);
 
 /* not possible */
 int set_memory_nx(unsigned long addr, int numpages)
 {
 	return 0;
 }
-EXPORT_SYMBOL_GPL(set_memory_nx);
 
 int set_memory_x(unsigned long addr, int numpages)
 {
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 18df31d1f2c9..c8188a18af05 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -609,8 +609,8 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
  */
 unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
 {
-	struct page *page;
-	unsigned long *table;
+	unsigned long *uninitialized_var(table);
+	struct page *uninitialized_var(page);
 	unsigned int mask, bit;
 
 	if (mm_has_pgste(mm))
@@ -787,6 +787,30 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 		tlb_table_flush(tlb);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void thp_split_vma(struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	struct page *page;
+
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
+		page = follow_page(vma, addr, FOLL_SPLIT);
+	}
+}
+
+void thp_split_mm(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma = mm->mmap;
+
+	while (vma != NULL) {
+		thp_split_vma(vma);
+		vma->vm_flags &= ~VM_HUGEPAGE;
+		vma->vm_flags |= VM_NOHUGEPAGE;
+		vma = vma->vm_next;
+	}
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /*
  * switch on pgstes for its userspace process (for kvm)
  */
@@ -796,7 +820,7 @@ int s390_enable_sie(void)
 	struct mm_struct *mm, *old_mm;
 
 	/* Do we have switched amode? If no, we cannot do sie */
-	if (addressing_mode == HOME_SPACE_MODE)
+	if (s390_user_mode == HOME_SPACE_MODE)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */
@@ -824,6 +848,12 @@ int s390_enable_sie(void)
 	if (!mm)
 		return -ENOMEM;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	/* split thp mappings and disable thp for future mappings */
+	thp_split_mm(mm);
+	mm->def_flags |= VM_NOHUGEPAGE;
+#endif
+
 	/* Now lets check again if something happened */
 	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
@@ -866,3 +896,81 @@ bool kernel_page_present(struct page *page)
 	return cc == 0;
 }
 #endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
+			   pmd_t *pmdp)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	/* No need to flush TLB
+	 * On s390 reference bits are in storage key and never in TLB */
+	return pmdp_test_and_clear_young(vma, address, pmdp);
+}
+
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp,
+			  pmd_t entry, int dirty)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	if (pmd_same(*pmdp, entry))
+		return 0;
+	pmdp_invalidate(vma, address, pmdp);
+	set_pmd_at(vma->vm_mm, address, pmdp, entry);
+	return 1;
+}
+
+static void pmdp_splitting_flush_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp)
+{
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
+			      (unsigned long *) pmdp)) {
+		/* need to serialize against gup-fast (IRQ disabled) */
+		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
+	}
+}
+
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
+{
+	struct list_head *lh = (struct list_head *) pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	if (!mm->pmd_huge_pte)
+		INIT_LIST_HEAD(lh);
+	else
+		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
+	mm->pmd_huge_pte = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
+{
+	struct list_head *lh;
+	pgtable_t pgtable;
+	pte_t *ptep;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = mm->pmd_huge_pte;
+	lh = (struct list_head *) pgtable;
+	if (list_empty(lh))
+		mm->pmd_huge_pte = NULL;
+	else {
+		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		list_del(lh);
+	}
+	ptep = (pte_t *) pgtable;
+	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	ptep++;
+	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	return pgtable;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
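Editor's note: pgtable_trans_huge_deposit()/withdraw() above keep spare page tables on a list whose links live inside the deposited tables themselves, so the bookkeeping needs no extra allocation. A user-space model of that embedded-list scheme; the list helpers are minimal rewrites of the kernel's, everything else (deposit(), withdraw(), the globals) is made up:

```c
/* User-space model of deposit/withdraw: the stashed object's own
 * memory doubles as the struct list_head link. deposit()/withdraw()
 * mirror the patch's logic; pmd_huge_pte stands in for the field in
 * struct mm_struct. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void *pmd_huge_pte;	/* stand-in for mm->pmd_huge_pte */

static void deposit(void *pgtable)
{
	struct list_head *lh = pgtable;

	if (!pmd_huge_pte)
		INIT_LIST_HEAD(lh);	/* first entry: a list of one */
	else
		list_add(lh, pmd_huge_pte);
	pmd_huge_pte = pgtable;
}

static void *withdraw(void)
{
	struct list_head *lh = pmd_huge_pte;

	if (lh->next == lh)	/* list_empty(): that was the last one */
		pmd_huge_pte = NULL;
	else {
		pmd_huge_pte = lh->next;
		list_del(lh);
	}
	return lh;
}

int main(void)
{
	static struct list_head a, b;

	deposit(&a);
	deposit(&b);
	printf("%d %d\n", withdraw() == &b, withdraw() == &a);	/* 1 1 */
	return 0;
}
```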
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 6f896e75ab49..387c7c60b5b8 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -79,7 +79,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
  */
 static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
-	unsigned long address;
+	unsigned long end = start + size;
+	unsigned long address = start;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -87,7 +88,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 	pte_t  pte;
 	int ret = -ENOMEM;
 
-	for (address = start; address < start + size; address += PAGE_SIZE) {
+	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
 			pu_dir = vmem_pud_alloc();
@@ -107,13 +108,12 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
 		pm_dir = pmd_offset(pu_dir, address);
 
-#ifdef CONFIG_64BIT
-		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
-		    (address + HPAGE_SIZE <= start + size) &&
-		    (address >= HPAGE_SIZE)) {
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
+		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
 			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
 			pmd_val(*pm_dir) = pte_val(pte);
-			address += HPAGE_SIZE - PAGE_SIZE;
+			address += PMD_SIZE;
 			continue;
 		}
 #endif
@@ -126,10 +126,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		*pt_dir = pte;
+		address += PAGE_SIZE;
 	}
 	ret = 0;
 out:
-	flush_tlb_kernel_range(start, start + size);
+	flush_tlb_kernel_range(start, end);
 	return ret;
 }
 
@@ -139,7 +140,8 @@ out:
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
-	unsigned long address;
+	unsigned long end = start + size;
+	unsigned long address = start;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -147,25 +149,32 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 	pte_t  pte;
 
 	pte_val(pte) = _PAGE_TYPE_EMPTY;
-	for (address = start; address < start + size; address += PAGE_SIZE) {
+	while (address < end) {
 		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			address += PGDIR_SIZE;
+			continue;
+		}
 		pu_dir = pud_offset(pg_dir, address);
-		if (pud_none(*pu_dir))
+		if (pud_none(*pu_dir)) {
+			address += PUD_SIZE;
 			continue;
+		}
 		pm_dir = pmd_offset(pu_dir, address);
-		if (pmd_none(*pm_dir))
+		if (pmd_none(*pm_dir)) {
+			address += PMD_SIZE;
 			continue;
-
-		if (pmd_huge(*pm_dir)) {
+		}
+		if (pmd_large(*pm_dir)) {
 			pmd_clear(pm_dir);
-			address += HPAGE_SIZE - PAGE_SIZE;
+			address += PMD_SIZE;
 			continue;
 		}
-
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		*pt_dir = pte;
+		address += PAGE_SIZE;
 	}
-	flush_tlb_kernel_range(start, start + size);
+	flush_tlb_kernel_range(start, end);
 }
 
 /*
@@ -330,8 +339,8 @@ void __init vmem_map_init(void)
 	unsigned long start, end;
 	int i;
 
-	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
-	ro_end = PFN_ALIGN((unsigned long)&_eshared);
+	ro_start = PFN_ALIGN((unsigned long)&_stext);
+	ro_end = (unsigned long)&_eshared & PAGE_MASK;
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
 		if (memory_chunk[i].type == CHUNK_CRASHK ||
 		    memory_chunk[i].type == CHUNK_OLDMEM) {
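Editor's note: the rewritten vmem_add_mem() steps through the range and only installs a large (segment) entry when the candidate address is segment-aligned, nonzero, and a whole segment still fits below the end of the range; the hardware (MACHINE_HAS_EDAT1) and pmd_none() checks are elided here. A standalone sketch of just that eligibility test, with constants matching s390's 1 MB segments and a made-up function name:

```c
/* Sketch of vmem_add_mem()'s large-entry eligibility test. */
#include <stdio.h>

#define PMD_SIZE	(1UL << 20)	/* s390 segment size: 1 MB */
#define PMD_MASK	(~(PMD_SIZE - 1))

static int can_use_segment(unsigned long addr, unsigned long end)
{
	/* nonzero, segment-aligned, and a whole segment fits below end */
	return addr && !(addr & ~PMD_MASK) && addr + PMD_SIZE <= end;
}

int main(void)
{
	printf("%d\n", can_use_segment(0x100000, 0x300000));	/* 1: aligned, fits */
	printf("%d\n", can_use_segment(0x101000, 0x300000));	/* 0: unaligned */
	printf("%d\n", can_use_segment(0x200000, 0x280000));	/* 0: overruns end */
	return 0;
}
```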