author | David Woodhouse <dwmw2@infradead.org> | 2007-01-18 02:34:51 +0300
---|---|---
committer | David Woodhouse <dwmw2@infradead.org> | 2007-01-18 02:34:51 +0300
commit | 9cdf083f981b8d37b3212400a359368661385099 (patch) |
tree | aa15a6a08ad87e650dea40fb59b3180bef0d345b /arch/sh/mm |
parent | e499e01d234a31d59679b7b1e1cf628d917ba49a (diff) |
parent | a8b3485287731978899ced11f24628c927890e78 (diff) |
download | linux-9cdf083f981b8d37b3212400a359368661385099.tar.xz |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/sh/mm')
-rw-r--r-- | arch/sh/mm/Kconfig | 87
-rw-r--r-- | arch/sh/mm/cache-sh2.c | 69
-rw-r--r-- | arch/sh/mm/cache-sh4.c | 20
-rw-r--r-- | arch/sh/mm/clear_page.S | 18
-rw-r--r-- | arch/sh/mm/copy_page.S | 16
-rw-r--r-- | arch/sh/mm/fault.c | 161
-rw-r--r-- | arch/sh/mm/hugetlbpage.c | 5
-rw-r--r-- | arch/sh/mm/init.c | 47
-rw-r--r-- | arch/sh/mm/ioremap.c | 92
-rw-r--r-- | arch/sh/mm/pg-dma.c | 2
-rw-r--r-- | arch/sh/mm/pg-sh4.c | 35
-rw-r--r-- | arch/sh/mm/pmb.c | 6
12 files changed, 247 insertions, 311 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 9dd606464d23..29f4ee35c6dc 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,8 +4,12 @@ menu "Processor selection"
 # Processor families
 #
 config CPU_SH2
+	select SH_WRITETHROUGH if !CPU_SH2A
 	bool
-	select SH_WRITETHROUGH
+
+config CPU_SH2A
+	bool
+	select CPU_SH2
 
 config CPU_SH3
 	bool
@@ -16,6 +20,7 @@ config CPU_SH4
 	bool
 	select CPU_HAS_INTEVT
 	select CPU_HAS_SR_RB
+	select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
 
 config CPU_SH4A
 	bool
@@ -30,6 +35,9 @@ config CPU_SUBTYPE_ST40
 	select CPU_SH4
 	select CPU_HAS_INTC2_IRQ
 
+config CPU_SHX2
+	bool
+
 #
 # Processor subtypes
 #
@@ -40,6 +48,16 @@ config CPU_SUBTYPE_SH7604
 	bool "Support SH7604 processor"
 	select CPU_SH2
 
+config CPU_SUBTYPE_SH7619
+	bool "Support SH7619 processor"
+	select CPU_SH2
+
+comment "SH-2A Processor Support"
+
+config CPU_SUBTYPE_SH7206
+	bool "Support SH7206 processor"
+	select CPU_SH2A
+
 comment "SH-3 Processor Support"
 
 config CPU_SUBTYPE_SH7300
@@ -89,6 +107,7 @@ comment "SH-4 Processor Support"
 
 config CPU_SUBTYPE_SH7750
 	bool "Support SH7750 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7750 if you have a 200 MHz SH-4 HD6417750 CPU.
 
@@ -104,15 +123,18 @@ config CPU_SUBTYPE_SH7750R
 	bool "Support SH7750R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7750S
 	bool "Support SH7750S processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7750
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7751
 	bool "Support SH7751 processor"
 	select CPU_SH4
+	select CPU_HAS_IPR_IRQ
 	help
 	  Select SH7751 if you have a 166 MHz SH-4 HD6417751 CPU,
 	  or if you have a HD6417751R CPU.
@@ -121,6 +143,7 @@ config CPU_SUBTYPE_SH7751R
 	bool "Support SH7751R processor"
 	select CPU_SH4
 	select CPU_SUBTYPE_SH7751
+	select CPU_HAS_IPR_IRQ
 
 config CPU_SUBTYPE_SH7760
 	bool "Support SH7760 processor"
@@ -157,6 +180,12 @@ config CPU_SUBTYPE_SH7780
 	select CPU_SH4A
 	select CPU_HAS_INTC2_IRQ
 
+config CPU_SUBTYPE_SH7785
+	bool "Support SH7785 processor"
+	select CPU_SH4A
+	select CPU_SHX2
+	select CPU_HAS_INTC2_IRQ
+
 comment "SH4AL-DSP Processor Support"
 
 config CPU_SUBTYPE_SH73180
@@ -167,6 +196,12 @@ config CPU_SUBTYPE_SH7343
 	bool "Support SH7343 processor"
 	select CPU_SH4AL_DSP
 
+config CPU_SUBTYPE_SH7722
+	bool "Support SH7722 processor"
+	select CPU_SH4AL_DSP
+	select CPU_SHX2
+	select CPU_HAS_IPR_IRQ
+
 endmenu
 
 menu "Memory management options"
@@ -216,13 +251,22 @@ config MEMORY_SIZE
 
 config 32BIT
 	bool "Support 32-bit physical addressing through PMB"
-	depends on CPU_SH4A && MMU
+	depends on CPU_SH4A && MMU && (!X2TLB || BROKEN)
 	default y
 	help
 	  If you say Y here, physical addressing will be extended to
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+config X2TLB
+	bool "Enable extended TLB mode"
+	depends on CPU_SHX2 && MMU && EXPERIMENTAL
+	help
+	  Selecting this option will enable the extended mode of the SH-X2
+	  TLB. For legacy SH-X behaviour and interoperability, say N. For
+	  all of the fun new features and a willingness to submit bug reports,
+	  say Y.
+
 config VSYSCALL
 	bool "Support vsyscall page"
 	depends on MMU
@@ -237,16 +281,52 @@ config VSYSCALL
 	  (the default value) say Y.
 
 choice
+	prompt "Kernel page size"
+	default PAGE_SIZE_4KB
+
+config PAGE_SIZE_4KB
+	bool "4kB"
+	help
+	  This is the default page size used by all SuperH CPUs.
+
+config PAGE_SIZE_8KB
+	bool "8kB"
+	depends on EXPERIMENTAL && X2TLB
+	help
+	  This enables 8kB pages as supported by SH-X2 and later MMUs.
+
+config PAGE_SIZE_64KB
+	bool "64kB"
+	depends on EXPERIMENTAL && CPU_SH4
+	help
+	  This enables support for 64kB pages, possible on all SH-4
+	  CPUs and later. Highly experimental, not recommended.
+
+endchoice
+
+choice
 	prompt "HugeTLB page size"
 	depends on HUGETLB_PAGE && CPU_SH4 && MMU
 	default HUGETLB_PAGE_SIZE_64K
 
 config HUGETLB_PAGE_SIZE_64K
-	bool "64K"
+	bool "64kB"
+
+config HUGETLB_PAGE_SIZE_256K
+	bool "256kB"
+	depends on X2TLB
 
 config HUGETLB_PAGE_SIZE_1MB
 	bool "1MB"
 
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+	depends on X2TLB
+
+config HUGETLB_PAGE_SIZE_64MB
+	bool "64MB"
+	depends on X2TLB
+
 endchoice
 
 source "mm/Kconfig"
@@ -274,7 +354,6 @@ config SH_DIRECT_MAPPED
 
 config SH_WRITETHROUGH
 	bool "Use write-through caching"
-	default y if CPU_SH2
 	help
 	  Selecting this option will configure the caches in write-through
 	  mode, as opposed to the default write-back configuration.
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index 2689cb24ea2b..6614033f6be9 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -5,6 +5,7 @@
 *
 * Released under the terms of the GNU GPL v2.0.
 */
+
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -14,37 +15,43 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-/*
- * Calculate the OC address and set the way bit on the SH-2.
- *
- * We must have already jump_to_P2()'ed prior to calling this
- * function, since we rely on CCR manipulation to do the
- * Right Thing(tm).
- */
-unsigned long __get_oc_addr(unsigned long set, unsigned long way)
+void __flush_wback_region(void *start, int size)
 {
-	unsigned long ccr;
-
-	/*
-	 * On SH-2 the way bit isn't tracked in the address field
-	 * if we're doing address array access .. instead, we need
-	 * to manually switch out the way in the CCR.
-	 */
-	ccr = ctrl_inl(CCR);
-	ccr &= ~0x00c0;
-	ccr |= way << cpu_data->dcache.way_shift;
-
-	/*
-	 * Despite the number of sets being halved, we end up losing
-	 * the first 2 ways to OCRAM instead of the last 2 (if we're
-	 * 4-way). As a result, forcibly setting the W1 bit handily
-	 * bumps us up 2 ways.
-	 */
-	if (ccr & CCR_CACHE_ORA)
-		ccr |= 1 << (cpu_data->dcache.way_shift + 1);
-
-	ctrl_outl(ccr, CCR);
-
-	return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift);
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		/* FIXME cache purge */
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
+
+void __flush_purge_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
+}
+
+void __flush_invalidate_region(void *start, int size)
+{
+	unsigned long v;
+	unsigned long begin, end;
+
+	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
+	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
+		& ~(L1_CACHE_BYTES-1);
+	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
+		ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008);
+	}
 }
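All three __flush_*_region() helpers added above share the same arithmetic: round the start of the region down and its end up to L1 cache-line boundaries, then visit one line per iteration. A runnable sketch of just that arithmetic (the 16-byte line size is an assumption for illustration; the kernel uses its real L1_CACHE_BYTES):

```c
#include <stdio.h>

#define L1_CACHE_BYTES 16UL     /* assumed line size, for illustration only */

int main(void)
{
        unsigned long start = 0x1003, size = 100;
        unsigned long begin, end, v;

        /* Round down/up to cache-line boundaries, as the helpers above do. */
        begin = start & ~(L1_CACHE_BYTES - 1);
        end = (start + size + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

        for (v = begin; v < end; v += L1_CACHE_BYTES)
                printf("would write the address array for line %#lx\n", v);

        return 0;
}
```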
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index e48cc22724d9..c6955157c989 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -11,12 +11,8 @@
 */
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <asm/addrspace.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
@@ -83,9 +79,9 @@ static void __init emit_cache_params(void)
 */
 
/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits.
 */
-#define MAX_P3_SEMAPHORES	16
+#define MAX_P3_MUTEXES		16
 
-struct semaphore p3map_sem[MAX_P3_SEMAPHORES];
+struct mutex p3map_mutex[MAX_P3_MUTEXES];
 
 void __init p3_cache_init(void)
 {
@@ -111,11 +107,11 @@ void __init p3_cache_init(void)
 
	emit_cache_params();
 
-	if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
+	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
		panic("%s failed.", __FUNCTION__);
 
	for (i = 0; i < cpu_data->dcache.n_aliases; i++)
-		sema_init(&p3map_sem[i], 1);
+		mutex_init(&p3map_mutex[i]);
 }
 
 /*
@@ -229,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start,
	 */
	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
-		exec_offset = 0x20000000;
+		exec_offset = 0x20000000;
 
	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
@@ -250,7 +246,7 @@ void flush_dcache_page(struct page *page)
 
		/* Loop all the D-cache */
		n = cpu_data->dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}
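cache-sh4.c above converts the P3 mapping locks from semaphores to mutexes, and pg-sh4.c (further down) picks the lock by the alias "colour" of the user address: (address & CACHE_ALIAS) >> 12 selects which P3 window, and therefore which p3map_mutex entry, covers that alias. A runnable sketch of that index computation, with an assumed CACHE_ALIAS of 0x3000 standing in for the probed dcache alias_mask:

```c
#include <stdio.h>

#define CACHE_ALIAS 0x3000UL    /* assumed alias mask: 16kB cache, 4kB pages */

int main(void)
{
        unsigned long addrs[] = { 0x400000, 0x401000, 0x402000, 0x404000 };
        unsigned int i;

        /* Addresses with the same colour contend for the same lock. */
        for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
                printf("address %#lx -> p3map_mutex[%lu]\n",
                       addrs[i], (addrs[i] & CACHE_ALIAS) >> 12);

        return 0;
}
```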
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
index 7b96425ae270..8a706131e521 100644
--- a/arch/sh/mm/clear_page.S
+++ b/arch/sh/mm/clear_page.S
@@ -1,12 +1,12 @@
-/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $
- *
+/*
 * __clear_user_page, __clear_user, clear_page implementation of SuperH
 *
 * Copyright (C) 2001 Kaz Kojima
 * Copyright (C) 2001, 2002 Niibe Yutaka
- *
+ * Copyright (C) 2006 Paul Mundt
 */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
 * clear_page_slow
@@ -18,11 +18,11 @@
 /*
 * r0 --- scratch
 * r4 --- to
- * r5 --- to + 4096
+ * r5 --- to + PAGE_SIZE
 */
 ENTRY(clear_page_slow)
	mov	r4,r5
-	mov.w	.Llimit,r0
+	mov.l	.Llimit,r0
	add	r0,r5
	mov	#0,r0
	!
@@ -50,7 +50,7 @@ ENTRY(clear_page_slow)
	!
	rts
	 nop
-.Llimit:	.word	(4096-28)
+.Llimit:	.long	(PAGE_SIZE-28)
 
 ENTRY(__clear_user)
	!
@@ -164,10 +164,10 @@ ENTRY(__clear_user)
 * r0 --- scratch
 * r4 --- to
 * r5 --- orig_to
- * r6 --- to + 4096
+ * r6 --- to + PAGE_SIZE
 */
 ENTRY(__clear_user_page)
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
	mov	r4,r6
	add	r0,r6
	mov	#0,r0
@@ -191,7 +191,7 @@ ENTRY(__clear_user_page)
	!
	rts
	 nop
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 
 #endif
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe117c3..397c94c97315 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
 * copy_page, __copy_user_page, __copy_user implementation of SuperH
 *
 * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Toshinobu Sugioka
- *
+ * Copyright (C) 2006 Paul Mundt
 */
 #include <linux/linkage.h>
+#include <asm/page.h>
 
 /*
 * copy_page_slow
@@ -18,7 +18,7 @@
 
 /*
 * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
 * r9 --- not used
 * r10 --- to
 * r11 --- from
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow)
	mov	r4,r10
	mov	r5,r11
	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
	add	r0,r8
	!
1:	mov.l	@r11+,r0
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow)
 
 /*
 * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
 * r9 --- orig_to
 * r10 --- to
 * r11 --- from
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page)
	mov	r5,r11
	mov	r6,r9
	mov	r5,r8
-	mov.w	.L4096,r0
+	mov.l	.Lpsz,r0
	add	r0,r8
	!
1:	ocbi	@r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
	rts
	 nop
 #endif
-.L4096:	.word	4096
+.Lpsz:	.long	PAGE_SIZE
 /*
 * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
 * Return the number of bytes NOT copied
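The clear_page.S and copy_page.S changes above look cosmetic but are not: mov.w loads a sign-extended 16-bit literal, which was fine while the constant was a hard-coded 4096 but cannot represent a 64kB PAGE_SIZE, hence the switch to mov.l and .long. A runnable C illustration of why 16 bits stops being enough once the page size becomes configurable:

```c
#include <stdio.h>

int main(void)
{
        long page_sizes[] = { 4096, 8192, 65536 };
        unsigned int i;

        for (i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++) {
                /* What a sign-extended 16-bit load would make of the value. */
                short as_word = (short)page_sizes[i];

                printf("PAGE_SIZE %6ld: 16-bit signed load sees %6d (%s)\n",
                       page_sizes[i], (int)as_word,
                       as_word == page_sizes[i] ? "ok" : "broken");
        }
        return 0;
}
```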
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 68663b8f99ae..716ebf568af2 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -26,13 +26,19 @@ extern void die(const char *,struct pt_regs *,long);
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
-			      unsigned long address)
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+					unsigned long writeaccess,
+					unsigned long address)
 {
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long page;
+	int si_code;
+	siginfo_t info;
+
+	trace_hardirqs_on();
+	local_irq_enable();
 
 #ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -41,6 +47,46 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
	tsk = current;
	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
+
+	if (unlikely(address >= TASK_SIZE)) {
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+
+		pgd = get_TTB() + offset;
+		pgd_k = swapper_pg_dir + offset;
+
+		/* This will never happen with the folded page table. */
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			set_pgd(pgd, *pgd_k);
+			return;
+		}
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (pud_present(*pud) || !pud_present(*pud_k))
+			goto bad_area_nosemaphore;
+		set_pud(pud, *pud_k);
+
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+		set_pmd(pmd, *pmd_k);
+
+		return;
+	}
 
	/*
	 * If we're in an interrupt or have no user
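The block just added at the top of do_page_fault() resolves faults on kernel addresses by copying the missing top-level entry from the reference page table (swapper_pg_dir) into the faulting context's table, instead of eagerly updating every process pgd whenever a vmalloc mapping is created. A toy userspace model of that lazy-synchronization idea (plain arrays stand in for pgd_t and friends):

```c
#include <stdio.h>

#define ENTRIES 8

/* swapper_pg_dir stand-in: the authoritative kernel mappings. */
static unsigned long reference_pgd[ENTRIES] = { 0, 0, 0x1000, 0, 0x2000 };
/* A task's top-level table, synchronized lazily on first fault. */
static unsigned long task_pgd[ENTRIES];

/* Returns 0 if the fault is fixed by copying, -1 for a genuine bad access. */
static int vmalloc_fault(int idx)
{
        if (task_pgd[idx])
                return 0;                       /* already present */
        if (!reference_pgd[idx])
                return -1;                      /* bad_area_nosemaphore */
        task_pgd[idx] = reference_pgd[idx];     /* the set_pgd(pgd, *pgd_k) step */
        return 0;
}

int main(void)
{
        printf("index 4: %s\n", vmalloc_fault(4) ? "bad area" : "synced");
        printf("index 5: %s\n", vmalloc_fault(5) ? "bad area" : "synced");
        return 0;
}
```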
@@ -65,6 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
	 * we can handle it..
	 */
good_area:
+	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
@@ -104,10 +151,13 @@ survive:
 
bad_area:
	up_read(&mm->mmap_sem);
+bad_area_nosemaphore:
	if (user_mode(regs)) {
-		tsk->thread.address = address;
-		tsk->thread.error_code = writeaccess;
-		force_sig(SIGSEGV, tsk);
+		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = si_code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}
 
@@ -127,11 +177,9 @@ no_context:
	printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
-	asm volatile("mov.l	%1, %0"
-		     : "=r" (page)
-		     : "m" (__m(MMU_TTB)));
+	page = (unsigned long)get_TTB();
	if (page) {
-		page = ((unsigned long *) page)[address >> 22];
+		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
@@ -166,98 +214,13 @@ do_sigbus:
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
-	tsk->thread.address = address;
-	tsk->thread.error_code = writeaccess;
-	tsk->thread.trap_no = 14;
-	force_sig(SIGBUS, tsk);
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRERR;
+	info.si_addr = (void *)address;
+	force_sig_info(SIGBUS, &info, tsk);
 
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX		P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
-	int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
-			return 1;
-
-		pgd = pgd_offset(mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
-	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
-
-	if (writeaccess)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
-	 */
-	__flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_pte(pte, entry);
-	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
-}
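With force_sig_info() in place of force_sig() above, userspace finally receives a populated siginfo_t: si_code distinguishes an unmapped access (SEGV_MAPERR) from a protection violation (SEGV_ACCERR), and si_addr carries the faulting address. A small runnable consumer showing what a handler now gets to see (it deliberately faults, prints, and exits from the handler; printf in a signal handler is fine for a demo, not for production):

```c
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *context)
{
        /* si_addr and si_code are exactly the fields do_page_fault fills in. */
        printf("SIGSEGV at %p, si_code=%d\n", info->si_addr, info->si_code);
        _exit(0);
}

int main(void)
{
        struct sigaction sa = { 0 };

        sa.sa_sigaction = segv_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, NULL);

        *(volatile int *)0x10 = 42;     /* provoke an unmapped write */
        return 1;
}
```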
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 329059d6b54a..cf2c2ee35a37 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -63,6 +63,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
	return pte;
 }
 
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
 struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
 {
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7154d1ce9785..29bd37b1488e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -77,6 +77,7 @@ void show_mem(void)
	printk("%d pages swap cached\n",cached);
 }
 
+#ifdef CONFIG_MMU
 static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 {
	pgd_t *pgd;
@@ -84,30 +85,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
	pmd_t *pmd;
	pte_t *pte;
 
-	pgd = swapper_pg_dir + pgd_index(addr);
+	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud)) {
-		pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
-		if (pmd != pmd_offset(pud, 0)) {
-			pud_ERROR(*pud);
-			return;
-		}
+	pud = pud_alloc(NULL, pgd, addr);
+	if (unlikely(!pud)) {
+		pud_ERROR(*pud);
+		return;
	}
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd)) {
-		pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
-		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
-		if (pte != pte_offset_kernel(pmd, 0)) {
-			pmd_ERROR(*pmd);
-			return;
-		}
+	pmd = pmd_alloc(NULL, pud, addr);
+	if (unlikely(!pmd)) {
+		pmd_ERROR(*pmd);
+		return;
	}
 
	pte = pte_offset_kernel(pmd, addr);
@@ -147,6 +140,7 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 
	set_pte_phys(address, phys, prot);
 }
+#endif	/* CONFIG_MMU */
 
 /* References to section boundaries */
 
@@ -155,9 +149,6 @@ extern char __init_begin, __init_end;
 
 /*
 * paging_init() sets up the page tables
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
 */
 void __init paging_init(void)
 {
@@ -180,14 +171,11 @@ void __init paging_init(void)
	 */
	{
		unsigned long max_dma, low, start_pfn;
-		pgd_t *pg_dir;
-		int i;
-
-		/* We don't need kernel mapping as hardware support that. */
-		pg_dir = swapper_pg_dir;
-
-		for (i = 0; i < PTRS_PER_PGD; i++)
-			pgd_val(pg_dir[i]) = 0;
+		/* We don't need to map the kernel through the TLB, as
+		 * it is permanently mapped using P1. So clear the
+		 * entire pgd. */
+		memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
		/* Turn on the MMU */
		enable_mmu();
@@ -206,6 +194,10 @@ void __init paging_init(void)
		}
	}
 
+	/* Set an initial value for the MMU.TTB so we don't have to
+	 * check for a null value. */
+	set_TTB(swapper_pg_dir);
+
 #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
	/*
	 * If we don't have CONFIG_MMU set and the processor in question
@@ -227,7 +219,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	extern unsigned long empty_zero_page[1024];
	int codesize, reservedpages, datasize, initsize;
	int tmp;
	extern unsigned long memory_start;
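The memset of swapper_pg_dir above leans on the SuperH segment layout the new comment references: the kernel itself runs from P1, a fixed cached window onto physical memory that needs no TLB entries, so the pgd can start out empty. A runnable sketch of the 29-bit segment arithmetic (the macros are written out here for illustration; the kernel's own versions live in asm/addrspace.h):

```c
#include <stdio.h>

/* 29-bit physical space: P1 is the cached identity window, P2 its uncached twin. */
#define P1SEG     0x80000000UL
#define P2SEG     0xa0000000UL
#define PHYS_MASK 0x1fffffffUL

#define P1SEGADDR(a) ((((unsigned long)(a)) & PHYS_MASK) | P1SEG)
#define P2SEGADDR(a) ((((unsigned long)(a)) & PHYS_MASK) | P2SEG)

int main(void)
{
        unsigned long phys = 0x0c001000UL;      /* arbitrary RAM address */

        printf("phys %#lx -> P1 %#lx (cached), P2 %#lx (uncached)\n",
               phys, P1SEGADDR(phys), P2SEGADDR(phys));
        return 0;
}
```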
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a9fe80cfc233..90b494a0cf45 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -16,99 +16,13 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pci.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static inline void remap_area_pte(pte_t * pte, unsigned long address,
-	unsigned long size, unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW |
-				   _PAGE_DIRTY | _PAGE_ACCESSED |
-				   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags);
-
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	if (address >= end)
-		BUG();
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, pfn_pte(pfn, pgprot));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
-	unsigned long size, unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	phys_addr -= address;
-	if (address >= end)
-		BUG();
-	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-int remap_area_pages(unsigned long address, unsigned long phys_addr,
-		     unsigned long size, unsigned long flags)
-{
-	int error;
-	pgd_t * dir;
-	unsigned long end = address + size;
-
-	phys_addr -= address;
-	dir = pgd_offset_k(address);
-	flush_cache_all();
-	if (address >= end)
-		BUG();
-	do {
-		pud_t *pud;
-		pmd_t *pmd;
-
-		error = -ENOMEM;
-
-		pud = pud_alloc(&init_mm, dir, address);
-		if (!pud)
-			break;
-		pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			break;
-		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags))
-			break;
-		error = 0;
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_all();
-	return error;
-}
-
 /*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
@@ -123,6 +37,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 {
	struct vm_struct * area;
	unsigned long offset, last_addr, addr, orig_addr;
+	pgprot_t pgprot;
 
	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
@@ -192,8 +107,9 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
	}
 #endif
 
+	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size))
-		if (remap_area_pages(addr, phys_addr, size, flags)) {
+		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}
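remap_area_pages() and its private page-table walkers are gone above; __ioremap() now delegates to the generic ioremap_page_range(). Nothing changes for callers, who keep going through ioremap()/iounmap(). A module-style sketch of that consumer side (DEMO_PHYS is a made-up address and this is an illustration, not a tested driver):

```c
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/io.h>

#define DEMO_PHYS 0xa4000000UL  /* hypothetical device window */
#define DEMO_LEN  0x100UL

static int __init demo_init(void)
{
        /* On SH this lands in __ioremap(), now backed by ioremap_page_range(). */
        void __iomem *base = ioremap(DEMO_PHYS, DEMO_LEN);

        if (!base)
                return -ENOMEM;

        printk(KERN_INFO "demo: %#lx mapped at %p\n", DEMO_PHYS, base);
        iounmap(base);          /* tear the mapping back down */
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```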
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
index 1406d2e348ca..bb23679369d6 100644
--- a/arch/sh/mm/pg-dma.c
+++ b/arch/sh/mm/pg-dma.c
@@ -39,8 +39,6 @@ static void copy_page_dma(void *to, void *from)
 
 static void clear_page_dma(void *to)
 {
-	extern unsigned long empty_zero_page[1024];
-
	/*
	 * We get invoked quite early on, if the DMAC hasn't been initialized
	 * yet, fall back on the slow manual implementation.
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 07371ed7a313..3f98d2a4f936 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -6,22 +6,12 @@
 *
 * Released under the terms of the GNU GPL v2.0.
 */
-#include <linux/init.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/threads.h>
-#include <asm/addrspace.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
+#include <linux/mutex.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-extern struct semaphore p3map_sem[];
+extern struct mutex p3map_mutex[];
 
 #define CACHE_ALIAS (cpu_data->dcache.alias_mask)
 
@@ -37,10 +27,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		clear_page(to);
	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -50,8 +36,8 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
		pte_t entry;
		unsigned long flags;
 
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
@@ -59,7 +45,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
		update_mmu_cache(NULL, p3_addr, entry);
		__clear_user_page((void *)p3_addr, to);
		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
 }
 
@@ -77,10 +63,6 @@ void copy_user_page(void *to, void *from, unsigned long address,
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
-		pgprot_t pgprot = __pgprot(_PAGE_PRESENT |
-					   _PAGE_RW | _PAGE_CACHABLE |
-					   _PAGE_DIRTY | _PAGE_ACCESSED |
-					   _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
@@ -90,8 +72,8 @@ void copy_user_page(void *to, void *from, unsigned long address,
		pte_t entry;
		unsigned long flags;
-		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
-		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
+		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
@@ -99,7 +81,7 @@ void copy_user_page(void *to, void *from, unsigned long address,
		update_mmu_cache(NULL, p3_addr, entry);
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
-		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
+		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
 }
 
@@ -122,4 +104,3 @@ inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t
	}
	return pte;
 }
-
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 92e745341e4d..b60ad83a7635 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -30,7 +30,7 @@
 
 #define NR_PMB_ENTRIES	16
 
-static kmem_cache_t *pmb_cache;
+static struct kmem_cache *pmb_cache;
 static unsigned long pmb_map;
 
 static struct pmb_entry pmb_init_map[] = {
@@ -283,7 +283,7 @@ void pmb_unmap(unsigned long addr)
	} while (pmbe);
 }
 
-static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
	struct pmb_entry *pmbe = pmb;
 
@@ -297,7 +297,7 @@ static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
	spin_unlock_irq(&pmb_list_lock);
 }
 
-static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
 {
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmb);
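The pmb.c hunk above is purely the 2.6.20-era type rename (the kmem_cache_t typedef gives way to struct kmem_cache); the ctor/dtor callback signatures are unchanged. For reference, a hedged kernel-context sketch of how a cache with such callbacks is created against that era's API (the names and SLAB_PANIC flag are illustrative, not pmb.c's literal code):

```c
#include <linux/slab.h>
#include <linux/string.h>

struct demo_entry {
        unsigned long vpn, ppn, flags;
};

static struct kmem_cache *demo_cache;

/* 2.6.20-era constructor signature: object, cache, constructor flags. */
static void demo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
        memset(obj, 0, sizeof(struct demo_entry));
}

static int __init demo_cache_init(void)
{
        /* ctor and dtor are still passed at create time in this era. */
        demo_cache = kmem_cache_create("demo", sizeof(struct demo_entry),
                                       0, SLAB_PANIC, demo_ctor, NULL);
        return 0;
}
```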