Diffstat (limited to 'arch/microblaze/mm')
-rw-r--r--   arch/microblaze/mm/Makefile        2
-rw-r--r--   arch/microblaze/mm/fault.c       304
-rw-r--r--   arch/microblaze/mm/init.c        169
-rw-r--r--   arch/microblaze/mm/mmu_context.c  70
-rw-r--r--   arch/microblaze/mm/pgtable.c     286
5 files changed, 821 insertions, 10 deletions
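Before the diff itself: the new fault handler below leans on the kernel's exception-table mechanism. When a kernel-mode access faults, bad_page_fault() looks the faulting program counter up in a table of (faulting instruction address, fixup address) pairs and, if an entry exists, resumes execution at the fixup instead of oopsing. The following stand-alone user-space sketch only models that lookup; the structure name and the sorted-array search mirror the kernel idea, and the addresses are made-up integers, not real kernel values.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative model of an exception-table entry: the address of an
 * instruction that may fault, paired with the address to resume at. */
struct exception_table_entry {
	unsigned long insn;   /* address of the potentially faulting access */
	unsigned long fixup;  /* address to branch to if it faults */
};

/* The table must be sorted by insn so it can be binary-searched. */
static struct exception_table_entry extable[] = {
	{ 0x1000, 0x2000 },
	{ 0x1010, 0x2004 },
	{ 0x1024, 0x2010 },
};

static int cmp_entry(const void *key, const void *elem)
{
	unsigned long pc = *(const unsigned long *)key;
	const struct exception_table_entry *e = elem;

	if (pc < e->insn)
		return -1;
	if (pc > e->insn)
		return 1;
	return 0;
}

/* Model of search_exception_tables(): return the entry for pc, or NULL. */
static const struct exception_table_entry *search_extable_model(unsigned long pc)
{
	return bsearch(&pc, extable, sizeof(extable) / sizeof(extable[0]),
		       sizeof(extable[0]), cmp_entry);
}

int main(void)
{
	unsigned long pc = 0x1010;   /* pretend this instruction just faulted */
	const struct exception_table_entry *fixup = search_extable_model(pc);

	if (fixup)
		printf("fault at %#lx -> resume at %#lx\n", pc, fixup->fixup);
	else
		printf("fault at %#lx -> no fixup, would oops\n", pc);
	return 0;
}

In the patch, bad_page_fault() performs the same kind of lookup via search_exception_tables(regs->pc) and rewrites regs->pc to the fixup address, so a faulting user-copy style access fails gracefully instead of killing the kernel.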
diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile
index bf9e4479a1fd..6c8a924d9e26 100644
--- a/arch/microblaze/mm/Makefile
+++ b/arch/microblaze/mm/Makefile
@@ -3,3 +3,5 @@
 #
 
 obj-y := init.o
+
+obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
new file mode 100644
index 000000000000..5e67cd1fab40
--- /dev/null
+++ b/arch/microblaze/mm/fault.c
@@ -0,0 +1,304 @@
+/*
+ * arch/microblaze/mm/fault.c
+ *
+ * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
+ *
+ * Derived from "arch/ppc/mm/fault.c"
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Derived from "arch/i386/mm/fault.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Modified by Cort Dougan and Paul Mackerras.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <asm/exceptions.h>
+
+#if defined(CONFIG_KGDB)
+int debugger_kernel_faults = 1;
+#endif
+
+static unsigned long pte_misses;	/* updated by do_page_fault() */
+static unsigned long pte_errors;	/* updated by do_page_fault() */
+
+/*
+ * Check whether the instruction at regs->pc is a store using
+ * an update addressing form which will update r1.
+ */
+static int store_updates_sp(struct pt_regs *regs)
+{
+	unsigned int inst;
+
+	if (get_user(inst, (unsigned int *)regs->pc))
+		return 0;
+	/* check for 1 in the rD field */
+	if (((inst >> 21) & 0x1f) != 1)
+		return 0;
+	/* check for store opcodes */
+	if ((inst & 0xd0000000) == 0xd0000000)
+		return 1;
+	return 0;
+}
+
+
+/*
+ * bad_page_fault is called when we have a bad access from the kernel.
+ * It is called from do_page_fault above and from some of the procedures
+ * in traps.c.
+ */
+static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+{
+	const struct exception_table_entry *fixup;
+/* MS: no context */
+	/* Are we prepared to handle this fault? */
+	fixup = search_exception_tables(regs->pc);
+	if (fixup) {
+		regs->pc = fixup->fixup;
+		return;
+	}
+
+	/* kernel has accessed a bad area */
+#if defined(CONFIG_KGDB)
+	if (debugger_kernel_faults)
+		debugger(regs);
+#endif
+	die("kernel access of bad area", regs, sig);
+}
+
+/*
+ * The error_code parameter is ESR for a data fault,
+ * 0 for an instruction fault.
+ */
+void do_page_fault(struct pt_regs *regs, unsigned long address,
+		   unsigned long error_code)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	siginfo_t info;
+	int code = SEGV_MAPERR;
+	int is_write = error_code & ESR_S;
+	int fault;
+
+	regs->ear = address;
+	regs->esr = error_code;
+
+	/* On a kernel SLB miss we can only check for a valid exception entry */
+	if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+		printk(KERN_WARNING "kernel task_size exceed");
+		_exception(SIGSEGV, regs, code, address);
+	}
+
+	/* for instr TLB miss and instr storage exception ESR_S is undefined */
+	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+		is_write = 0;
+
+#if defined(CONFIG_KGDB)
+	if (debugger_fault_handler && regs->trap == 0x300) {
+		debugger_fault_handler(regs);
+		return;
+	}
+#endif /* CONFIG_KGDB */
+
+	if (in_atomic() || mm == NULL) {
+		/* FIXME */
+		if (kernel_mode(regs)) {
+			printk(KERN_EMERG
+				"Page fault in kernel mode - Oooou!!! pid %d\n",
+				current->pid);
+			_exception(SIGSEGV, regs, code, address);
+			return;
+		}
+		/* in_atomic() in user mode is really bad,
+		   as is current->mm == NULL. */
+		printk(KERN_EMERG "Page fault in user mode with "
+			"in_atomic(), mm = %p\n", mm);
+		printk(KERN_EMERG "r15 = %lx MSR = %lx\n",
+			regs->r15, regs->msr);
+		die("Weird page fault", regs, SIGSEGV);
+	}
+
+	/* When running in the kernel we expect faults to occur only to
+	 * addresses in user space. All other faults represent errors in the
+	 * kernel and should generate an OOPS. Unfortunately, in the case of an
+	 * erroneous fault occurring in a code path which already holds mmap_sem
+	 * we will deadlock attempting to validate the fault against the
+	 * address space. Luckily the kernel only validly references user
+	 * space from well defined areas of code, which are listed in the
+	 * exceptions table.
+	 *
+	 * As the vast majority of faults will be valid we will only perform
+	 * the source reference check when there is a possibility of a deadlock.
+	 * Attempt to lock the address space, if we cannot we then validate the
+	 * source. If this is invalid we can skip the address space check,
+	 * thus avoiding the deadlock.
+	 */
+	if (!down_read_trylock(&mm->mmap_sem)) {
+		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
+			goto bad_area_nosemaphore;
+
+		down_read(&mm->mmap_sem);
+	}
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+
+	if (vma->vm_start <= address)
+		goto good_area;
+
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+
+	if (!is_write)
+		goto bad_area;
+
+	/*
+	 * N.B. The ABI allows programs to access up to
+	 * a few hundred bytes below the stack pointer (TBD).
+	 * The kernel signal delivery code writes up to about 1.5kB
+	 * below the stack pointer (r1) before decrementing it.
+	 * The exec code can write slightly over 640kB to the stack
+	 * before setting the user r1. Thus we allow the stack to
+	 * expand to 1MB without further checks.
+	 */
+	if (address + 0x100000 < vma->vm_end) {
+
+		/* get user regs even if this fault is in kernel mode */
+		struct pt_regs *uregs = current->thread.regs;
+		if (uregs == NULL)
+			goto bad_area;
+
+		/*
+		 * A user-mode access to an address a long way below
+		 * the stack pointer is only valid if the instruction
+		 * is one which would update the stack pointer to the
+		 * address accessed if the instruction completed,
+		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
+		 * (or the byte, halfword, float or double forms).
+		 *
+		 * If we don't check this then any write to the area
+		 * between the last mapped region and the stack will
+		 * expand the stack rather than segfaulting.
+		 */
+		if (address + 2048 < uregs->r1
+		    && (kernel_mode(regs) || !store_updates_sp(regs)))
+			goto bad_area;
+	}
+	if (expand_stack(vma, address))
+		goto bad_area;
+
+good_area:
+	code = SEGV_ACCERR;
+
+	/* a write */
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	/* a read */
+	} else {
+		/* protection fault */
+		if (error_code & 0x08000000)
+			goto bad_area;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+survive:
+	fault = handle_mm_fault(mm, vma, address, is_write);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+	if (fault & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
+	up_read(&mm->mmap_sem);
+	/*
+	 * keep track of tlb+htab misses that are good addrs but
+	 * just need pte's created via handle_mm_fault()
+	 * -- Cort
+	 */
+	pte_misses++;
+	return;
+
+bad_area:
+	up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+	pte_errors++;
+
+	/* User mode accesses cause a SIGSEGV */
+	if (user_mode(regs)) {
+		_exception(SIGSEGV, regs, code, address);
+/*		info.si_signo = SIGSEGV;
+		info.si_errno = 0;
+		info.si_code = code;
+		info.si_addr = (void *) address;
+		force_sig_info(SIGSEGV, &info, current);*/
+		return;
+	}
+
+	bad_page_fault(regs, address, SIGSEGV);
+	return;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+	if (current->pid == 1) {
+		yield();
+		down_read(&mm->mmap_sem);
+		goto survive;
+	}
+	up_read(&mm->mmap_sem);
+	printk(KERN_WARNING "VM: killing process %s\n", current->comm);
+	if (user_mode(regs))
+		do_exit(SIGKILL);
+	bad_page_fault(regs, address, SIGKILL);
+	return;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+	if (user_mode(regs)) {
+		info.si_signo = SIGBUS;
+		info.si_errno = 0;
+		info.si_code = BUS_ADRERR;
+		info.si_addr = (void __user *)address;
+		force_sig_info(SIGBUS, &info, current);
+		return;
+	}
+	bad_page_fault(regs, address, SIGBUS);
+}
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index b0c8213cd6cf..b5a701cd71e0 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -23,8 +23,16 @@
 #include <asm/sections.h>
 #include <asm/tlb.h>
 
+#ifndef CONFIG_MMU
 unsigned int __page_offset;
-/* EXPORT_SYMBOL(__page_offset); */
+EXPORT_SYMBOL(__page_offset);
+
+#else
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+int mem_init_done;
+static int init_bootmem_done;
+#endif /* CONFIG_MMU */
 
 char *klimit = _end;
 
@@ -32,28 +40,26 @@ char *klimit = _end;
  * Initialize the bootmem system and give it all the memory we
  * have available.
  */
-unsigned int memory_start;
-unsigned int memory_end; /* due to mm/nommu.c */
-unsigned int memory_size;
+unsigned long memory_start;
+unsigned long memory_end; /* due to mm/nommu.c */
+unsigned long memory_size;
 
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
  */
 static void __init paging_init(void)
 {
-	int i;
 	unsigned long zones_size[MAX_NR_ZONES];
 
+	/* Clean every zones */
+	memset(zones_size, 0, sizeof(zones_size));
+
 	/*
 	 * old: we can DMA to/from any address.put all page into ZONE_DMA
 	 * We use only ZONE_NORMAL
 	 */
 	zones_size[ZONE_NORMAL] = max_mapnr;
 
-	/* every other zones are empty */
-	for (i = 1; i < MAX_NR_ZONES; i++)
-		zones_size[i] = 0;
-
 	free_area_init(zones_size);
 }
 
@@ -61,6 +67,7 @@ void __init setup_memory(void)
 {
 	int i;
 	unsigned long map_size;
+#ifndef CONFIG_MMU
 	u32 kernel_align_start, kernel_align_size;
 
 	/* Find main memory where is the kernel */
@@ -93,6 +100,7 @@ void __init setup_memory(void)
 		__func__, kernel_align_start, kernel_align_start
 			+ kernel_align_size, kernel_align_size);
+#endif
 
 	/*
 	 * Kernel:
 	 * start: base phys address of kernel - page align
@@ -121,9 +129,13 @@ void __init setup_memory(void)
 	 * for 4GB of memory, using 4kB pages), plus 1 page
 	 * (in case the address isn't page-aligned).
 	 */
+#ifndef CONFIG_MMU
 	map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)_end)),
 					min_low_pfn, max_low_pfn);
-
+#else
+	map_size = init_bootmem_node(&contig_page_data,
+		PFN_UP(TOPHYS((u32)_end)), min_low_pfn, max_low_pfn);
+#endif
 	lmb_reserve(PFN_UP(TOPHYS((u32)_end)) << PAGE_SHIFT, map_size);
 
 	/* free bootmem is whole main memory */
@@ -137,6 +149,9 @@ void __init setup_memory(void)
 		reserve_bootmem(lmb.reserved.region[i].base,
 			lmb_size_bytes(&lmb.reserved, i) - 1, BOOTMEM_DEFAULT);
 	}
+#ifdef CONFIG_MMU
+	init_bootmem_done = 1;
+#endif
 
 	paging_init();
 }
@@ -191,11 +206,145 @@ void __init mem_init(void)
 	printk(KERN_INFO "Memory: %luk/%luk available\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		num_physpages << (PAGE_SHIFT-10));
+#ifdef CONFIG_MMU
+	mem_init_done = 1;
+#endif
 }
 
+#ifndef CONFIG_MMU
 /* Check against bounds of physical memory */
 int ___range_ok(unsigned long addr, unsigned long size)
 {
 	return ((addr < memory_start) ||
 		((addr + size) > memory_end));
 }
+EXPORT_SYMBOL(___range_ok);
+
+#else
+int page_is_ram(unsigned long pfn)
+{
+	return pfn < max_low_pfn;
+}
+
+/*
+ * Check for command-line options that affect what MMU_init will do.
+ */
+static void mm_cmdline_setup(void)
+{
+	unsigned long maxmem = 0;
+	char *p = cmd_line;
+
+	/* Look for mem= option on command line */
+	p = strstr(cmd_line, "mem=");
+	if (p) {
+		p += 4;
+		maxmem = memparse(p, &p);
+		if (maxmem && memory_size > maxmem) {
+			memory_size = maxmem;
+			memory_end = memory_start + memory_size;
+			lmb.memory.region[0].size = memory_size;
+		}
+	}
+}
+
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+static void __init mmu_init_hw(void)
+{
+	/*
+	 * The Zone Protection Register (ZPR) defines how protection will
+	 * be applied to every page which is a member of a given zone. At
+	 * present, we utilize only two of the zones.
+	 * The zone index bits (of ZSEL) in the PTE are used for software
+	 * indicators, except the LSB. For user access, zone 1 is used,
+	 * for kernel access, zone 0 is used. We set all but zone 1
+	 * to zero, allowing only kernel access as indicated in the PTE.
+	 * For zone 1, we set a 01 binary (a value of 10 will not work)
+	 * to allow user access as indicated in the PTE. This also allows
+	 * kernel access as indicated in the PTE.
+	 */
+	__asm__ __volatile__ ("ori r11, r0, 0x10000000;" \
+			"mts rzpr, r11;"
+			: : : "r11");
+}
+
+/*
+ * MMU_init sets up the basic memory mappings for the kernel,
+ * including both RAM and possibly some I/O regions,
+ * and sets up the page tables and the MMU hardware ready to go.
+ */
+
+/* called from head.S */
+asmlinkage void __init mmu_init(void)
+{
+	unsigned int kstart, ksize;
+
+	if (!lmb.reserved.cnt) {
+		printk(KERN_EMERG "Error memory count\n");
+		machine_restart(NULL);
+	}
+
+	if ((u32) lmb.memory.region[0].size < 0x1000000) {
+		printk(KERN_EMERG "Memory must be greater than 16MB\n");
+		machine_restart(NULL);
+	}
+	/* Find main memory where the kernel is */
+	memory_start = (u32) lmb.memory.region[0].base;
+	memory_end = (u32) lmb.memory.region[0].base +
+			(u32) lmb.memory.region[0].size;
+	memory_size = memory_end - memory_start;
+
+	mm_cmdline_setup(); /* FIXME parse args from command line - not used */
+
+	/*
+	 * Map out the kernel text/data/bss from the available physical
+	 * memory.
+	 */
+	kstart = __pa(CONFIG_KERNEL_START); /* kernel start */
+	/* kernel size */
+	ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START));
+	lmb_reserve(kstart, ksize);
+
+#if defined(CONFIG_BLK_DEV_INITRD)
+	/* Remove the init RAM disk from the available memory. */
+/*	if (initrd_start) {
+		mem_pieces_remove(&phys_avail, __pa(initrd_start),
+				  initrd_end - initrd_start, 1);
+	}*/
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+	/* Initialize the MMU hardware */
+	mmu_init_hw();
+
+	/* Map in all of RAM starting at CONFIG_KERNEL_START */
+	mapin_ram();
+
+#ifdef HIGHMEM_START_BOOL
+	ioremap_base = HIGHMEM_START;
+#else
+	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
+#endif /* CONFIG_HIGHMEM */
+	ioremap_bot = ioremap_base;
+
+	/* Initialize the context management stuff */
+	mmu_context_init();
+}
+
+/* This is only called until mem_init is done. */
+void __init *early_get_page(void)
+{
+	void *p;
+	if (init_bootmem_done) {
+		p = alloc_bootmem_pages(PAGE_SIZE);
+	} else {
+		/*
+		 * Mem start + 32MB -> here is limit
+		 * because of mem mapping from head.S
+		 */
+		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+					memory_start + 0x2000000));
+	}
+	return p;
+}
+#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/mm/mmu_context.c b/arch/microblaze/mm/mmu_context.c
new file mode 100644
index 000000000000..26ff82f4fa8f
--- /dev/null
+++ b/arch/microblaze/mm/mmu_context.c
@@ -0,0 +1,70 @@
+/*
+ * This file contains the routines for handling the MMU.
+ *
+ * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
+ *
+ * Derived from arch/ppc/mm/4xx_mmu.c:
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+	/*
+	 * The use of context zero is reserved for the kernel.
+	 * This code assumes FIRST_CONTEXT < 32.
+	 */
+	context_map[0] = (1 << FIRST_CONTEXT) - 1;
+	next_mmu_context = FIRST_CONTEXT;
+	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
+
+/*
+ * Steal a context from a task that has one at the moment.
+ *
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone were motivated to do it.
+ */
+void steal_context(void)
+{
+	struct mm_struct *mm;
+
+	/* free up context `next_mmu_context' */
+	/* if we shouldn't free context 0, don't... */
+	if (next_mmu_context < FIRST_CONTEXT)
+		next_mmu_context = FIRST_CONTEXT;
+	mm = context_mm[next_mmu_context];
+	flush_tlb_mm(mm);
+	destroy_context(mm);
+}
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
new file mode 100644
index 000000000000..46c4ca5d15c5
--- /dev/null
+++ b/arch/microblaze/mm/pgtable.c
@@ -0,0 +1,286 @@
+/*
+ * This file contains the routines setting up the linux page tables.
+ *
+ * Copyright (C) 2008 Michal Simek
+ * Copyright (C) 2008 PetaLogix
+ *
+ * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
+ *
+ * Derived from arch/ppc/mm/pgtable.c:
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <linux/io.h>
+#include <asm/mmu.h>
+#include <asm/sections.h>
+
+#define flush_HPTE(X, va, pg)	_tlbie(va)
+
+unsigned long ioremap_base;
+unsigned long ioremap_bot;
+
+/* The maximum lowmem defaults to 768Mb, but this can be configured to
+ * another value.
+ */
+#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
+
+#ifndef CONFIG_SMP
+struct pgtable_cache_struct quicklists;
+#endif
+
+static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
+		unsigned long flags)
+{
+	unsigned long v, i;
+	phys_addr_t p;
+	int err;
+
+	/*
+	 * Choose an address to map it to.
+	 * Once the vmalloc system is running, we use it.
+	 * Before then, we use space going down from ioremap_base
+	 * (ioremap_bot records where we're up to).
+	 */
+	p = addr & PAGE_MASK;
+	size = PAGE_ALIGN(addr + size) - p;
+
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using.
+	 * mem_init() sets high_memory so only do the check after that.
+	 *
+	 * However, allow remap of rootfs: TBD
+	 */
+	if (mem_init_done &&
+		p >= memory_start && p < virt_to_phys(high_memory) &&
+		!(p >= virt_to_phys((unsigned long)&__bss_stop) &&
+		p < virt_to_phys((unsigned long)__bss_stop))) {
+		printk(KERN_WARNING "__ioremap(): phys addr "PTE_FMT
+			" is RAM lr %p\n", (unsigned long)p,
+			__builtin_return_address(0));
+		return NULL;
+	}
+
+	if (size == 0)
+		return NULL;
+
+	/*
+	 * Is it already mapped? If the whole area is mapped then we're
+	 * done, otherwise remap it since we want to keep the virt addrs for
+	 * each request contiguous.
+	 *
+	 * We make the assumption here that if the bottom and top
+	 * of the range we want are mapped then it's mapped to the
+	 * same virt address (and this is contiguous).
+	 * -- Cort
+	 */
+
+	if (mem_init_done) {
+		struct vm_struct *area;
+		area = get_vm_area(size, VM_IOREMAP);
+		if (area == NULL)
+			return NULL;
+		v = VMALLOC_VMADDR(area->addr);
+	} else {
+		v = (ioremap_bot -= size);
+	}
+
+	if ((flags & _PAGE_PRESENT) == 0)
+		flags |= _PAGE_KERNEL;
+	if (flags & _PAGE_NO_CACHE)
+		flags |= _PAGE_GUARDED;
+
+	err = 0;
+	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
+		err = map_page(v + i, p + i, flags);
+	if (err) {
+		if (mem_init_done)
+			vfree((void *)v);
+		return NULL;
+	}
+
+	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
+}
+
+void __iomem *ioremap(phys_addr_t addr, unsigned long size)
+{
+	return __ioremap(addr, size, _PAGE_NO_CACHE);
+}
+EXPORT_SYMBOL(ioremap);
+
+void iounmap(void *addr)
+{
+	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
+		vfree((void *) (PAGE_MASK & (unsigned long) addr));
+}
+EXPORT_SYMBOL(iounmap);
+
+
+int map_page(unsigned long va, phys_addr_t pa, int flags)
+{
+	pmd_t *pd;
+	pte_t *pg;
+	int err = -ENOMEM;
+	/* spin_lock(&init_mm.page_table_lock); */
+	/* Use upper 10 bits of VA to index the first level map */
+	pd = pmd_offset(pgd_offset_k(va), va);
+	/* Use middle 10 bits of VA to index the second-level map */
+	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
+	/* pg = pte_alloc_kernel(&init_mm, pd, va); */
+
+	if (pg != NULL) {
+		err = 0;
+		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
+				__pgprot(flags)));
+		if (mem_init_done)
+			flush_HPTE(0, va, pmd_val(*pd));
+			/* flush_HPTE(0, va, pg); */
+
+	}
+	/* spin_unlock(&init_mm.page_table_lock); */
+	return err;
+}
+
+void __init adjust_total_lowmem(void)
+{
+/* TBD */
+#if 0
+	unsigned long max_low_mem = MAX_LOW_MEM;
+
+	if (total_lowmem > max_low_mem) {
+		total_lowmem = max_low_mem;
+#ifndef CONFIG_HIGHMEM
+		printk(KERN_INFO "Warning, memory limited to %ld Mb, use "
+			"CONFIG_HIGHMEM to reach %ld Mb\n",
+			max_low_mem >> 20, total_memory >> 20);
+		total_memory = total_lowmem;
+#endif /* CONFIG_HIGHMEM */
+	}
+#endif
+}
+
+static void show_tmem(unsigned long tmem)
+{
+	volatile unsigned long a;
+	a = a + tmem;
+}
+
+/*
+ * Map in all of physical memory starting at CONFIG_KERNEL_START.
+ */
+void __init mapin_ram(void)
+{
+	unsigned long v, p, s, f;
+
+	v = CONFIG_KERNEL_START;
+	p = memory_start;
+	show_tmem(memory_size);
+	for (s = 0; s < memory_size; s += PAGE_SIZE) {
+		f = _PAGE_PRESENT | _PAGE_ACCESSED |
+				_PAGE_SHARED | _PAGE_HWEXEC;
+		if ((char *) v < _stext || (char *) v >= _etext)
+			f |= _PAGE_WRENABLE;
+		else
+			/* On the MicroBlaze, no user access
+			   forces R/W kernel access */
+			f |= _PAGE_USER;
+		map_page(v, p, f);
+		v += PAGE_SIZE;
+		p += PAGE_SIZE;
+	}
+}
+
+/* is x a power of 2? */
+#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
+
+/*
+ * Set up a mapping for a block of I/O.
+ * virt, phys, size must all be page-aligned.
+ * This should only be called before ioremap is called.
+ */
+void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
+			     unsigned int size, int flags)
+{
+	int i;
+
+	if (virt > CONFIG_KERNEL_START && virt < ioremap_bot)
+		ioremap_bot = ioremap_base = virt;
+
+	/* Put it in the page tables. */
+	for (i = 0; i < size; i += PAGE_SIZE)
+		map_page(virt + i, phys + i, flags);
+}
+
+/* Scan the real Linux page tables and return a PTE pointer for
+ * a virtual address in a context.
+ * Returns true (1) if PTE was found, zero otherwise. The pointer to
+ * the PTE pointer is unmodified if PTE is not found.
+ */
+static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int retval = 0;
+
+	pgd = pgd_offset(mm, addr & PAGE_MASK);
+	if (pgd) {
+		pmd = pmd_offset(pgd, addr & PAGE_MASK);
+		if (pmd_present(*pmd)) {
+			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
+			if (pte) {
+				retval = 1;
+				*ptep = pte;
+			}
+		}
+	}
+	return retval;
+}
+
+/* Find physical address for this virtual address. Normally used by
+ * I/O functions, but anyone can call it.
+ */
+unsigned long iopa(unsigned long addr)
+{
+	unsigned long pa;
+
+	pte_t *pte;
+	struct mm_struct *mm;
+
+	/* Allow mapping of user addresses (within the thread)
+	 * for DMA if necessary.
+	 */
+	if (addr < TASK_SIZE)
+		mm = current->mm;
+	else
+		mm = &init_mm;
+
+	pa = 0;
+	if (get_pteptr(mm, addr, &pte))
+		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
+
+	return pa;
+}
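A note on the stack-expansion heuristic in do_page_fault() above: an access far below the user stack pointer is only allowed to grow the stack when the faulting instruction is a store that updates r1, and store_updates_sp() decides that purely from the instruction word (destination-register field equal to 1 plus a store opcode pattern). The stand-alone sketch below replays those two bit tests; the instruction words are synthetic values constructed only to satisfy the masks used in the patch and are not claimed to be real MicroBlaze encodings.

#include <stdio.h>

/* Replay of the two checks store_updates_sp() performs on the 32-bit
 * instruction word fetched from regs->pc in the patch above. */
static int updates_stack_pointer(unsigned int inst)
{
	/* rD field (bits 25..21) must name r1, the stack pointer */
	if (((inst >> 21) & 0x1f) != 1)
		return 0;
	/* store opcode group: the top bits must match the 0xd0000000 pattern */
	if ((inst & 0xd0000000) == 0xd0000000)
		return 1;
	return 0;
}

int main(void)
{
	/* Synthetic words chosen only to exercise the predicate. */
	unsigned int store_to_r1 = 0xd0000000 | (1u << 21);
	unsigned int store_to_r5 = 0xd0000000 | (5u << 21);
	unsigned int non_store   = 0x80000000 | (1u << 21);

	printf("store with rD=r1: %d\n", updates_stack_pointer(store_to_r1)); /* 1 */
	printf("store with rD=r5: %d\n", updates_stack_pointer(store_to_r5)); /* 0 */
	printf("non-store, rD=r1: %d\n", updates_stack_pointer(non_store));   /* 0 */
	return 0;
}

With this check in place, a legitimate store that moves r1 downward can still grow the stack past the 2 kB window below the stack pointer, while stray writes far below r1 segfault as the comment in the patch intends.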