Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/Kconfig         |  29
-rw-r--r--	arch/sh/mm/Makefile_32     |   7
-rw-r--r--	arch/sh/mm/asids-debugfs.c |   4
-rw-r--r--	arch/sh/mm/ioremap_32.c    |   8
-rw-r--r--	arch/sh/mm/pmb-fixed.c     |  45
-rw-r--r--	arch/sh/mm/pmb.c           |  38
-rw-r--r--	arch/sh/mm/tlb-pteaex.c    |  96
7 files changed, 218 insertions(+), 9 deletions(-)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 555ec9714b9e..10c24356d2d5 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -57,7 +57,7 @@ config 32BIT
bool
default y if CPU_SH5
-config PMB
+config PMB_ENABLE
bool "Support 32-bit physical addressing through PMB"
depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
select 32BIT
@@ -67,6 +67,33 @@ config PMB
32-bits through the SH-4A PMB. If this is not set, legacy
29-bit physical addressing will be used.
+choice
+ prompt "PMB handling type"
+ depends on PMB_ENABLE
+ default PMB_FIXED
+
+config PMB
+ bool "PMB"
+ depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+ select 32BIT
+ help
+ If you say Y here, physical addressing will be extended to
+ 32-bits through the SH-4A PMB. If this is not set, legacy
+ 29-bit physical addressing will be used.
+
+config PMB_FIXED
+ bool "fixed PMB"
+ depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \
+ CPU_SUBTYPE_SH7785)
+ select 32BIT
+ help
+ If this option is enabled, fixed PMB mappings are inherited
+ from the boot loader, and the kernel does not attempt dynamic
+ management. This is the closest to legacy 29-bit physical mode,
+ and allows systems to support up to 512MiB of system memory.
+
+endchoice
+
config X2TLB
bool "Enable extended TLB mode"
depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
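
Selecting the new fixed handler through this choice would be expected to yield a .config fragment along the following lines (a sketch assuming standard Kconfig output; only the symbols touched by this hunk are shown):

    CONFIG_32BIT=y
    CONFIG_PMB_ENABLE=y
    # CONFIG_PMB is not set
    CONFIG_PMB_FIXED=y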
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index cb2f3f299591..986a1e055834 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -25,8 +25,10 @@ obj-$(CONFIG_CPU_SH4) += cache-debugfs.o
endif
ifdef CONFIG_MMU
-obj-$(CONFIG_CPU_SH3) += tlb-sh3.o
-obj-$(CONFIG_CPU_SH4) += tlb-sh4.o
+tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
+tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o
+tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o
+obj-y += $(tlb-y)
ifndef CONFIG_CACHE_OFF
obj-$(CONFIG_CPU_SH4) += pg-sh4.o
obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o
@@ -35,6 +37,7 @@ endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PMB) += pmb.o
+obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o
obj-$(CONFIG_NUMA) += numa.o
EXTRA_CFLAGS += -Werror
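
A note on the tlb-y idiom introduced above: because the per-feature assignments use ':=', a later match overwrites the variable instead of appending, so an SH-4 core that also sets CONFIG_CPU_HAS_PTEAEX links tlb-pteaex.o in place of tlb-sh4.o. A minimal standalone Makefile demonstrating the effect (the hard-coded y values are stand-ins for the real kernel configuration):

    CONFIG_CPU_SH4 := y
    CONFIG_CPU_HAS_PTEAEX := y

    tlb-$(CONFIG_CPU_SH4)        := tlb-sh4.o
    tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o

    all:
    	@echo "$(tlb-y)"    # prints "tlb-pteaex.o"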
diff --git a/arch/sh/mm/asids-debugfs.c b/arch/sh/mm/asids-debugfs.c
index 8e912a15e94f..cd8c3bf39b5a 100644
--- a/arch/sh/mm/asids-debugfs.c
+++ b/arch/sh/mm/asids-debugfs.c
@@ -37,10 +37,8 @@ static int asids_seq_show(struct seq_file *file, void *iter)
continue;
if (p->mm)
- seq_printf(file, "%5d : %02lx\n", pid,
+ seq_printf(file, "%5d : %04lx\n", pid,
cpu_asid(smp_processor_id(), p->mm));
- else
- seq_printf(file, "%5d : (none)\n", pid);
}
read_unlock(&tasklist_lock);
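
The format change from %02lx to %04lx tracks the wider ASID space on PTEAEX-capable parts: the legacy ASID field is 8 bits, while the extended one needs four hex digits to print (16 bits). A trivial userspace sketch of the output difference, with made-up PID and ASID values:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long legacy_asid = 0x7f;	/* fits the old 8-bit space */
    	unsigned long ext_asid = 0x1a2b;	/* needs the extended space */

    	printf("%5d : %02lx\n", 100, legacy_asid);
    	printf("%5d : %04lx\n", 101, ext_asid);
    	return 0;
    }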
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 32946fba123e..60cc486d2c2c 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -59,11 +59,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
return (void __iomem *)phys_addr;
+#if !defined(CONFIG_PMB_FIXED)
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
if (phys_addr < virt_to_phys(high_memory))
return NULL;
+#endif
/*
* Mappings have to be page-aligned
@@ -81,7 +83,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
area->phys_addr = phys_addr;
orig_addr = addr = (unsigned long)area->addr;
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
/*
* First try to remap through the PMB once a valid VMA has been
* established. Smaller allocations (or the rest of the size
@@ -119,10 +121,10 @@ void __iounmap(void __iomem *addr)
unsigned long seg = PXSEG(vaddr);
struct vm_struct *p;
- if (seg < P3SEG || seg >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
+ if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
return;
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
/*
* Purge any PMB entries that may have been established for this
* mapping, then proceed with conventional VMA teardown.
diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c
new file mode 100644
index 000000000000..43c8eac4d8a1
--- /dev/null
+++ b/arch/sh/mm/pmb-fixed.c
@@ -0,0 +1,45 @@
+/*
+ * arch/sh/mm/pmb-fixed.c
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+
+static int __uses_jump_to_uncached fixed_pmb_init(void)
+{
+ int i;
+ unsigned long addr, data;
+
+ jump_to_uncached();
+
+ for (i = 0; i < PMB_ENTRY_MAX; i++) {
+ addr = PMB_DATA + (i << PMB_E_SHIFT);
+ data = ctrl_inl(addr);
+ if (!(data & PMB_V))
+ continue;
+
+ if (data & PMB_C) {
+#if defined(CONFIG_CACHE_WRITETHROUGH)
+ data |= PMB_WT;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ data &= ~PMB_WT;
+#else
+ data &= ~(PMB_C | PMB_WT);
+#endif
+ }
+ ctrl_outl(data, addr);
+ }
+
+ back_to_cached();
+
+ return 0;
+}
+arch_initcall(fixed_pmb_init);
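
The loop above touches only the cacheability bits of valid entries, normalizing whatever the boot loader left behind to the kernel's configured policy. A userspace sketch of the same ladder as a pure function; the bit values are illustrative stand-ins for the real PMB_* masks in <asm/mmu.h>:

    #include <stdio.h>

    #define PMB_V	0x100	/* entry valid (assumed value) */
    #define PMB_C	0x008	/* cacheable (assumed value) */
    #define PMB_WT	0x001	/* write-through; clear means write-back */

    /* wt/wb mirror CONFIG_CACHE_WRITETHROUGH / CONFIG_CACHE_WRITEBACK;
     * with neither set, caching is disabled entirely. */
    static unsigned long fixup_cache_policy(unsigned long data, int wt, int wb)
    {
    	if (!(data & PMB_V) || !(data & PMB_C))
    		return data;
    	if (wt)
    		return data | PMB_WT;
    	if (wb)
    		return data & ~PMB_WT;
    	return data & ~(PMB_C | PMB_WT);
    }

    int main(void)
    {
    	printf("%#lx\n", fixup_cache_policy(PMB_V | PMB_C, 1, 0));	/* 0x109 */
    	printf("%#lx\n", fixup_cache_policy(PMB_V | PMB_C, 0, 1));	/* 0x108 */
    	return 0;
    }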
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 84241676265e..b1a714a92b14 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -15,6 +15,8 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
@@ -402,3 +404,39 @@ static int __init pmb_debugfs_init(void)
return 0;
}
postcore_initcall(pmb_debugfs_init);
+
+#ifdef CONFIG_PM
+static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
+{
+ static pm_message_t prev_state;
+
+ /* Restore the PMB after a resume from hibernation */
+ if (state.event == PM_EVENT_ON &&
+ prev_state.event == PM_EVENT_FREEZE) {
+ struct pmb_entry *pmbe;
+ spin_lock_irq(&pmb_list_lock);
+ for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
+ set_pmb_entry(pmbe);
+ spin_unlock_irq(&pmb_list_lock);
+ }
+ prev_state = state;
+ return 0;
+}
+
+static int pmb_sysdev_resume(struct sys_device *dev)
+{
+ return pmb_sysdev_suspend(dev, PMSG_ON);
+}
+
+static struct sysdev_driver pmb_sysdev_driver = {
+ .suspend = pmb_sysdev_suspend,
+ .resume = pmb_sysdev_resume,
+};
+
+static int __init pmb_sysdev_init(void)
+{
+ return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
+}
+
+subsys_initcall(pmb_sysdev_init);
+#endif
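
The resume hook reuses the suspend callback because a sysdev_driver resume takes no pm_message_t; passing PMSG_ON lets the FREEZE-to-ON transition be detected and the PMB entries reloaded after hibernation, when the register contents have been lost. A standalone sketch of that transition check (the event values are hypothetical stand-ins for the kernel's pm_message_t events):

    #include <stdio.h>

    enum { PM_EVENT_ON, PM_EVENT_FREEZE };	/* assumed stand-in values */

    static int prev_event = PM_EVENT_ON;

    /* Returns 1 only on the FREEZE -> ON transition, i.e. on resume
     * from hibernation, which is when the PMB must be reprogrammed. */
    static int needs_pmb_restore(int event)
    {
    	int restore = (event == PM_EVENT_ON && prev_event == PM_EVENT_FREEZE);

    	prev_event = event;
    	return restore;
    }

    int main(void)
    {
    	printf("%d\n", needs_pmb_restore(PM_EVENT_FREEZE));	/* 0 */
    	printf("%d\n", needs_pmb_restore(PM_EVENT_ON));		/* 1 */
    	return 0;
    }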
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
new file mode 100644
index 000000000000..2aab3ea934d7
--- /dev/null
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -0,0 +1,96 @@
+/*
+ * arch/sh/mm/tlb-pteaex.c
+ *
+ * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
+ *
+ * Copyright (C) 2009 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ unsigned long pteval;
+ unsigned long vpn;
+
+ /* Ptrace may call this routine. */
+ if (vma && current->active_mm != vma->vm_mm)
+ return;
+
+#ifndef CONFIG_CACHE_OFF
+ {
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+
+ if (!test_bit(PG_mapped, &page->flags)) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+ __flush_wback_region((void *)P1SEGADDR(phys),
+ PAGE_SIZE);
+ __set_bit(PG_mapped, &page->flags);
+ }
+ }
+ }
+#endif
+
+ local_irq_save(flags);
+
+ /* Set PTEH register */
+ vpn = address & MMU_VPN_MASK;
+ __raw_writel(vpn, MMU_PTEH);
+
+ /* Set PTEAEX */
+ __raw_writel(get_asid(), MMU_PTEAEX);
+
+ pteval = pte.pte_low;
+
+ /* Set PTEA register */
+#ifdef CONFIG_X2TLB
+ /*
+ * For the extended mode TLB this is trivial, only the ESZ and
+ * EPR bits need to be written out to PTEA, with the remainder of
+ * the protection bits (with the exception of the compat-mode SZ
+ * and PR bits, which are cleared) being written out in PTEL.
+ */
+ __raw_writel(pte.pte_high, MMU_PTEA);
+#endif
+
+ /* Set PTEL register */
+ pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_CACHE_WRITETHROUGH
+ pteval |= _PAGE_WT;
+#endif
+ /* conveniently, we want all the software flags to be 0 anyway */
+ __raw_writel(pteval, MMU_PTEL);
+
+ /* Load the TLB */
+ asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+ local_irq_restore(flags);
+}
+
+/*
+ * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB
+ * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped
+ * address arrays. In compat mode the second array is inaccessible, while
+ * in extended mode, the legacy 8-bit ASID field in address array 1 has
+ * undefined behaviour.
+ */
+void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
+ unsigned long page)
+{
+ jump_to_uncached();
+ __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
+ __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
+ back_to_cached();
+}
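
For contrast, the legacy SH-4 variant of local_flush_tlb_one() folds the 8-bit ASID into a single address-array write; with PTEAEX the VPN and ASID go to separate arrays, as above. A side-by-side sketch (the legacy line paraphrases tlb-sh4.c rather than quoting it):

    /* Legacy: one address array, ASID in the low bits of the data */
    __raw_writel(page | asid, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);

    /* PTEAEX: VPN and ASID are written to separate address arrays */
    __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
    __raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);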