Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        |  2
-rw-r--r--  arch/arm/mm/copypage-v6.c  |  6
-rw-r--r--  arch/arm/mm/fault-armv.c   | 31
-rw-r--r--  arch/arm/mm/flush.c        | 44
-rw-r--r--  arch/arm/mm/ioremap.c      | 47
5 files changed, 79 insertions(+), 51 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 3fefb43c67f7..95606b4a3ba6 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -62,7 +62,7 @@ config CPU_ARM720T
# ARM920T
config CPU_ARM920T
bool "Support ARM920T processor" if !ARCH_S3C2410
- depends on ARCH_INTEGRATOR || ARCH_S3C2410 || ARCH_IMX
+ depends on ARCH_INTEGRATOR || ARCH_S3C2410 || ARCH_IMX || ARCH_AAEC2000
default y if ARCH_S3C2410
select CPU_32v4
select CPU_ABRT_EV4T
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index a8c00236bd3d..27d041574ea7 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -30,8 +30,6 @@
static DEFINE_SPINLOCK(v6_lock);
-#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
-
/*
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
@@ -55,7 +53,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
*/
void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
- unsigned int offset = DCACHE_COLOUR(vaddr);
+ unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long from, to;
/*
@@ -95,7 +93,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
*/
void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
- unsigned int offset = DCACHE_COLOUR(vaddr);
+ unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
/*
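
Note: the copypage-v6.c hunks drop the file-local DCACHE_COLOUR macro in favour
of the generic CACHE_COLOUR, whose body matches the one removed above. A minimal
standalone sketch of the colour computation, assuming the usual ARMv6 values
(4KB pages, 16KB SHMLBA, i.e. four page colours) — not the kernel's headers:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define SHMLBA     (4 * PAGE_SIZE)	/* assumed ARMv6 SHM alignment */

	/* Same body as the removed DCACHE_COLOUR: the colour is which of the
	 * SHMLBA/PAGE_SIZE page slots the virtual address occupies. */
	#define CACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

	int main(void)
	{
		/* 0x1000 and 0x5000 share colour 1, so they can alias in an
		 * aliasing VIPT D-cache; 0x2000 is colour 2 and cannot. */
		printf("%lu %lu %lu\n",
		       CACHE_COLOUR(0x1000UL),	/* 1 */
		       CACHE_COLOUR(0x5000UL),	/* 1 */
		       CACHE_COLOUR(0x2000UL));	/* 2 */
		return 0;
	}
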
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 01967ddeef53..be4ab3d73c91 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -77,9 +77,8 @@ no_pmd:
}
static void
-make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
- struct address_space *mapping = page_mapping(page);
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *mpnt;
struct prio_tree_iter iter;
@@ -87,9 +86,6 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
pgoff_t pgoff;
int aliases = 0;
- if (!mapping)
- return;
-
pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
/*
@@ -115,9 +111,11 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
if (aliases)
adjust_pte(vma, addr);
else
- flush_cache_page(vma, addr, page_to_pfn(page));
+ flush_cache_page(vma, addr, pfn);
}
+void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
/*
* Take care of architecture specific things when placing a new PTE into
* a page table, or changing an existing PTE. Basically, there are two
@@ -134,29 +132,22 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page,
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
+ struct address_space *mapping;
struct page *page;
if (!pfn_valid(pfn))
return;
+
page = pfn_to_page(pfn);
- if (page_mapping(page)) {
+ mapping = page_mapping(page);
+ if (mapping) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
- if (dirty) {
- /*
- * This is our first userspace mapping of this page.
- * Ensure that the physical page is coherent with
- * the kernel mapping.
- *
- * FIXME: only need to do this on VIVT and aliasing
- * VIPT cache architectures. We can do that
- * by choosing whether to set this bit...
- */
- __cpuc_flush_dcache_page(page_address(page));
- }
+ if (dirty)
+ __flush_dcache_page(mapping, page);
if (cache_is_vivt())
- make_coherent(vma, addr, page, dirty);
+ make_coherent(mapping, vma, addr, pfn);
}
}
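
Note: the fault-armv.c hunks route update_mmu_cache() through the now-shared
__flush_dcache_page() and pass the mapping and pfn down to make_coherent(). A
sketch of the deferred-flush handshake this leaves in place — the helper name
is hypothetical, the protocol is the one visible in the hunks above:

	/* Sketch only: flush_dcache_page() defers the flush for page-cache
	 * pages with no user mappings yet by setting PG_dcache_dirty;
	 * update_mmu_cache() then performs it on the first user mapping.
	 * test_and_clear_bit() ensures the deferred flush runs exactly once. */
	static void sketch_first_map_flush(struct page *page,
					   struct address_space *mapping)
	{
		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			__flush_dcache_page(mapping, page);
	}
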
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 4085ed983e46..191788fb18d1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -37,13 +37,8 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#endif
-static void __flush_dcache_page(struct address_space *mapping, struct page *page)
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
- struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *mpnt;
- struct prio_tree_iter iter;
- pgoff_t pgoff;
-
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
@@ -52,24 +47,21 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page
__cpuc_flush_dcache_page(page_address(page));
/*
- * If there's no mapping pointer here, then this page isn't
- * visible to userspace yet, so there are no cache lines
- * associated with any other aliases.
- */
- if (!mapping)
- return;
-
- /*
- * This is a page cache page. If we have a VIPT cache, we
- * only need to do one flush - which would be at the relevant
+ * If this is a page cache page, and we have an aliasing VIPT cache,
+ * we only need to do one flush - which would be at the relevant
* userspace colour, which is congruent with page->index.
*/
- if (cache_is_vipt()) {
- if (cache_is_vipt_aliasing())
- flush_pfn_alias(page_to_pfn(page),
- page->index << PAGE_CACHE_SHIFT);
- return;
- }
+ if (mapping && cache_is_vipt_aliasing())
+ flush_pfn_alias(page_to_pfn(page),
+ page->index << PAGE_CACHE_SHIFT);
+}
+
+static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
+{
+ struct mm_struct *mm = current->active_mm;
+ struct vm_area_struct *mpnt;
+ struct prio_tree_iter iter;
+ pgoff_t pgoff;
/*
* There are possible user space mappings of this page:
@@ -116,12 +108,12 @@ void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
- if (cache_is_vipt_nonaliasing())
- return;
-
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
- else
+ else {
__flush_dcache_page(mapping, page);
+ if (mapping && cache_is_vivt())
+ __flush_dcache_aliases(mapping, page);
+ }
}
EXPORT_SYMBOL(flush_dcache_page);
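
Note: the caller-side contract of flush_dcache_page() is unchanged by the
flush.c rework — the kernel must still call it after writing to a page that may
be (or may become) mapped to user space. A hypothetical driver-side sketch
(helper name and context are illustrative, not from this patch):

	/* Write into a possibly user-mapped page, then let flush_dcache_page()
	 * either flush the aliases now or defer via PG_dcache_dirty until the
	 * first user mapping appears. */
	static void fill_shared_page(struct page *page, const void *src,
				     size_t len)
	{
		void *dst = kmap(page);		/* kernel alias of the page */
		memcpy(dst, src, len);
		kunmap(page);
		flush_dcache_page(page);	/* make user aliases coherent */
	}
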
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 00bb8fd37a59..7110e54182b1 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -170,3 +170,50 @@ void __iounmap(void __iomem *addr)
vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(__iounmap);
+
+#ifdef __io
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return __io(port);
+}
+EXPORT_SYMBOL(ioport_map);
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
+#endif
+
+#ifdef CONFIG_PCI
+#include <linux/pci.h>
+#include <linux/ioport.h>
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ if (flags & IORESOURCE_CACHEABLE)
+ return ioremap(start, len);
+ return ioremap_nocache(start, len);
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+ if ((unsigned long)addr >= VMALLOC_START &&
+ (unsigned long)addr < VMALLOC_END)
+ iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+#endif
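
Note: the new ioremap.c code gives ARM the generic iomap API: pci_iomap()
inspects the BAR's resource flags and returns either an ioport_map() cookie or
an ioremap()/ioremap_nocache() pointer, so a driver can use one accessor family
for both. A hypothetical probe sketch (BAR number and register layout are
illustrative):

	static int sketch_probe(struct pci_dev *pdev)
	{
		void __iomem *regs;
		u32 id;

		/* maxlen 0 = map the whole BAR; works for I/O and MMIO BARs */
		regs = pci_iomap(pdev, 0, 0);
		if (!regs)
			return -ENOMEM;

		id = ioread32(regs);	/* accessor works for either kind */
		dev_info(&pdev->dev, "id %08x\n", id);

		/* iounmap happens only if the cookie was ioremap()ed, i.e.
		 * falls inside [VMALLOC_START, VMALLOC_END) */
		pci_iounmap(pdev, regs);
		return 0;
	}
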