Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/include/asm/fixmap.h | 27
-rw-r--r--  arch/xtensa/mm/highmem.c         |  6
2 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 9f6c33d0428a..a43cd5265556 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -23,8 +23,8 @@
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of the consistent memory region backwards.
+ * in the boot process. We allocate these special addresses
+ * from the start of the consistent memory region upwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
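The comment rewrite above matches the macro change in the next hunk: with the new __fix_to_virt(), a larger fixmap index now maps to a higher virtual address instead of a lower one. A minimal userspace sketch of that arithmetic, using made-up values for FIXADDR_START and PAGE_SHIFT rather than the real xtensa constants:

#include <stdio.h>

/* Assumed example values; the real constants come from the xtensa headers. */
#define PAGE_SHIFT     12
#define FIXADDR_START  0xcf000000UL

/* Same shape as the __fix_to_virt()/__virt_to_fix() macros added by the patch. */
#define __fix_to_virt(x)  (FIXADDR_START + ((unsigned long)(x) << PAGE_SHIFT))
#define __virt_to_fix(x)  (((x) - FIXADDR_START) >> PAGE_SHIFT)

int main(void)
{
	/* Indices 0..3 map to ascending addresses: 0xcf000000, 0xcf001000, ... */
	for (unsigned int idx = 0; idx < 4; idx++) {
		unsigned long vaddr = __fix_to_virt(idx);
		printf("idx %u -> %#lx -> idx %lu\n", idx, vaddr, __virt_to_fix(vaddr));
	}
	return 0;
}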
@@ -47,7 +47,28 @@ enum fixed_addresses {
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
-#include <asm-generic/fixmap.h>
+#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+#endif
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel( \
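The helpers added in place of the asm-generic include follow essentially the same pattern as include/asm-generic/fixmap.h: fix_to_virt() rejects an out-of-range index at compile time (BUILD_BUG_ON() works here because the function is __always_inline and callers pass constant indices), while virt_to_fix() checks the address at run time with BUG_ON(). The generic version is dropped because its __fix_to_virt() counts down from FIXADDR_TOP, whereas this patch needs indices to count up from FIXADDR_START. A rough userspace sketch of the round-trip, with both kernel checks replaced by plain assert() and all constants (the enum entries, FIXADDR_START, PAGE_SHIFT) assumed for illustration:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define FIXADDR_START  0xcf000000UL	/* assumed example value */

/* Placeholder index list; the real xtensa enum sizes FIX_KMAP_* from KM_TYPE_NR and NR_CPUS. */
enum fixed_addresses { FIX_KMAP_BEGIN, FIX_KMAP_END = 31, __end_of_fixed_addresses };

#define FIXADDR_TOP  (FIXADDR_START + ((unsigned long)__end_of_fixed_addresses << PAGE_SHIFT))

static inline unsigned long fix_to_virt(unsigned int idx)
{
	assert(idx < __end_of_fixed_addresses);			/* BUILD_BUG_ON() in the kernel */
	return FIXADDR_START + ((unsigned long)idx << PAGE_SHIFT);
}

static inline unsigned int virt_to_fix(unsigned long vaddr)
{
	assert(vaddr >= FIXADDR_START && vaddr < FIXADDR_TOP);	/* BUG_ON() in the kernel */
	return (vaddr - FIXADDR_START) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long vaddr = fix_to_virt(FIX_KMAP_BEGIN + 5);
	printf("%#lx -> index %u\n", vaddr, virt_to_fix(vaddr));	/* round-trips to 5 */
	return 0;
}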
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
index 17a8c0d6fd17..2e95a7665bf3 100644
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -28,9 +28,9 @@ void *kmap_atomic(struct page *page)
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte - idx)));
+ BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
- set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+ set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));
return (void *)vaddr;
}
@@ -51,7 +51,7 @@ void __kunmap_atomic(void *kvaddr)
* is a bad idea also, in case the page changes cacheability
* attributes or becomes a protected page in a hypervisor.
*/
- pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+ pte_clear(&init_mm, kvaddr, kmap_pte + idx);
local_flush_tlb_kernel_range((unsigned long)kvaddr,
(unsigned long)kvaddr + PAGE_SIZE);
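The two one-character changes in highmem.c keep the pte slot in step with the address handed back to the caller: vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx) now grows upwards with idx, so the matching entry must be kmap_pte + idx rather than kmap_pte - idx. A small sketch of that invariant, with kmap_pte modelled as a plain array and every constant assumed for illustration:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define FIXADDR_START  0xcf000000UL	/* assumed example value */
#define FIX_KMAP_BEGIN 0
#define KM_TYPE_NR     8		/* assumed; the real value is per-arch */
#define NR_CPUS        2

#define __fix_to_virt(x)  (FIXADDR_START + ((unsigned long)(x) << PAGE_SHIFT))

/* Stand-in for the kmap pte table that covers the FIX_KMAP_* range. */
static unsigned long kmap_pte[KM_TYPE_NR * NR_CPUS];

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		for (int type = 0; type < KM_TYPE_NR; type++) {
			int idx = type + KM_TYPE_NR * cpu;
			unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

			/* The slot derived from the address must be the slot we write. */
			unsigned long *pte = &kmap_pte[(vaddr - FIXADDR_START) >> PAGE_SHIFT];
			assert(pte == kmap_pte + idx);	/* holds with '+ idx', not '- idx' */
		}
	}
	puts("pte index and fixmap address move together");
	return 0;
}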