author	Chris Metcalf <cmetcalf@tilera.com>	2010-11-01 22:21:35 +0300
committer	Chris Metcalf <cmetcalf@tilera.com>	2010-11-01 22:30:36 +0300
commit	38a6f4266989c4dae68eccb1a5cb4580a48003e4 (patch)
tree	dba1fecb1c0631f4d9b3dfb9f56d11f92d905a4f /arch/tile/include
parent	c8ddb2713c624f432fa5fe3c7ecffcdda46ea0d4 (diff)
arch/tile: complete migration to new kmap_atomic scheme
This change makes KM_TYPE_NR independent of the actual deprecated list
of km_type values, which are no longer used in tile code anywhere.  For
now we leave it set to 8, allowing that many nested mappings, and thus
reserving 32MB of address space.

A few remaining places using KM_* values were cleaned up as well.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
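A quick sketch of the arithmetic behind that 32MB figure, under the 64-processor, 64KB-page configuration the patch comment cites (the EXAMPLE_* names below are illustrative only, not taken from the tile headers):

	/* Illustrative only: one atomic-kmap slot per cpu per nesting level. */
	#define EXAMPLE_NR_CPUS     64
	#define EXAMPLE_PAGE_SIZE   (64UL * 1024)	/* 64KB pages */
	#define EXAMPLE_KM_TYPE_NR  8			/* nesting depth chosen by this patch */

	/* 64 cpus * 64KB = 4MB per nesting level; 8 levels = 32MB reserved. */
	#define EXAMPLE_KMAP_RESERVED \
		(EXAMPLE_NR_CPUS * EXAMPLE_PAGE_SIZE * EXAMPLE_KM_TYPE_NR)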
Diffstat (limited to 'arch/tile/include')
-rw-r--r--	arch/tile/include/asm/highmem.h	1
-rw-r--r--	arch/tile/include/asm/kmap_types.h	34
-rw-r--r--	arch/tile/include/asm/pgtable.h	6
3 files changed, 26 insertions, 15 deletions
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index e0f7ee186721..b2a6c5de79ab 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/threads.h>
-#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 1480106d1c05..3d0f20246260 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
#define _ASM_TILE_KMAP_TYPES_H
/*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly. In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space. For now we leave KM_TYPE_NR
+ * set to depth 8.
*/
enum km_type {
+ KM_TYPE_NR = 8
+};
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
- KM_MEMCPY0,
- KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
- KM_PTE0,
- KM_PTE1,
-#endif
- KM_TYPE_NR
+ KM_SYNC_ICACHE,
+ KM_SYNC_DCACHE,
+ KM_UML_USERCOPY,
+ KM_IRQ_PTE,
+ KM_NMI,
+ KM_NMI_PTE,
+ KM_KDB
};
#endif /* _ASM_TILE_KMAP_TYPES_H */
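For context, a minimal sketch of the kmap_atomic() calling convention this header is being adapted to; this is generic kernel usage of that era, not code from this patch, and example_copy_page() is a made-up helper.  Under the new scheme nesting is tracked implicitly, so KM_TYPE_NR only bounds the depth and the enum values above survive purely as dummies.

	#include <linux/highmem.h>
	#include <linux/string.h>

	/*
	 * Sketch (not from this patch): copy a possibly-highmem page with the
	 * new stack-based API.  The old calls were kmap_atomic(page, KM_USER0)
	 * and kunmap_atomic(p, KM_USER0); the slot argument is gone now.
	 */
	static void example_copy_page(struct page *page, void *dst)
	{
		void *src = kmap_atomic(page);	/* nests, up to KM_TYPE_NR levels */
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);
	}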
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index dc4ccdd855bc..a6604e9485da 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -344,10 +344,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
- _pte_offset_map(dir, address, KM_PTE0)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
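
A usage sketch of the simplified pte_offset_map()/pte_unmap() pair declared above; example_read_pte() is a hypothetical helper, not part of this patch.  With CONFIG_HIGHPTE the PTE page itself can live in highmem, so pte_offset_map() maps it with kmap_atomic() and pte_unmap() now only needs the mapped address.

	#include <linux/mm.h>

	/* Hypothetical helper: fetch the PTE covering 'addr' from its pmd. */
	static pte_t example_read_pte(pmd_t *pmd, unsigned long addr)
	{
		pte_t *ptep = pte_offset_map(pmd, addr);	/* kmap_atomic() if HIGHPTE */
		pte_t pte = *ptep;
		pte_unmap(ptep);				/* kunmap_atomic() if HIGHPTE */
		return pte;
	}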