author    Chris Metcalf <cmetcalf@tilera.com>  2011-03-01 00:37:34 +0300
committer Chris Metcalf <cmetcalf@tilera.com>  2011-03-10 21:17:53 +0300
commit    76c567fbba50c3da2f4d40e2e551bab26cfd4381 (patch)
tree      6e3c92a266d0ec255e1930adf5ba5268cd71dee9 /arch/tile/include/asm/pgtable.h
parent    09c17eab075ceeafb53935d858c575b6776394d1 (diff)
download  linux-76c567fbba50c3da2f4d40e2e551bab26cfd4381.tar.xz
arch/tile: support 4KB page size as well as 64KB
The Tilera architecture traditionally supports 64KB page sizes to improve TLB utilization and performance when the hardware is being used primarily to run a single application. For more generic server scenarios, it can be beneficial to run with 4KB page sizes, so this commit allows that to be specified (by modifying the arch/tile/include/hv/pagesize.h header).

As part of this change, we also re-worked the PTE management slightly so that PTE writes all go through a __set_pte() function where we can do some additional validation. The set_pte_order() function was eliminated since the "order" argument wasn't being used.

One bug uncovered was in the PCI DMA code, which wasn't properly flushing the specified range. This was benign with 64KB pages, but with 4KB pages we were getting some larger flushes wrong.

The per-cpu memory reservation code also needed updating to conform with the newer percpu stuff; before it always chose 64KB, and that was always correct, but with 4KB granularity we now have to pay closer attention and reserve the amount of memory that will be requested when the percpu code starts allocating.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
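For context, the write-ordering problem that __set_pte() addresses can be sketched roughly as follows. This is a minimal illustration, not the actual tile implementation: it assumes a little-endian 32-bit platform where pte_val() is 64 bits wide and the hardware "present" bit lives in the low 32-bit word, so that word must be written last when installing an entry (and first when clearing one) to keep a concurrent page-table walker from ever observing a half-written valid PTE.

    #include <stdint.h>

    typedef struct { uint64_t val; } pte_t;   /* 64-bit PTE on a 32-bit CPU */
    #define _PAGE_PRESENT 0x1ULL              /* assumption: valid bit in low word */

    /* Illustrative ordered store of a 64-bit PTE via two 32-bit writes. */
    static void sketch_set_pte(pte_t *ptep, pte_t pte)
    {
            volatile uint32_t *w = (volatile uint32_t *)ptep;  /* w[0] = low word (LE) */

            if (pte.val & _PAGE_PRESENT) {
                    /* Installing: high word first, present-bit word last, so
                     * the entry never appears valid while half written. */
                    w[1] = (uint32_t)(pte.val >> 32);
                    w[0] = (uint32_t)pte.val;
            } else {
                    /* Clearing/invalid: knock out the present-bit word first. */
                    w[0] = (uint32_t)pte.val;
                    w[1] = (uint32_t)(pte.val >> 32);
            }
    }

A real SMP implementation would also need memory barriers between the two stores; the volatile qualifier here only prevents the compiler from reordering them.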
Diffstat (limited to 'arch/tile/include/asm/pgtable.h')
-rw-r--r--  arch/tile/include/asm/pgtable.h | 31
1 file changed, 12 insertions(+), 19 deletions(-)
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index a6604e9485da..1a20b7ef8ea2 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -233,15 +233,23 @@ static inline void __pte_clear(pte_t *ptep)
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
+/* Return PA and protection info for a given kernel VA. */
+int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
+
+/*
+ * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
+ * the right order on 32-bit platforms and also allows us to write
+ * hooks to check valid PTEs, etc., if we want.
+ */
+void __set_pte(pte_t *ptep, pte_t pte);
+
/*
- * set_pte_order() sets the given PTE and also sanity-checks the
+ * set_pte() sets the given PTE and also sanity-checks the
* requested PTE against the page homecaching. Unspecified parts
* of the PTE are filled in when it is written to memory, i.e. all
* caching attributes if "!forcecache", or the home cpu if "anyhome".
*/
-extern void set_pte_order(pte_t *ptep, pte_t pte, int order);
-
-#define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0)
+extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
@@ -293,21 +301,6 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
#define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) })
/*
- * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
- *
- * dst - pointer to pgd range anywhere on a pgd page
- * src - ""
- * count - the number of pgds to copy.
- *
- * dst and src can be on the same page, but the range must not overlap,
- * and must not cross a page boundary.
- */
-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
-{
- memcpy(dst, src, count * sizeof(pgd_t));
-}
-
-/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
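As a caller-visible summary of the interface change above: set_pte_order() is gone and set_pte() is now a real two-argument function. A hypothetical call site (ptep and pteval are placeholder names, not taken from this patch) would change like this:

    /* Before this patch: trailing "order" argument, always 0 and unused. */
    set_pte_order(ptep, pteval, 0);

    /* After this patch: plain two-argument set_pte(). */
    set_pte(ptep, pteval);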