author	Scott Wood <scottwood@freescale.com>	2013-10-12 04:22:37 +0400
committer	Scott Wood <scottwood@freescale.com>	2014-01-10 03:52:19 +0400
commit	47ce8af4209f4344f152aa6fc538efe9d6bdfd1a (patch)
tree	933b09da56b9b015bce4b9c5e9e5533cd00eb4a5 /arch/powerpc/mm
parent	dde7dd3d67728418bc61cee424fcd9041058cf3f (diff)
download	linux-47ce8af4209f4344f152aa6fc538efe9d6bdfd1a.tar.xz
powerpc: add barrier after writing kernel PTE
There is no barrier between something like ioremap() writing to a PTE, and returning the value to a caller that may then store the pointer in a place that is visible to other CPUs. Such callers generally don't perform barriers of their own.

Even if callers of ioremap() and similar things did use barriers, the most logical choice would be smp_wmb(), which is not architecturally sufficient when BookE hardware tablewalk is used. A full sync is specified by the architecture.

For userspace mappings, OTOH, we generally already have an lwsync due to locking, and if we occasionally take a spurious fault due to not having a full sync with hardware tablewalk, it will not be fatal because we will retry rather than oops.

Signed-off-by: Scott Wood <scottwood@freescale.com>
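To make the ordering hazard concrete, below is a minimal sketch of the publish/consume pattern described above (hypothetical driver code, not part of this patch; the names my_dev_regs, my_probe, my_read_status and the register offset are invented):

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical illustration only; not from this commit. */
static void __iomem *my_dev_regs;	/* pointer visible to other CPUs */

/* CPU A: maps the device and publishes the pointer. */
int my_probe(phys_addr_t base)
{
	void __iomem *regs = ioremap(base, 0x1000);	/* writes a kernel PTE */

	if (!regs)
		return -ENOMEM;

	/*
	 * Publish the pointer.  Without a full sync after the PTE write
	 * inside the mapping path, a BookE hardware tablewalk on CPU B
	 * can still miss the new PTE even though CPU B already sees the
	 * pointer, and the resulting kernel fault is fatal.
	 */
	WRITE_ONCE(my_dev_regs, regs);
	return 0;
}

/* CPU B: consumes the pointer with no barrier of its own. */
u32 my_read_status(void)
{
	void __iomem *regs = READ_ONCE(my_dev_regs);

	return regs ? ioread32(regs + 0x10) : 0;
}

On BookE with hardware tablewalk, CPU B's dereference triggers a page-table walk in hardware, so the PTE written inside ioremap() must already be visible when the pointer is; the sync added by this patch provides that guarantee without requiring every caller to add its own barrier.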
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	1
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	12
2 files changed, 13 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5b9601715289..343a87fa78b5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -299,6 +299,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
 						     __pgprot(flags)));
 	}
+	smp_wmb();
 	return err;
 }
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 02e8681fb865..755138218e04 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -153,6 +153,18 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
 		}
 #endif /* !CONFIG_PPC_MMU_NOHASH */
 	}
+
+#ifdef CONFIG_PPC_BOOK3E_64
+	/*
+	 * With hardware tablewalk, a sync is needed to ensure that
+	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
+	 * mappings, we can't tolerate spurious faults, so make sure
+	 * the new PTE will be seen the first time.
+	 */
+	mb();
+#else
+	smp_wmb();
+#endif
 	return 0;
 }