author     Marcelo Tosatti <mtosatti@redhat.com>  2012-10-30 01:15:32 +0400
committer  Marcelo Tosatti <mtosatti@redhat.com>  2012-10-30 01:15:32 +0400
commit     19bf7f8ac3f8131100027281c495dbbe00cd5ae0 (patch)
tree       270b97e3ca47c0f62a1babca2ae37f79a76a309c /arch/arm/include/asm/io.h
parent     787c57c0fb393fe8a3974d300ddcfe30373386fe (diff)
parent     35fd3dc58da675d659513384221349ef90749a01 (diff)
download   linux-19bf7f8ac3f8131100027281c495dbbe00cd5ae0.tar.xz
Merge remote-tracking branch 'master' into queue
Merge reason: development work has dependency on kvm patches merged upstream.

Conflicts:
	arch/powerpc/include/asm/Kbuild
	arch/powerpc/include/asm/kvm_para.h

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/arm/include/asm/io.h')
-rw-r--r--  arch/arm/include/asm/io.h  67
1 file changed, 61 insertions(+), 6 deletions(-)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 8f4db67533e5..35c1ed89b936 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -47,13 +47,68 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
-#define __raw_writeb(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v)))
-#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
-#define __raw_writel(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v)))
+#if __LINUX_ARM_ARCH__ < 6
+/*
+ * Half-word accesses are problematic with RiscPC due to limitations of
+ * the bus. Rather than special-case the machine, just let the compiler
+ * generate the access for CPUs prior to ARMv6.
+ */
+#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
+#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#else
+/*
+ * When running under a hypervisor, we want to avoid I/O accesses with
+ * writeback addressing modes as these incur a significant performance
+ * overhead (the address generation must be emulated in software).
+ */
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+	asm volatile("strh %1, %0"
+		     : "+Qo" (*(volatile u16 __force *)addr)
+		     : "r" (val));
+}
+
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+	u16 val;
+	asm volatile("ldrh %1, %0"
+		     : "+Qo" (*(volatile u16 __force *)addr),
+		       "=r" (val));
+	return val;
+}
+#endif
-#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a))
-#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
-#define __raw_readl(a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a))
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+	asm volatile("strb %1, %0"
+		     : "+Qo" (*(volatile u8 __force *)addr)
+		     : "r" (val));
+}
+
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+	asm volatile("str %1, %0"
+		     : "+Qo" (*(volatile u32 __force *)addr)
+		     : "r" (val));
+}
+
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+	u8 val;
+	asm volatile("ldrb %1, %0"
+		     : "+Qo" (*(volatile u8 __force *)addr),
+		       "=r" (val));
+	return val;
+}
+
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+	u32 val;
+	asm volatile("ldr %1, %0"
+		     : "+Qo" (*(volatile u32 __force *)addr),
+		       "=r" (val));
+	return val;
+}
/*
* Architecture ioremap implementation.
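
Note on the rationale in the new comment block: under a trap-and-emulate hypervisor every MMIO access faults, and the hypervisor must decode the faulting instruction. If the compiler has folded a pointer update into the access as a writeback (pre- or post-indexed) addressing mode, the emulation must also reproduce the base-register update in software. The inline-asm accessors added by this hunk pin the memory operand with the "+Qo" constraint, which admits only a bare base register ("Q") or a base-plus-offset form ("o"), so writeback addressing can no longer be generated. The sketch below is purely illustrative and not part of the patch; copy_to_regs() is a hypothetical helper showing the kind of loop where the old macro-based __raw_writel() could have been compiled to a post-indexed store such as "str r3, [r0], #4".

#include <linux/io.h>
#include <linux/types.h>

/*
 * Hypothetical helper, not part of this patch: copies a buffer to a run
 * of consecutive 32-bit device registers. With the macro-based
 * __raw_writel() the compiler could fold the address update into the
 * MMIO store as a post-indexed access; with the inline-asm version and
 * its "+Qo" operand, only plain base or base+offset addressing is used,
 * which keeps the access cheap for a hypervisor to emulate.
 */
static void copy_to_regs(void __iomem *base, const u32 *buf, int words)
{
	int i;

	for (i = 0; i < words; i++)
		__raw_writel(buf[i], base + 4 * i);
}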