author     Matthew Wilcox <matthew@wil.cx>      2009-05-23 00:49:49 +0400
committer  Tony Luck <tony.luck@intel.com>      2009-06-17 20:33:49 +0400
commit     e088a4ad7fa53c3dc3c29f930025f41ccf01953e
tree       07b012952bbbaccfe4ef3bb44b1ea0a3a3bb3868
parent     e56e2dcd381d9ec35379328f332221581eda4787
[IA64] Convert ia64 to use int-ll64.h
It is generally agreed that it would be beneficial for u64 to be an unsigned long long on all architectures. ia64 (in common with several other 64-bit architectures) currently uses unsigned long. Migrating piecemeal is too painful; this giant patch fixes all compilation warnings and errors that come as a result of switching to use int-ll64.h.

Note that userspace will still see __u64 defined as unsigned long. This is important as it affects C++ name mangling.

[Updated by Tony Luck to change efi.h:efi_freemem_callback_t to use u64 for start/end rather than unsigned long]

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
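For context, a minimal sketch of the typedef difference the message describes. This is not the literal content of the asm-generic headers; the _l64/_ll64 suffixes and the standalone main() are illustrative only:

	/* Simplified sketch of the two kernel-internal definitions of u64.
	 * The suffixed names are hypothetical, chosen so this compiles. */
	#include <stdio.h>

	typedef unsigned long      u64_l64;	/* old ia64 view (int-l64.h)    */
	typedef unsigned long long u64_ll64;	/* new common view (int-ll64.h) */

	/* Userspace on ia64 keeps the old shape: __u64 remains unsigned
	 * long, so C++ name mangling of exported interfaces is unchanged. */
	typedef unsigned long      ia64_user_u64;	/* illustrative name */

	int main(void)
	{
		u64_l64  a = 1;
		u64_ll64 b = 1;
		printf("%lx\n",  a);	/* ok: unsigned long */
		printf("%llx\n", b);	/* ok: unsigned long long; %lx would warn */
		return 0;
	}

The practical fallout inside the kernel is exactly this format-string mismatch: every u64 printed with %lx (and every implicit u64/unsigned long pointer cast) starts warning once u64 becomes unsigned long long, which is why the patch below keeps the intrinsics' temporaries as unsigned long.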
Diffstat (limited to 'arch/ia64/include/asm/gcc_intrin.h')
-rw-r--r--  arch/ia64/include/asm/gcc_intrin.h | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/ia64/include/asm/gcc_intrin.h b/arch/ia64/include/asm/gcc_intrin.h
index c2c5fd8fcac4..21ddee54adae 100644
--- a/arch/ia64/include/asm/gcc_intrin.h
+++ b/arch/ia64/include/asm/gcc_intrin.h
@@ -388,7 +388,7 @@ register unsigned long ia64_r13 asm ("r13") __used;
 #define ia64_native_thash(addr) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
 	ia64_intri_res; \
 })
@@ -419,7 +419,7 @@ register unsigned long ia64_r13 asm ("r13") __used;
 #define ia64_tpa(addr) \
 ({ \
-	__u64 ia64_pa; \
+	unsigned long ia64_pa; \
 	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
 	ia64_pa; \
 })
@@ -444,35 +444,35 @@ register unsigned long ia64_r13 asm ("r13") __used;
 #define ia64_native_get_cpuid(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
 	ia64_intri_res; \
 })
 
 #define __ia64_get_dbr(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
 
 #define ia64_get_ibr(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
 
 #define ia64_get_pkr(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
 
 #define ia64_get_pmc(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
@@ -480,14 +480,14 @@ register unsigned long ia64_r13 asm ("r13") __used;
 #define ia64_native_get_pmd(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
 	ia64_intri_res; \
 })
 
 #define ia64_native_get_rr(index) \
 ({ \
-	__u64 ia64_intri_res; \
+	unsigned long ia64_intri_res; \
 	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
 	ia64_intri_res; \
 })
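For readers unfamiliar with the ({ ... }) pattern used throughout this file, a minimal sketch of why the temporary's type is the whole story here. The GNU statement-expression extension makes the last expression in the block the value of the macro, so the declared type of ia64_intri_res is the result type every caller sees. The function below is hypothetical, not part of the patch:

	/* Hypothetical caller, sketching the warning class this patch fixes.
	 * The macro's result has the type of its internal temporary, so with
	 * an unsigned long temporary the %lx conversion matches; had the
	 * temporary become unsigned long long (the new u64), gcc's -Wformat
	 * would flag this printk at every such call site. */
	void show_rr(unsigned long index)
	{
		printk(KERN_INFO "rr[%lu] = 0x%lx\n", index,
		       ia64_native_get_rr(index));
	}

Keeping the temporaries as unsigned long (the native register width) rather than the new u64 is what lets the nine macros change without touching any of their callers.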