Diffstat (limited to 'arch/ia64')
 -rw-r--r--  arch/ia64/Kconfig                      1
 -rw-r--r--  arch/ia64/include/asm/bitops.h        45
 -rw-r--r--  arch/ia64/include/asm/dma.h            2
 -rw-r--r--  arch/ia64/include/asm/io.h             8
 -rw-r--r--  arch/ia64/include/asm/mmu_context.h    5
 -rw-r--r--  arch/ia64/include/asm/pci.h            6
 -rw-r--r--  arch/ia64/include/asm/pgtable.h       18
 -rw-r--r--  arch/ia64/include/asm/processor.h      2
 -rw-r--r--  arch/ia64/include/uapi/asm/cmpxchg.h  28
 -rw-r--r--  arch/ia64/kernel/smp.c                 6
 -rw-r--r--  arch/ia64/mm/fault.c                   4
 -rw-r--r--  arch/ia64/mm/init.c                   28
12 files changed, 72 insertions, 81 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cb93769a9f2a..26ac8ea15a9e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -39,7 +39,6 @@ config IA64
select HAVE_FUNCTION_DESCRIPTORS
select HAVE_VIRT_CPU_ACCOUNTING
select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE
- select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 577be93c0818..1accb7842f58 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -53,7 +53,7 @@ set_bit (int nr, volatile void *addr)
}
/**
- * __set_bit - Set a bit in memory
+ * arch___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -61,8 +61,8 @@ set_bit (int nr, volatile void *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void
-__set_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
}
@@ -135,7 +135,7 @@ __clear_bit_unlock(int nr, void *addr)
}
/**
- * __clear_bit - Clears a bit in memory (non-atomic version)
+ * arch___clear_bit - Clears a bit in memory (non-atomic version)
* @nr: the bit to clear
* @addr: the address to start counting from
*
@@ -143,8 +143,8 @@ __clear_bit_unlock(int nr, void *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}
@@ -175,7 +175,7 @@ change_bit (int nr, volatile void *addr)
}
/**
- * __change_bit - Toggle a bit in memory
+ * arch___change_bit - Toggle a bit in memory
* @nr: the bit to toggle
* @addr: the address to start counting from
*
@@ -183,8 +183,8 @@ change_bit (int nr, volatile void *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static __inline__ void
-__change_bit (int nr, volatile void *addr)
+static __always_inline void
+arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
}
@@ -224,7 +224,7 @@ test_and_set_bit (int nr, volatile void *addr)
#define test_and_set_bit_lock test_and_set_bit
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * arch___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
@@ -232,8 +232,8 @@ test_and_set_bit (int nr, volatile void *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+static __always_inline bool
+arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
@@ -269,7 +269,7 @@ test_and_clear_bit (int nr, volatile void *addr)
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * arch___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
@@ -277,8 +277,8 @@ test_and_clear_bit (int nr, volatile void *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+static __always_inline bool
+arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
}
/**
- * __test_and_change_bit - Change a bit and return its old value
+ * arch___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
*/
-static __inline__ int
-__test_and_change_bit (int nr, void *addr)
+static __always_inline bool
+arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
__u32 old, bit = (1 << (nr & 31));
__u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,11 +331,8 @@ __test_and_change_bit (int nr, void *addr)
return (old & bit) != 0;
}
-static __inline__ int
-test_bit (int nr, const volatile void *addr)
-{
- return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
-}
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
/**
* ffz - find the first zero bit in a long word
@@ -443,6 +440,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
#ifdef __KERNEL__
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
+
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
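
The open-coded test_bit() above is dropped in favor of the generic helpers,
and the asm-generic non-instrumented wrappers now map __set_bit() and
friends onto these arch___*() implementations. For reference, the generic
test_bit this switches to looks roughly like the following sketch (after
asm-generic/bitops/generic-non-atomic.h; header guards omitted):

	static __always_inline bool
	generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
	{
		/* plain volatile load; BIT_WORD(nr) == nr / BITS_PER_LONG */
		return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1)));
	}
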
diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h
index 59625e9c1f9c..eaed2626ffda 100644
--- a/arch/ia64/include/asm/dma.h
+++ b/arch/ia64/include/asm/dma.h
@@ -12,8 +12,6 @@
extern unsigned long MAX_DMA_ADDRESS;
-extern int isa_dma_bridge_buggy;
-
#define free_dma(x)
#endif /* _ASM_IA64_DMA_H */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 6d93b923b379..ce66dfc0e719 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -96,14 +96,6 @@ extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
-/*
- * The following two macros are deprecated and scheduled for removal.
- * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
- */
-#define bus_to_virt phys_to_virt
-#define virt_to_bus virt_to_phys
-#define page_to_bus page_to_phys
-
# endif /* KERNEL */
/*
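
With VIRT_TO_BUS gone from Kconfig, the virt_to_bus()/bus_to_virt() macros
removed here have no remaining users; drivers obtain device-visible
addresses through the DMA API instead. A minimal sketch of the migration,
assuming a hypothetical driver with a struct device *dev, a buffer buf and
a length len:

	/* before (removed): bus address assumed equal to physical */
	/* unsigned long bus = virt_to_bus(buf); */

	/* after: let the DMA layer map the buffer for the device */
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, bus))
		return -ENOMEM;
	/* ... program the device with "bus", run the transfer ... */
	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
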
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 87a0d5bc11ef..06257e355d00 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -124,9 +124,12 @@ reload_context (nv_mm_context_t context)
{
unsigned long rid;
unsigned long rid_incr = 0;
- unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
+ unsigned long rr0, rr1, rr2, rr3, rr4;
+#ifdef CONFIG_HUGETLB_PAGE
+ unsigned long old_rr4;
old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
+#endif
rid = context << 3; /* make space for encoding the region number */
rid_incr = 1 << 8;
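
Scoping old_rr4 under CONFIG_HUGETLB_PAGE avoids an unused-variable warning
on !HUGETLB configurations: its only reader sits under the same guard later
in reload_context(), roughly (a sketch; the exact mask is as in the existing
code):

	#ifdef CONFIG_HUGETLB_PAGE
		/* preserve the previously programmed huge-page size bits */
		rr4 = (rr4 & ~0xfcUL) | (old_rr4 & 0xfc);
	#endif
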
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 8c163d1d0189..fa8f545c24c9 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -63,10 +63,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
return (pci_domain_nr(bus) != 0);
}
-#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
-{
- return channel ? isa_irq_to_vector(15) : isa_irq_to_vector(14);
-}
-
#endif /* _ASM_IA64_PCI_H */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7aa8f2330fb1..6925e28ae61d 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -161,24 +161,6 @@
* attempts to write to the page.
*/
/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
-#define __P011 PAGE_READONLY /* ditto */
-#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
-#define __S011 PAGE_SHARED
-#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 7cbce290f4e5..757c2f6d8d4b 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -538,7 +538,7 @@ ia64_get_irr(unsigned int vector)
{
unsigned int reg = vector / 64;
unsigned int bit = vector % 64;
- u64 irr;
+ unsigned long irr;
switch (reg) {
case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 2c2f3cfeaa77..ca2e02685343 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -33,24 +33,24 @@ extern void ia64_xchg_called_with_bad_pointer(void);
\
switch (size) { \
case 1: \
- __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
+ __xchg_result = ia64_xchg1((__u8 __force *)ptr, x); \
break; \
\
case 2: \
- __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
+ __xchg_result = ia64_xchg2((__u16 __force *)ptr, x); \
break; \
\
case 4: \
- __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
+ __xchg_result = ia64_xchg4((__u32 __force *)ptr, x); \
break; \
\
case 8: \
- __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
+ __xchg_result = ia64_xchg8((__u64 __force *)ptr, x); \
break; \
default: \
ia64_xchg_called_with_bad_pointer(); \
} \
- __xchg_result; \
+ (__typeof__ (*(ptr)) __force) __xchg_result; \
})
#ifndef __KERNEL__
@@ -76,42 +76,42 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
\
switch (size) { \
case 1: \
- _o_ = (__u8) (long) (old); \
+ _o_ = (__u8) (long __force) (old); \
break; \
case 2: \
- _o_ = (__u16) (long) (old); \
+ _o_ = (__u16) (long __force) (old); \
break; \
case 4: \
- _o_ = (__u32) (long) (old); \
+ _o_ = (__u32) (long __force) (old); \
break; \
case 8: \
- _o_ = (__u64) (long) (old); \
+ _o_ = (__u64) (long __force) (old); \
break; \
default: \
break; \
} \
switch (size) { \
case 1: \
- _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
+ _r_ = ia64_cmpxchg1_##sem((__u8 __force *) ptr, new, _o_); \
break; \
\
case 2: \
- _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
+ _r_ = ia64_cmpxchg2_##sem((__u16 __force *) ptr, new, _o_); \
break; \
\
case 4: \
- _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
+ _r_ = ia64_cmpxchg4_##sem((__u32 __force *) ptr, new, _o_); \
break; \
\
case 8: \
- _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
+ _r_ = ia64_cmpxchg8_##sem((__u64 __force *) ptr, new, _o_); \
break; \
\
default: \
_r_ = ia64_cmpxchg_called_with_bad_pointer(); \
break; \
} \
- (__typeof__(old)) _r_; \
+ (__typeof__(old) __force) _r_; \
})
#define cmpxchg_acq(ptr, o, n) \
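
The __force casts are sparse annotations only; the generated code is
unchanged. They mark casts as intentional where xchg()/cmpxchg() is applied
to pointers carrying address-space or __bitwise tags. A self-contained
illustration of the mechanism, mirroring the compiler_types.h definitions
(the example type is hypothetical):

	#ifdef __CHECKER__			/* defined when sparse runs */
	# define __force	__attribute__((force))
	# define __bitwise	__attribute__((bitwise))
	#else
	# define __force
	# define __bitwise
	#endif

	typedef unsigned int __bitwise example_flag_t;

	unsigned int raw_value(example_flag_t f)
	{
		/* without __force, sparse warns that the cast drops __bitwise */
		return (__force unsigned int)f;
	}
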
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 7b7b64eb3129..e2cc59db86bc 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -333,9 +333,3 @@ smp_send_stop (void)
{
send_IPI_allbutself(IPI_CPU_STOP);
}
-
-int
-setup_profiling_timer (unsigned int multiplier)
-{
- return -EINVAL;
-}
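
Every SMP architecture carried an identical -EINVAL stub here; the per-arch
copies were removed in favor of a single shared definition of the same shape
(assumed to live in kernel/profile.c, not shown in this diff):

	int setup_profiling_timer(unsigned int multiplier)
	{
		return -EINVAL;	/* changing the profiling rate is unsupported */
	}
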
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 07379d1a227f..ef78c2d66cdd 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -139,6 +139,10 @@ retry:
if (fault_signal_pending(fault, regs))
return;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We ran out of memory, or some other thing happened
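
VM_FAULT_COMPLETED signals that the core mm fully handled the fault and
already dropped mmap_lock, so the handler must return before its normal
unlock path. The surrounding flow, sketched (the final unlock is the
pre-existing tail of this handler):

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs))
		return;
	if (fault & VM_FAULT_COMPLETED)
		return;			/* mmap_lock already released */
	/* ... error handling, retry logic ... */
	mmap_read_unlock(mm);		/* skipped in the COMPLETED case */
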
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 855d949d81df..fc4e4217e87f 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
- gate_vma.vm_page_prot = __P101;
+ gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
return 0;
}
@@ -490,3 +490,29 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
__remove_pages(start_pfn, nr_pages, altmap);
}
#endif
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_READONLY,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ [VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_X_RX),
+ [VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RX),
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_X_RX),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RX),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RWX),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+ _PAGE_AR_RWX)
+};
+DECLARE_VM_GET_PAGE_PROT
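
The protection_map table plus DECLARE_VM_GET_PAGE_PROT replaces the
__P000..__S111 macros deleted from pgtable.h above: the core mm no longer
indexes per-arch macro tables but calls vm_get_page_prot(), which the macro
expands to, roughly (after include/linux/pgtable.h):

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
	EXPORT_SYMBOL(vm_get_page_prot);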