Diffstat (limited to 'arch/ia64/include')
-rw-r--r--  arch/ia64/include/asm/Kbuild     | 1 +
-rw-r--r--  arch/ia64/include/asm/bitops.h   | 8 +-------
-rw-r--r--  arch/ia64/include/asm/dmi.h      | 2 +-
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 5 +++++
-rw-r--r--  arch/ia64/include/asm/tlb.h      | 9 ++++++---
5 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 05b03ecd7933..a3456f34f672 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
 generic-y += exec.h
 generic-y += kvm_para.h
 generic-y += trace_clock.h
+generic-y += vtime.h
\ No newline at end of file
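The new generic-y line drops ia64's private vtime.h in favour of the asm-generic copy: for every header listed under generic-y, Kbuild emits a one-line wrapper into arch/ia64/include/generated/asm/ that redirects to asm-generic. As a sketch (the exact comment the build system writes varies by kernel version), the generated wrapper amounts to:

    /* arch/ia64/include/generated/asm/vtime.h -- emitted by Kbuild */
    #include <asm-generic/vtime.h>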
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 8e20bff39f79..c27eccd33349 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -425,13 +425,7 @@ __fls (unsigned long x)
 
 #include <asm-generic/bitops/fls64.h>
 
-/*
- * ffs: find first bit set. This is defined the same way as the libc and
- * compiler builtin ffs routines, therefore differs in spirit from the above
- * ffz (man ffs): it operates on "int" values only and the result value is the
- * bit number + 1. ffs(0) is defined to return zero.
- */
-#define ffs(x)	__builtin_ffs(x)
+#include <asm-generic/bitops/builtin-ffs.h>
 
 /*
  * hweightN: returns the hamming weight (i.e. the number
diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h
index 1ed4c8fedb83..185d3d18d0ec 100644
--- a/arch/ia64/include/asm/dmi.h
+++ b/arch/ia64/include/asm/dmi.h
@@ -7,6 +7,6 @@
 /* Use normal IO mappings for DMI */
 #define dmi_ioremap ioremap
 #define dmi_iounmap(x,l) iounmap(x)
-#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+#define dmi_alloc(l) kzalloc(l, GFP_ATOMIC)
 
 #endif
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 54ff557d474e..45698cd15b7b 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -102,6 +102,11 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954..bc5efc7c3f3f 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
  * unmapping a portion of the virtual address space, these hooks are called according to
  * the following template:
  *
- *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
+ *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
  * {
  *	  for each vma that needs a shootdown do {
  *		tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
 	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
+	unsigned long		start, end;
 	unsigned long		start_addr;
 	unsigned long		end_addr;
 	struct page		**pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
-	tlb->fullmm = full_mm_flush;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->start_addr = ~0UL;
 }
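In the bitops.h hunk, the open-coded ffs() definition and its comment move to <asm-generic/bitops/builtin-ffs.h>, which wraps the same __builtin_ffs(). The documented semantics are unchanged: ffs() operates on int, returns the 1-based position of the least significant set bit, and ffs(0) is 0. A minimal userspace check of those semantics (the builtin needs no header):

    #include <stdio.h>

    int main(void)
    {
        printf("%d\n", __builtin_ffs(0));    /* 0: no bit set              */
        printf("%d\n", __builtin_ffs(1));    /* 1: bit 0 is the lowest set */
        printf("%d\n", __builtin_ffs(0x50)); /* 5: bit 4 is the lowest set */
        return 0;
    }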
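The dmi.h hunk changes dmi_alloc() from kmalloc() to kzalloc(), so DMI buffers come back zero-filled rather than uninitialized. kzalloc(l, flags) is kmalloc() plus zeroing; in userspace terms the change is analogous to this hypothetical zalloc() helper (a sketch, not kernel code):

    #include <stdlib.h>
    #include <string.h>

    /* zalloc() is hypothetical: allocate, then zero the buffer,
     * mirroring what kzalloc() guarantees over kmalloc(). */
    static void *zalloc(size_t len)
    {
        void *p = malloc(len);
        if (p)
            memset(p, 0, len);
        return p;
    }

    int main(void)
    {
        char *buf = zalloc(64); /* every byte reads as 0 */
        free(buf);
        return 0;
    }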
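The new arch_spin_value_unlocked() in spinlock.h tests a copied lock value rather than a lock in memory, so callers can check a snapshot without re-reading the lock word. An ia64 ticket lock packs a "now serving" and a "next ticket" counter into one word; the lock is free exactly when the two fields agree, which the XOR-and-mask expression tests. A userspace model of the predicate follows; the TICKET_* constants are assumed to match the ones this header defines:

    #include <stdio.h>

    /* assumed to mirror the TICKET_* layout in spinlock.h */
    #define TICKET_SHIFT 17
    #define TICKET_BITS  15
    #define TICKET_MASK  ((1 << TICKET_BITS) - 1)

    typedef struct { unsigned int lock; } arch_spinlock_t;

    static int value_unlocked(arch_spinlock_t lock)
    {
        /* free iff the two ticket counters agree */
        return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
    }

    int main(void)
    {
        arch_spinlock_t free_lock = { (3u << TICKET_SHIFT) | 3u }; /* counters equal  */
        arch_spinlock_t held_lock = { (4u << TICKET_SHIFT) | 3u }; /* counters differ */

        printf("free=%d held=%d\n", value_unlocked(free_lock),
               value_unlocked(held_lock)); /* prints free=1 held=0 */
        return 0;
    }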
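In the tlb.h hunk, tlb_gather_mmu() takes an explicit (start, end) range instead of a full_mm_flush flag and derives the flag from the range: callers signal a full-mm teardown by passing start == 0 and end == ~0UL, and !(start | (end + 1)) is 1 for exactly that pair, because end + 1 wraps to 0. A standalone check of the encoding:

    #include <stdio.h>

    /* the fullmm test from tlb_gather_mmu() above */
    static int is_fullmm(unsigned long start, unsigned long end)
    {
        return !(start | (end + 1)); /* end + 1 wraps to 0 only when end == ~0UL */
    }

    int main(void)
    {
        printf("%d\n", is_fullmm(0, ~0UL));      /* 1: whole address space */
        printf("%d\n", is_fullmm(0, 0x7fff));    /* 0: partial range       */
        printf("%d\n", is_fullmm(0x1000, ~0UL)); /* 0: non-zero start      */
        return 0;
    }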