Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/atomic.h         | 86
-rw-r--r--  arch/arc/include/asm/cache.h          |  4
-rw-r--r--  arch/arc/include/asm/delay.h          |  3
-rw-r--r--  arch/arc/include/asm/entry-compact.h  |  6
-rw-r--r--  arch/arc/include/asm/entry.h          |  3
-rw-r--r--  arch/arc/include/asm/kprobes.h        |  2
-rw-r--r--  arch/arc/include/asm/mach_desc.h      |  2
-rw-r--r--  arch/arc/include/asm/page.h           |  2
-rw-r--r--  arch/arc/include/asm/pgtable.h        |  2
9 files changed, 31 insertions, 79 deletions
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 11859287c52a..4e0072730241 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -187,7 +187,8 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)		\
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
-#define atomic_andnot atomic_andnot
+#define atomic_andnot		atomic_andnot
+#define atomic_fetch_andnot	atomic_fetch_andnot
 
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op)					\
@@ -296,8 +297,6 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
 	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
 ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
 ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
@@ -308,48 +307,6 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v
- */
-#define __atomic_add_unless(v, a, u)					\
-({									\
-	int c, old;							\
-									\
-	/*								\
-	 * Explicit full memory barrier needed before/after as		\
-	 * LLOCK/SCOND thmeselves don't provide any such semantics	\
-	 */								\
-	smp_mb();							\
-									\
-	c = atomic_read(v);						\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
-		c = old;						\
-									\
-	smp_mb();							\
-									\
-	c;								\
-})
-
-#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v)			atomic_add(1, v)
-#define atomic_dec(v)			atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
-
-
 #ifdef CONFIG_GENERIC_ATOMIC64
 
 #include <asm-generic/atomic64.h>
@@ -472,7 +429,8 @@ static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
 	ATOMIC64_OP_RETURN(op, op1, op2)				\
 	ATOMIC64_FETCH_OP(op, op1, op2)
 
-#define atomic64_andnot atomic64_andnot
+#define atomic64_andnot		atomic64_andnot
+#define atomic64_fetch_andnot	atomic64_fetch_andnot
 
 ATOMIC64_OPS(add, add.f, adc)
 ATOMIC64_OPS(sub, sub.f, sbc)
@@ -559,53 +517,43 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 
 	return val;
 }
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
 /**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * if (v != u) { v += a; ret = 1} else {ret = 0}
- * Returns 1 iff @v was not @u (i.e. if add actually happened)
+ * Atomically adds @a to @v, if it was not @u.
+ * Returns the old value of @v
  */
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+						  long long u)
 {
-	long long val;
-	int op_done;
+	long long old, temp;
 
 	smp_mb();
 
 	__asm__ __volatile__(
 	"1:	llockd  %0, [%2]	\n"
-	"	mov	%1, 1		\n"
 	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
 	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
-	"	mov	%1, 0		\n"
 	"2:				\n"
-	"	add.f   %L0, %L0, %L3	\n"
-	"	adc     %H0, %H0, %H3	\n"
-	"	scondd  %0, [%2]	\n"
+	"	add.f   %L1, %L0, %L3	\n"
+	"	adc     %H1, %H0, %H3	\n"
+	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
 	"3:				\n"
-	: "=&r"(val), "=&r" (op_done)
+	: "=&r"(old), "=&r" (temp)
 	: "r"(&v->counter), "r"(a), "r"(u)
 	: "cc");	/* memory clobber comes from smp_mb() */
 
 	smp_mb();
 
-	return op_done;
+	return old;
 }
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif	/* !CONFIG_GENERIC_ATOMIC64 */
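With atomic64_fetch_add_unless() now provided and advertised via the #define
above, the generic atomic layer can derive the conditional-add helpers this
hunk deletes. A minimal sketch of that derivation (it mirrors the
include/linux/atomic.h fallbacks of this era; the exact spelling in the
generic header may differ):

	/* The add happened iff the old value differs from @u, since
	 * atomic64_fetch_add_unless() returns the value before the add. */
	static inline bool atomic64_add_unless(atomic64_t *v, long long a,
					       long long u)
	{
		return atomic64_fetch_add_unless(v, a, u) != u;
	}

	/* Typical refcount-style user: take a reference only if still live. */
	#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

The removed 32-bit helpers (atomic_inc_not_zero() and friends) are
regenerated the same way by the generic layer, which is why they can simply
be dropped from this header.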
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 8486f328cc5d..ff7d3232764a 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -48,7 +48,9 @@
 })
 
 /* Largest line length for either L1 or L2 is 128 bytes */
-#define ARCH_DMA_MINALIGN	128
+#define SMP_CACHE_BYTES		128
+#define cache_line_size()	SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	SMP_CACHE_BYTES
 
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index d5da2115d78a..03d6bb0f4e13 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -17,8 +17,11 @@
 #ifndef __ASM_ARC_UDELAY_H
 #define __ASM_ARC_UDELAY_H
 
+#include <asm-generic/types.h>
 #include <asm/param.h>		/* HZ */
 
+extern unsigned long loops_per_jiffy;
+
 static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index ec36d5b6d435..29f3988c9424 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -234,6 +234,9 @@
 	POP	gp
 	RESTORE_R12_TO_R0
 
+#ifdef CONFIG_ARC_CURR_IN_REG
+	ld	r25, [sp, 12]
+#endif
 	ld  sp, [sp] /* restore original sp */
 	/* orig_r0, ECR, user_r25 skipped automatically */
 .endm
@@ -315,6 +318,9 @@
 	POP	gp
 	RESTORE_R12_TO_R0
 
+#ifdef CONFIG_ARC_CURR_IN_REG
+	ld	r25, [sp, 12]
+#endif
 	ld  sp, [sp] /* restore original sp */
 	/* orig_r0, ECR, user_r25 skipped automatically */
 .endm
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 51597f344a62..302b0db8ea2b 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -86,9 +86,6 @@
 	POP	r1
 	POP	r0
 
-#ifdef CONFIG_ARC_CURR_IN_REG
-	ld	r25, [sp, 12]
-#endif
 .endm
 
 /*--------------------------------------------------------------
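A note on the cache.h hunk above: without an architecture override,
linux/cache.h defaults SMP_CACHE_BYTES to L1_CACHE_BYTES, which under-reports
the 128-byte worst case shared by L1 and L2 here; defining SMP_CACHE_BYTES
and cache_line_size() lets the slab allocators and alignment helpers pad to
the real line size. A minimal illustration with a hypothetical struct (not
from the kernel):

	/* Pad hot per-CPU data out to the largest line so fields owned by
	 * different CPUs never share (and ping-pong on) a cache line. */
	struct hot_counter {
		unsigned long count;
	} __attribute__((__aligned__(128)));	/* == SMP_CACHE_BYTES */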
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
index 2e52d18e6bc7..2c1b479d5aea 100644
--- a/arch/arc/include/asm/kprobes.h
+++ b/arch/arc/include/asm/kprobes.h
@@ -45,8 +45,6 @@ struct prev_kprobe {
 
 struct kprobe_ctlblk {
 	unsigned int kprobe_status;
-	struct pt_regs jprobe_saved_regs;
-	char jprobes_stack[MAX_STACK_SIZE];
 	struct prev_kprobe prev_kprobe;
 };
 
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index c28e6c347b49..871f3cb16af9 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -34,9 +34,7 @@ struct machine_desc {
 	const char		*name;
 	const char		**dt_compat;
 	void			(*init_early)(void);
-#ifdef CONFIG_SMP
 	void			(*init_per_cpu)(unsigned int);
-#endif
 	void			(*init_machine)(void);
 	void			(*init_late)(void);
 
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 109baa06831c..09ddddf71cc5 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -105,7 +105,7 @@ typedef pte_t * pgtable_t;
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define WANT_PAGE_VIRTUAL   1
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 8ec5599a0957..cf4be70d5892 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -377,7 +377,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 /* Decode a PTE containing swap "identifier "into constituents */
 #define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike)	((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
 
 /* NOPs, to keep generic kernel happy */
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
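A note on the pgtable.h fix: the encoder in this header packs the 5-bit swap
type into the low bits and the offset above bit 13, so the decoder must shift
right; the old "<< 13" produced garbage offsets. A standalone sketch of the
round-trip the fix restores (names and exact bit layout are illustrative, not
the kernel's macros):

	#include <assert.h>

	/* Pack: 5-bit type in bits 4..0, offset starting at bit 13. */
	static unsigned long swp_encode(unsigned long type, unsigned long off)
	{
		return (type & 0x1f) | (off << 13);
	}

	static unsigned long swp_type_of(unsigned long val) { return val & 0x1f; }
	static unsigned long swp_off_of(unsigned long val)  { return val >> 13; }

	int main(void)
	{
		unsigned long e = swp_encode(3, 42);
		assert(swp_type_of(e) == 3);
		assert(swp_off_of(e) == 42);	/* holds only with ">>" */
		return 0;
	}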