Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/arcregs.h         |  10
-rw-r--r--  arch/arc/include/asm/atomic.h          | 263
-rw-r--r--  arch/arc/include/asm/cache.h           |   2
-rw-r--r--  arch/arc/include/asm/dwarf.h           |  38
-rw-r--r--  arch/arc/include/asm/elf.h             |   3
-rw-r--r--  arch/arc/include/asm/irqflags-arcv2.h  |   2
-rw-r--r--  arch/arc/include/asm/linkage.h         |  12
-rw-r--r--  arch/arc/include/asm/perf_event.h      |   3
-rw-r--r--  arch/arc/include/asm/uaccess.h         |  11
9 files changed, 327 insertions, 17 deletions
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7fbaea00a336..db25c65155cb 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -95,7 +95,7 @@
 /* Auxiliary registers */
 #define AUX_IDENTITY		4
 #define AUX_INTR_VEC_BASE	0x25
-#define AUX_NON_VOL		0x5e
+#define AUX_VOL			0x5e
 
 /*
  * Floating Pt Registers
@@ -240,14 +240,6 @@ struct bcr_extn_xymem {
 #endif
 };
 
-struct bcr_perip {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int start:8, pad2:8, sz:8, ver:8;
-#else
-	unsigned int ver:8, sz:8, pad2:8, start:8;
-#endif
-};
-
 struct bcr_iccm_arcompact {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	unsigned int base:16, pad:5, sz:3, ver:8;
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 4e3c1b6b0806..b65930a49589 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -20,6 +20,7 @@
 #ifndef CONFIG_ARC_PLAT_EZNPS
 
 #define atomic_read(v)  READ_ONCE((v)->counter)
+#define ATOMIC_INIT(i)	{ (i) }
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
@@ -284,6 +285,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
 #define atomic_sub(i, v) atomic_add(-(i), (v))
 #define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
+#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))
 
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op)	\
@@ -292,6 +294,7 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
 
 ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
 #define atomic_andnot(mask, v) atomic_and(~(mask), (v))
+#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
 ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
 ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
@@ -343,10 +346,266 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
 #define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
 
-#define ATOMIC_INIT(i)			{ (i) }
+
+#ifdef CONFIG_GENERIC_ATOMIC64
 
 #include <asm-generic/atomic64.h>
 
-#endif
+#else	/* Kconfig ensures this is only enabled with needed h/w assist */
+
+/*
+ * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
+ *  - The address HAS to be 64-bit aligned
+ *  - There are 2 semantics involved here:
+ *    = exclusive implies no interim update between load/store to same addr
+ *    = both words are observed/updated together: this is guaranteed even
+ *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
+ *      is NOT required to use LLOCKD+SCONDD, STD suffices
+ */
+
+typedef struct {
+	aligned_u64 counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(a) { (a) }
+
+static inline long long atomic64_read(const atomic64_t *v)
+{
+	unsigned long long val;
+
+	__asm__ __volatile__(
+	"	ldd   %0, [%1]	\n"
+	: "=r"(val)
+	: "r"(&v->counter));
+
+	return val;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long a)
+{
+	/*
+	 * This could have been a simple assignment in "C" but would need
+	 * explicit volatile. Otherwise gcc optimizers could elide the store
+	 * which borked atomic64 self-test
+	 * In the inline asm version, memory clobber needed for exact same
+	 * reason, to tell gcc about the store.
+	 *
+	 * This however is not needed for sibling atomic64_add() etc since both
+	 * load/store are explicitly done in inline asm. As long as API is used
+	 * for each access, gcc has no way to optimize away any load/store
+	 */
+	__asm__ __volatile__(
+	"	std   %0, [%1]	\n"
+	:
+	: "r"(a), "r"(&v->counter)
+	: "memory");
+}
+
+#define ATOMIC64_OP(op, op1, op2)					\
+static inline void atomic64_##op(long long a, atomic64_t *v)		\
+{									\
+	unsigned long long val;						\
+									\
+	__asm__ __volatile__(						\
+	"1:				\n"				\
+	"	llockd  %0, [%1]	\n"				\
+	"	" #op1 " %L0, %L0, %L2	\n"				\
+	"	" #op2 " %H0, %H0, %H2	\n"				\
+	"	scondd   %0, [%1]	\n"				\
+	"	bnz     1b		\n"				\
+	: "=&r"(val)							\
+	: "r"(&v->counter), "ir"(a)					\
+	: "cc");							\
+}									\
+
+#define ATOMIC64_OP_RETURN(op, op1, op2)				\
+static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
+{									\
+	unsigned long long val;						\
+									\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"1:				\n"				\
+	"	llockd   %0, [%1]	\n"				\
+	"	" #op1 " %L0, %L0, %L2	\n"				\
+	"	" #op2 " %H0, %H0, %H2	\n"				\
+	"	scondd   %0, [%1]	\n"				\
+	"	bnz     1b		\n"				\
+	: [val] "=&r"(val)						\
+	: "r"(&v->counter), "ir"(a)					\
+	: "cc");	/* memory clobber comes from smp_mb() */	\
+									\
+	smp_mb();							\
+									\
+	return val;							\
+}
+
+#define ATOMIC64_FETCH_OP(op, op1, op2)					\
+static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
+{									\
+	unsigned long long val, orig;					\
+									\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"1:				\n"				\
+	"	llockd   %0, [%2]	\n"				\
+	"	" #op1 " %L1, %L0, %L3	\n"				\
+	"	" #op2 " %H1, %H0, %H3	\n"				\
+	"	scondd   %1, [%2]	\n"				\
+	"	bnz     1b		\n"				\
+	: "=&r"(orig), "=&r"(val)					\
+	: "r"(&v->counter), "ir"(a)					\
+	: "cc");	/* memory clobber comes from smp_mb() */	\
+									\
+	smp_mb();							\
+									\
+	return orig;							\
+}
+
+#define ATOMIC64_OPS(op, op1, op2)					\
+	ATOMIC64_OP(op, op1, op2)					\
+	ATOMIC64_OP_RETURN(op, op1, op2)				\
+	ATOMIC64_FETCH_OP(op, op1, op2)
+
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OPS(add, add.f, adc)
+ATOMIC64_OPS(sub, sub.f, sbc)
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or, or, or)
+ATOMIC64_OPS(xor, xor, xor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+static inline long long
+atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
+{
+	long long prev;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llockd  %0, [%1]	\n"
+	"	brne    %L0, %L2, 2f	\n"
+	"	brne    %H0, %H2, 2f	\n"
+	"	scondd  %3, [%1]	\n"
+	"	bnz     1b		\n"
+	"2:				\n"
+	: "=&r"(prev)
+	: "r"(ptr), "ir"(expected), "r"(new)
+	: "cc");	/* memory clobber comes from smp_mb() */
+
+	smp_mb();
+
+	return prev;
+}
+
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+{
+	long long prev;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llockd  %0, [%1]	\n"
+	"	scondd  %2, [%1]	\n"
+	"	bnz     1b		\n"
+	"2:				\n"
+	: "=&r"(prev)
+	: "r"(ptr), "r"(new)
+	: "cc");	/* memory clobber comes from smp_mb() */
+
+	smp_mb();
+
+	return prev;
+}
+
+/**
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long long val;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llockd  %0, [%1]	\n"
+	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
+	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
+	"	brlt    %H0, 0, 2f	\n"
+	"	scondd  %0, [%1]	\n"
+	"	bnz     1b		\n"
+	"2:				\n"
+	: "=&r"(val)
+	: "r"(&v->counter)
+	: "cc");	/* memory clobber comes from smp_mb() */
+
+	smp_mb();
+
+	return val;
+}
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * if (v != u) { v += a; ret = 1} else {ret = 0}
+ * Returns 1 iff @v was not @u (i.e. if add actually happened)
+ */
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+{
+	long long val;
+	int op_done;
+
+	smp_mb();
+
+	__asm__ __volatile__(
+	"1:	llockd  %0, [%2]	\n"
+	"	mov	%1, 1		\n"
+	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
+	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
+	"	mov	%1, 0		\n"
+	"2:				\n"
+	"	add.f   %L0, %L0, %L3	\n"
+	"	adc     %H0, %H0, %H3	\n"
+	"	scondd  %0, [%2]	\n"
+	"	bnz     1b		\n"
+	"3:				\n"
+	: "=&r"(val), "=&r" (op_done)
+	: "r"(&v->counter), "r"(a), "r"(u)
+	: "cc");	/* memory clobber comes from smp_mb() */
+
+	smp_mb();
+
+	return op_done;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#endif	/* !CONFIG_GENERIC_ATOMIC64 */
+
+#endif	/* !__ASSEMBLY__ */
 
 #endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 23706c635c30..fb781e34f322 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -54,7 +54,7 @@ extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
 
 extern int ioc_exists;
-extern unsigned long perip_base;
+extern unsigned long perip_base, perip_end;
 
 #endif	/* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/dwarf.h b/arch/arc/include/asm/dwarf.h
new file mode 100644
index 000000000000..bb7bdbc59a44
--- /dev/null
+++ b/arch/arc/include/asm/dwarf.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_DWARF_H
+#define _ASM_ARC_DWARF_H
+
+#ifdef __ASSEMBLY__
+
+#ifdef ARC_DW2_UNWIND_AS_CFI
+
+#define CFI_STARTPROC	.cfi_startproc
+#define CFI_ENDPROC	.cfi_endproc
+#define CFI_DEF_CFA	.cfi_def_cfa
+#define CFI_REGISTER	.cfi_register
+#define CFI_REL_OFFSET	.cfi_rel_offset
+#define CFI_UNDEFINED	.cfi_undefined
+
+#else
+
+#define CFI_IGNORE	#
+
+#define CFI_STARTPROC	CFI_IGNORE
+#define CFI_ENDPROC	CFI_IGNORE
+#define CFI_DEF_CFA	CFI_IGNORE
+#define CFI_REGISTER	CFI_IGNORE
+#define CFI_REL_OFFSET	CFI_IGNORE
+#define CFI_UNDEFINED	CFI_IGNORE
+
+#endif	/* !ARC_DW2_UNWIND_AS_CFI */
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* _ASM_ARC_DWARF_H */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index 51a99e25fe33..7096f97a1434 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -23,8 +23,7 @@
 /* ARC Relocations (kernel Modules only) */
 #define  R_ARC_32		0x4
 #define  R_ARC_32_ME		0x1B
-#define  R_ARC_S25H_PCREL	0x10
-#define  R_ARC_S25W_PCREL	0x11
+#define  R_ARC_32_PCREL		0x31
 
 /*to set parameters in the core dumps */
 #define ELF_ARCH		EM_ARCOMPACT
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index d1ec7f6b31e0..e880dfa3fcd3 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -112,7 +112,7 @@ static inline long arch_local_save_flags(void)
 	 */
 	temp = (1 << 5) |
 		((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
-		(temp & CLRI_STATUS_E_MASK);
+		((temp >> 1) & CLRI_STATUS_E_MASK);
 
 	return temp;
 }
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index 5faad17118b4..b29f1a9fd6f7 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -9,6 +9,8 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
+#include <asm/dwarf.h>
+
 #ifdef __ASSEMBLY__
 
 #define ASM_NL		 `	/* use '`' to mark new line in macro */
@@ -32,6 +34,16 @@
 #endif
 .endm
 
+#define ENTRY_CFI(name)		\
+	.globl name ASM_NL	\
+	ALIGN ASM_NL		\
+	name: ASM_NL		\
+	CFI_STARTPROC ASM_NL
+
+#define END_CFI(name)		\
+	CFI_ENDPROC ASM_NL	\
+	.size name, .-name
+
 #else	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARC_HAS_ICCM
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 5f071762fb1c..9185541035cc 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -118,6 +118,9 @@ static const char * const arc_pmu_ev_hw_map[] = {
 	[PERF_COUNT_ARC_ICM] = "icm",		/* I-cache Miss */
 	[PERF_COUNT_ARC_EDTLB] = "edtlb",	/* D-TLB Miss */
 	[PERF_COUNT_ARC_EITLB] = "eitlb",	/* I-TLB Miss */
+
+	[PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc",	/* Instr: mem read cached */
+	[PERF_COUNT_HW_CACHE_MISSES] = "dclm",		/* D-cache Load Miss */
 };
 
 #define C(_x)			PERF_COUNT_HW_CACHE_##_x
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index a78d5670884f..41faf17cd28d 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -83,7 +83,10 @@
 	"2:	;nop\n"				\
 	"	.section .fixup, \"ax\"\n"	\
 	"	.align 4\n"			\
-	"3:	mov %0, %3\n"			\
+	"3:	# return -EFAULT\n"		\
+	"	mov %0, %3\n"			\
+	"	# zero out dst ptr\n"		\
+	"	mov %1, 0\n"			\
 	"	j   2b\n"			\
 	"	.previous\n"			\
 	"	.section __ex_table, \"a\"\n"	\
@@ -101,7 +104,11 @@
 	"2:	;nop\n"				\
 	"	.section .fixup, \"ax\"\n"	\
 	"	.align 4\n"			\
-	"3:	mov %0, %3\n"			\
+	"3:	# return -EFAULT\n"		\
+	"	mov %0, %3\n"			\
+	"	# zero out dst ptr\n"		\
+	"	mov %1, 0\n"			\
+	"	mov %R1, 0\n"			\
 	"	j   2b\n"			\
 	"	.previous\n"			\
 	"	.section __ex_table, \"a\"\n"	\
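The atomic64_add_unless() contract spelled out in the atomic.h hunk above (add @a unless the value equals @u, return 1 iff the add happened) is what the atomic64_inc_not_zero() refcount idiom relies on. The following stand-alone C sketch models only that return-value semantic; it is illustrative, not the kernel API: the model_* names are hypothetical, and there is no atomicity, no LLOCKD/SCONDD retry loop, and no smp_mb() barrier.

#include <stdio.h>

/* Hypothetical, non-atomic model of atomic64_t, for illustration only. */
typedef struct { long long counter; } model_atomic64_t;

/* Mirrors the documented contract of atomic64_add_unless():
 * add @a to @v unless @v equals @u; return 1 iff the add happened. */
static int model_atomic64_add_unless(model_atomic64_t *v, long long a, long long u)
{
	if (v->counter == u)
		return 0;
	v->counter += a;
	return 1;
}

/* atomic64_inc_not_zero(v) is defined above as atomic64_add_unless(v, 1LL, 0LL). */
static int model_atomic64_inc_not_zero(model_atomic64_t *v)
{
	return model_atomic64_add_unless(v, 1LL, 0LL);
}

int main(void)
{
	model_atomic64_t live = { 3 };	/* object still referenced */
	model_atomic64_t dead = { 0 };	/* refcount already dropped to zero */

	/* Succeeds: count was non-zero, so taking another reference is safe. */
	printf("live: ret=%d count=%lld\n",
	       model_atomic64_inc_not_zero(&live), live.counter);

	/* Fails: count already hit zero, the object must not be revived. */
	printf("dead: ret=%d count=%lld\n",
	       model_atomic64_inc_not_zero(&dead), dead.counter);

	return 0;
}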
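The two uaccess.h fixup hunks above make a faulting __get_user() both return -EFAULT ("mov %0, %3") and zero the destination register pair ("mov %1, 0" / "mov %R1, 0"), so the caller never sees stale data in its variable after a failed user read. A stand-alone sketch of that caller-visible contract, using a hypothetical model_get_user() rather than the real kernel helper:

#include <errno.h>
#include <stdio.h>

/* Hypothetical model of the fixup semantics added above: on a faulting
 * read, return -EFAULT *and* zero the destination, instead of leaving
 * whatever stale value happened to be there. */
static int model_get_user(unsigned int *dst, const unsigned int *src, int fault)
{
	if (fault) {
		*dst = 0;		/* the added "mov %1, 0" */
		return -EFAULT;		/* the pre-existing "mov %0, %3" */
	}
	*dst = *src;
	return 0;
}

int main(void)
{
	unsigned int user_val = 0xdeadbeef;
	unsigned int kbuf = 0x41414141;	/* stale contents a fault must not leak */

	if (model_get_user(&kbuf, &user_val, 1))
		printf("faulted: kbuf=0x%x (zeroed, stale 0x41414141 not leaked)\n", kbuf);

	if (!model_get_user(&kbuf, &user_val, 0))
		printf("ok:      kbuf=0x%x\n", kbuf);

	return 0;
}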