Diffstat (limited to 'arch/arc/include')
-rw-r--r--  arch/arc/include/asm/arcregs.h      | 14
-rw-r--r--  arch/arc/include/asm/bitops.h       | 31
-rw-r--r--  arch/arc/include/asm/perf_event.h   | 70
-rw-r--r--  arch/arc/include/asm/processor.h    | 14
-rw-r--r--  arch/arc/include/asm/stacktrace.h   | 37
-rw-r--r--  arch/arc/include/asm/thread_info.h  |  2
6 files changed, 104 insertions(+), 64 deletions(-)
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index be33db8a2ee3..e2b1b1211b0d 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -30,6 +30,7 @@
#define ARC_REG_D_UNCACH_BCR 0x6A
#define ARC_REG_BPU_BCR 0xc0
#define ARC_REG_ISA_CFG_BCR 0xc1
+#define ARC_REG_RTT_BCR 0xF2
#define ARC_REG_SMART_BCR 0xFF
/* status32 Bits Positions */
@@ -50,11 +51,7 @@
* [15: 8] = Exception Cause Code
* [ 7: 0] = Exception Parameters (for certain types only)
*/
-#define ECR_VEC_MASK 0xff0000
-#define ECR_CODE_MASK 0x00ff00
-#define ECR_PARAM_MASK 0x0000ff
-
-/* Exception Cause Vector Values */
+#define ECR_V_MEM_ERR 0x01
#define ECR_V_INSN_ERR 0x02
#define ECR_V_MACH_CHK 0x20
#define ECR_V_ITLB_MISS 0x21
@@ -62,7 +59,8 @@
#define ECR_V_PROTV 0x23
#define ECR_V_TRAP 0x25
-/* Protection Violation Exception Cause Code Values */
+/* DTLB Miss and Protection Violation Cause Codes */
+
#define ECR_C_PROTV_INST_FETCH 0x00
#define ECR_C_PROTV_LOAD 0x01
#define ECR_C_PROTV_STORE 0x02
@@ -173,11 +171,11 @@
} \
}
-#define WRITE_BCR(reg, into) \
+#define WRITE_AUX(reg, into) \
{ \
unsigned int tmp; \
if (sizeof(tmp) == sizeof(into)) { \
- tmp = (*(unsigned int *)(into)); \
+ tmp = (*(unsigned int *)&(into)); \
write_aux_reg(reg, tmp); \
} else { \
extern void bogus_undefined(void); \
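The one-character fix in WRITE_AUX above takes the address of into before the cast; the old WRITE_BCR body cast the value itself to a pointer and dereferenced it. Below is a minimal user-space sketch of the pattern, with struct bcr_demo and fake_aux_reg invented for illustration; note the kernel builds with -fno-strict-aliasing, which makes this kind of type-punning safe there.

/*
 * Sketch only, not the kernel macro: reinterpreting a BCR-style
 * bitfield struct as a raw 32-bit word requires casting the object's
 * *address*, i.e. &(into), not the object itself.
 */
#include <stdio.h>

struct bcr_demo {			/* invented stand-in for a 32-bit BCR layout */
	unsigned int ver:8, cfg:8, pad:16;
};

static unsigned int fake_aux_reg;	/* stands in for the write_aux_reg() target */

#define WRITE_AUX_DEMO(into)						\
do {									\
	unsigned int tmp;						\
	tmp = (*(unsigned int *)&(into));  /* pun the object as a word */ \
	fake_aux_reg = tmp;						\
} while (0)

int main(void)
{
	struct bcr_demo b = { .ver = 3, .cfg = 1 };

	WRITE_AUX_DEMO(b);
	printf("aux word = 0x%08x\n", fake_aux_reg);
	return 0;
}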
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 1a5bf07eefe2..4051e9525939 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -32,6 +32,20 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *m)
m += nr >> 5;
+ /*
+ * ARC ISA micro-optimization:
+ *
+ * Instructions dealing with a bitpos only consider the lower 5 bits
+ * (0-31): e.g. (x << 33) is handled like (x << 1) by the ASL
+ * instruction (the mem pointer still needs adjusting to point to the
+ * next word).
+ *
+ * Hence the masking to clamp the @nr arg can be elided in general.
+ *
+ * However if @nr is a constant (the above assumed it was in a
+ * register) and greater than 31, gcc can optimize (x << 33) away to 0
+ * as an overflow, given the 32-bit ISA. Thus masking needs to be done
+ * for a constant @nr, but no code is generated thanks to const
+ * propagation.
+ */
if (__builtin_constant_p(nr))
nr &= 0x1f;
@@ -374,29 +388,20 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *m)
* This routine doesn't need to be atomic.
*/
static inline int
-__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
- return ((1UL << (nr & 31)) &
- (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int
-__test_bit(unsigned int nr, const volatile unsigned long *addr)
+test_bit(unsigned int nr, const volatile unsigned long *addr)
{
unsigned long mask;
addr += nr >> 5;
- /* ARC700 only considers 5 bits in bit-fiddling insn */
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
mask = 1 << nr;
return ((mask & *addr) != 0);
}
-#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
- __constant_test_bit((nr), (addr)) : \
- __test_bit((nr), (addr)))
-
/*
* Count the number of zeros, starting from MSB
* Helper for fls( ) friends
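The clamping comment above is the crux of the unified test_bit(). A small user-space sketch under stated assumptions: uint32_t words stand in for ARC's 32-bit unsigned long, and the clamp is made unconditional because portable C must not rely on the hardware wrapping variable shift counts the way ARC does.

#include <stdio.h>
#include <stdint.h>

/* mirrors the header's test_bit() logic with explicit 32-bit words */
static int test_bit_sketch(unsigned int nr, const uint32_t *addr)
{
	uint32_t mask;

	addr += nr >> 5;	/* step to the word holding bit @nr */

	/*
	 * The kernel header clamps only under __builtin_constant_p(nr):
	 * for a variable @nr the ARC shifter wraps at 32 anyway, while
	 * for a constant gcc would fold the oversized shift to 0.
	 * Plain C clamps unconditionally, as done here.
	 */
	nr &= 0x1f;

	mask = (uint32_t)1 << nr;
	return (mask & *addr) != 0;
}

int main(void)
{
	uint32_t words[2] = { 0x0, 0x2 };	/* overall bit 33 is set */

	printf("bit 33 set? %d\n", test_bit_sketch(33, words));	/* -> 1 */
	return 0;
}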
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index cbf755e32a03..2b8880e953a2 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -54,29 +54,13 @@ struct arc_reg_cc_build {
#define PERF_COUNT_ARC_BPOK (PERF_COUNT_HW_MAX + 3)
#define PERF_COUNT_ARC_EDTLB (PERF_COUNT_HW_MAX + 4)
#define PERF_COUNT_ARC_EITLB (PERF_COUNT_HW_MAX + 5)
-#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 6)
+#define PERF_COUNT_ARC_LDC (PERF_COUNT_HW_MAX + 6)
+#define PERF_COUNT_ARC_STC (PERF_COUNT_HW_MAX + 7)
+
+#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 8)
/*
- * The "generalized" performance events seem to really be a copy
- * of the available events on x86 processors; the mapping to ARC
- * events is not always possible 1-to-1. Fortunately, there doesn't
- * seem to be an exact definition for these events, so we can cheat
- * a bit where necessary.
- *
- * In particular, the following PERF events may behave a bit differently
- * compared to other architectures:
- *
- * PERF_COUNT_HW_CPU_CYCLES
- * Cycles not in halted state
- *
- * PERF_COUNT_HW_REF_CPU_CYCLES
- * Reference cycles not in halted state, same as PERF_COUNT_HW_CPU_CYCLES
- * for now as we don't do Dynamic Voltage/Frequency Scaling (yet)
- *
- * PERF_COUNT_HW_BUS_CYCLES
- * Unclear what this means, Intel uses 0x013c, which according to
- * their datasheet means "unhalted reference cycles". It sounds similar
- * to PERF_COUNT_HW_REF_CPU_CYCLES, and we use the same counter for it.
+ * Some ARC PCT quirks:
*
* PERF_COUNT_HW_STALLED_CYCLES_BACKEND
* PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
@@ -91,21 +75,38 @@ struct arc_reg_cc_build {
* Note that I$ cache misses aren't counted by either of the two!
*/
+/*
+ * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
+ * (based on a specific RTL build).
+ * Below is the static map between the perf generic/ARC specific event_id
+ * and the h/w condition names.
+ * At probe time, we loop through each index and find its name to complete
+ * the mapping of perf event_id to h/w index, as the latter is needed to
+ * actually program the counter.
+ */
static const char * const arc_pmu_ev_hw_map[] = {
+ /* count cycles */
[PERF_COUNT_HW_CPU_CYCLES] = "crun",
[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
[PERF_COUNT_HW_BUS_CYCLES] = "crun",
- [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
- [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail",
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
- [PERF_COUNT_ARC_DCLM] = "dclm",
- [PERF_COUNT_ARC_DCSM] = "dcsm",
- [PERF_COUNT_ARC_ICM] = "icm",
- [PERF_COUNT_ARC_BPOK] = "bpok",
- [PERF_COUNT_ARC_EDTLB] = "edtlb",
- [PERF_COUNT_ARC_EITLB] = "eitlb",
+
+ /* counts condition */
+ [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+ [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
+
+ [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
+ [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */
+
+ [PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */
+ [PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */
+ [PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */
+ [PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */
+ [PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */
};
#define C(_x) PERF_COUNT_HW_CACHE_##_x
@@ -114,11 +115,11 @@ static const char * const arc_pmu_ev_hw_map[] = {
static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
},
[C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
},
[C(OP_PREFETCH)] = {
@@ -128,7 +129,7 @@ static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
[C(L1I)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
[C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
},
[C(OP_WRITE)] = {
@@ -156,9 +157,10 @@ static const unsigned arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
[C(DTLB)] = {
[C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
},
+ /* DTLB LD/ST Miss not segregated by h/w */
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
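The comment block introducing arc_pmu_ev_hw_map describes a probe-time pass that resolves each fixed condition name to its build-specific index. A hedged sketch of that loop follows; the real code lives in arch/arc/kernel/perf_event.c, and NUM_HW_CONDITIONS, read_cc_name() and ev_hw_idx[] here are invented stand-ins, not the kernel's actual names.

#include <string.h>

#define NUM_HW_CONDITIONS	64	/* assumed per-build condition count */
#define NUM_EVENTS		32	/* stand-in for PERF_COUNT_ARC_HW_MAX */

extern const char * const arc_pmu_ev_hw_map[];	/* static map from this header */

static int ev_hw_idx[NUM_EVENTS];	/* event_id -> h/w condition index */

/* hypothetical helper: read the name of h/w condition @idx into @name */
extern void read_cc_name(int idx, char name[8]);

static void arc_pmu_map_events_sketch(void)
{
	char name[8];
	int i, j;

	for (j = 0; j < NUM_EVENTS; j++)
		ev_hw_idx[j] = -1;	/* -1: not present in this RTL build */

	/* single pass over this build's condition indexes */
	for (i = 0; i < NUM_HW_CONDITIONS; i++) {
		read_cc_name(i, name);
		for (j = 0; j < NUM_EVENTS; j++) {
			if (arc_pmu_ev_hw_map[j] &&
			    !strcmp(arc_pmu_ev_hw_map[j], name))
				ev_hw_idx[j] = i;
		}
	}
}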
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 4e547296831d..52312cb5dbe2 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -47,9 +47,6 @@ struct thread_struct {
/* Forward declaration, a strange C thing */
struct task_struct;
-/* Return saved PC of a blocked thread */
-unsigned long thread_saved_pc(struct task_struct *t);
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
@@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t);
#define release_segments(mm) do { } while (0)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
/*
* Whereabouts of a task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
-#define KSTK_ESP(tsk) (tsk->thread.ksp)
+#define TSK_K_ESP(tsk) (tsk->thread.ksp)
-#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \
+#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \
sizeof(struct callee_regs) + off)))
-#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
-#define KSTK_FP(tsk) KSTK_REG(tsk, 0)
+#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
+#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
+
+#define thread_saved_pc(tsk) TSK_K_BLINK(tsk)
extern void start_thread(struct pt_regs * regs, unsigned long pc,
unsigned long usp);
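The renamed TSK_K_* accessors above hard-code the kernel stack layout: thread.ksp points at the saved callee regs, with fp at offset 0 and blink at offset 4 past them, so thread_saved_pc() is simply that saved blink. A user-space mock of the pointer arithmetic follows; MOCK_CALLEE_SZ is assumed to be 52 (13 callee-saved regs r13-r25, 4 bytes each), and the real struct callee_regs lives in asm/ptrace.h.

#include <stdio.h>

struct mock_thread { unsigned long ksp; };
struct mock_task { struct mock_thread thread; };

#define MOCK_CALLEE_SZ	52	/* assumed sizeof(struct callee_regs) */

#define TSK_K_ESP(tsk)		((tsk)->thread.ksp)
#define TSK_K_REG(tsk, off)	(*((unsigned int *)(TSK_K_ESP(tsk) + \
					MOCK_CALLEE_SZ + (off))))
#define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
#define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)

int main(void)
{
	unsigned int stack_words[16] = { 0 };
	struct mock_task t = { .thread = { .ksp = (unsigned long)stack_words } };

	/* plant a fake return address where blink would have been saved */
	stack_words[(MOCK_CALLEE_SZ + 4) / 4] = 0x80001234;

	printf("thread_saved_pc() -> blink = 0x%x\n", TSK_K_BLINK(&t));
	return 0;
}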
diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h
new file mode 100644
index 000000000000..b29b6064ea14
--- /dev/null
+++ b/arch/arc/include/asm/stacktrace.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_STACKTRACE_H
+#define __ASM_STACKTRACE_H
+
+#include <linux/sched.h>
+
+/**
+ * arc_unwind_core - Unwind the kernel mode stack for an execution context
+ * @tsk: NULL for current task, specific task otherwise
+ * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC}
+ * If NULL, use pt_regs of @tsk (if !NULL) otherwise
+ * use the current values of {SP, FP, BLINK, PC}
+ * @consumer_fn: Callback invoked for each frame unwound
+ * Returns 0 to continue unwinding, -1 to stop
+ * @arg: Arg to callback
+ *
+ * Returns the address of the first function on the stack
+ *
+ * Semantics:
+ *  - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL
+ *  - asynchronous unwinding of a sleeping task: @tsk !NULL, @regs NULL
+ *  - asynchronous unwinding of intr/excp etc.: @tsk !NULL, @regs !NULL
+ */
+notrace noinline unsigned int arc_unwind_core(
+ struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *),
+ void *arg);
+
+#endif /* __ASM_STACKTRACE_H */
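A hedged usage sketch for arc_unwind_core(), following the kernel-doc semantics above; the callback name and the depth cap are illustrative, not part of the patch.

#include <linux/printk.h>
#include <asm/stacktrace.h>

static int print_frame(unsigned int address, void *arg)
{
	unsigned int *depth = arg;

	pr_info("  [<%08x>]\n", address);

	/* per the kernel-doc: return 0 to keep unwinding, -1 to stop */
	return (++(*depth) < 16) ? 0 : -1;
}

static void dump_current_stack_sketch(void)
{
	unsigned int depth = 0;

	/* @tsk NULL, @regs NULL: synchronous unwind of the current task */
	arc_unwind_core(NULL, NULL, print_frame, &depth);
}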
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 1163a1838ac1..aca0d5a45c7b 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -43,7 +43,6 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct task_struct *task; /* main task structure */
mm_segment_t addr_limit; /* thread address space */
- struct exec_domain *exec_domain;/* execution domain */
__u32 cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
};
@@ -56,7 +55,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \