author    | Greentime Hu <greentime@andestech.com> | 2017-10-24 09:42:02 +0300
committer | Greentime Hu <greentime@andestech.com> | 2018-02-22 05:44:31 +0300
commit    | 2923f5ea7738cb9f5372bb9d7ac9886cf4193003 (patch)
tree      | ee1811a5903a7cc76253c6fdb0161e150cfe80c9 /arch/nds32/kernel/ex-exit.S
parent    | 001d953ede35dfb135e636af4b41d5dd20a09471 (diff)
download  | linux-2923f5ea7738cb9f5372bb9d7ac9886cf4193003.tar.xz
nds32: Exception handling
This patch includes the exception/interrupt entries, the pt_regs structure,
and related accessors.
/* Unaligned access handling */
Andes processors cannot load/store data that is not naturally aligned on
the bus; for example, a 4-byte load requires a start address divisible
by 4. An unaligned access triggers a data unaligned exception, and
depending on the faulting address the user gets SIGSEGV or the kernel
oopses. To let user space load/store data at unaligned addresses,
software load/store emulation is implemented in arch/nds32/mm/alignment.c
to handle the data unaligned exception; the sketch below shows the kind
of access involved.
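
For illustration only (not part of this patch), a minimal user-space C
sketch of the access pattern that raises the exception: dereferencing a
pointer that is off natural alignment performs an unaligned 4-byte load,
while a byte-wise memcpy() avoids it. The buffer contents are made up
for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint32_t v;

	/* buf + 1 is not 4-byte aligned: on Andes cores this load traps,
	 * and only succeeds if the kernel's software emulation fixes it up. */
	v = *(uint32_t *)(buf + 1);
	printf("unaligned load: 0x%08x\n", v);

	/* A byte-wise copy never issues an unaligned bus access. */
	memcpy(&v, buf + 1, sizeof(v));
	printf("memcpy load:    0x%08x\n", v);
	return 0;
}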
Unaligned access handling is disabled by default because it is not the
normal case. It can be enabled with the following steps (a usage sketch
follows the list).
A. Compile time:
1. Enable the kernel config option CONFIG_ALIGNMENT_TRAP.
B. Run time:
1. Enter the /proc/sys/nds32/unaligned_acess directory.
2. Write 1 to the file enable_mode to enable unaligned access
handling; write 0 to disable it.
3. Write 1 to the file debug to log which unaligned address is being
handled; write 0 to disable the logging.
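
The run-time step can also be scripted; the following C sketch is a
hypothetical helper (not part of the patch) that writes the flags
described above. The /proc paths and file names are taken verbatim from
this message and are assumptions about the final sysctl layout.

#include <stdio.h>

/* Write a single value to a procfs file; returns 0 on success. */
static int write_flag(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Step B.2: enable the unaligned access handler ("0" disables it). */
	write_flag("/proc/sys/nds32/unaligned_acess/enable_mode", "1");
	/* Step B.3: optionally log each address being fixed up. */
	write_flag("/proc/sys/nds32/unaligned_acess/debug", "1");
	return 0;
}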
However, the unaligned access handler cannot help when the unaligned
address is itself inaccessible, e.g. because of a protection violation.
In that case the default behavior for the data unaligned exception
(SIGSEGV or kernel oops) still applies; the sketch below illustrates
this corner case.
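
As a hypothetical illustration of that limitation (not part of the
patch), the sketch below makes an unaligned access into a page mapped
PROT_NONE: the emulation cannot read the data either, so the process
still receives SIGSEGV.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Map one page with all access rights removed. */
	unsigned char *page = mmap(NULL, 4096, PROT_NONE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (page == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Unaligned address inside an inaccessible page: the alignment
	 * handler cannot perform the load on the task's behalf, so the
	 * default SIGSEGV is delivered and the line below never runs. */
	uint32_t v = *(volatile uint32_t *)(page + 1);

	printf("never reached: 0x%08x\n", v);
	return 0;
}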
Signed-off-by: Vincent Chen <vincentc@andestech.com>
Signed-off-by: Greentime Hu <greentime@andestech.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/nds32/kernel/ex-exit.S')
-rw-r--r-- | arch/nds32/kernel/ex-exit.S | 184
1 file changed, 184 insertions(+), 0 deletions(-)
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S
new file mode 100644
index 000000000000..03e4f7788a18
--- /dev/null
+++ b/arch/nds32/kernel/ex-exit.S
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <asm/assembler.h>
+#include <asm/nds32.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/current.h>
+
+
+
+#ifdef CONFIG_HWZOL
+	.macro pop_zol
+	mtusr	$r14, $LB
+	mtusr	$r15, $LE
+	mtusr	$r16, $LC
+	.endm
+#endif
+
+	.macro restore_user_regs_first
+	setgie.d
+	isb
+
+	addi	$sp, $sp, FUCOP_CTL_OFFSET
+
+	lmw.adm	$r12, [$sp], $r24, #0x0
+	mtsr	$r12, $SP_USR
+	mtsr	$r13, $IPC
+#ifdef CONFIG_HWZOL
+	pop_zol
+#endif
+	mtsr	$r19, $PSW
+	mtsr	$r20, $IPSW
+	mtsr	$r21, $P_IPSW
+	mtsr	$r22, $P_IPC
+	mtsr	$r23, $P_P0
+	mtsr	$r24, $P_P1
+	lmw.adm	$sp, [$sp], $sp, #0xe
+	.endm
+
+	.macro restore_user_regs_last
+	pop	$p0
+	cmovn	$sp, $p0, $p0
+
+	iret
+	nop
+
+	.endm
+
+	.macro restore_user_regs
+	restore_user_regs_first
+	lmw.adm	$r0, [$sp], $r25, #0x0
+	addi	$sp, $sp, OSP_OFFSET
+	restore_user_regs_last
+	.endm
+
+	.macro fast_restore_user_regs
+	restore_user_regs_first
+	lmw.adm	$r1, [$sp], $r25, #0x0
+	addi	$sp, $sp, OSP_OFFSET-4
+	restore_user_regs_last
+	.endm
+
+#ifdef CONFIG_PREEMPT
+	.macro preempt_stop
+	.endm
+#else
+	.macro preempt_stop
+	setgie.d
+	isb
+	.endm
+#define	resume_kernel	no_work_pending
+#endif
+
+ENTRY(ret_from_exception)
+	preempt_stop
+ENTRY(ret_from_intr)
+
+/*
+ * judge Kernel or user mode
+ *
+ */
+	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
+	andi	$p0, $p0, #PSW_mskINTL
+	bnez	$p0, resume_kernel		! done with iret
+	j	resume_userspace
+
+
+/*
+ * This is the fast syscall return path.  We do as little as
+ * possible here, and this includes saving $r0 back into the SVC
+ * stack.
+ * fixed: tsk - $r25, syscall # - $r7, syscall table pointer - $r8
+ */
+ENTRY(ret_fast_syscall)
+	gie_disable
+	lwi	$r1, [tsk+#TSK_TI_FLAGS]
+	andi	$p1, $r1, #_TIF_WORK_MASK
+	bnez	$p1, fast_work_pending
+	fast_restore_user_regs			! iret
+
+/*
+ * Ok, we need to do extra processing,
+ * enter the slow path returning from syscall, while pending work.
+ */
+fast_work_pending:
+	swi	$r0, [$sp+(#R0_OFFSET)]		! what is different from ret_from_exception
+work_pending:
+	andi	$p1, $r1, #_TIF_NEED_RESCHED
+	bnez	$p1, work_resched
+
+	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
+	beqz	$p1, no_work_pending
+
+	move	$r0, $sp			! 'regs'
+	gie_enable
+	bal	do_notify_resume
+	b	ret_slow_syscall
+work_resched:
+	bal	schedule			! path, return to user mode
+
+/*
+ * "slow" syscall return path.
+ */
+ENTRY(resume_userspace)
+ENTRY(ret_slow_syscall)
+	gie_disable
+	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
+	andi	$p0, $p0, #PSW_mskINTL
+	bnez	$p0, no_work_pending		! done with iret
+	lwi	$r1, [tsk+#TSK_TI_FLAGS]
+	andi	$p1, $r1, #_TIF_WORK_MASK
+	bnez	$p1, work_pending		! handle work_resched, sig_pend
+
+no_work_pending:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	lwi	$p0, [$sp+(#IPSW_OFFSET)]
+	andi	$p0, $p0, #0x1
+	la	$r10, trace_hardirqs_off
+	la	$r9, trace_hardirqs_on
+	cmovz	$r9, $p0, $r10
+	jral	$r9
+#endif
+	restore_user_regs			! return from iret
+
+
+/*
+ * preemptive kernel
+ */
+#ifdef CONFIG_PREEMPT
+resume_kernel:
+	gie_disable
+	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
+	bnez	$t0, no_work_pending
+need_resched:
+	lwi	$t0, [tsk+#TSK_TI_FLAGS]
+	andi	$p1, $t0, #_TIF_NEED_RESCHED
+	beqz	$p1, no_work_pending
+
+	lwi	$t0, [$sp+(#IPSW_OFFSET)]	! Interrupts off?
+	andi	$t0, $t0, #1
+	beqz	$t0, no_work_pending
+
+	jal	preempt_schedule_irq
+	b	need_resched
+#endif
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+	bal	schedule_tail
+	beqz	$r6, 1f				! r6 stores fn for kernel thread
+	move	$r0, $r7			! prepare kernel thread arg
+	jral	$r6
+1:
+	lwi	$r1, [tsk+#TSK_TI_FLAGS]	! check for syscall tracing
+	andi	$p1, $r1, #_TIF_WORK_SYSCALL_LEAVE	! are we tracing syscalls?
+	beqz	$p1, ret_slow_syscall
+	move	$r0, $sp
+	bal	syscall_trace_leave
+	b	ret_slow_syscall