Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--   include/asm-sparc64/atomic.h       |  1
-rw-r--r--   include/asm-sparc64/cache.h        |  1
-rw-r--r--   include/asm-sparc64/elf.h          |  2
-rw-r--r--   include/asm-sparc64/futex.h        | 49
-rw-r--r--   include/asm-sparc64/kprobes.h      | 10
-rw-r--r--   include/asm-sparc64/mmu_context.h  |  2
-rw-r--r--   include/asm-sparc64/mutex.h        |  9
-rw-r--r--   include/asm-sparc64/oplib.h        |  2
-rw-r--r--   include/asm-sparc64/processor.h    |  5
-rw-r--r--   include/asm-sparc64/system.h       | 18
-rw-r--r--   include/asm-sparc64/thread_info.h  |  6
-rw-r--r--   include/asm-sparc64/unistd.h       | 23
12 files changed, 54 insertions, 74 deletions
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 11f5aa5d108c..25256bdc8aae 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,6 +72,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h
index ade5ec3bfd5a..f7d35a2ae9b8 100644
--- a/include/asm-sparc64/cache.h
+++ b/include/asm-sparc64/cache.h
@@ -9,7 +9,6 @@
 #define L1_CACHE_BYTES	32 /* Two 16-byte sub-blocks per line. */
 
 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define L1_CACHE_SHIFT_MAX 5	/* largest L1 which this arch supports */
 
 #define SMP_CACHE_BYTES_SHIFT	6
 #define SMP_CACHE_BYTES		(1 << SMP_CACHE_BYTES_SHIFT) /* L2 cache line size. */
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 91458118277e..69539a8ab833 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -119,7 +119,7 @@ typedef struct {
 #endif
 
 #define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs)	\
-	({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; })
+	({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 9feff4ce1424..6a332a9f099c 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -1,53 +1,6 @@
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
-#ifdef __KERNEL__
+#include <asm-generic/futex.h>
 
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
-	int oldval = 0, ret;
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
-
-	inc_preempt_count();
-
-	switch (op) {
-	case FUTEX_OP_SET:
-	case FUTEX_OP_ADD:
-	case FUTEX_OP_OR:
-	case FUTEX_OP_ANDN:
-	case FUTEX_OP_XOR:
-	default:
-		ret = -ENOSYS;
-	}
-
-	dec_preempt_count();
-
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
-		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
-		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
-		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
-		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
-		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
-		default: ret = -ENOSYS;
-		}
-	}
-	return ret;
-}
-
-#endif
 #endif
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index 7ba845320f5c..e4efe652b54b 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t;
 #define MAX_INSN_SIZE 2
 
 #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
+#define arch_remove_kprobe(p)	do {} while (0)
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
@@ -38,15 +39,6 @@ struct kprobe_ctlblk {
 	struct prev_kprobe prev_kprobe;
 };
 
-#ifdef CONFIG_KPROBES
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
-#else	/* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
-					   unsigned long val, void *data)
-{
-	return 0;
-}
-#endif
-
 #endif /* _SPARC64_KPROBES_H */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 08ba72d7722c..57ee7b306189 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -60,7 +60,7 @@ do { \
 	register unsigned long pgd_cache asm("o4"); \
 	paddr = __pa((__mm)->pgd); \
 	pgd_cache = 0UL; \
-	if ((__tsk)->thread_info->flags & _TIF_32BIT) \
+	if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
 		pgd_cache = get_pgd_cache((__mm)->pgd); \
 	__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
 			     "mov %3, %%g4\n\t" \
diff --git a/include/asm-sparc64/mutex.h b/include/asm-sparc64/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/include/asm-sparc64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index d02f1e8ae1a6..3c59b2693fb9 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -163,6 +163,7 @@ enum prom_input_device {
 	PROMDEV_IKBD,			/* input from keyboard */
 	PROMDEV_ITTYA,			/* input from ttya */
 	PROMDEV_ITTYB,			/* input from ttyb */
+	PROMDEV_IRSC,			/* input from rsc */
 	PROMDEV_I_UNK,
 };
 
@@ -174,6 +175,7 @@ enum prom_output_device {
 	PROMDEV_OSCREEN,		/* to screen */
 	PROMDEV_OTTYA,			/* to ttya */
 	PROMDEV_OTTYB,			/* to ttyb */
+	PROMDEV_ORSC,			/* to rsc */
 	PROMDEV_O_UNK,
 };
 
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 3169f3e2237e..cd8d9b4c8658 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long get_wchan(struct task_struct *task);
 
-#define KSTK_EIP(tsk)  ((tsk)->thread_info->kregs->tpc)
-#define KSTK_ESP(tsk)  ((tsk)->thread_info->kregs->u_regs[UREG_FP])
+#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
+#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
 #define cpu_relax()	barrier()
 
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index b5417529f6f1..af254e581834 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -193,11 +193,7 @@ do { \
  * not preserve it's value.  Hairy, but it lets us remove 2 loads
  * and 2 stores in this critical code path.  -DaveM
  */
-#if __GNUC__ >= 3
 #define EXTRA_CLOBBER ,"%l1"
-#else
-#define EXTRA_CLOBBER
-#endif
 #define switch_to(prev, next, last)					\
 do {	if (test_thread_flag(TIF_PERFCTR)) {				\
 		unsigned long __tmp;					\
@@ -212,7 +208,7 @@ do {	if (test_thread_flag(TIF_PERFCTR)) {				\
 	/* If you are tempted to conditionalize the following */	\
 	/* so that ASI is only written if it changes, think again. */	\
 	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
-	: : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
	__asm__ __volatile__(						\
 	"mov	%%g4, %%g7\n\t"						\
 	"wrpr	%%g0, 0x95, %%pstate\n\t"				\
@@ -242,7 +238,7 @@ do {	if (test_thread_flag(TIF_PERFCTR)) {				\
 	"b,a ret_from_syscall\n\t"					\
 	"1:\n\t"							\
 	: "=&r" (last)							\
-	: "0" (next->thread_info),					\
+	: "0" (task_thread_info(next)),					\
 	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),		\
 	  "i" (TI_CWP), "i" (TI_TASK)					\
 	: "cc",								\
@@ -257,6 +253,16 @@ do {	if (test_thread_flag(TIF_PERFCTR)) {				\
 	}								\
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index c94d8b3991bd..ac9d068aab4f 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -221,7 +221,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
  *	 nop
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
-#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_RESTORE_SIGMASK	1	/* restore signal mask in do_signal() */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_PERFCTR		4	/* performance counters active */
@@ -241,7 +241,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_POLLING_NRFLAG	14
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_PERFCTR		(1<<TIF_PERFCTR)
@@ -250,11 +249,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_32BIT		(1<<TIF_32BIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_RESTORE_SIGMASK	(1<<TIF_RESTORE_SIGMASK)
 #define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 
 #define _TIF_USER_WORK_MASK	((0xff << TI_FLAG_WSAVED_SHIFT) | \
-				 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
+				 (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | \
 				  _TIF_NEED_RESCHED | _TIF_PERFCTR))
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 51ec2879b881..84ac2bdb0902 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -302,11 +302,26 @@
 #define __NR_add_key		281
 #define __NR_request_key	282
 #define __NR_keyctl		283
+#define __NR_openat		284
+#define __NR_mkdirat		285
+#define __NR_mknodat		286
+#define __NR_fchownat		287
+#define __NR_futimesat		288
+#define __NR_newfstatat		289
+#define __NR_unlinkat		290
+#define __NR_renameat		291
+#define __NR_linkat		292
+#define __NR_symlinkat		293
+#define __NR_readlinkat		294
+#define __NR_fchmodat		295
+#define __NR_faccessat		296
+#define __NR_pselect6		297
+#define __NR_ppoll		298
 
-/* WARNING: You MAY NOT add syscall numbers larger than 283, since
+/* WARNING: You MAY NOT add syscall numbers larger than 298, since
  *          all of the syscall tables in the Sparc kernel are
- *          sized to have 283 entries (starting at zero).  Therefore
- *          find a free slot in the 0-282 range.
+ *          sized to have 298 entries (starting at zero).  Therefore
+ *          find a free slot in the 0-298 range.
  */
 #define _syscall0(type,name) \
@@ -501,6 +516,8 @@ asmlinkage long sys_rt_sigaction(int sig,
 #define __ARCH_WANT_SYS_OLDUMOUNT
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
 #endif
 
 /*