Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r--  arch/sparc/include/asm/atomic_64.h       |  4
-rw-r--r--  arch/sparc/include/asm/backoff.h         | 69
-rw-r--r--  arch/sparc/include/asm/compat.h          |  5
-rw-r--r--  arch/sparc/include/asm/processor_32.h    |  1
-rw-r--r--  arch/sparc/include/asm/processor_64.h    | 28
-rw-r--r--  arch/sparc/include/asm/prom.h            |  8
-rw-r--r--  arch/sparc/include/asm/ptrace.h          | 23
-rw-r--r--  arch/sparc/include/asm/smp_64.h          |  2
-rw-r--r--  arch/sparc/include/asm/switch_to_64.h    |  2
-rw-r--r--  arch/sparc/include/asm/syscalls.h        |  2
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h  | 30
-rw-r--r--  arch/sparc/include/asm/ttable.h          | 24
-rw-r--r--  arch/sparc/include/asm/uaccess_64.h      |  4
-rw-r--r--  arch/sparc/include/asm/unistd.h          |  1
14 files changed, 156 insertions(+), 47 deletions(-)
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index ce35a1cf1a20..be56a244c9cf 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -1,7 +1,7 @@
/* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
- * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_ATOMIC__
@@ -106,6 +106,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+extern long atomic64_dec_if_positive(atomic64_t *v);
+
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
diff --git a/arch/sparc/include/asm/backoff.h b/arch/sparc/include/asm/backoff.h
index db3af0d30fb1..4e02086b839c 100644
--- a/arch/sparc/include/asm/backoff.h
+++ b/arch/sparc/include/asm/backoff.h
@@ -1,6 +1,46 @@
#ifndef _SPARC64_BACKOFF_H
#define _SPARC64_BACKOFF_H
+/* The macros in this file implement an exponential backoff facility
+ * for atomic operations.
+ *
+ * When multiple threads compete on an atomic operation, it is
+ * possible for one thread to be continually denied a successful
+ * completion of the compare-and-swap instruction. Heavily
+ * threaded cpu implementations like Niagara can compound this
+ * problem even further.
+ *
+ * When an atomic operation fails and needs to be retried, we spin a
+ * certain number of times. At each subsequent failure of the same
+ * operation we double the spin count, realizing an exponential
+ * backoff.
+ *
+ * When we spin, we try to use an operation that will cause the
+ * current cpu strand to block, and therefore make the core fully
+ * available to any other runnable strands. There are two
+ * options, based upon cpu capabilities.
+ *
+ * On all cpus prior to SPARC-T4 we do three dummy reads of the
+ * condition code register. Each read blocks the strand for something
+ * between 40 and 50 cpu cycles.
+ *
+ * For SPARC-T4 and later we have a special "pause" instruction
+ * available. This is implemented using writes to register %asr27.
+ * The cpu will block the number of cycles written into the register,
+ * unless a disrupting trap happens first. SPARC-T4 specifically
+ * implements pause with a granularity of 8 cycles. Each strand has
+ * an internal pause counter which decrements every 8 cycles. So the
+ * chip shifts the %asr27 value down by 3 bits, and writes the result
+ * into the pause counter. If a value smaller than 8 is written, the
+ * chip blocks for 1 cycle.
+ *
+ * To achieve the same amount of backoff as the three %ccr reads give
+ * on earlier chips, we shift the backoff value up by 7 bits. (Three
+ * %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the
+ * whole amount we want to block into the pause register, rather than
+ * loop writing 128 each time.
+ */
+
#define BACKOFF_LIMIT (4 * 1024)
#ifdef CONFIG_SMP
@@ -11,16 +51,25 @@
#define BACKOFF_LABEL(spin_label, continue_label) \
spin_label
-#define BACKOFF_SPIN(reg, tmp, label) \
- mov reg, tmp; \
-88: brnz,pt tmp, 88b; \
- sub tmp, 1, tmp; \
- set BACKOFF_LIMIT, tmp; \
- cmp reg, tmp; \
- bg,pn %xcc, label; \
- nop; \
- ba,pt %xcc, label; \
- sllx reg, 1, reg;
+#define BACKOFF_SPIN(reg, tmp, label) \
+ mov reg, tmp; \
+88: rd %ccr, %g0; \
+ rd %ccr, %g0; \
+ rd %ccr, %g0; \
+ .section .pause_3insn_patch,"ax";\
+ .word 88b; \
+ sllx tmp, 7, tmp; \
+ wr tmp, 0, %asr27; \
+ clr tmp; \
+ .previous; \
+ brnz,pt tmp, 88b; \
+ sub tmp, 1, tmp; \
+ set BACKOFF_LIMIT, tmp; \
+ cmp reg, tmp; \
+ bg,pn %xcc, label; \
+ nop; \
+ ba,pt %xcc, label; \
+ sllx reg, 1, reg;
#else
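
The commentary above describes a standard compare-and-swap retry loop with
exponential backoff. As a hedged illustration only (the function and the
cpu_yield() helper below are invented for this sketch, not taken from the
kernel), the same idea in portable C11 looks like:

	#include <stdatomic.h>

	#define BACKOFF_LIMIT (4 * 1024)	/* same cap as the macro above */

	/* Stand-in for the three %ccr reads / %asr27 pause write:
	 * some platform-specific way to stall the current strand. */
	static inline void cpu_yield(void) { }

	static void atomic_add_backoff(atomic_long *v, long inc)
	{
		long backoff = 1;
		long old = atomic_load_explicit(v, memory_order_relaxed);

		/* On each failed CAS, spin 'backoff' times, then
		 * double the count, capped at BACKOFF_LIMIT. */
		while (!atomic_compare_exchange_weak(v, &old, old + inc)) {
			for (long i = 0; i < backoff; i++)
				cpu_yield();
			if (backoff < BACKOFF_LIMIT)
				backoff <<= 1;
		}
	}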
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index cef99fbc0a21..830502fe62b4 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -232,9 +232,10 @@ static inline void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
- if (!(test_thread_flag(TIF_32BIT)))
+ if (test_thread_64bit_stack(usp))
usp += STACK_BIAS;
- else
+
+ if (test_thread_flag(TIF_32BIT))
usp &= 0xffffffffUL;
usp -= len;
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index f74ac9ee33a8..c1e01914fd98 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -106,7 +106,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
/* Free all resources held by a thread. */
#define release_thread(tsk) do { } while(0)
-extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long get_wchan(struct task_struct *);
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 4e5a483122a0..cce72ce4c334 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -94,6 +94,7 @@ struct thread_struct {
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/fpumacro.h>
/* Return saved PC of a blocked thread. */
struct task_struct;
@@ -143,6 +144,10 @@ do { \
: \
: "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
"i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
+ fprs_write(0); \
+ current_thread_info()->xfsr[0] = 0; \
+ current_thread_info()->fpsaved[0] = 0; \
+ regs->tstate &= ~TSTATE_PEF; \
} while (0)
#define start_thread32(regs, pc, sp) \
@@ -183,20 +188,37 @@ do { \
: \
: "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
"i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
+ fprs_write(0); \
+ current_thread_info()->xfsr[0] = 0; \
+ current_thread_info()->fpsaved[0] = 0; \
+ regs->tstate &= ~TSTATE_PEF; \
} while (0)
/* Free all resources held by a thread. */
#define release_thread(tsk) do { } while (0)
-extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
extern unsigned long get_wchan(struct task_struct *task);
#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
-#define cpu_relax() barrier()
+/* Please see the commentary in asm/backoff.h for a description of
+ * what these instructions are doing and how they have been chosen.
+ * To make a long story short, we are trying to yield the current cpu
+ * strand during busy loops.
+ */
+#define cpu_relax() asm volatile("\n99:\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ "rd %%ccr, %%g0\n\t" \
+ ".section .pause_3insn_patch,\"ax\"\n\t"\
+ ".word 99b\n\t" \
+ "wr %%g0, 128, %%asr27\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".previous" \
+ ::: "memory")
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
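
For context, cpu_relax() is the hook that generic kernel code calls inside
busy-wait loops, so with the change above every such loop now yields the
strand. A minimal sketch of the calling idiom (the 'flag' variable is
illustrative):

	/* Poll until another cpu sets 'flag', yielding the strand
	 * between reads instead of hammering the pipeline. */
	while (!ACCESS_ONCE(flag))
		cpu_relax();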
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index c28765110706..67c62578d170 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -63,5 +63,13 @@ extern char *of_console_options;
extern void irq_trans_init(struct device_node *dp);
extern char *build_path_component(struct device_node *dp);
+/* SPARC has local implementations */
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+#define of_address_to_resource of_address_to_resource
+
+void __iomem *of_iomap(struct device_node *node, int index);
+#define of_iomap of_iomap
+
#endif /* __KERNEL__ */
#endif /* _SPARC_PROM_H */
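
A brief usage sketch of the two local implementations declared above; the
device node 'dp', the index 0, and the error returns are illustrative:

	struct resource res;
	void __iomem *regs;

	if (of_address_to_resource(dp, 0, &res))
		return -ENODEV;		/* no usable 'reg' entry */

	regs = of_iomap(dp, 0);		/* map that same resource */
	if (!regs)
		return -ENOMEM;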
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index 0c6f6b068289..bdfafd7af46f 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -32,6 +32,9 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
#define arch_ptrace_stop(exit_code, info) \
synchronize_user_stack()
+#define current_pt_regs() \
+ ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
+
struct global_reg_snapshot {
unsigned long tstate;
unsigned long tpc;
@@ -42,11 +45,20 @@ struct global_reg_snapshot {
struct thread_info *thread;
unsigned long pad1;
};
-extern struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
-#define force_successful_syscall_return() \
-do { current_thread_info()->syscall_noerror = 1; \
-} while (0)
+struct global_pmu_snapshot {
+ unsigned long pcr[4];
+ unsigned long pic[4];
+};
+
+union global_cpu_snapshot {
+ struct global_reg_snapshot reg;
+ struct global_pmu_snapshot pmu;
+};
+
+extern union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
+
+#define force_successful_syscall_return() set_thread_noerror(1)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc)
#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
@@ -89,6 +101,9 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
#define arch_ptrace_stop(exit_code, info) \
synchronize_user_stack()
+#define current_pt_regs() \
+ ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE) - 1)
+
#define user_mode(regs) (!((regs)->psr & PSR_PS))
#define instruction_pointer(regs) ((regs)->pc)
#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
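
A hedged note on the current_pt_regs() definition added above: the user's
registers are saved at the very top of the THREAD_SIZE-byte kernel stack,
so the macro amounts to the following sketch (variable names illustrative):

	/* thread_info sits at the base of the kernel stack; the saved
	 * pt_regs occupy the top, so step back one struct from the end. */
	unsigned long base = (unsigned long)current_thread_info();
	struct pt_regs *regs = (struct pt_regs *)(base + THREAD_SIZE) - 1;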
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 29862a9e9065..dd3bef4b9896 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -48,6 +48,7 @@ extern void smp_fill_in_sib_core_maps(void);
extern void cpu_play_dead(void);
extern void smp_fetch_global_regs(void);
+extern void smp_fetch_global_pmu(void);
struct seq_file;
void smp_bogo(struct seq_file *);
@@ -65,6 +66,7 @@ extern void __cpu_die(unsigned int cpu);
#define hard_smp_processor_id() 0
#define smp_fill_in_sib_core_maps() do { } while (0)
#define smp_fetch_global_regs() do { } while (0)
+#define smp_fetch_global_pmu() do { } while (0)
#endif /* !(CONFIG_SMP) */
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index 7923c4a2be38..cad36f56fa03 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -23,7 +23,7 @@ do { flush_tlb_pending(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
- : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
+ : : "r" (task_thread_info(next)->current_ds));\
trap_block[current_thread_info()->cpu].thread = \
task_thread_info(next); \
__asm__ __volatile__( \
diff --git a/arch/sparc/include/asm/syscalls.h b/arch/sparc/include/asm/syscalls.h
index 45a43f637a14..bf8972adea17 100644
--- a/arch/sparc/include/asm/syscalls.h
+++ b/arch/sparc/include/asm/syscalls.h
@@ -8,6 +8,4 @@ extern asmlinkage long sparc_do_fork(unsigned long clone_flags,
struct pt_regs *regs,
unsigned long stack_size);
-extern asmlinkage int sparc_execve(struct pt_regs *regs);
-
#endif /* _SPARC64_SYSCALLS_H */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 4e2276631081..269bd92313df 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -14,12 +14,12 @@
#define TI_FLAG_FAULT_CODE_SHIFT 56
#define TI_FLAG_BYTE_WSTATE 1
#define TI_FLAG_WSTATE_SHIFT 48
-#define TI_FLAG_BYTE_CWP 2
-#define TI_FLAG_CWP_SHIFT 40
-#define TI_FLAG_BYTE_CURRENT_DS 3
-#define TI_FLAG_CURRENT_DS_SHIFT 32
-#define TI_FLAG_BYTE_FPDEPTH 4
-#define TI_FLAG_FPDEPTH_SHIFT 24
+#define TI_FLAG_BYTE_NOERROR 2
+#define TI_FLAG_BYTE_NOERROR_SHIFT 40
+#define TI_FLAG_BYTE_FPDEPTH 3
+#define TI_FLAG_FPDEPTH_SHIFT 32
+#define TI_FLAG_BYTE_CWP 4
+#define TI_FLAG_CWP_SHIFT 24
#define TI_FLAG_BYTE_WSAVED 5
#define TI_FLAG_WSAVED_SHIFT 16
@@ -47,7 +47,7 @@ struct thread_info {
struct exec_domain *exec_domain;
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u8 new_child;
- __u8 syscall_noerror;
+ __u8 current_ds;
__u16 cpu;
unsigned long *utraps;
@@ -74,9 +74,9 @@ struct thread_info {
#define TI_FAULT_CODE (TI_FLAGS + TI_FLAG_BYTE_FAULT_CODE)
#define TI_WSTATE (TI_FLAGS + TI_FLAG_BYTE_WSTATE)
#define TI_CWP (TI_FLAGS + TI_FLAG_BYTE_CWP)
-#define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS)
#define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH)
#define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED)
+#define TI_SYS_NOERROR (TI_FLAGS + TI_FLAG_BYTE_NOERROR)
#define TI_FPSAVED 0x00000010
#define TI_KSP 0x00000018
#define TI_FAULT_ADDR 0x00000020
@@ -84,7 +84,7 @@ struct thread_info {
#define TI_EXEC_DOMAIN 0x00000030
#define TI_PRE_COUNT 0x00000038
#define TI_NEW_CHILD 0x0000003c
-#define TI_SYS_NOERROR 0x0000003d
+#define TI_CURRENT_DS 0x0000003d
#define TI_CPU 0x0000003e
#define TI_UTRAPS 0x00000040
#define TI_REG_WINDOW 0x00000048
@@ -121,7 +121,7 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .flags = ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \
+ .current_ds = ASI_P, \
.exec_domain = &default_exec_domain, \
.preempt_count = INIT_PREEMPT_COUNT, \
.restart_block = { \
@@ -153,13 +153,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define set_thread_wstate(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE] = (val))
#define get_thread_cwp() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP])
#define set_thread_cwp(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP] = (val))
-#define get_thread_current_ds() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS])
-#define set_thread_current_ds(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS] = (val))
+#define get_thread_noerror() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_NOERROR])
+#define set_thread_noerror(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_NOERROR] = (val))
#define get_thread_fpdepth() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH])
#define set_thread_fpdepth(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH] = (val))
#define get_thread_wsaved() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED])
#define set_thread_wsaved(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED] = (val))
-
#endif /* !(__ASSEMBLY__) */
/*
@@ -259,6 +258,11 @@ static inline bool test_and_clear_restore_sigmask(void)
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
+#define test_thread_64bit_stack(__SP) \
+ ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \
+ false : true)
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
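
The test_thread_64bit_stack() macro above encodes the new convention that
bit 0 of a 32-bit task's stack pointer marks a 64-bit stack frame. Written
out as an equivalent C sketch (the function name is invented here):

	/* A 64-bit task always has a 64-bit stack; a TIF_32BIT task
	 * has one only when bit 0 of its stack pointer is set. */
	static bool stack_is_64bit(unsigned long sp)
	{
		if (!test_thread_flag(TIF_32BIT))
			return true;
		return (sp & 0x1) != 0;
	}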
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
index 48f2807d3265..71b5a67522ab 100644
--- a/arch/sparc/include/asm/ttable.h
+++ b/arch/sparc/include/asm/ttable.h
@@ -372,7 +372,9 @@ etrap_spill_fixup_64bit: \
/* Normal 32bit spill */
#define SPILL_2_GENERIC(ASI) \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, (. - (128 + 4)); \
+ srl %sp, 0, %sp; \
stwa %l0, [%sp + %g0] ASI; \
mov 0x04, %g3; \
stwa %l1, [%sp + %g3] ASI; \
@@ -398,14 +400,16 @@ etrap_spill_fixup_64bit: \
stwa %i6, [%g1 + %g0] ASI; \
stwa %i7, [%g1 + %g3] ASI; \
saved; \
- retry; nop; nop; \
+ retry; \
b,a,pt %xcc, spill_fixup_dax; \
b,a,pt %xcc, spill_fixup_mna; \
b,a,pt %xcc, spill_fixup;
#define SPILL_2_GENERIC_ETRAP \
etrap_user_spill_32bit: \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, etrap_user_spill_64bit; \
+ srl %sp, 0, %sp; \
stwa %l0, [%sp + 0x00] %asi; \
stwa %l1, [%sp + 0x04] %asi; \
stwa %l2, [%sp + 0x08] %asi; \
@@ -427,7 +431,7 @@ etrap_user_spill_32bit: \
ba,pt %xcc, etrap_save; \
wrpr %g1, %cwp; \
nop; nop; nop; nop; \
- nop; nop; nop; nop; \
+ nop; nop; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit; \
ba,a,pt %xcc, etrap_spill_fixup_32bit;
@@ -592,7 +596,9 @@ user_rtt_fill_64bit: \
/* Normal 32bit fill */
#define FILL_2_GENERIC(ASI) \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, (. - (128 + 4)); \
+ srl %sp, 0, %sp; \
lduwa [%sp + %g0] ASI, %l0; \
mov 0x04, %g2; \
mov 0x08, %g3; \
@@ -616,14 +622,16 @@ user_rtt_fill_64bit: \
lduwa [%g1 + %g3] ASI, %i6; \
lduwa [%g1 + %g5] ASI, %i7; \
restored; \
- retry; nop; nop; nop; nop; \
+ retry; nop; nop; \
b,a,pt %xcc, fill_fixup_dax; \
b,a,pt %xcc, fill_fixup_mna; \
b,a,pt %xcc, fill_fixup;
#define FILL_2_GENERIC_RTRAP \
user_rtt_fill_32bit: \
- srl %sp, 0, %sp; \
+ and %sp, 1, %g3; \
+ brnz,pn %g3, user_rtt_fill_64bit; \
+ srl %sp, 0, %sp; \
lduwa [%sp + 0x00] %asi, %l0; \
lduwa [%sp + 0x04] %asi, %l1; \
lduwa [%sp + 0x08] %asi, %l2; \
@@ -643,7 +651,7 @@ user_rtt_fill_32bit: \
ba,pt %xcc, user_rtt_pre_restore; \
restored; \
nop; nop; nop; nop; nop; \
- nop; nop; nop; nop; nop; \
+ nop; nop; nop; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup; \
ba,a,pt %xcc, user_rtt_fill_fixup;
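
A hedged note on the (. - (128 + 4)) branch targets above: trap table
entries are 128 bytes each, and the branch sits at byte offset 4 of its
entry (it is the second instruction), so the expression resolves to offset
0 of the preceding entry, assumed here to hold the corresponding 64-bit
spill/fill handler:

	/* Illustrative trap-table layout; the neighbouring-entry
	 * placement is an assumption, not something stated in the patch:
	 *   entry N-1, offset -128: 64-bit handler  <- branch target
	 *   entry N,   offset    0: and  %sp, 1, %g3
	 *   entry N,   offset    4: brnz,pn %g3, (. - (128 + 4))
	 * From offset 4, subtracting (128 + 4) lands at offset -128. */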
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 73083e1d38d9..e562d3caee57 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -38,14 +38,14 @@
#define VERIFY_READ 0
#define VERIFY_WRITE 1
-#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
+#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)
#define segment_eq(a,b) ((a).seg == (b).seg)
#define set_fs(val) \
do { \
- set_thread_current_ds((val).seg); \
+ current_thread_info()->current_ds =(val).seg; \
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
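
For context, a brief sketch of the classic get_fs()/set_fs() pairing that
these definitions serve; the kernel-buffer access in the middle is elided:

	/* Temporarily let the user-access helpers accept kernel
	 * addresses, then always restore the previous segment. */
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* ... access a kernel buffer via copy_from_user() et al. ... */
	set_fs(old_fs);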
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 0ecea6ed943e..c3e5d8b64171 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -46,6 +46,7 @@
#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
+#define __ARCH_WANT_SYS_EXECVE
/*
* "Conditional" syscalls