author     Ingo Molnar <mingo@elte.hu>   2008-07-15 23:55:59 +0400
committer  Ingo Molnar <mingo@elte.hu>   2008-07-15 23:55:59 +0400
commit     1a781a777b2f6ac46523fe92396215762ced624d (patch)
tree       4f34bb4aade85c0eb364b53d664ec7f6ab959006 /include
parent     b9d2252c1e44fa83a4e65fdc9eb93db6297c55af (diff)
parent     42a2f217a5e324ed5f2373ab1b7a0a15187c4d6c (diff)
download   linux-1a781a777b2f6ac46523fe92396215762ced624d.tar.xz
Merge branch 'generic-ipi' into generic-ipi-for-linus
Conflicts:

	arch/powerpc/Kconfig
	arch/s390/kernel/time.c
	arch/x86/kernel/apic_32.c
	arch/x86/kernel/cpu/perfctr-watchdog.c
	arch/x86/kernel/i8259_64.c
	arch/x86/kernel/ldt.c
	arch/x86/kernel/nmi_64.c
	arch/x86/kernel/smpboot.c
	arch/x86/xen/smp.c
	include/asm-x86/hw_irq_32.h
	include/asm-x86/hw_irq_64.h
	include/asm-x86/mach-default/irq_vectors.h
	include/asm-x86/mach-voyager/irq_vectors.h
	include/asm-x86/smp.h
	kernel/Makefile

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/smp.h                      5
-rw-r--r--  include/asm-arm/smp.h                        3
-rw-r--r--  include/asm-ia64/smp.h                       8
-rw-r--r--  include/asm-m32r/smp.h                       4
-rw-r--r--  include/asm-mips/smp.h                      13
-rw-r--r--  include/asm-parisc/smp.h                     3
-rw-r--r--  include/asm-powerpc/smp.h                    8
-rw-r--r--  include/asm-sh/smp.h                        14
-rw-r--r--  include/asm-sparc/smp.h                      2
-rw-r--r--  include/asm-x86/hw_irq.h                     1
-rw-r--r--  include/asm-x86/irq_vectors.h                6
-rw-r--r--  include/asm-x86/mach-default/entry_arch.h    1
-rw-r--r--  include/asm-x86/mach-visws/entry_arch.h     22
-rw-r--r--  include/asm-x86/mach-voyager/entry_arch.h    2
-rw-r--r--  include/asm-x86/smp.h                       21
-rw-r--r--  include/asm-x86/xen/events.h                 1
-rw-r--r--  include/linux/smp.h                         46
17 files changed, 89 insertions, 71 deletions
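
The headers below drop the old 'retry' argument from the cross-CPU call API and hand delivery off to the new generic helpers. A minimal sketch of the caller-facing shape after this merge (drain_local_queue() and example_cross_cpu_calls() are made up for illustration):

#include <linux/smp.h>

/* Hypothetical callback: runs on each target CPU with interrupts disabled. */
static void drain_local_queue(void *info)
{
        /* per-CPU work would go here; 'info' is the caller's cookie */
}

static void example_cross_cpu_calls(void)
{
        /* Run on one specific CPU and wait for it; no 'retry' argument anymore. */
        smp_call_function_single(1, drain_local_queue, NULL, 1);

        /* Run on all other online CPUs and wait for them to finish. */
        smp_call_function(drain_local_queue, NULL, 1);
}

Both calls funnel into the generic queueing code; each architecture only contributes the raw IPI senders declared in the per-arch headers that follow.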
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 286e1d844f63..544c69af8168 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -47,12 +47,13 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
extern int smp_num_cpus;
#define cpu_possible_map cpu_present_map
-int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
#else /* CONFIG_SMP */
#define hard_smp_processor_id() 0
-#define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; })
+#define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; })
#endif /* CONFIG_SMP */
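
With the old smp_call_function_on_cpu() prototype gone, an architecture only exports the two IPI hooks declared above. A sketch of what an implementation might look like, assuming a send_ipi_message() primitive and IPI_CALL_FUNC/IPI_CALL_FUNC_SINGLE message numbers (all three are placeholders for whatever the platform really uses):

void arch_send_call_function_ipi(cpumask_t mask)
{
        /* raise the 'call function' IPI on every CPU in the mask */
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        /* raise the single-target variant on one CPU */
        send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

The matching interrupt handlers then simply call generic_smp_call_function_interrupt() and generic_smp_call_function_single_interrupt() from the new generic code.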
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index af99636db400..7fffa2404b8e 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -101,6 +101,9 @@ extern void platform_cpu_die(unsigned int cpu);
extern int platform_cpu_kill(unsigned int cpu);
extern void platform_cpu_enable(unsigned int cpu);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
/*
* Local timer interrupt handling function (can be IPI'ed).
*/
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index ec5f355fb7e3..27731e032ee9 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,9 +38,6 @@ ia64_get_lid (void)
return lid.f.id << 8 | lid.f.eid;
}
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
- void *info, int wait);
-
#define hard_smp_processor_id() ia64_get_lid()
#ifdef CONFIG_SMP
@@ -124,11 +121,12 @@ extern void __init init_smp_config (void);
extern void smp_do_timer (struct pt_regs *regs);
extern void smp_send_reschedule (int cpu);
-extern void lock_ipi_calllock(void);
-extern void unlock_ipi_calllock(void);
extern void identify_siblings (struct cpuinfo_ia64 *);
extern int is_multithreading_enabled(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
#else /* CONFIG_SMP */
#define cpu_logical_id(i) 0
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index 078e1a51a042..c5dd66916692 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -89,6 +89,9 @@ static __inline__ unsigned int num_booting_cpus(void)
extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
#endif /* not __ASSEMBLY__ */
#define NO_PROC_ID (0xff) /* No processor magic marker */
@@ -104,6 +107,7 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
#define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0)
#define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0)
#define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0)
+#define CALL_FUNC_SINGLE_IPI (M32R_IRQ_IPI6-M32R_IRQ_IPI0)
#define IPI_SHIFT (0)
#define NR_IPIS (8)
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 84fef1aeec0c..0ff5b523ea77 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -35,16 +35,6 @@ extern int __cpu_logical_map[NR_CPUS];
#define NO_PROC_ID (-1)
-struct call_data_struct {
- void (*func)(void *);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-};
-
-extern struct call_data_struct *call_data;
-
#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
#define SMP_CALL_FUNCTION 0x2
@@ -67,4 +57,7 @@ static inline void smp_send_reschedule(int cpu)
extern asmlinkage void smp_call_function_interrupt(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
#endif /* __ASM_SMP_H */
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index 306f4950e32e..398cdbaf4e54 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -30,6 +30,9 @@ extern cpumask_t cpu_online_map;
extern void smp_send_reschedule(int cpu);
extern void smp_send_all_nop(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
#endif /* !ASSEMBLY */
/*
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 505f35bacaa9..c663a1fa77c5 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -67,10 +67,7 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
* in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION 0
#define PPC_MSG_RESCHEDULE 1
-/* This is unused now */
-#if 0
-#define PPC_MSG_MIGRATE_TASK 2
-#endif
+#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK 3
void smp_init_iSeries(void);
@@ -117,6 +114,9 @@ extern void smp_generic_take_timebase(void);
extern struct smp_ops_t *smp_ops;
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h
index 9c8d34b07ebf..593343cd26ee 100644
--- a/include/asm-sh/smp.h
+++ b/include/asm-sh/smp.h
@@ -26,18 +26,10 @@ extern int __cpu_logical_map[NR_CPUS];
#define NO_PROC_ID (-1)
-struct smp_fn_call_struct {
- spinlock_t lock;
- atomic_t finished;
- void (*fn)(void *);
- void *data;
-};
-
-extern struct smp_fn_call_struct smp_fn_call;
-
#define SMP_MSG_FUNCTION 0
#define SMP_MSG_RESCHEDULE 1
-#define SMP_MSG_NR 2
+#define SMP_MSG_FUNCTION_SINGLE 2
+#define SMP_MSG_NR 3
void plat_smp_setup(void);
void plat_prepare_cpus(unsigned int max_cpus);
@@ -46,6 +38,8 @@ void plat_start_cpu(unsigned int cpu, unsigned long entry_point);
void plat_send_ipi(unsigned int cpu, unsigned int message);
int plat_register_ipi_handler(unsigned int message,
void (*handler)(void *), void *arg);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
#else
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index e6d561599726..b61e74bea06a 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -72,7 +72,7 @@ static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5)
{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
-static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
+static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
{
xc1((smpfunc_t)func, (unsigned long)info);
return 0;
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 18f067c310f7..77ba51df5668 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -48,6 +48,7 @@ extern void irq_move_cleanup_interrupt(void);
extern void threshold_interrupt(void);
extern void call_function_interrupt(void);
+extern void call_function_single_interrupt(void);
/* PIC specific functions */
extern void disable_8259A_irq(unsigned int irq);
diff --git a/include/asm-x86/irq_vectors.h b/include/asm-x86/irq_vectors.h
index 0ac864ef3cd4..90b1d1f12f08 100644
--- a/include/asm-x86/irq_vectors.h
+++ b/include/asm-x86/irq_vectors.h
@@ -64,6 +64,7 @@
# define INVALIDATE_TLB_VECTOR 0xfd
# define RESCHEDULE_VECTOR 0xfc
# define CALL_FUNCTION_VECTOR 0xfb
+# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
# define THERMAL_APIC_VECTOR 0xf0
#else
@@ -72,6 +73,7 @@
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
#define THERMAL_APIC_VECTOR 0xfa
#define THRESHOLD_APIC_VECTOR 0xf9
#define INVALIDATE_TLB_VECTOR_END 0xf7
@@ -143,6 +145,7 @@
#define VIC_RESCHEDULE_CPI 4
#define VIC_ENABLE_IRQ_CPI 5
#define VIC_CALL_FUNCTION_CPI 6
+#define VIC_CALL_FUNCTION_SINGLE_CPI 7
/* Now the QIC CPIs: Since we don't need the two initial levels,
* these are 2 less than the VIC CPIs */
@@ -152,9 +155,10 @@
#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
#define VIC_START_FAKE_CPI VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI
+#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
/* this is the SYS_INT CPI. */
#define VIC_SYS_INT 8
diff --git a/include/asm-x86/mach-default/entry_arch.h b/include/asm-x86/mach-default/entry_arch.h
index bc861469bdba..9283b60a1dd2 100644
--- a/include/asm-x86/mach-default/entry_arch.h
+++ b/include/asm-x86/mach-default/entry_arch.h
@@ -13,6 +13,7 @@
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
#endif
/*
diff --git a/include/asm-x86/mach-visws/entry_arch.h b/include/asm-x86/mach-visws/entry_arch.h
index b183fa6d83d9..86be554342d4 100644
--- a/include/asm-x86/mach-visws/entry_arch.h
+++ b/include/asm-x86/mach-visws/entry_arch.h
@@ -1,23 +1,5 @@
/*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
+ * VISWS uses the standard Linux entry points:
*/
-#ifdef CONFIG_X86_SMP
-BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
-BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-#endif
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-#endif
+#include "../mach-default/entry_arch.h"
diff --git a/include/asm-x86/mach-voyager/entry_arch.h b/include/asm-x86/mach-voyager/entry_arch.h
index 4a1e1e8c10b6..ae52624b5937 100644
--- a/include/asm-x86/mach-voyager/entry_arch.h
+++ b/include/asm-x86/mach-voyager/entry_arch.h
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 2e221f1ce0b2..c2784b3e0b77 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -50,9 +50,9 @@ struct smp_ops {
void (*smp_send_stop)(void);
void (*smp_send_reschedule)(int cpu);
- int (*smp_call_function_mask)(cpumask_t mask,
- void (*func)(void *info), void *info,
- int wait);
+
+ void (*send_call_func_ipi)(cpumask_t mask);
+ void (*send_call_func_single_ipi)(int cpu);
};
/* Globals due to paravirt */
@@ -94,17 +94,22 @@ static inline void smp_send_reschedule(int cpu)
smp_ops.smp_send_reschedule(cpu);
}
-static inline int smp_call_function_mask(cpumask_t mask,
- void (*func) (void *info), void *info,
- int wait)
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_ops.send_call_func_single_ipi(cpu);
+}
+
+static inline void arch_send_call_function_ipi(cpumask_t mask)
{
- return smp_ops.smp_call_function_mask(mask, func, info, wait);
+ smp_ops.send_call_func_ipi(mask);
}
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int native_cpu_up(unsigned int cpunum);
+void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_single_ipi(int cpu);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
@@ -197,7 +202,5 @@ static inline int hard_smp_processor_id(void)
extern void cpu_uninit(void);
#endif
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
#endif /* __ASSEMBLY__ */
#endif
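
With smp_call_function_mask removed from struct smp_ops, the native and paravirt backends only supply raw IPI senders. A rough sketch of how the native functions declared above might be wired in (other members elided; the initializer layout is illustrative, not copied from the tree):

struct smp_ops smp_ops = {
        .smp_send_stop                  = native_smp_send_stop,
        .smp_send_reschedule            = native_smp_send_reschedule,

        /* new hooks: the generic helpers only need raw IPI delivery */
        .send_call_func_ipi             = native_send_call_func_ipi,
        .send_call_func_single_ipi      = native_send_call_func_single_ipi,
};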
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
index 596312a7bfc9..f8d57ea1f05f 100644
--- a/include/asm-x86/xen/events.h
+++ b/include/asm-x86/xen/events.h
@@ -4,6 +4,7 @@
enum ipi_vector {
XEN_RESCHEDULE_VECTOR,
XEN_CALL_FUNCTION_VECTOR,
+ XEN_CALL_FUNCTION_SINGLE_VECTOR,
XEN_NR_IPIS,
};
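
The new Xen vector slots into the same scheme. A sketch of the backend hook, assuming a per-vcpu xen_send_IPI_one() event-channel helper is available:

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        /* deliver the new single-target IPI over the vcpu's event channel */
        xen_send_IPI_one(cpu, XEN_CALL_FUNCTION_SINGLE_VECTOR);
}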
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 55232ccf9cfd..48262f86c969 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,9 +7,18 @@
*/
#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
extern void cpu_idle(void);
+struct call_single_data {
+ struct list_head list;
+ void (*func) (void *info);
+ void *info;
+ unsigned int flags;
+};
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
@@ -52,15 +61,34 @@ extern void smp_cpus_done(unsigned int max_cpus);
/*
* Call a function on all other processors
*/
-int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
-
+int smp_call_function(void(*func)(void *info), void *info, int wait);
+int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+ int wait);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
- int retry, int wait);
+ int wait);
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void init_call_single_data(void);
+void ipi_call_lock(void);
+void ipi_call_unlock(void);
+void ipi_call_lock_irq(void);
+void ipi_call_unlock_irq(void);
+#else
+static inline void init_call_single_data(void)
+{
+}
+#endif
/*
* Call a function on all processors
*/
-int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
+int on_each_cpu(void (*func) (void *info), void *info, int wait);
#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */
#define MSG_ALL 0x8001
@@ -90,9 +118,9 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
{
return 0;
}
-#define smp_call_function(func, info, retry, wait) \
+#define smp_call_function(func, info, wait) \
(up_smp_call_function(func, info))
-#define on_each_cpu(func,info,retry,wait) \
+#define on_each_cpu(func,info,wait) \
({ \
local_irq_disable(); \
func(info); \
@@ -102,7 +130,7 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
-#define smp_call_function_single(cpuid, func, info, retry, wait) \
+#define smp_call_function_single(cpuid, func, info, wait) \
({ \
WARN_ON(cpuid != 0); \
local_irq_disable(); \
@@ -112,7 +140,9 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
-
+static inline void init_call_single_data(void)
+{
+}
#endif /* !SMP */
/*
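
For callers that cannot sleep or allocate, the new struct call_single_data can be embedded in the caller's own object and handed to __smp_call_function_single(). A sketch under that assumption (struct remote_work, raise_remote_work() and kick_remote_cpu() are invented for the example):

#include <linux/smp.h>

struct remote_work {
        struct call_single_data csd;
        int target_cpu;
};

/* Runs on rw->target_cpu, in interrupt context. */
static void raise_remote_work(void *info)
{
        /* kick the per-CPU work tied to this object */
}

static void kick_remote_cpu(struct remote_work *rw)
{
        rw->csd.func  = raise_remote_work;
        rw->csd.info  = rw;
        rw->csd.flags = 0;      /* asynchronous: do not wait for completion */
        __smp_call_function_single(rw->target_cpu, &rw->csd);
}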