From 3e6cb2d38a9c9758170813497a860c64543643d5 Mon Sep 17 00:00:00 2001
From: Ralf Baechle <ralf@linux-mips.org>
Date: Tue, 21 Feb 2006 18:32:14 +0000
Subject: [MIPS] Use "=R" constraint to avoid compiler errors in cmpxchg().

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
 include/asm-mips/system.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index e8e5d4143377..ddae9bae31af 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -322,7 +322,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 #endif
 	"2:						\n"
 	"	.set	pop				\n"
-	: "=&r" (retval), "=m" (*m)
+	: "=&r" (retval), "=R" (*m)
 	: "R" (*m), "Jr" (old), "Jr" (new)
 	: "memory");
 	} else if (cpu_has_llsc) {
@@ -342,7 +342,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 #endif
 	"2:						\n"
 	"	.set	pop				\n"
-	: "=&r" (retval), "=m" (*m)
+	: "=&r" (retval), "=R" (*m)
 	: "R" (*m), "Jr" (old), "Jr" (new)
 	: "memory");
 	} else {
@@ -379,7 +379,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 #endif
 	"2:						\n"
 	"	.set	pop				\n"
-	: "=&r" (retval), "=m" (*m)
+	: "=&r" (retval), "=R" (*m)
 	: "R" (*m), "Jr" (old), "Jr" (new)
 	: "memory");
 	} else if (cpu_has_llsc) {
@@ -397,7 +397,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 #endif
 	"2:						\n"
 	"	.set	pop				\n"
-	: "=&r" (retval), "=m" (*m)
+	: "=&r" (retval), "=R" (*m)
 	: "R" (*m), "Jr" (old), "Jr" (new)
 	: "memory");
 	} else {
--
cgit v1.2.3
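
For context on the one-character change above: the asm stores through a
memory operand, and gcc's generic "m" constraint lets the compiler pick
any addressing form the target supports, including offsettable forms
that the MIPS ll/sc instructions cannot encode, which is the sort of
mismatch behind the "compiler errors" in the subject line.  The "R"
constraint narrows the operand to one reachable through a plain base
register, which ll/sc accept.  Below is a minimal standalone sketch of
the same constraint pattern; cas32 and its scaffolding are illustrative,
not the kernel's __cmpxchg_u32, and it assumes a gcc MIPS target
(MIPS II or later) where ll/sc exist.

/*
 * Sketch of the "=R"/"R" pattern; illustrative, not the kernel's
 * __cmpxchg_u32.  Returns the value observed in *m; the swap took
 * place iff that value equals 'old'.
 */
static inline unsigned int cas32(volatile unsigned int *m,
				 unsigned int old, unsigned int new)
{
	unsigned int retval;

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	noat				\n" /* we use $1 (at) ourselves */
	"	.set	noreorder			\n"
	"1:	ll	%0, %2				\n" /* load-linked current value */
	"	bne	%0, %z3, 2f			\n" /* mismatch: give up */
	"	 move	$1, %z4				\n" /* (delay slot) value to store */
	"	sc	$1, %1				\n" /* store-conditional */
	"	beqz	$1, 1b				\n" /* lost the reservation: retry */
	"	 nop					\n" /* (delay slot) */
	"2:	.set	pop				\n"
	: "=&r" (retval), "=R" (*m)	/* "R": base-register-only address */
	: "R" (*m), "Jr" (old), "Jr" (new)
	: "memory");

	return retval;
}

Note that the "R" input and the "=R" output name the same object, so
gcc materializes the address once in a register and both the ll and the
sc use it.
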
From 9b6695a8adfe0916e81ddd810a5b9db3eb8b0e46 Mon Sep 17 00:00:00 2001
From: Ralf Baechle <ralf@linux-mips.org>
Date: Thu, 23 Feb 2006 12:23:27 +0000
Subject: [MIPS] SMP: Fix initialization order bug.

A recent change requires cpu_possible_map to be initialized before
smp_sched_init(), but most MIPS platforms were initializing their
processors in the prom_prepare_cpus callback of smp_prepare_cpus.  The
simple fix of calling prom_prepare_cpus from one of the earlier SMP
initialization hooks doesn't work well either, since IPIs may require
init_IRQ() to have completed.  So we bit the bullet and split
prom_prepare_cpus into two initialization functions: plat_smp_setup,
which is called early from setup_arch, and plat_prepare_cpus, which is
called where prom_prepare_cpus used to be.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
 arch/mips/kernel/setup.c            |  3 +++
 arch/mips/kernel/smp.c              |  2 +-
 arch/mips/kernel/smp_mt.c           | 13 +++++++------
 arch/mips/pmc-sierra/yosemite/smp.c | 24 ++++++------------------
 arch/mips/sgi-ip27/ip27-smp.c       |  7 ++++++-
 arch/mips/sibyte/cfe/smp.c          | 10 +++++++---
 include/asm-mips/smp.h              | 11 +++++++++--
 7 files changed, 39 insertions(+), 31 deletions(-)

(limited to 'include')

diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index d86affa21278..d9293c558e41 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -540,6 +540,9 @@ void __init setup_arch(char **cmdline_p)
 	sparse_init();
 	paging_init();
 	resource_init();
+#ifdef CONFIG_SMP
+	plat_smp_setup();
+#endif
 }
 
 int __init fpu_disable(char *s)
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 5e189862e523..06ed90752424 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -236,7 +236,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
-	prom_prepare_cpus(max_cpus);
+	plat_prepare_cpus(max_cpus);
 }
 
 /* preload SMP state for boot cpu */
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp_mt.c
index c930364830d0..993b8bf56aaf 100644
--- a/arch/mips/kernel/smp_mt.c
+++ b/arch/mips/kernel/smp_mt.c
@@ -143,7 +143,7 @@ static struct irqaction irq_call = {
  * Make sure all CPU's are in a sensible state before we boot any of the
  * secondarys
  */
-void prom_prepare_cpus(unsigned int max_cpus)
+void plat_smp_setup(void)
 {
 	unsigned long val;
 	int i, num;
@@ -179,11 +179,9 @@ void prom_prepare_cpus(unsigned int max_cpus)
 		write_vpe_c0_vpeconf0(tmp);
 
 		/* Record this as available CPU */
-		if (i < max_cpus) {
-			cpu_set(i, phys_cpu_present_map);
-			__cpu_number_map[i] = ++num;
-			__cpu_logical_map[num] = i;
-		}
+		cpu_set(i, phys_cpu_present_map);
+		__cpu_number_map[i] = ++num;
+		__cpu_logical_map[num] = i;
 	}
 
 	/* disable multi-threading with TC's */
@@ -241,7 +239,10 @@ void prom_prepare_cpus(unsigned int max_cpus)
 		set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
 		set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
 	}
+}
 
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
 	cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
 	cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
 
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index 7f8fda962190..c197311e15d3 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -50,37 +50,25 @@ void __init prom_grab_secondary(void)
  * We don't want to start the secondary CPU yet nor do we have a nice probing
  * feature in PMON so we just assume presence of the secondary core.
  */
-static char maxcpus_string[] __initdata =
-	KERN_WARNING "max_cpus set to 0; using 1 instead\n";
-
-void __init prom_prepare_cpus(unsigned int max_cpus)
+void __init plat_smp_setup(void)
 {
-	int enabled = 0, i;
-
-	if (max_cpus == 0) {
-		printk(maxcpus_string);
-		max_cpus = 1;
-	}
+	int i;
 
 	cpus_clear(phys_cpu_present_map);
 
 	for (i = 0; i < 2; i++) {
-		if (i == max_cpus)
-			break;
-
-		/*
-		 * The boot CPU
-		 */
 		cpu_set(i, phys_cpu_present_map);
 		__cpu_number_map[i] = i;
 		__cpu_logical_map[i] = i;
-		enabled++;
 	}
+}
 
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
 	/*
 	 * Be paranoid.  Enable the IPI only if we're really about to go SMP.
 	 */
-	if (enabled > 1)
+	if (cpus_weight(cpu_possible_map))
 		set_c0_status(STATUSF_IP5);
 }
 
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index dbef3f6b5650..09fa7f5216f0 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -140,7 +140,7 @@ static __init void intr_clear_all(nasid_t nasid)
 		REMOTE_HUB_CLR_INTR(nasid, i);
 }
 
-void __init prom_prepare_cpus(unsigned int max_cpus)
+void __init plat_smp_setup(void)
 {
 	cnodeid_t	cnode;
 
@@ -161,6 +161,11 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
 	alloc_cpupda(0, 0);
 }
 
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
+	/* We already did everything necessary earlier */
+}
+
 /*
  * Launch a slave into smp_bootstrap().  It doesn't take an argument, and we
  * set sp to the kernel stack of the newly created idle process, gp to the proc
diff --git a/arch/mips/sibyte/cfe/smp.c b/arch/mips/sibyte/cfe/smp.c
index 4477af3d8074..eab20e2db323 100644
--- a/arch/mips/sibyte/cfe/smp.c
+++ b/arch/mips/sibyte/cfe/smp.c
@@ -31,7 +31,7 @@
  *
  * Common setup before any secondaries are started
  */
-void __init prom_prepare_cpus(unsigned int max_cpus)
+void __init plat_smp_setup(void)
 {
 	int i, num;
 
@@ -40,14 +40,18 @@
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
-	for (i=1, num=0; i
[...]

--
cgit v1.2.3
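
The shape every platform ends up with after this patch: plat_smp_setup()
runs early from setup_arch() and only declares which CPUs exist, while
plat_prepare_cpus() runs from smp_prepare_cpus() after init_IRQ(), so
anything touching interrupts, IPIs in particular, belongs there.  Below
is a sketch for a hypothetical dual-core board, modeled on the yosemite
hunk above; the board, the two-CPU loop bound, and the choice of
STATUSF_IP5 as the IPI line are illustrative, while the maps and helpers
are the 2006-era MIPS kernel APIs used in the patch itself.

#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/mipsregs.h>
#include <asm/smp.h>

/* Early, from setup_arch(): just populate the CPU maps. */
void __init plat_smp_setup(void)
{
	int i;

	cpus_clear(phys_cpu_present_map);
	for (i = 0; i < 2; i++) {		/* hypothetical dual-core board */
		cpu_set(i, phys_cpu_present_map);
		__cpu_number_map[i] = i;
		__cpu_logical_map[i] = i;
	}
}

/* Later, from smp_prepare_cpus(): init_IRQ() has completed, so it is
 * now safe to wire up the IPI (IP5 is an illustrative choice). */
void __init plat_prepare_cpus(unsigned int max_cpus)
{
	if (cpus_weight(cpu_possible_map) > 1)
		set_c0_status(STATUSF_IP5);
}
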
From [...] Mon Sep 17 00:00:00 2001
From: Ralf Baechle <ralf@linux-mips.org>
Date: Thu, 23 Feb 2006 14:10:53 +0000
Subject: [MIPS] Fix atomic*_sub_if_positive return value.

Reported and initial fix by Thomas Koeller, rewritten by me.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
---
 include/asm-mips/atomic.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'include')

diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 654b97d3e13a..2c8b853376c9 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -250,7 +250,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 	"	subu	%0, %1, %3				\n"
 	"	bltz	%0, 1f					\n"
 	"	sc	%0, %2					\n"
+	"	.set	noreorder				\n"
 	"	beqzl	%0, 1b					\n"
+	"	subu	%0, %1, %3				\n"
+	"	.set	reorder					\n"
 	"	sync						\n"
 	"1:							\n"
 	"	.set	mips0					\n"
@@ -266,7 +269,10 @@
 	"	subu	%0, %1, %3				\n"
 	"	bltz	%0, 1f					\n"
 	"	sc	%0, %2					\n"
+	"	.set	noreorder				\n"
 	"	beqz	%0, 1b					\n"
+	"	subu	%0, %1, %3				\n"
+	"	.set	reorder					\n"
 	"	sync						\n"
 	"1:							\n"
 	"	.set	mips0					\n"
@@ -598,7 +604,10 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 	"	dsubu	%0, %1, %3				\n"
 	"	bltz	%0, 1f					\n"
 	"	scd	%0, %2					\n"
+	"	.set	noreorder				\n"
 	"	beqzl	%0, 1b					\n"
+	"	dsubu	%0, %1, %3				\n"
+	"	.set	reorder					\n"
 	"	sync						\n"
 	"1:							\n"
 	"	.set	mips0					\n"
@@ -614,7 +623,10 @@
 	"	dsubu	%0, %1, %3				\n"
 	"	bltz	%0, 1f					\n"
 	"	scd	%0, %2					\n"
+	"	.set	noreorder				\n"
 	"	beqz	%0, 1b					\n"
+	"	dsubu	%0, %1, %3				\n"
+	"	.set	reorder					\n"
 	"	sync						\n"
 	"1:							\n"
 	"	.set	mips0					\n"
--
cgit v1.2.3
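
What the last patch fixes: sc deposits its success flag (1 or 0) in the
register it stored from, here %0, and %0 is also what
atomic_sub_if_positive() returns.  Before the change, a successful sc
therefore left %0 holding 1, so the caller saw 1 instead of the
decremented counter.  The patch brackets the branch with
.set noreorder/.set reorder and repeats the subu/dsubu there, so that
%0 is recomputed from the ll'd value rather than left holding sc's flag
once the loop exits.  In plain C the intended semantics look as follows;
this is a sketch only, the kernel must do it atomically with the LL/SC
loops above, and the _sketch names are ours.

#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;	/* as in asm-mips/atomic.h */

/* Intended semantics of atomic_sub_if_positive(): subtract i, but keep
 * the old value when the result would go negative; return the
 * subtraction result either way. */
static int atomic_sub_if_positive_sketch(int i, atomic_t *v)
{
	int result = v->counter - i;

	if (result >= 0)	/* the bltz skips the sc when negative */
		v->counter = result;
	return result;		/* new value, or the negative result */
}

int main(void)
{
	atomic_t v = { 3 };

	printf("%d\n", atomic_sub_if_positive_sketch(2, &v));	/* 1, v.counter == 1 */
	printf("%d\n", atomic_sub_if_positive_sketch(2, &v));	/* -1, v.counter still 1 */
	return 0;
}
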