Diffstat (limited to 'arch/mips')
37 files changed, 198 insertions, 137 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index d61297252006..4ab9a794bbcd 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -118,6 +118,7 @@ config ATH25 config ATH79 bool "Atheros AR71XX/AR724X/AR913X based boards" + select ARCH_HAS_RESET_CONTROLLER select ARCH_REQUIRE_GPIOLIB select BOOT_RAW select CEVT_R4K @@ -151,7 +152,6 @@ config BMIPS_GENERIC select BCM7120_L2_IRQ select BRCMSTB_L2_IRQ select IRQ_MIPS_CPU - select RAW_IRQ_ACCESSORS select DMA_NONCOHERENT select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 6e46abe0dac6..bd34f4093cd9 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -35,6 +35,7 @@ #include <linux/init.h> #include <linux/io.h> +#include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/slab.h> @@ -389,12 +390,11 @@ static long alchemy_calc_div(unsigned long rate, unsigned long prate, return div1; } -static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk, - int scale, int maxdiv) +static int alchemy_clk_fgcs_detr(struct clk_hw *hw, + struct clk_rate_request *req, + int scale, int maxdiv) { - struct clk *pc, *bpc, *free; + struct clk_hw *pc, *bpc, *free; long tdv, tpr, pr, nr, br, bpr, diff, lastdiff; int j; @@ -408,7 +408,7 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, * the one that gets closest to but not over the requested rate. */ for (j = 0; j < 7; j++) { - pc = clk_get_parent_by_index(hw->clk, j); + pc = clk_hw_get_parent_by_index(hw, j); if (!pc) break; @@ -416,20 +416,20 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, * XXX: we would actually want clk_has_active_children() * but this is a good-enough approximation for now. */ - if (!__clk_is_prepared(pc)) { + if (!clk_hw_is_prepared(pc)) { if (!free) free = pc; } - pr = clk_get_rate(pc); - if (pr < rate) + pr = clk_hw_get_rate(pc); + if (pr < req->rate) continue; /* what can hardware actually provide */ - tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); + tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL); nr = pr / tdv; - diff = rate - nr; - if (nr > rate) + diff = req->rate - nr; + if (nr > req->rate) continue; if (diff < lastdiff) { @@ -448,15 +448,16 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, */ if (lastdiff && free) { for (j = (maxdiv == 4) ? 
1 : scale; j <= maxdiv; j += scale) { - tpr = rate * j; + tpr = req->rate * j; if (tpr < 0) break; - pr = clk_round_rate(free, tpr); + pr = clk_hw_round_rate(free, tpr); - tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL); + tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, + NULL); nr = pr / tdv; - diff = rate - nr; - if (nr > rate) + diff = req->rate - nr; + if (nr > req->rate) continue; if (diff < lastdiff) { lastdiff = diff; @@ -469,9 +470,14 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, } } - *best_parent_rate = bpr; - *best_parent_clk = __clk_get_hw(bpc); - return br; + if (br < 0) + return br; + + req->best_parent_rate = bpr; + req->best_parent_hw = bpc; + req->rate = br; + + return 0; } static int alchemy_clk_fgv1_en(struct clk_hw *hw) @@ -562,14 +568,10 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw, return parent_rate / v; } -static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_fgv1_detr(struct clk_hw *hw, + struct clk_rate_request *req) { - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, 2, 512); + return alchemy_clk_fgcs_detr(hw, req, 2, 512); } /* Au1000, Au1100, Au15x0, Au12x0 */ @@ -696,11 +698,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw, return t; } -static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_fgv2_detr(struct clk_hw *hw, + struct clk_rate_request *req) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int scale, maxdiv; @@ -713,8 +712,7 @@ static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, maxdiv = 512; } - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, scale, maxdiv); + return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv); } /* Au1300 larger input mux, no separate disable bit, flexible divider */ @@ -917,17 +915,13 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate, return 0; } -static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_clk) +static int alchemy_clk_csrc_detr(struct clk_hw *hw, + struct clk_rate_request *req) { struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); int scale = c->dt[2] == 3 ? 
1 : 2; /* au1300 check */ - return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, - best_parent_clk, scale, 4); + return alchemy_clk_fgcs_detr(hw, req, scale, 4); } static struct clk_ops alchemy_clkops_csrc = { diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 01a644f174dd..1ba21204ebe0 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -190,6 +190,7 @@ int get_c0_perfcount_int(void) { return ATH79_MISC_IRQ(5); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi index 4759cff814d1..fb7734eadbf0 100644 --- a/arch/mips/boot/dts/qca/ar9132.dtsi +++ b/arch/mips/boot/dts/qca/ar9132.dtsi @@ -115,6 +115,14 @@ interrupt-controller; #interrupt-cells = <1>; }; + + rst: reset-controller@1806001c { + compatible = "qca,ar9132-reset", + "qca,ar7100-reset"; + reg = <0x1806001c 0x4>; + + #reset-cells = <1>; + }; }; spi@1f000000 { diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 56f5d080ef9d..b7fa9ae28c36 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h deleted file mode 100644 index 11d3b572b1b3..000000000000 --- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H -#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H - -#include <asm/bmips.h> - -#define plat_post_dma_flush bmips_post_dma_flush - -#include <asm/mach-generic/dma-coherence.h> - -#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 9d8106758142..ae8569475264 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * Make sure the buddy is global too (if it's !none, * it better already be global) */ +#ifdef CONFIG_SMP + /* + * For SMP, multiple CPUs can race, so we need to do + * this atomically. 
+ */ +#ifdef CONFIG_64BIT +#define LL_INSN "lld" +#define SC_INSN "scd" +#else /* CONFIG_32BIT */ +#define LL_INSN "ll" +#define SC_INSN "sc" +#endif + unsigned long page_global = _PAGE_GLOBAL; + unsigned long tmp; + + __asm__ __volatile__ ( + " .set push\n" + " .set noreorder\n" + "1: " LL_INSN " %[tmp], %[buddy]\n" + " bnez %[tmp], 2f\n" + " or %[tmp], %[tmp], %[global]\n" + " " SC_INSN " %[tmp], %[buddy]\n" + " beqz %[tmp], 1b\n" + " nop\n" + "2:\n" + " .set pop" + : [buddy] "+m" (buddy->pte), + [tmp] "=&r" (tmp) + : [global] "r" (page_global)); +#else /* !CONFIG_SMP */ if (pte_none(*buddy)) pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; +#endif /* CONFIG_SMP */ } #endif } diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 16f1ea9ab191..03722d4326a1 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu) extern void play_dead(void); #endif -extern asmlinkage void smp_call_function_interrupt(void); - static inline void arch_send_call_function_single_ipi(int cpu) { extern struct plat_smp_ops *mp_ops; /* private */ diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 28d6d9364bd1..a71da576883c 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -152,6 +152,31 @@ .set noreorder bltz k0, 8f move k1, sp +#ifdef CONFIG_EVA + /* + * Flush interAptiv's Return Prediction Stack (RPS) by writing + * EntryHi. Toggling Config7.RPS is slower and less portable. + * + * The RPS isn't automatically flushed when exceptions are + * taken, which can result in kernel mode speculative accesses + * to user addresses if the RPS mispredicts. That's harmless + * when user and kernel share the same address space, but with + * EVA the same user segments may be unmapped to kernel mode, + * even containing sensitive MMIO regions or invalid memory. + * + * This can happen when the kernel sets the return address to + * ret_from_* and jr's to the exception handler, which looks + * more like a tail call than a function call. If nested calls + * don't evict the last user address in the RPS, it will + * mispredict the return and fetch from a user controlled + * address into the icache. + * + * More recent EVA-capable cores with MAAR to restrict + * speculative accesses aren't affected. + */ + MFC0 k0, CP0_ENTRYHI + MTC0 k0, CP0_ENTRYHI +#endif .set reorder /* Called from user mode, new stack. */ get_saved_sp diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 7163cd7fdd69..9733cd0266e4 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -83,45 +83,43 @@ do { if (cpu_has_rw_llb) { \ } \ } while (0) +/* + * For newly created kernel threads switch_to() will return to + * ret_from_kernel_thread, newly created user threads to ret_from_fork. + * That is, everything following resume() will be skipped for new threads. + * So everything that matters to new threads should be placed before resume(). 
+ */ #define switch_to(prev, next, last) \ do { \ - u32 __c0_stat; \ s32 __fpsave = FP_SAVE_NONE; \ __mips_mt_fpaff_switch_to(prev); \ - if (cpu_has_dsp) \ + if (cpu_has_dsp) { \ __save_dsp(prev); \ - if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) { \ - if (cop2_lazy_restore) \ - KSTK_STATUS(prev) &= ~ST0_CU2; \ - __c0_stat = read_c0_status(); \ - write_c0_status(__c0_stat | ST0_CU2); \ - cop2_save(prev); \ - write_c0_status(__c0_stat & ~ST0_CU2); \ + __restore_dsp(next); \ + } \ + if (cop2_present) { \ + set_c0_status(ST0_CU2); \ + if ((KSTK_STATUS(prev) & ST0_CU2)) { \ + if (cop2_lazy_restore) \ + KSTK_STATUS(prev) &= ~ST0_CU2; \ + cop2_save(prev); \ + } \ + if (KSTK_STATUS(next) & ST0_CU2 && \ + !cop2_lazy_restore) { \ + cop2_restore(next); \ + } \ + clear_c0_status(ST0_CU2); \ } \ __clear_software_ll_bit(); \ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \ __fpsave = FP_SAVE_SCALAR; \ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \ __fpsave = FP_SAVE_VECTOR; \ - (last) = resume(prev, next, task_thread_info(next), __fpsave); \ -} while (0) - -#define finish_arch_switch(prev) \ -do { \ - u32 __c0_stat; \ - if (cop2_present && !cop2_lazy_restore && \ - (KSTK_STATUS(current) & ST0_CU2)) { \ - __c0_stat = read_c0_status(); \ - write_c0_status(__c0_stat | ST0_CU2); \ - cop2_restore(current); \ - write_c0_status(__c0_stat & ~ST0_CU2); \ - } \ - if (cpu_has_dsp) \ - __restore_dsp(current); \ if (cpu_has_userlocal) \ - write_c0_userlocal(current_thread_info()->tp_value); \ + write_c0_userlocal(task_thread_info(next)->tp_value); \ __restore_watch(); \ disable_msa(); \ + (last) = resume(prev, next, task_thread_info(next), __fpsave); \ } while (0) #endif /* _ASM_SWITCH_TO_H */ diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index af42e7003f12..baa7b6fc0a60 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set noat SAVE_ALL FEXPORT(handle_\exception\ext) - __BUILD_clear_\clear + __build_clear_\clear .set at __BUILD_\verbose \exception move a0, sp diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 3e4491aa6d6b..789d7bf4fef3 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { unsigned int real_len; - cpumask_t mask; + cpumask_t allowed, mask; int retval; struct task_struct *p; @@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (retval) goto out_unlock; - cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); + cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); + cpumask_and(&mask, &allowed, cpu_active_mask); out_unlock: read_unlock(&tasklist_lock); diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index b130033838ba..5fcec3032f38 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -38,7 +38,7 @@ char *mips_get_machine_name(void) return mips_machine_name; } -#ifdef CONFIG_OF +#ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { return add_memory_region(base, size, BOOT_MEM_RAM); diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index 74bab9ddd0e1..c6bbf2165051 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -24,7 +24,7 @@ LEAF(relocate_new_kernel) process_entry: PTR_L s2, (s0) 
- PTR_ADD s0, s0, SZREG + PTR_ADDIU s0, s0, SZREG /* * In case of a kdump/crash kernel, the indirection page is not @@ -61,9 +61,9 @@ copy_word: /* copy page word by word */ REG_L s5, (s2) REG_S s5, (s4) - PTR_ADD s4, s4, SZREG - PTR_ADD s2, s2, SZREG - LONG_SUB s6, s6, 1 + PTR_ADDIU s4, s4, SZREG + PTR_ADDIU s2, s2, SZREG + LONG_ADDIU s6, s6, -1 beq s6, zero, process_entry b copy_word b process_entry diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index ad4d44635c76..a6f6b762c47a 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -80,7 +80,7 @@ syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - daddiu a1, v0, __NR_64_Linux + move a1, v0 jal syscall_trace_enter bltz v0, 2f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 446cc654da56..4b2010654c46 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -72,7 +72,7 @@ n32_syscall_trace_entry: SAVE_STATIC move s0, t2 move a0, sp - daddiu a1, v0, __NR_N32_Linux + move a1, v0 jal syscall_trace_enter bltz v0, 2f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 19a7705f2a01..5d7f2634996f 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 336708ae5c5b..78cf8c2f1de0 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id) if (action == 0) scheduler_ipi(); else - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index d0744cc77ea7..a31896c33716 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -192,16 +192,6 @@ asmlinkage void start_secondary(void) cpu_startup_entry(CPUHP_ONLINE); } -/* - * Call into both interrupt handlers, as we share the IPI for them - */ -void __irq_entry smp_call_function_interrupt(void) -{ - irq_enter(); - generic_smp_call_function_interrupt(); - irq_exit(); -} - static void stop_this_cpu(void *dummy) { /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e207a43b5f8f..8ea28e6ab37d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task, void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; + mm_segment_t old_fs = get_fs(); if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; @@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp) prepare_frametrace(®s); } } + /* + * show_stack() deals exclusively with kernel mode, so be sure to access + * the stack in the kernel (not user) address space. 
+ */ + set_fs(KERNEL_DS); show_stacktrace(task, ®s); + set_fs(old_fs); } static void show_code(unsigned int __user *pc) @@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; enum ctx_state prev_state; + mm_segment_t old_fs = get_fs(); prev_state = exception_enter(); show_regs(regs); @@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs) dump_tlb_all(); } + if (!user_mode(regs)) + set_fs(KERNEL_DS); + show_code((unsigned int __user *) regs->cp0_epc); + set_fs(old_fs); + /* * Some chips may have other causes of machine check (e.g. SB1 * graduation timer) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index af84bef0c90d..eb3efd137fd1 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -438,7 +438,7 @@ do { \ : "memory"); \ } while(0) -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 6ab10573490d..2c218c3bbca5 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -466,6 +466,7 @@ int get_c0_perfcount_int(void) { return ltq_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index 509877c6e9d9..1a4738a8f2d3 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } if (action & SMP_ASK_C0COUNT) { BUG_ON(cpu != 0); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 77d96db8253c..aab218c36e0d 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -160,18 +160,18 @@ static inline void setup_protection_map(void) protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); - 
protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); + protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); } else { diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 36c0f26fac6b..852a41c6da45 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -133,7 +133,8 @@ good_area: #endif goto bad_area; } - if (!(vma->vm_flags & VM_READ)) { + if (!(vma->vm_flags & VM_READ) && + exception_epc(regs) != address) { #if 0 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", raw_smp_processor_id(), diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index d1392f8f5811..fa8f591f3713 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 5625b190edc0..b7bf721eabf5 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -154,6 +154,7 @@ int get_c0_perfcount_int(void) return mips_cpu_perf_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { @@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void) static void __init init_rtc(void) { - /* stop the clock whilst setting it up */ - CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL); + unsigned char freq, ctrl; - /* 32KHz time base */ - CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); + /* Set 32KHz time base if not already set */ + freq = CMOS_READ(RTC_FREQ_SELECT); + if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ) + CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); - /* start the clock */ - CMOS_WRITE(RTC_24H, RTC_CONTROL); + /* Ensure SET bit is clear so RTC can run */ + ctrl = CMOS_READ(RTC_CONTROL); + if (ctrl & RTC_SET) + CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL); } void __init plat_time_init(void) diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index e1d69895fb1d..a120b7a5a8fe 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -77,6 +77,7 @@ int get_c0_perfcount_int(void) return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; return -1; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index dc3e327fbbac..f5fff228b347 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) { clear_c0_eimr(irq); ack_c0_eirr(irq); - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); set_c0_eimr(irq); } diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 42181c7105df..f8d3e081b2eb 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id) static irqreturn_t 
paravirt_function_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index b8a0bf5766f2..c6996cf67a5c 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c @@ -311,12 +311,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) void pcibios_fixup_bus(struct pci_bus *bus) { - struct pci_dev *dev = bus->self; - - if (pci_has_flag(PCI_PROBE_ONLY) && dev && - (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { - pci_read_bridge_bases(bus); - } } EXPORT_SYMBOL(PCIBIOS_MIN_IO); diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c index 7c73fcb92a10..8a377346f0ca 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -26,6 +26,7 @@ int get_c0_perfcount_int(void) { return gic_get_c0_perfcount_int(); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); int get_c0_fdc_int(void) { diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c index 10170580a2de..ffa0f7101a97 100644 --- a/arch/mips/pmcs-msp71xx/msp_smp.c +++ b/arch/mips/pmcs-msp71xx/msp_smp.c @@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 53707aacc0f8..8c624a8b9ea2 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -89,6 +89,7 @@ int get_c0_perfcount_int(void) { return rt_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 3fbaef97a1b8..16ec4e12daa3 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void) scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else #endif { diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index af7d44edd9a8..4c71aea25663 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -29,8 +29,6 @@ #include <asm/sibyte/bcm1480_regs.h> #include <asm/sibyte/bcm1480_int.h> -extern void smp_call_function_interrupt(void); - /* * These are routines for dealing with the bcm1480 smp capabilities * independent of board/firmware @@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } } diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index c0c4b3f88a08..1cf66f5ff23d 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } 
}
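The alchemy/common/clock.c hunks above convert the driver's .determine_rate callbacks from the old multi-argument prototype to the struct clk_rate_request form: the callback now fills in req->best_parent_hw, req->best_parent_rate and req->rate, and returns 0 or a negative errno. A minimal sketch of that callback shape, assuming a hypothetical my_clk_determine_rate() with made-up single-parent divider math (not the alchemy logic itself), would look like:

/*
 * Illustrative only -- not part of this patch. Shows the clk_rate_request
 * based .determine_rate prototype that the hunks above migrate to.
 */
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int my_clk_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent_by_index(hw, 0);
	unsigned long prate, div;

	if (!parent || !req->rate)
		return -EINVAL;

	/* base the divider on the current parent rate */
	prate = clk_hw_get_rate(parent);
	div = DIV_ROUND_UP(prate, req->rate);

	/* report the chosen parent and the rate actually achievable */
	req->best_parent_hw = parent;
	req->best_parent_rate = prate;
	req->rate = prate / div;

	return 0;
}

The driver then points its struct clk_ops .determine_rate member at such a function; the core clock framework passes in the requested rate via req->rate and reads the negotiated result back from the same structure.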