Diffstat (limited to 'arch/mips/include/asm')
33 files changed, 1499 insertions, 590 deletions
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h index 859cf7048347..81fae23ce7cd 100644 --- a/arch/mips/include/asm/asm.h +++ b/arch/mips/include/asm/asm.h @@ -55,6 +55,7 @@ .type symbol, @function; \ .ent symbol, 0; \ symbol: .frame sp, 0, ra; \ + .cfi_startproc; \ .insn /* @@ -66,12 +67,14 @@ symbol: .frame sp, 0, ra; \ .type symbol, @function; \ .ent symbol, 0; \ symbol: .frame sp, framesize, rpc; \ + .cfi_startproc; \ .insn /* * END - mark end of function */ #define END(function) \ + .cfi_endproc; \ .end function; \ .size function, .-function diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h index a92aee7b977a..b3e2975f83d3 100644 --- a/arch/mips/include/asm/bmips.h +++ b/arch/mips/include/asm/bmips.h @@ -48,8 +48,8 @@ #include <asm/r4kcache.h> #include <asm/smp-ops.h> -extern struct plat_smp_ops bmips43xx_smp_ops; -extern struct plat_smp_ops bmips5000_smp_ops; +extern const struct plat_smp_ops bmips43xx_smp_ops; +extern const struct plat_smp_ops bmips5000_smp_ops; static inline int register_bmips_smp_ops(void) { diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index cd6efb07c980..a41059d47d31 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -15,6 +15,8 @@ #include <linux/cache.h> #include <linux/types.h> +#include <asm/mipsregs.h> + /* * Descriptor for a cache */ @@ -77,17 +79,10 @@ struct cpuinfo_mips { struct cache_desc tcache; /* Tertiary/split secondary cache */ int srsets; /* Shadow register sets */ int package;/* physical package number */ - int core; /* physical core number */ + unsigned int globalnumber; #ifdef CONFIG_64BIT int vmbits; /* Virtual memory size in bits */ #endif -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) - /* - * There is not necessarily a 1:1 mapping of VPE num to CPU number - * in particular on multi-core systems. 
- */ - int vpe_id; /* Virtual Processor number */ -#endif void *data; /* Additional data */ unsigned int watch_reg_count; /* Number that exist */ unsigned int watch_reg_use_cnt; /* Usable by ptrace */ @@ -144,11 +139,52 @@ struct proc_cpuinfo_notifier_args { unsigned long n; }; -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) -# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id) -#else -# define cpu_vpe_id(cpuinfo) ({ (void)cpuinfo; 0; }) -#endif +static inline unsigned int cpu_cluster(struct cpuinfo_mips *cpuinfo) +{ + /* Optimisation for systems where multiple clusters aren't used */ + if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) + return 0; + + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_CLUSTER) >> + MIPS_GLOBALNUMBER_CLUSTER_SHF; +} + +static inline unsigned int cpu_core(struct cpuinfo_mips *cpuinfo) +{ + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_CORE) >> + MIPS_GLOBALNUMBER_CORE_SHF; +} + +static inline unsigned int cpu_vpe_id(struct cpuinfo_mips *cpuinfo) +{ + /* Optimisation for systems where VP(E)s aren't used */ + if (!IS_ENABLED(CONFIG_MIPS_MT_SMP) && !IS_ENABLED(CONFIG_CPU_MIPSR6)) + return 0; + + return (cpuinfo->globalnumber & MIPS_GLOBALNUMBER_VP) >> + MIPS_GLOBALNUMBER_VP_SHF; +} + +extern void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster); +extern void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core); +extern void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe); + +static inline bool cpus_are_siblings(int cpua, int cpub) +{ + struct cpuinfo_mips *infoa = &cpu_data[cpua]; + struct cpuinfo_mips *infob = &cpu_data[cpub]; + unsigned int gnuma, gnumb; + + if (infoa->package != infob->package) + return false; + + gnuma = infoa->globalnumber & ~MIPS_GLOBALNUMBER_VP; + gnumb = infob->globalnumber & ~MIPS_GLOBALNUMBER_VP; + if (gnuma != gnumb) + return false; + + return true; +} static inline unsigned long cpu_asid_inc(void) { diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index 175fe565f4e1..a45af3de075d 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -151,11 +151,6 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_R5500: #endif -#ifdef CONFIG_SYS_HAS_CPU_R6000 - case CPU_R6000: - case CPU_R6000A: -#endif - #ifdef CONFIG_SYS_HAS_CPU_NEVADA case CPU_NEVADA: #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index d0c152b989f8..ece9b84f3bcb 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -286,11 +286,6 @@ enum cpu_type_enum { CPU_R3081, CPU_R3081E, /* - * R6000 class processors - */ - CPU_R6000, CPU_R6000A, - - /* * R4000 class processors */ CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310, diff --git a/arch/mips/include/asm/floppy.h b/arch/mips/include/asm/floppy.h index d75aed36480a..021d09ae5670 100644 --- a/arch/mips/include/asm/floppy.h +++ b/arch/mips/include/asm/floppy.h @@ -10,11 +10,11 @@ #ifndef _ASM_FLOPPY_H #define _ASM_FLOPPY_H -#include <linux/dma-mapping.h> +#include <asm/io.h> static inline void fd_cacheflush(char * addr, long size) { - dma_cache_sync(NULL, addr, size, DMA_BIDIRECTIONAL); + dma_cache_wback_inv((unsigned long)addr, size); } #define MAX_BUFFER_SECTORS 24 diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h index c05369e0b8d6..b36097d3cbf4 100644 --- a/arch/mips/include/asm/fpu_emulator.h +++ b/arch/mips/include/asm/fpu_emulator.h @@ -36,6 +36,7 @@ struct mips_fpu_emulator_stats { 
unsigned long emulated; unsigned long loads; unsigned long stores; + unsigned long branches; unsigned long cp1ops; unsigned long cp1xops; unsigned long errors; @@ -45,6 +46,121 @@ struct mips_fpu_emulator_stats { unsigned long ieee754_zerodiv; unsigned long ieee754_invalidop; unsigned long ds_emul; + + unsigned long abs_s; + unsigned long abs_d; + unsigned long add_s; + unsigned long add_d; + unsigned long bc1eqz; + unsigned long bc1nez; + unsigned long ceil_w_s; + unsigned long ceil_w_d; + unsigned long ceil_l_s; + unsigned long ceil_l_d; + unsigned long class_s; + unsigned long class_d; + unsigned long cmp_af_s; + unsigned long cmp_af_d; + unsigned long cmp_eq_s; + unsigned long cmp_eq_d; + unsigned long cmp_le_s; + unsigned long cmp_le_d; + unsigned long cmp_lt_s; + unsigned long cmp_lt_d; + unsigned long cmp_ne_s; + unsigned long cmp_ne_d; + unsigned long cmp_or_s; + unsigned long cmp_or_d; + unsigned long cmp_ueq_s; + unsigned long cmp_ueq_d; + unsigned long cmp_ule_s; + unsigned long cmp_ule_d; + unsigned long cmp_ult_s; + unsigned long cmp_ult_d; + unsigned long cmp_un_s; + unsigned long cmp_un_d; + unsigned long cmp_une_s; + unsigned long cmp_une_d; + unsigned long cmp_saf_s; + unsigned long cmp_saf_d; + unsigned long cmp_seq_s; + unsigned long cmp_seq_d; + unsigned long cmp_sle_s; + unsigned long cmp_sle_d; + unsigned long cmp_slt_s; + unsigned long cmp_slt_d; + unsigned long cmp_sne_s; + unsigned long cmp_sne_d; + unsigned long cmp_sor_s; + unsigned long cmp_sor_d; + unsigned long cmp_sueq_s; + unsigned long cmp_sueq_d; + unsigned long cmp_sule_s; + unsigned long cmp_sule_d; + unsigned long cmp_sult_s; + unsigned long cmp_sult_d; + unsigned long cmp_sun_s; + unsigned long cmp_sun_d; + unsigned long cmp_sune_s; + unsigned long cmp_sune_d; + unsigned long cvt_d_l; + unsigned long cvt_d_s; + unsigned long cvt_d_w; + unsigned long cvt_l_s; + unsigned long cvt_l_d; + unsigned long cvt_s_d; + unsigned long cvt_s_l; + unsigned long cvt_s_w; + unsigned long cvt_w_s; + unsigned long cvt_w_d; + unsigned long div_s; + unsigned long div_d; + unsigned long floor_w_s; + unsigned long floor_w_d; + unsigned long floor_l_s; + unsigned long floor_l_d; + unsigned long maddf_s; + unsigned long maddf_d; + unsigned long max_s; + unsigned long max_d; + unsigned long maxa_s; + unsigned long maxa_d; + unsigned long min_s; + unsigned long min_d; + unsigned long mina_s; + unsigned long mina_d; + unsigned long mov_s; + unsigned long mov_d; + unsigned long msubf_s; + unsigned long msubf_d; + unsigned long mul_s; + unsigned long mul_d; + unsigned long neg_s; + unsigned long neg_d; + unsigned long recip_s; + unsigned long recip_d; + unsigned long rint_s; + unsigned long rint_d; + unsigned long round_w_s; + unsigned long round_w_d; + unsigned long round_l_s; + unsigned long round_l_d; + unsigned long rsqrt_s; + unsigned long rsqrt_d; + unsigned long sel_s; + unsigned long sel_d; + unsigned long seleqz_s; + unsigned long seleqz_d; + unsigned long selnez_s; + unsigned long selnez_d; + unsigned long sqrt_s; + unsigned long sqrt_d; + unsigned long sub_s; + unsigned long sub_d; + unsigned long trunc_w_s; + unsigned long trunc_w_d; + unsigned long trunc_l_s; + unsigned long trunc_l_d; }; DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); @@ -62,7 +178,7 @@ do { \ extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu, - void *__user *fault_addr); + void __user **fault_addr); void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, struct task_struct 
*tsk); int process_fpemu_return(int sig, void __user *fault_addr, diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index ecabc00c1e66..0cbf3af37eca 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -632,4 +632,6 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size); */ #define xlate_dev_kmem_ptr(p) p +void __ioread64_copy(void *to, const void __iomem *from, size_t count); + #endif /* _ASM_IO_H */ diff --git a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h index bace5b9ae4df..f439cf9cf9d1 100644 --- a/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-au1x00/cpu-feature-overrides.h @@ -8,12 +8,16 @@ #define __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H #define cpu_has_tlb 1 +#define cpu_has_ftlb 0 #define cpu_has_tlbinv 0 #define cpu_has_segments 0 #define cpu_has_eva 0 #define cpu_has_htw 0 +#define cpu_has_ldpte 0 #define cpu_has_rixiex 0 #define cpu_has_maar 0 +#define cpu_has_rw_llb 0 +#define cpu_has_3kex 0 #define cpu_has_4kex 1 #define cpu_has_3k_cache 0 #define cpu_has_4k_cache 1 @@ -30,6 +34,12 @@ #define cpu_has_mcheck 1 #define cpu_has_ejtag 1 #define cpu_has_llsc 1 +#define cpu_has_guestctl0ext 0 +#define cpu_has_guestctl1 0 +#define cpu_has_guestctl2 0 +#define cpu_has_guestid 0 +#define cpu_has_drg 0 +#define cpu_has_bp_ghist 0 #define cpu_has_mips16 0 #define cpu_has_mips16e2 0 #define cpu_has_mdmx 0 @@ -37,17 +47,23 @@ #define cpu_has_smartmips 0 #define cpu_has_rixi 0 #define cpu_has_mmips 0 +#define cpu_has_lpa 0 +#define cpu_has_mhv 0 #define cpu_has_vtag_icache 0 #define cpu_has_dc_aliases 0 #define cpu_has_ic_fills_f_dc 1 #define cpu_has_pindexed_dcache 0 #define cpu_has_mips32r1 1 #define cpu_has_mips32r2 0 +#define cpu_has_mips32r6 0 #define cpu_has_mips64r1 0 #define cpu_has_mips64r2 0 +#define cpu_has_mips64r6 0 #define cpu_has_dsp 0 #define cpu_has_dsp2 0 +#define cpu_has_dsp3 0 #define cpu_has_mipsmt 0 +#define cpu_has_vp 0 #define cpu_has_userlocal 0 #define cpu_has_nofpuex 0 #define cpu_has_64bits 0 @@ -58,9 +74,19 @@ #define cpu_dcache_line_size() 32 #define cpu_icache_line_size() 32 +#define cpu_scache_line_size() 0 #define cpu_has_perf_cntr_intr_bit 0 #define cpu_has_vz 0 #define cpu_has_msa 0 +#define cpu_has_fre 0 +#define cpu_has_cdmm 0 +#define cpu_has_small_pages 0 +#define cpu_has_nan_legacy 1 +#define cpu_has_nan_2008 1 +#define cpu_has_ebase_wg 0 +#define cpu_has_badinstr 0 +#define cpu_has_badinstrp 0 +#define cpu_has_contextconfig 0 #endif /* __ASM_MACH_AU1X00_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 5035f09c5427..24080af570f9 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -710,7 +710,7 @@ /* Broadcom 6345 ENET DMA definitions */ #define ENETDMA_6345_CHANCFG_REG (0x00) -#define ENETDMA_6345_MAXBURST_REG (0x40) +#define ENETDMA_6345_MAXBURST_REG (0x04) #define ENETDMA_6345_RSTART_REG (0x08) diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h index bd8b9bbe1771..a4f798629c3d 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h @@ -46,9 +46,9 @@ #define cpu_has_64bits 1 #define cpu_has_octeon_cache 1 #define 
cpu_has_saa octeon_has_saa() -#define cpu_has_mips32r1 0 -#define cpu_has_mips32r2 0 -#define cpu_has_mips64r1 0 +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 1 #define cpu_has_mips64r2 1 #define cpu_has_dsp 0 #define cpu_has_dsp2 0 diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h index defd135e7ac8..3fb7a0e09494 100644 --- a/arch/mips/include/asm/mach-ip27/topology.h +++ b/arch/mips/include/asm/mach-ip27/topology.h @@ -23,7 +23,6 @@ struct cpuinfo_ip27 { extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS]; #define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid) -#define parent_node(node) (node) #define cpumask_of_node(node) ((node) == -1 ? \ cpu_all_mask : \ &hub_data(node)->h_cpus) diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h index 8064d7a4b33d..d750f93232e4 100644 --- a/arch/mips/include/asm/mach-lantiq/lantiq.h +++ b/arch/mips/include/asm/mach-lantiq/lantiq.h @@ -46,8 +46,6 @@ extern struct clk *clk_get_ppe(void); /* find out what bootsource we have */ extern unsigned char ltq_boot_select(void); -/* find out what caused the last cpu reset */ -extern int ltq_reset_cause(void); /* find out the soc type */ extern int ltq_soc_type(void); diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h index c68c0cc879c6..d0ae5d55413b 100644 --- a/arch/mips/include/asm/mach-loongson64/loongson.h +++ b/arch/mips/include/asm/mach-loongson64/loongson.h @@ -26,7 +26,7 @@ extern void mach_prepare_shutdown(void); /* environment arguments from bootloader */ extern u32 cpu_clock_freq; extern u32 memsize, highmemsize; -extern struct plat_smp_ops loongson3_smp_ops; +extern const struct plat_smp_ops loongson3_smp_ops; /* loongson-specific command line, env and memory initialization */ extern void __init prom_init_memory(void); diff --git a/arch/mips/include/asm/mach-loongson64/topology.h b/arch/mips/include/asm/mach-loongson64/topology.h index 0d8f3b55bdbc..bcb885615fca 100644 --- a/arch/mips/include/asm/mach-loongson64/topology.h +++ b/arch/mips/include/asm/mach-loongson64/topology.h @@ -4,7 +4,6 @@ #ifdef CONFIG_NUMA #define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2) -#define parent_node(node) (node) #define cpumask_of_node(node) (&__node_data[(node)]->cpumask) struct pci_bus; diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h index 987ff580466b..817698abf2eb 100644 --- a/arch/mips/include/asm/mips-boards/maltaint.h +++ b/arch/mips/include/asm/mips-boards/maltaint.h @@ -10,8 +10,6 @@ #ifndef _MIPS_MALTAINT_H #define _MIPS_MALTAINT_H -#include <linux/irqchip/mips-gic.h> - /* * Interrupts 0..15 are used for Malta ISA compatible interrupts */ @@ -62,7 +60,4 @@ #define MSC01E_INT_PERFCTR 10 #define MSC01E_INT_CPUCTR 11 -/* GIC external interrupts */ -#define GIC_INT_I8259A GIC_SHARED_TO_HWIRQ(3) - #endif /* !(_MIPS_MALTAINT_H) */ diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index cfdbab015769..f6231b91b724 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -8,16 +8,18 @@ * option) any later version. 
*/ +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-cm.h +#endif + #ifndef __MIPS_ASM_MIPS_CM_H__ #define __MIPS_ASM_MIPS_CM_H__ #include <linux/bitops.h> #include <linux/errno.h> -#include <linux/io.h> -#include <linux/types.h> /* The base address of the CM GCR block */ -extern void __iomem *mips_cm_base; +extern void __iomem *mips_gcr_base; /* The base address of the CM L2-only sync region */ extern void __iomem *mips_cm_l2sync_base; @@ -80,7 +82,7 @@ static inline int mips_cm_probe(void) static inline bool mips_cm_present(void) { #ifdef CONFIG_MIPS_CM - return mips_cm_base != NULL; + return mips_gcr_base != NULL; #else return false; #endif @@ -112,321 +114,219 @@ static inline bool mips_cm_has_l2sync(void) /* Size of the L2-only sync region */ #define MIPS_CM_L2SYNC_SIZE 0x1000 -/* Macros to ease the creation of register access functions */ -#define BUILD_CM_R_(name, off) \ -static inline unsigned long __iomem *addr_gcr_##name(void) \ -{ \ - return (unsigned long __iomem *)(mips_cm_base + (off)); \ -} \ - \ -static inline u32 read32_gcr_##name(void) \ -{ \ - return __raw_readl(addr_gcr_##name()); \ -} \ - \ -static inline u64 read64_gcr_##name(void) \ -{ \ - void __iomem *addr = addr_gcr_##name(); \ - u64 ret; \ - \ - if (mips_cm_is64) { \ - ret = __raw_readq(addr); \ - } else { \ - ret = __raw_readl(addr); \ - ret |= (u64)__raw_readl(addr + 0x4) << 32; \ - } \ - \ - return ret; \ -} \ - \ -static inline unsigned long read_gcr_##name(void) \ -{ \ - if (mips_cm_is64) \ - return read64_gcr_##name(); \ - else \ - return read32_gcr_##name(); \ -} - -#define BUILD_CM__W(name, off) \ -static inline void write32_gcr_##name(u32 value) \ -{ \ - __raw_writel(value, addr_gcr_##name()); \ -} \ - \ -static inline void write64_gcr_##name(u64 value) \ -{ \ - __raw_writeq(value, addr_gcr_##name()); \ -} \ - \ -static inline void write_gcr_##name(unsigned long value) \ -{ \ - if (mips_cm_is64) \ - write64_gcr_##name(value); \ - else \ - write32_gcr_##name(value); \ -} - -#define BUILD_CM_RW(name, off) \ - BUILD_CM_R_(name, off) \ - BUILD_CM__W(name, off) - -#define BUILD_CM_Cx_R_(name, off) \ - BUILD_CM_R_(cl_##name, MIPS_CM_CLCB_OFS + (off)) \ - BUILD_CM_R_(co_##name, MIPS_CM_COCB_OFS + (off)) - -#define BUILD_CM_Cx__W(name, off) \ - BUILD_CM__W(cl_##name, MIPS_CM_CLCB_OFS + (off)) \ - BUILD_CM__W(co_##name, MIPS_CM_COCB_OFS + (off)) - -#define BUILD_CM_Cx_RW(name, off) \ - BUILD_CM_Cx_R_(name, off) \ - BUILD_CM_Cx__W(name, off) - -/* GCB register accessor functions */ -BUILD_CM_R_(config, MIPS_CM_GCB_OFS + 0x00) -BUILD_CM_RW(base, MIPS_CM_GCB_OFS + 0x08) -BUILD_CM_RW(access, MIPS_CM_GCB_OFS + 0x20) -BUILD_CM_R_(rev, MIPS_CM_GCB_OFS + 0x30) -BUILD_CM_RW(err_control, MIPS_CM_GCB_OFS + 0x38) -BUILD_CM_RW(error_mask, MIPS_CM_GCB_OFS + 0x40) -BUILD_CM_RW(error_cause, MIPS_CM_GCB_OFS + 0x48) -BUILD_CM_RW(error_addr, MIPS_CM_GCB_OFS + 0x50) -BUILD_CM_RW(error_mult, MIPS_CM_GCB_OFS + 0x58) -BUILD_CM_RW(l2_only_sync_base, MIPS_CM_GCB_OFS + 0x70) -BUILD_CM_RW(gic_base, MIPS_CM_GCB_OFS + 0x80) -BUILD_CM_RW(cpc_base, MIPS_CM_GCB_OFS + 0x88) -BUILD_CM_RW(reg0_base, MIPS_CM_GCB_OFS + 0x90) -BUILD_CM_RW(reg0_mask, MIPS_CM_GCB_OFS + 0x98) -BUILD_CM_RW(reg1_base, MIPS_CM_GCB_OFS + 0xa0) -BUILD_CM_RW(reg1_mask, MIPS_CM_GCB_OFS + 0xa8) -BUILD_CM_RW(reg2_base, MIPS_CM_GCB_OFS + 0xb0) -BUILD_CM_RW(reg2_mask, MIPS_CM_GCB_OFS + 0xb8) -BUILD_CM_RW(reg3_base, MIPS_CM_GCB_OFS + 0xc0) -BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) -BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) 
-BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) -BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) -BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) -BUILD_CM_RW(l2_pft_control, MIPS_CM_GCB_OFS + 0x300) -BUILD_CM_RW(l2_pft_control_b, MIPS_CM_GCB_OFS + 0x308) -BUILD_CM_RW(bev_base, MIPS_CM_GCB_OFS + 0x680) - -/* Core Local & Core Other register accessor functions */ -BUILD_CM_Cx_RW(reset_release, 0x00) -BUILD_CM_Cx_RW(coherence, 0x08) -BUILD_CM_Cx_R_(config, 0x10) -BUILD_CM_Cx_RW(other, 0x18) -BUILD_CM_Cx_RW(reset_base, 0x20) -BUILD_CM_Cx_R_(id, 0x28) -BUILD_CM_Cx_RW(reset_ext_base, 0x30) -BUILD_CM_Cx_R_(tcid_0_priority, 0x40) -BUILD_CM_Cx_R_(tcid_1_priority, 0x48) -BUILD_CM_Cx_R_(tcid_2_priority, 0x50) -BUILD_CM_Cx_R_(tcid_3_priority, 0x58) -BUILD_CM_Cx_R_(tcid_4_priority, 0x60) -BUILD_CM_Cx_R_(tcid_5_priority, 0x68) -BUILD_CM_Cx_R_(tcid_6_priority, 0x70) -BUILD_CM_Cx_R_(tcid_7_priority, 0x78) -BUILD_CM_Cx_R_(tcid_8_priority, 0x80) - -/* GCR_CONFIG register fields */ -#define CM_GCR_CONFIG_NUMIOCU_SHF 8 -#define CM_GCR_CONFIG_NUMIOCU_MSK (_ULCAST_(0xf) << 8) -#define CM_GCR_CONFIG_PCORES_SHF 0 -#define CM_GCR_CONFIG_PCORES_MSK (_ULCAST_(0xff) << 0) - -/* GCR_BASE register fields */ -#define CM_GCR_BASE_GCRBASE_SHF 15 -#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15) -#define CM_GCR_BASE_CMDEFTGT_SHF 0 -#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0) +#define GCR_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_GCB_OFS + off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name) + +#define GCR_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_GCB_OFS + off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, redir_##name) + +#define GCR_CX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RO(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name) + +#define GCR_CX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RW(gcr, sz, MIPS_CM_COCB_OFS + off, co_##name) + +/* GCR_CONFIG - Information about the system */ +GCR_ACCESSOR_RO(64, 0x000, config) +#define CM_GCR_CONFIG_CLUSTER_COH_CAPABLE BIT_ULL(43) +#define CM_GCR_CONFIG_CLUSTER_ID GENMASK_ULL(39, 32) +#define CM_GCR_CONFIG_NUM_CLUSTERS GENMASK(29, 23) +#define CM_GCR_CONFIG_NUMIOCU GENMASK(15, 8) +#define CM_GCR_CONFIG_PCORES GENMASK(7, 0) + +/* GCR_BASE - Base address of the Global Configuration Registers (GCRs) */ +GCR_ACCESSOR_RW(64, 0x008, base) +#define CM_GCR_BASE_GCRBASE GENMASK_ULL(47, 15) +#define CM_GCR_BASE_CMDEFTGT GENMASK(1, 0) #define CM_GCR_BASE_CMDEFTGT_DISABLED 0 #define CM_GCR_BASE_CMDEFTGT_MEM 1 #define CM_GCR_BASE_CMDEFTGT_IOCU0 2 #define CM_GCR_BASE_CMDEFTGT_IOCU1 3 -/* GCR_RESET_EXT_BASE register fields */ -#define CM_GCR_RESET_EXT_BASE_EVARESET BIT(31) -#define CM_GCR_RESET_EXT_BASE_UEB BIT(30) - -/* GCR_ACCESS register fields */ -#define CM_GCR_ACCESS_ACCESSEN_SHF 0 -#define CM_GCR_ACCESS_ACCESSEN_MSK (_ULCAST_(0xff) << 0) +/* GCR_ACCESS - Controls core/IOCU access to GCRs */ +GCR_ACCESSOR_RW(32, 0x020, access) +#define CM_GCR_ACCESS_ACCESSEN GENMASK(7, 0) -/* GCR_REV register fields */ -#define CM_GCR_REV_MAJOR_SHF 8 -#define CM_GCR_REV_MAJOR_MSK (_ULCAST_(0xff) << 8) -#define CM_GCR_REV_MINOR_SHF 0 -#define CM_GCR_REV_MINOR_MSK (_ULCAST_(0xff) << 0) +/* GCR_REV - Indicates the Coherence Manager revision */ +GCR_ACCESSOR_RO(32, 0x030, rev) +#define CM_GCR_REV_MAJOR GENMASK(15, 8) +#define CM_GCR_REV_MINOR GENMASK(7, 0) #define 
CM_ENCODE_REV(major, minor) \ - (((major) << CM_GCR_REV_MAJOR_SHF) | \ - ((minor) << CM_GCR_REV_MINOR_SHF)) + (((major) << __ffs(CM_GCR_REV_MAJOR)) | \ + ((minor) << __ffs(CM_GCR_REV_MINOR))) #define CM_REV_CM2 CM_ENCODE_REV(6, 0) #define CM_REV_CM2_5 CM_ENCODE_REV(7, 0) #define CM_REV_CM3 CM_ENCODE_REV(8, 0) - -/* GCR_ERR_CONTROL register fields */ -#define CM_GCR_ERR_CONTROL_L2_ECC_EN_SHF 1 -#define CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK (_ULCAST_(0x1) << 1) -#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_SHF 0 -#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK (_ULCAST_(0x1) << 0) - -/* GCR_ERROR_CAUSE register fields */ -#define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF 27 -#define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK (_ULCAST_(0x1f) << 27) -#define CM3_GCR_ERROR_CAUSE_ERRTYPE_SHF 58 -#define CM3_GCR_ERROR_CAUSE_ERRTYPE_MSK GENMASK_ULL(63, 58) -#define CM_GCR_ERROR_CAUSE_ERRINFO_SHF 0 -#define CM_GCR_ERROR_CAUSE_ERRINGO_MSK (_ULCAST_(0x7ffffff) << 0) - -/* GCR_ERROR_MULT register fields */ -#define CM_GCR_ERROR_MULT_ERR2ND_SHF 0 -#define CM_GCR_ERROR_MULT_ERR2ND_MSK (_ULCAST_(0x1f) << 0) - -/* GCR_L2_ONLY_SYNC_BASE register fields */ -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_SHF 12 -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK (_ULCAST_(0xfffff) << 12) -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_SHF 0 -#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK (_ULCAST_(0x1) << 0) - -/* GCR_GIC_BASE register fields */ -#define CM_GCR_GIC_BASE_GICBASE_SHF 17 -#define CM_GCR_GIC_BASE_GICBASE_MSK (_ULCAST_(0x7fff) << 17) -#define CM_GCR_GIC_BASE_GICEN_SHF 0 -#define CM_GCR_GIC_BASE_GICEN_MSK (_ULCAST_(0x1) << 0) - -/* GCR_CPC_BASE register fields */ -#define CM_GCR_CPC_BASE_CPCBASE_SHF 15 -#define CM_GCR_CPC_BASE_CPCBASE_MSK (_ULCAST_(0x1ffff) << 15) -#define CM_GCR_CPC_BASE_CPCEN_SHF 0 -#define CM_GCR_CPC_BASE_CPCEN_MSK (_ULCAST_(0x1) << 0) - -/* GCR_GIC_STATUS register fields */ -#define CM_GCR_GIC_STATUS_GICEX_SHF 0 -#define CM_GCR_GIC_STATUS_GICEX_MSK (_ULCAST_(0x1) << 0) - -/* GCR_REGn_BASE register fields */ -#define CM_GCR_REGn_BASE_BASEADDR_SHF 16 -#define CM_GCR_REGn_BASE_BASEADDR_MSK (_ULCAST_(0xffff) << 16) - -/* GCR_REGn_MASK register fields */ -#define CM_GCR_REGn_MASK_ADDRMASK_SHF 16 -#define CM_GCR_REGn_MASK_ADDRMASK_MSK (_ULCAST_(0xffff) << 16) -#define CM_GCR_REGn_MASK_CCAOVR_SHF 5 -#define CM_GCR_REGn_MASK_CCAOVR_MSK (_ULCAST_(0x3) << 5) -#define CM_GCR_REGn_MASK_CCAOVREN_SHF 4 -#define CM_GCR_REGn_MASK_CCAOVREN_MSK (_ULCAST_(0x1) << 4) -#define CM_GCR_REGn_MASK_DROPL2_SHF 2 -#define CM_GCR_REGn_MASK_DROPL2_MSK (_ULCAST_(0x1) << 2) -#define CM_GCR_REGn_MASK_CMTGT_SHF 0 -#define CM_GCR_REGn_MASK_CMTGT_MSK (_ULCAST_(0x3) << 0) -#define CM_GCR_REGn_MASK_CMTGT_DISABLED (_ULCAST_(0x0) << 0) -#define CM_GCR_REGn_MASK_CMTGT_MEM (_ULCAST_(0x1) << 0) -#define CM_GCR_REGn_MASK_CMTGT_IOCU0 (_ULCAST_(0x2) << 0) -#define CM_GCR_REGn_MASK_CMTGT_IOCU1 (_ULCAST_(0x3) << 0) - -/* GCR_GIC_STATUS register fields */ -#define CM_GCR_GIC_STATUS_EX_SHF 0 -#define CM_GCR_GIC_STATUS_EX_MSK (_ULCAST_(0x1) << 0) - -/* GCR_CPC_STATUS register fields */ -#define CM_GCR_CPC_STATUS_EX_SHF 0 -#define CM_GCR_CPC_STATUS_EX_MSK (_ULCAST_(0x1) << 0) - -/* GCR_L2_CONFIG register fields */ -#define CM_GCR_L2_CONFIG_BYPASS_SHF 20 -#define CM_GCR_L2_CONFIG_BYPASS_MSK (_ULCAST_(0x1) << 20) -#define CM_GCR_L2_CONFIG_SET_SIZE_SHF 12 -#define CM_GCR_L2_CONFIG_SET_SIZE_MSK (_ULCAST_(0xf) << 12) -#define CM_GCR_L2_CONFIG_LINE_SIZE_SHF 8 -#define CM_GCR_L2_CONFIG_LINE_SIZE_MSK (_ULCAST_(0xf) << 8) -#define CM_GCR_L2_CONFIG_ASSOC_SHF 0 -#define 
CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) - -/* GCR_SYS_CONFIG2 register fields */ -#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 -#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) - -/* GCR_L2_PFT_CONTROL register fields */ -#define CM_GCR_L2_PFT_CONTROL_PAGEMASK_SHF 12 -#define CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK (_ULCAST_(0xfffff) << 12) -#define CM_GCR_L2_PFT_CONTROL_PFTEN_SHF 8 -#define CM_GCR_L2_PFT_CONTROL_PFTEN_MSK (_ULCAST_(0x1) << 8) -#define CM_GCR_L2_PFT_CONTROL_NPFT_SHF 0 -#define CM_GCR_L2_PFT_CONTROL_NPFT_MSK (_ULCAST_(0xff) << 0) - -/* GCR_L2_PFT_CONTROL_B register fields */ -#define CM_GCR_L2_PFT_CONTROL_B_CEN_SHF 8 -#define CM_GCR_L2_PFT_CONTROL_B_CEN_MSK (_ULCAST_(0x1) << 8) -#define CM_GCR_L2_PFT_CONTROL_B_PORTID_SHF 0 -#define CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK (_ULCAST_(0xff) << 0) - -/* GCR_Cx_COHERENCE register fields */ -#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 -#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) -#define CM3_GCR_Cx_COHERENCE_COHEN_MSK (_ULCAST_(0x1) << 0) - -/* GCR_Cx_CONFIG register fields */ -#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF 10 -#define CM_GCR_Cx_CONFIG_IOCUTYPE_MSK (_ULCAST_(0x3) << 10) -#define CM_GCR_Cx_CONFIG_PVPE_SHF 0 -#define CM_GCR_Cx_CONFIG_PVPE_MSK (_ULCAST_(0x3ff) << 0) - -/* GCR_Cx_OTHER register fields */ -#define CM_GCR_Cx_OTHER_CORENUM_SHF 16 -#define CM_GCR_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xffff) << 16) -#define CM3_GCR_Cx_OTHER_CORE_SHF 8 -#define CM3_GCR_Cx_OTHER_CORE_MSK (_ULCAST_(0x3f) << 8) -#define CM3_GCR_Cx_OTHER_VP_SHF 0 -#define CM3_GCR_Cx_OTHER_VP_MSK (_ULCAST_(0x7) << 0) - -/* GCR_Cx_RESET_BASE register fields */ -#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF 12 -#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_MSK (_ULCAST_(0xfffff) << 12) - -/* GCR_Cx_RESET_EXT_BASE register fields */ -#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_SHF 31 -#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_MSK (_ULCAST_(0x1) << 31) -#define CM_GCR_Cx_RESET_EXT_BASE_UEB_SHF 30 -#define CM_GCR_Cx_RESET_EXT_BASE_UEB_MSK (_ULCAST_(0x1) << 30) -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_SHF 20 -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_MSK (_ULCAST_(0xff) << 20) -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_SHF 1 -#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_MSK (_ULCAST_(0x7f) << 1) -#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_SHF 0 -#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_MSK (_ULCAST_(0x1) << 0) - -/** - * mips_cm_numcores - return the number of cores present in the system - * - * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or - * zero if no Coherence Manager is present. - */ -static inline unsigned mips_cm_numcores(void) -{ - if (!mips_cm_present()) - return 0; - - return ((read_gcr_config() & CM_GCR_CONFIG_PCORES_MSK) - >> CM_GCR_CONFIG_PCORES_SHF) + 1; -} - -/** - * mips_cm_numiocu - return the number of IOCUs present in the system - * - * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero - * if no Coherence Manager is present. 
- */ -static inline unsigned mips_cm_numiocu(void) -{ - if (!mips_cm_present()) - return 0; - - return (read_gcr_config() & CM_GCR_CONFIG_NUMIOCU_MSK) - >> CM_GCR_CONFIG_NUMIOCU_SHF; -} +#define CM_REV_CM3_5 CM_ENCODE_REV(9, 0) + +/* GCR_ERR_CONTROL - Control error checking logic */ +GCR_ACCESSOR_RW(32, 0x038, err_control) +#define CM_GCR_ERR_CONTROL_L2_ECC_EN BIT(1) +#define CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT BIT(0) + +/* GCR_ERR_MASK - Control which errors are reported as interrupts */ +GCR_ACCESSOR_RW(64, 0x040, error_mask) + +/* GCR_ERR_CAUSE - Indicates the type of error that occurred */ +GCR_ACCESSOR_RW(64, 0x048, error_cause) +#define CM_GCR_ERROR_CAUSE_ERRTYPE GENMASK(31, 27) +#define CM3_GCR_ERROR_CAUSE_ERRTYPE GENMASK_ULL(63, 58) +#define CM_GCR_ERROR_CAUSE_ERRINFO GENMASK(26, 0) + +/* GCR_ERR_ADDR - Indicates the address associated with an error */ +GCR_ACCESSOR_RW(64, 0x050, error_addr) + +/* GCR_ERR_MULT - Indicates when multiple errors have occurred */ +GCR_ACCESSOR_RW(64, 0x058, error_mult) +#define CM_GCR_ERROR_MULT_ERR2ND GENMASK(4, 0) + +/* GCR_L2_ONLY_SYNC_BASE - Base address of the L2 cache-only sync region */ +GCR_ACCESSOR_RW(64, 0x070, l2_only_sync_base) +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE GENMASK(31, 12) +#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN BIT(0) + +/* GCR_GIC_BASE - Base address of the Global Interrupt Controller (GIC) */ +GCR_ACCESSOR_RW(64, 0x080, gic_base) +#define CM_GCR_GIC_BASE_GICBASE GENMASK(31, 17) +#define CM_GCR_GIC_BASE_GICEN BIT(0) + +/* GCR_CPC_BASE - Base address of the Cluster Power Controller (CPC) */ +GCR_ACCESSOR_RW(64, 0x088, cpc_base) +#define CM_GCR_CPC_BASE_CPCBASE GENMASK(31, 15) +#define CM_GCR_CPC_BASE_CPCEN BIT(0) + +/* GCR_REGn_BASE - Base addresses of CM address regions */ +GCR_ACCESSOR_RW(64, 0x090, reg0_base) +GCR_ACCESSOR_RW(64, 0x0a0, reg1_base) +GCR_ACCESSOR_RW(64, 0x0b0, reg2_base) +GCR_ACCESSOR_RW(64, 0x0c0, reg3_base) +#define CM_GCR_REGn_BASE_BASEADDR GENMASK(31, 16) + +/* GCR_REGn_MASK - Size & destination of CM address regions */ +GCR_ACCESSOR_RW(64, 0x098, reg0_mask) +GCR_ACCESSOR_RW(64, 0x0a8, reg1_mask) +GCR_ACCESSOR_RW(64, 0x0b8, reg2_mask) +GCR_ACCESSOR_RW(64, 0x0c8, reg3_mask) +#define CM_GCR_REGn_MASK_ADDRMASK GENMASK(31, 16) +#define CM_GCR_REGn_MASK_CCAOVR GENMASK(7, 5) +#define CM_GCR_REGn_MASK_CCAOVREN BIT(4) +#define CM_GCR_REGn_MASK_DROPL2 BIT(2) +#define CM_GCR_REGn_MASK_CMTGT GENMASK(1, 0) +#define CM_GCR_REGn_MASK_CMTGT_DISABLED 0x0 +#define CM_GCR_REGn_MASK_CMTGT_MEM 0x1 +#define CM_GCR_REGn_MASK_CMTGT_IOCU0 0x2 +#define CM_GCR_REGn_MASK_CMTGT_IOCU1 0x3 + +/* GCR_GIC_STATUS - Indicates presence of a Global Interrupt Controller (GIC) */ +GCR_ACCESSOR_RO(32, 0x0d0, gic_status) +#define CM_GCR_GIC_STATUS_EX BIT(0) + +/* GCR_CPC_STATUS - Indicates presence of a Cluster Power Controller (CPC) */ +GCR_ACCESSOR_RO(32, 0x0f0, cpc_status) +#define CM_GCR_CPC_STATUS_EX BIT(0) + +/* GCR_L2_CONFIG - Indicates L2 cache configuration when Config5.L2C=1 */ +GCR_ACCESSOR_RW(32, 0x130, l2_config) +#define CM_GCR_L2_CONFIG_BYPASS BIT(20) +#define CM_GCR_L2_CONFIG_SET_SIZE GENMASK(15, 12) +#define CM_GCR_L2_CONFIG_LINE_SIZE GENMASK(11, 8) +#define CM_GCR_L2_CONFIG_ASSOC GENMASK(7, 0) + +/* GCR_SYS_CONFIG2 - Further information about the system */ +GCR_ACCESSOR_RO(32, 0x150, sys_config2) +#define CM_GCR_SYS_CONFIG2_MAXVPW GENMASK(3, 0) + +/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */ +GCR_ACCESSOR_RW(32, 0x300, l2_pft_control) +#define CM_GCR_L2_PFT_CONTROL_PAGEMASK GENMASK(31, 12) +#define 
CM_GCR_L2_PFT_CONTROL_PFTEN BIT(8) +#define CM_GCR_L2_PFT_CONTROL_NPFT GENMASK(7, 0) + +/* GCR_L2_PFT_CONTROL_B - Controls hardware L2 prefetching */ +GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b) +#define CM_GCR_L2_PFT_CONTROL_B_CEN BIT(8) +#define CM_GCR_L2_PFT_CONTROL_B_PORTID GENMASK(7, 0) + +/* GCR_L2SM_COP - L2 cache op state machine control */ +GCR_ACCESSOR_RW(32, 0x620, l2sm_cop) +#define CM_GCR_L2SM_COP_PRESENT BIT(31) +#define CM_GCR_L2SM_COP_RESULT GENMASK(8, 6) +#define CM_GCR_L2SM_COP_RESULT_DONTCARE 0 +#define CM_GCR_L2SM_COP_RESULT_DONE_OK 1 +#define CM_GCR_L2SM_COP_RESULT_DONE_ERROR 2 +#define CM_GCR_L2SM_COP_RESULT_ABORT_OK 3 +#define CM_GCR_L2SM_COP_RESULT_ABORT_ERROR 4 +#define CM_GCR_L2SM_COP_RUNNING BIT(5) +#define CM_GCR_L2SM_COP_TYPE GENMASK(4, 2) +#define CM_GCR_L2SM_COP_TYPE_IDX_WBINV 0 +#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAG 1 +#define CM_GCR_L2SM_COP_TYPE_IDX_STORETAGDATA 2 +#define CM_GCR_L2SM_COP_TYPE_HIT_INV 4 +#define CM_GCR_L2SM_COP_TYPE_HIT_WBINV 5 +#define CM_GCR_L2SM_COP_TYPE_HIT_WB 6 +#define CM_GCR_L2SM_COP_TYPE_FETCHLOCK 7 +#define CM_GCR_L2SM_COP_CMD GENMASK(1, 0) +#define CM_GCR_L2SM_COP_CMD_START 1 /* only when idle */ +#define CM_GCR_L2SM_COP_CMD_ABORT 3 /* only when running */ + +/* GCR_L2SM_TAG_ADDR_COP - L2 cache op state machine address control */ +GCR_ACCESSOR_RW(64, 0x628, l2sm_tag_addr_cop) +#define CM_GCR_L2SM_TAG_ADDR_COP_NUM_LINES GENMASK_ULL(63, 48) +#define CM_GCR_L2SM_TAG_ADDR_COP_START_TAG GENMASK_ULL(47, 6) + +/* GCR_BEV_BASE - Controls the location of the BEV for powered up cores */ +GCR_ACCESSOR_RW(64, 0x680, bev_base) + +/* GCR_Cx_RESET_RELEASE - Controls core reset for CM 1.x */ +GCR_CX_ACCESSOR_RW(32, 0x000, reset_release) + +/* GCR_Cx_COHERENCE - Controls core coherence */ +GCR_CX_ACCESSOR_RW(32, 0x008, coherence) +#define CM_GCR_Cx_COHERENCE_COHDOMAINEN GENMASK(7, 0) +#define CM3_GCR_Cx_COHERENCE_COHEN BIT(0) + +/* GCR_Cx_CONFIG - Information about a core's configuration */ +GCR_CX_ACCESSOR_RO(32, 0x010, config) +#define CM_GCR_Cx_CONFIG_IOCUTYPE GENMASK(11, 10) +#define CM_GCR_Cx_CONFIG_PVPE GENMASK(9, 0) + +/* GCR_Cx_OTHER - Configure the core-other/redirect GCR block */ +GCR_CX_ACCESSOR_RW(32, 0x018, other) +#define CM_GCR_Cx_OTHER_CORENUM GENMASK(31, 16) /* CM < 3 */ +#define CM_GCR_Cx_OTHER_CLUSTER_EN BIT(31) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_GIC_EN BIT(30) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_BLOCK GENMASK(25, 24) /* CM >= 3.5 */ +#define CM_GCR_Cx_OTHER_BLOCK_LOCAL 0 +#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL 1 +#define CM_GCR_Cx_OTHER_BLOCK_USER 2 +#define CM_GCR_Cx_OTHER_BLOCK_GLOBAL_HIGH 3 +#define CM_GCR_Cx_OTHER_CLUSTER GENMASK(21, 16) /* CM >= 3.5 */ +#define CM3_GCR_Cx_OTHER_CORE GENMASK(13, 8) /* CM >= 3 */ +#define CM_GCR_Cx_OTHER_CORE_CM 32 +#define CM3_GCR_Cx_OTHER_VP GENMASK(2, 0) /* CM >= 3 */ + +/* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */ +GCR_CX_ACCESSOR_RW(32, 0x020, reset_base) +#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE GENMASK(31, 12) + +/* GCR_Cx_ID - Identify the current core */ +GCR_CX_ACCESSOR_RO(32, 0x028, id) +#define CM_GCR_Cx_ID_CLUSTER GENMASK(15, 8) +#define CM_GCR_Cx_ID_CORE GENMASK(7, 0) + +/* GCR_Cx_RESET_EXT_BASE - Configure behaviour when cores reset or power up */ +GCR_CX_ACCESSOR_RW(32, 0x030, reset_ext_base) +#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET BIT(31) +#define CM_GCR_Cx_RESET_EXT_BASE_UEB BIT(30) +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK GENMASK(27, 20) +#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA GENMASK(7, 1) +#define 
CM_GCR_Cx_RESET_EXT_BASE_PRESENT BIT(0) /** * mips_cm_l2sync - perform an L2-only sync operation @@ -469,7 +369,7 @@ static inline unsigned int mips_cm_max_vp_width(void) uint32_t cfg; if (mips_cm_revision() >= CM_REV_CM3) - return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; + return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW; if (mips_cm_present()) { /* @@ -477,8 +377,8 @@ static inline unsigned int mips_cm_max_vp_width(void) * number of VP(E)s, and if that ever changes then this will * need revisiting. */ - cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; - return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; + cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE; + return (cfg >> __ffs(CM_GCR_Cx_CONFIG_PVPE)) + 1; } if (IS_ENABLED(CONFIG_SMP)) @@ -499,7 +399,7 @@ static inline unsigned int mips_cm_max_vp_width(void) */ static inline unsigned int mips_cm_vp_id(unsigned int cpu) { - unsigned int core = cpu_data[cpu].core; + unsigned int core = cpu_core(&cpu_data[cpu]); unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); return (core * mips_cm_max_vp_width()) + vp; @@ -508,29 +408,56 @@ static inline unsigned int mips_cm_vp_id(unsigned int cpu) #ifdef CONFIG_MIPS_CM /** - * mips_cm_lock_other - lock access to another core + * mips_cm_lock_other - lock access to redirect/other region + * @cluster: the other cluster to be accessed * @core: the other core to be accessed * @vp: the VP within the other core to be accessed + * @block: the register block to be accessed * - * Call before operating upon a core via the 'other' register region in - * order to prevent the region being moved during access. Must be followed - * by a call to mips_cm_unlock_other. + * Configure the redirect/other region for the local core/VP (depending upon + * the CM revision) to target the specified @cluster, @core, @vp & register + * @block. Must be called before using the redirect/other region, and followed + * by a call to mips_cm_unlock_other() when access to the redirect/other region + * is complete. + * + * This function acquires a spinlock such that code between it & + * mips_cm_unlock_other() calls cannot be pre-empted by anything which may + * reconfigure the redirect/other region, and cannot be interfered with by + * another VP in the core. As such calls to this function should not be nested. */ -extern void mips_cm_lock_other(unsigned int core, unsigned int vp); +extern void mips_cm_lock_other(unsigned int cluster, unsigned int core, + unsigned int vp, unsigned int block); /** - * mips_cm_unlock_other - unlock access to another core + * mips_cm_unlock_other - unlock access to redirect/other region * - * Call after operating upon another core via the 'other' register region. - * Must be called after mips_cm_lock_other. + * Must be called after mips_cm_lock_other() once all required access to the + * redirect/other region has been completed. */ extern void mips_cm_unlock_other(void); #else /* !CONFIG_MIPS_CM */ -static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { } +static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core, + unsigned int vp, unsigned int block) { } static inline void mips_cm_unlock_other(void) { } #endif /* !CONFIG_MIPS_CM */ +/** + * mips_cm_lock_other_cpu - lock access to redirect/other region + * @cpu: the other CPU whose register we want to access + * + * Configure the redirect/other region for the local core/VP (depending upon + * the CM revision) to target the specified @cpu & register @block. 
This is + * equivalent to calling mips_cm_lock_other() but accepts a Linux CPU number + * for convenience. + */ +static inline void mips_cm_lock_other_cpu(unsigned int cpu, unsigned int block) +{ + struct cpuinfo_mips *d = &cpu_data[cpu]; + + mips_cm_lock_other(cpu_cluster(d), cpu_core(d), cpu_vpe_id(d), block); +} + #endif /* __MIPS_ASM_MIPS_CM_H__ */ diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h index 8c519f9827a3..f885051a8378 100644 --- a/arch/mips/include/asm/mips-cpc.h +++ b/arch/mips/include/asm/mips-cpc.h @@ -8,11 +8,15 @@ * option) any later version. */ +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-cpc.h +#endif + #ifndef __MIPS_ASM_MIPS_CPC_H__ #define __MIPS_ASM_MIPS_CPC_H__ -#include <linux/io.h> -#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/errno.h> /* The base address of the CPC registers */ extern void __iomem *mips_cpc_base; @@ -61,89 +65,92 @@ static inline bool mips_cpc_present(void) #define MIPS_CPC_CLCB_OFS 0x2000 #define MIPS_CPC_COCB_OFS 0x4000 -/* Macros to ease the creation of register access functions */ -#define BUILD_CPC_R_(name, off) \ -static inline u32 *addr_cpc_##name(void) \ -{ \ - return (u32 *)(mips_cpc_base + (off)); \ -} \ - \ -static inline u32 read_cpc_##name(void) \ -{ \ - return __raw_readl(mips_cpc_base + (off)); \ -} - -#define BUILD_CPC__W(name, off) \ -static inline void write_cpc_##name(u32 value) \ -{ \ - __raw_writel(value, mips_cpc_base + (off)); \ -} - -#define BUILD_CPC_RW(name, off) \ - BUILD_CPC_R_(name, off) \ - BUILD_CPC__W(name, off) - -#define BUILD_CPC_Cx_R_(name, off) \ - BUILD_CPC_R_(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \ - BUILD_CPC_R_(co_##name, MIPS_CPC_COCB_OFS + (off)) - -#define BUILD_CPC_Cx__W(name, off) \ - BUILD_CPC__W(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \ - BUILD_CPC__W(co_##name, MIPS_CPC_COCB_OFS + (off)) - -#define BUILD_CPC_Cx_RW(name, off) \ - BUILD_CPC_Cx_R_(name, off) \ - BUILD_CPC_Cx__W(name, off) - -/* GCB register accessor functions */ -BUILD_CPC_RW(access, MIPS_CPC_GCB_OFS + 0x00) -BUILD_CPC_RW(seqdel, MIPS_CPC_GCB_OFS + 0x08) -BUILD_CPC_RW(rail, MIPS_CPC_GCB_OFS + 0x10) -BUILD_CPC_RW(resetlen, MIPS_CPC_GCB_OFS + 0x18) -BUILD_CPC_R_(revision, MIPS_CPC_GCB_OFS + 0x20) - -/* Core Local & Core Other accessor functions */ -BUILD_CPC_Cx_RW(cmd, 0x00) -BUILD_CPC_Cx_RW(stat_conf, 0x08) -BUILD_CPC_Cx_RW(other, 0x10) -BUILD_CPC_Cx_RW(vp_stop, 0x20) -BUILD_CPC_Cx_RW(vp_run, 0x28) -BUILD_CPC_Cx_RW(vp_running, 0x30) - -/* CPC_Cx_CMD register fields */ -#define CPC_Cx_CMD_SHF 0 -#define CPC_Cx_CMD_MSK (_ULCAST_(0xf) << 0) -#define CPC_Cx_CMD_CLOCKOFF (_ULCAST_(0x1) << 0) -#define CPC_Cx_CMD_PWRDOWN (_ULCAST_(0x2) << 0) -#define CPC_Cx_CMD_PWRUP (_ULCAST_(0x3) << 0) -#define CPC_Cx_CMD_RESET (_ULCAST_(0x4) << 0) - -/* CPC_Cx_STAT_CONF register fields */ -#define CPC_Cx_STAT_CONF_PWRUPE_SHF 23 -#define CPC_Cx_STAT_CONF_PWRUPE_MSK (_ULCAST_(0x1) << 23) -#define CPC_Cx_STAT_CONF_SEQSTATE_SHF 19 -#define CPC_Cx_STAT_CONF_SEQSTATE_MSK (_ULCAST_(0xf) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D0 (_ULCAST_(0x0) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U0 (_ULCAST_(0x1) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U1 (_ULCAST_(0x2) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U2 (_ULCAST_(0x3) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U3 (_ULCAST_(0x4) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U4 (_ULCAST_(0x5) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U5 (_ULCAST_(0x6) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_U6 (_ULCAST_(0x7) << 
19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D1 (_ULCAST_(0x8) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D3 (_ULCAST_(0x9) << 19) -#define CPC_Cx_STAT_CONF_SEQSTATE_D2 (_ULCAST_(0xa) << 19) -#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_SHF 17 -#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK (_ULCAST_(0x1) << 17) -#define CPC_Cx_STAT_CONF_PWRDN_IMPL_SHF 16 -#define CPC_Cx_STAT_CONF_PWRDN_IMPL_MSK (_ULCAST_(0x1) << 16) -#define CPC_Cx_STAT_CONF_EJTAG_PROBE_SHF 15 -#define CPC_Cx_STAT_CONF_EJTAG_PROBE_MSK (_ULCAST_(0x1) << 15) - -/* CPC_Cx_OTHER register fields */ -#define CPC_Cx_OTHER_CORENUM_SHF 16 -#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16) +#define CPC_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_GCB_OFS + off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_COCB_OFS + off, redir_##name) + +#define CPC_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_GCB_OFS + off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_COCB_OFS + off, redir_##name) + +#define CPC_CX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RO(cpc, sz, MIPS_CPC_COCB_OFS + off, co_##name) + +#define CPC_CX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_CLCB_OFS + off, cl_##name) \ + CPS_ACCESSOR_RW(cpc, sz, MIPS_CPC_COCB_OFS + off, co_##name) + +/* CPC_ACCESS - Control core/IOCU access to CPC registers prior to CM 3 */ +CPC_ACCESSOR_RW(32, 0x000, access) + +/* CPC_SEQDEL - Configure delays between command sequencer steps */ +CPC_ACCESSOR_RW(32, 0x008, seqdel) + +/* CPC_RAIL - Configure the delay from rail power-up to stability */ +CPC_ACCESSOR_RW(32, 0x010, rail) + +/* CPC_RESETLEN - Configure the length of reset sequences */ +CPC_ACCESSOR_RW(32, 0x018, resetlen) + +/* CPC_REVISION - Indicates the revisison of the CPC */ +CPC_ACCESSOR_RO(32, 0x020, revision) + +/* CPC_PWRUP_CTL - Control power to the Coherence Manager (CM) */ +CPC_ACCESSOR_RW(32, 0x030, pwrup_ctl) +#define CPC_PWRUP_CTL_CM_PWRUP BIT(0) + +/* CPC_CONFIG - Mirrors GCR_CONFIG */ +CPC_ACCESSOR_RW(64, 0x138, config) + +/* CPC_SYS_CONFIG - Control cluster endianness */ +CPC_ACCESSOR_RW(32, 0x140, sys_config) +#define CPC_SYS_CONFIG_BE_IMMEDIATE BIT(2) +#define CPC_SYS_CONFIG_BE_STATUS BIT(1) +#define CPC_SYS_CONFIG_BE BIT(0) + +/* CPC_Cx_CMD - Instruct the CPC to take action on a core */ +CPC_CX_ACCESSOR_RW(32, 0x000, cmd) +#define CPC_Cx_CMD GENMASK(3, 0) +#define CPC_Cx_CMD_CLOCKOFF 0x1 +#define CPC_Cx_CMD_PWRDOWN 0x2 +#define CPC_Cx_CMD_PWRUP 0x3 +#define CPC_Cx_CMD_RESET 0x4 + +/* CPC_Cx_STAT_CONF - Indicates core configuration & state */ +CPC_CX_ACCESSOR_RW(32, 0x008, stat_conf) +#define CPC_Cx_STAT_CONF_PWRUPE BIT(23) +#define CPC_Cx_STAT_CONF_SEQSTATE GENMASK(22, 19) +#define CPC_Cx_STAT_CONF_SEQSTATE_D0 0x0 +#define CPC_Cx_STAT_CONF_SEQSTATE_U0 0x1 +#define CPC_Cx_STAT_CONF_SEQSTATE_U1 0x2 +#define CPC_Cx_STAT_CONF_SEQSTATE_U2 0x3 +#define CPC_Cx_STAT_CONF_SEQSTATE_U3 0x4 +#define CPC_Cx_STAT_CONF_SEQSTATE_U4 0x5 +#define CPC_Cx_STAT_CONF_SEQSTATE_U5 0x6 +#define CPC_Cx_STAT_CONF_SEQSTATE_U6 0x7 +#define CPC_Cx_STAT_CONF_SEQSTATE_D1 0x8 +#define CPC_Cx_STAT_CONF_SEQSTATE_D3 0x9 +#define CPC_Cx_STAT_CONF_SEQSTATE_D2 0xa +#define CPC_Cx_STAT_CONF_CLKGAT_IMPL BIT(17) +#define CPC_Cx_STAT_CONF_PWRDN_IMPL BIT(16) +#define CPC_Cx_STAT_CONF_EJTAG_PROBE BIT(15) + +/* CPC_Cx_OTHER - Configure the core-other register block prior to CM 3 */ +CPC_CX_ACCESSOR_RW(32, 0x010, other) +#define CPC_Cx_OTHER_CORENUM GENMASK(23, 16) + +/* CPC_Cx_VP_STOP - Stop Virtual Processors 
(VPs) within a core from running */ +CPC_CX_ACCESSOR_RW(32, 0x020, vp_stop) + +/* CPC_Cx_VP_START - Start Virtual Processors (VPs) within a core running */ +CPC_CX_ACCESSOR_RW(32, 0x028, vp_run) + +/* CPC_Cx_VP_RUNNING - Indicate which Virtual Processors (VPs) are running */ +CPC_CX_ACCESSOR_RW(32, 0x030, vp_running) + +/* CPC_Cx_CONFIG - Mirrors GCR_Cx_CONFIG */ +CPC_CX_ACCESSOR_RW(32, 0x090, config) #ifdef CONFIG_MIPS_CPC diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h new file mode 100644 index 000000000000..bf02b5070a98 --- /dev/null +++ b/arch/mips/include/asm/mips-cps.h @@ -0,0 +1,240 @@ +/* + * Copyright (C) 2017 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_CPS_H__ +#define __MIPS_ASM_MIPS_CPS_H__ + +#include <linux/io.h> +#include <linux/types.h> + +extern unsigned long __cps_access_bad_size(void) + __compiletime_error("Bad size for CPS accessor"); + +#define CPS_ACCESSOR_A(unit, off, name) \ +static inline void *addr_##unit##_##name(void) \ +{ \ + return mips_##unit##_base + (off); \ +} + +#define CPS_ACCESSOR_R(unit, sz, name) \ +static inline uint##sz##_t read_##unit##_##name(void) \ +{ \ + uint64_t val64; \ + \ + switch (sz) { \ + case 32: \ + return __raw_readl(addr_##unit##_##name()); \ + \ + case 64: \ + if (mips_cm_is64) \ + return __raw_readq(addr_##unit##_##name()); \ + \ + val64 = __raw_readl(addr_##unit##_##name() + 4); \ + val64 <<= 32; \ + val64 |= __raw_readl(addr_##unit##_##name()); \ + return val64; \ + \ + default: \ + return __cps_access_bad_size(); \ + } \ +} + +#define CPS_ACCESSOR_W(unit, sz, name) \ +static inline void write_##unit##_##name(uint##sz##_t val) \ +{ \ + switch (sz) { \ + case 32: \ + __raw_writel(val, addr_##unit##_##name()); \ + break; \ + \ + case 64: \ + if (mips_cm_is64) { \ + __raw_writeq(val, addr_##unit##_##name()); \ + break; \ + } \ + \ + __raw_writel((uint64_t)val >> 32, \ + addr_##unit##_##name() + 4); \ + __raw_writel(val, addr_##unit##_##name()); \ + break; \ + \ + default: \ + __cps_access_bad_size(); \ + break; \ + } \ +} + +#define CPS_ACCESSOR_M(unit, sz, name) \ +static inline void change_##unit##_##name(uint##sz##_t mask, \ + uint##sz##_t val) \ +{ \ + uint##sz##_t reg_val = read_##unit##_##name(); \ + reg_val &= ~mask; \ + reg_val |= val; \ + write_##unit##_##name(reg_val); \ +} \ + \ +static inline void set_##unit##_##name(uint##sz##_t val) \ +{ \ + change_##unit##_##name(val, val); \ +} \ + \ +static inline void clear_##unit##_##name(uint##sz##_t val) \ +{ \ + change_##unit##_##name(val, 0); \ +} + +#define CPS_ACCESSOR_RO(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_R(unit, sz, name) + +#define CPS_ACCESSOR_WO(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_W(unit, sz, name) + +#define CPS_ACCESSOR_RW(unit, sz, off, name) \ + CPS_ACCESSOR_A(unit, off, name) \ + CPS_ACCESSOR_R(unit, sz, name) \ + CPS_ACCESSOR_W(unit, sz, name) \ + CPS_ACCESSOR_M(unit, sz, name) + +#include <asm/mips-cm.h> +#include <asm/mips-cpc.h> +#include <asm/mips-gic.h> + +/** + * mips_cps_numclusters - return the number of clusters present in the system + * + * Returns the number of clusters in the system. 
+ */ +static inline unsigned int mips_cps_numclusters(void) +{ + unsigned int num_clusters; + + if (mips_cm_revision() < CM_REV_CM3_5) + return 1; + + num_clusters = read_gcr_config() & CM_GCR_CONFIG_NUM_CLUSTERS; + num_clusters >>= __ffs(CM_GCR_CONFIG_NUM_CLUSTERS); + return num_clusters; +} + +/** + * mips_cps_cluster_config - return (GCR|CPC)_CONFIG from a cluster + * @cluster: the ID of the cluster whose config we want + * + * Read the value of GCR_CONFIG (or its CPC_CONFIG mirror) from a @cluster. + * + * Returns the value of GCR_CONFIG. + */ +static inline uint64_t mips_cps_cluster_config(unsigned int cluster) +{ + uint64_t config; + + if (mips_cm_revision() < CM_REV_CM3_5) { + /* + * Prior to CM 3.5 we don't have the notion of multiple + * clusters so we can trivially read the GCR_CONFIG register + * within this cluster. + */ + WARN_ON(cluster != 0); + config = read_gcr_config(); + } else { + /* + * From CM 3.5 onwards we read the CPC_CONFIG mirror of + * GCR_CONFIG via the redirect region, since the CPC is always + * powered up allowing us not to need to power up the CM. + */ + mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); + config = read_cpc_redir_config(); + mips_cm_unlock_other(); + } + + return config; +} + +/** + * mips_cps_numcores - return the number of cores present in a cluster + * @cluster: the ID of the cluster whose core count we want + * + * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or + * zero if no Coherence Manager is present. + */ +static inline unsigned int mips_cps_numcores(unsigned int cluster) +{ + if (!mips_cm_present()) + return 0; + + /* Add one before masking to handle 0xff indicating no cores */ + return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; +} + +/** + * mips_cps_numiocu - return the number of IOCUs present in a cluster + * @cluster: the ID of the cluster whose IOCU count we want + * + * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero + * if no Coherence Manager is present. + */ +static inline unsigned int mips_cps_numiocu(unsigned int cluster) +{ + unsigned int num_iocu; + + if (!mips_cm_present()) + return 0; + + num_iocu = mips_cps_cluster_config(cluster) & CM_GCR_CONFIG_NUMIOCU; + num_iocu >>= __ffs(CM_GCR_CONFIG_NUMIOCU); + return num_iocu; +} + +/** + * mips_cps_numvps - return the number of VPs (threads) supported by a core + * @cluster: the ID of the cluster containing the core we want to examine + * @core: the ID of the core whose VP count we want + * + * Returns the number of Virtual Processors (VPs, ie. hardware threads) that + * are supported by the given @core in the given @cluster. If the core or the + * kernel do not support hardware mutlti-threading this returns 1. + */ +static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int core) +{ + unsigned int cfg; + + if (!mips_cm_present()) + return 1; + + if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) + && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp)) + return 1; + + mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); + + if (mips_cm_revision() < CM_REV_CM3_5) { + /* + * Prior to CM 3.5 we can only have one cluster & don't have + * CPC_Cx_CONFIG, so we read GCR_Cx_CONFIG. + */ + cfg = read_gcr_co_config(); + } else { + /* + * From CM 3.5 onwards we read CPC_Cx_CONFIG because the CPC is + * always powered, which allows us to not worry about powering + * up the cluster's CM here. 
+ */ + cfg = read_cpc_co_config(); + } + + mips_cm_unlock_other(); + + return (cfg + 1) & CM_GCR_Cx_CONFIG_PVPE; +} + +#endif /* __MIPS_ASM_MIPS_CPS_H__ */ diff --git a/arch/mips/include/asm/mips-gic.h b/arch/mips/include/asm/mips-gic.h new file mode 100644 index 000000000000..a2badf572632 --- /dev/null +++ b/arch/mips/include/asm/mips-gic.h @@ -0,0 +1,347 @@ +/* + * Copyright (C) 2017 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __MIPS_ASM_MIPS_CPS_H__ +# error Please include asm/mips-cps.h rather than asm/mips-gic.h +#endif + +#ifndef __MIPS_ASM_MIPS_GIC_H__ +#define __MIPS_ASM_MIPS_GIC_H__ + +#include <linux/bitops.h> + +/* The base address of the GIC registers */ +extern void __iomem *mips_gic_base; + +/* Offsets from the GIC base address to various control blocks */ +#define MIPS_GIC_SHARED_OFS 0x00000 +#define MIPS_GIC_SHARED_SZ 0x08000 +#define MIPS_GIC_LOCAL_OFS 0x08000 +#define MIPS_GIC_LOCAL_SZ 0x04000 +#define MIPS_GIC_REDIR_OFS 0x0c000 +#define MIPS_GIC_REDIR_SZ 0x04000 +#define MIPS_GIC_USER_OFS 0x10000 +#define MIPS_GIC_USER_SZ 0x10000 + +/* For read-only shared registers */ +#define GIC_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_SHARED_OFS + off, name) + +/* For read-write shared registers */ +#define GIC_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_SHARED_OFS + off, name) + +/* For read-only local registers */ +#define GIC_VX_ACCESSOR_RO(sz, off, name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_LOCAL_OFS + off, vl_##name) \ + CPS_ACCESSOR_RO(gic, sz, MIPS_GIC_REDIR_OFS + off, vo_##name) + +/* For read-write local registers */ +#define GIC_VX_ACCESSOR_RW(sz, off, name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_LOCAL_OFS + off, vl_##name) \ + CPS_ACCESSOR_RW(gic, sz, MIPS_GIC_REDIR_OFS + off, vo_##name) + +/* For read-only shared per-interrupt registers */ +#define GIC_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ +static inline void __iomem *addr_gic_##name(unsigned int intr) \ +{ \ + return mips_gic_base + (off) + (intr * (stride)); \ +} \ + \ +static inline unsigned int read_gic_##name(unsigned int intr) \ +{ \ + BUILD_BUG_ON(sz != 32); \ + return __raw_readl(addr_gic_##name(intr)); \ +} + +/* For read-write shared per-interrupt registers */ +#define GIC_ACCESSOR_RW_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ + \ +static inline void write_gic_##name(unsigned int intr, \ + unsigned int val) \ +{ \ + BUILD_BUG_ON(sz != 32); \ + __raw_writel(val, addr_gic_##name(intr)); \ +} + +/* For read-only local per-interrupt registers */ +#define GIC_VX_ACCESSOR_RO_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, MIPS_GIC_LOCAL_OFS + off, \ + stride, vl_##name) \ + GIC_ACCESSOR_RO_INTR_REG(sz, MIPS_GIC_REDIR_OFS + off, \ + stride, vo_##name) + +/* For read-write local per-interrupt registers */ +#define GIC_VX_ACCESSOR_RW_INTR_REG(sz, off, stride, name) \ + GIC_ACCESSOR_RW_INTR_REG(sz, MIPS_GIC_LOCAL_OFS + off, \ + stride, vl_##name) \ + GIC_ACCESSOR_RW_INTR_REG(sz, MIPS_GIC_REDIR_OFS + off, \ + stride, vo_##name) + +/* For read-only shared bit-per-interrupt registers */ +#define GIC_ACCESSOR_RO_INTR_BIT(off, name) \ +static inline void __iomem *addr_gic_##name(void) \ +{ \ + return mips_gic_base + 
(off); \ +} \ + \ +static inline unsigned int read_gic_##name(unsigned int intr) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + unsigned int val; \ + \ + if (mips_cm_is64) { \ + addr += (intr / 64) * sizeof(uint64_t); \ + val = __raw_readq(addr) >> intr % 64; \ + } else { \ + addr += (intr / 32) * sizeof(uint32_t); \ + val = __raw_readl(addr) >> intr % 32; \ + } \ + \ + return val & 0x1; \ +} + +/* For read-write shared bit-per-interrupt registers */ +#define GIC_ACCESSOR_RW_INTR_BIT(off, name) \ + GIC_ACCESSOR_RO_INTR_BIT(off, name) \ + \ +static inline void write_gic_##name(unsigned int intr) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + \ + if (mips_cm_is64) { \ + addr += (intr / 64) * sizeof(uint64_t); \ + __raw_writeq(BIT(intr % 64), addr); \ + } else { \ + addr += (intr / 32) * sizeof(uint32_t); \ + __raw_writel(BIT(intr % 32), addr); \ + } \ +} \ + \ +static inline void change_gic_##name(unsigned int intr, \ + unsigned int val) \ +{ \ + void __iomem *addr = addr_gic_##name(); \ + \ + if (mips_cm_is64) { \ + uint64_t _val; \ + \ + addr += (intr / 64) * sizeof(uint64_t); \ + _val = __raw_readq(addr); \ + _val &= ~BIT_ULL(intr % 64); \ + _val |= (uint64_t)val << (intr % 64); \ + __raw_writeq(_val, addr); \ + } else { \ + uint32_t _val; \ + \ + addr += (intr / 32) * sizeof(uint32_t); \ + _val = __raw_readl(addr); \ + _val &= ~BIT(intr % 32); \ + _val |= val << (intr % 32); \ + __raw_writel(_val, addr); \ + } \ +} + +/* For read-only local bit-per-interrupt registers */ +#define GIC_VX_ACCESSOR_RO_INTR_BIT(sz, off, name) \ + GIC_ACCESSOR_RO_INTR_BIT(sz, MIPS_GIC_LOCAL_OFS + off, \ + vl_##name) \ + GIC_ACCESSOR_RO_INTR_BIT(sz, MIPS_GIC_REDIR_OFS + off, \ + vo_##name) + +/* For read-write local bit-per-interrupt registers */ +#define GIC_VX_ACCESSOR_RW_INTR_BIT(sz, off, name) \ + GIC_ACCESSOR_RW_INTR_BIT(sz, MIPS_GIC_LOCAL_OFS + off, \ + vl_##name) \ + GIC_ACCESSOR_RW_INTR_BIT(sz, MIPS_GIC_REDIR_OFS + off, \ + vo_##name) + +/* GIC_SH_CONFIG - Information about the GIC configuration */ +GIC_ACCESSOR_RW(32, 0x000, config) +#define GIC_CONFIG_COUNTSTOP BIT(28) +#define GIC_CONFIG_COUNTBITS GENMASK(27, 24) +#define GIC_CONFIG_NUMINTERRUPTS GENMASK(23, 16) +#define GIC_CONFIG_PVPS GENMASK(6, 0) + +/* GIC_SH_COUNTER - Shared global counter value */ +GIC_ACCESSOR_RW(64, 0x010, counter) +GIC_ACCESSOR_RW(32, 0x010, counter_32l) +GIC_ACCESSOR_RW(32, 0x014, counter_32h) + +/* GIC_SH_POL_* - Configures interrupt polarity */ +GIC_ACCESSOR_RW_INTR_BIT(0x100, pol) +#define GIC_POL_ACTIVE_LOW 0 /* when level triggered */ +#define GIC_POL_ACTIVE_HIGH 1 /* when level triggered */ +#define GIC_POL_FALLING_EDGE 0 /* when single-edge triggered */ +#define GIC_POL_RISING_EDGE 1 /* when single-edge triggered */ + +/* GIC_SH_TRIG_* - Configures interrupts to be edge or level triggered */ +GIC_ACCESSOR_RW_INTR_BIT(0x180, trig) +#define GIC_TRIG_LEVEL 0 +#define GIC_TRIG_EDGE 1 + +/* GIC_SH_DUAL_* - Configures whether interrupts trigger on both edges */ +GIC_ACCESSOR_RW_INTR_BIT(0x200, dual) +#define GIC_DUAL_SINGLE 0 /* when edge-triggered */ +#define GIC_DUAL_DUAL 1 /* when edge-triggered */ + +/* GIC_SH_WEDGE - Write an 'edge', ie. 
trigger an interrupt */ +GIC_ACCESSOR_RW(32, 0x280, wedge) +#define GIC_WEDGE_RW BIT(31) +#define GIC_WEDGE_INTR GENMASK(7, 0) + +/* GIC_SH_RMASK_* - Reset/clear shared interrupt mask bits */ +GIC_ACCESSOR_RW_INTR_BIT(0x300, rmask) + +/* GIC_SH_SMASK_* - Set shared interrupt mask bits */ +GIC_ACCESSOR_RW_INTR_BIT(0x380, smask) + +/* GIC_SH_MASK_* - Read the current shared interrupt mask */ +GIC_ACCESSOR_RO_INTR_BIT(0x400, mask) + +/* GIC_SH_PEND_* - Read currently pending shared interrupts */ +GIC_ACCESSOR_RO_INTR_BIT(0x480, pend) + +/* GIC_SH_MAPx_PIN - Map shared interrupts to a particular CPU pin */ +GIC_ACCESSOR_RW_INTR_REG(32, 0x500, 0x4, map_pin) +#define GIC_MAP_PIN_MAP_TO_PIN BIT(31) +#define GIC_MAP_PIN_MAP_TO_NMI BIT(30) +#define GIC_MAP_PIN_MAP GENMASK(5, 0) + +/* GIC_SH_MAPx_VP - Map shared interrupts to a particular Virtual Processor */ +GIC_ACCESSOR_RW_INTR_REG(32, 0x2000, 0x20, map_vp) + +/* GIC_Vx_CTL - VP-level interrupt control */ +GIC_VX_ACCESSOR_RW(32, 0x000, ctl) +#define GIC_VX_CTL_FDC_ROUTABLE BIT(4) +#define GIC_VX_CTL_SWINT_ROUTABLE BIT(3) +#define GIC_VX_CTL_PERFCNT_ROUTABLE BIT(2) +#define GIC_VX_CTL_TIMER_ROUTABLE BIT(1) +#define GIC_VX_CTL_EIC BIT(0) + +/* GIC_Vx_PEND - Read currently pending local interrupts */ +GIC_VX_ACCESSOR_RO(32, 0x004, pend) + +/* GIC_Vx_MASK - Read the current local interrupt mask */ +GIC_VX_ACCESSOR_RO(32, 0x008, mask) + +/* GIC_Vx_RMASK - Reset/clear local interrupt mask bits */ +GIC_VX_ACCESSOR_RW(32, 0x00c, rmask) + +/* GIC_Vx_SMASK - Set local interrupt mask bits */ +GIC_VX_ACCESSOR_RW(32, 0x010, smask) + +/* GIC_Vx_*_MAP - Route local interrupts to the desired pins */ +GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x040, 0x4, map) + +/* GIC_Vx_WD_MAP - Route the local watchdog timer interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x040, wd_map) + +/* GIC_Vx_COMPARE_MAP - Route the local count/compare interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x044, compare_map) + +/* GIC_Vx_TIMER_MAP - Route the local CPU timer (cp0 count/compare) interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x048, timer_map) + +/* GIC_Vx_FDC_MAP - Route the local fast debug channel interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x04c, fdc_map) + +/* GIC_Vx_PERFCTR_MAP - Route the local performance counter interrupt */ +GIC_VX_ACCESSOR_RW(32, 0x050, perfctr_map) + +/* GIC_Vx_SWINT0_MAP - Route the local software interrupt 0 */ +GIC_VX_ACCESSOR_RW(32, 0x054, swint0_map) + +/* GIC_Vx_SWINT1_MAP - Route the local software interrupt 1 */ +GIC_VX_ACCESSOR_RW(32, 0x058, swint1_map) + +/* GIC_Vx_OTHER - Configure access to other Virtual Processor registers */ +GIC_VX_ACCESSOR_RW(32, 0x080, other) +#define GIC_VX_OTHER_VPNUM GENMASK(5, 0) + +/* GIC_Vx_IDENT - Retrieve the local Virtual Processor's ID */ +GIC_VX_ACCESSOR_RO(32, 0x088, ident) +#define GIC_VX_IDENT_VPNUM GENMASK(5, 0) + +/* GIC_Vx_COMPARE - Value to compare with GIC_SH_COUNTER */ +GIC_VX_ACCESSOR_RW(64, 0x0a0, compare) + +/* GIC_Vx_EIC_SHADOW_SET_BASE - Set shadow register set for each interrupt */ +GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x100, 0x4, eic_shadow_set) + +/** + * enum mips_gic_local_interrupt - GIC local interrupts + * @GIC_LOCAL_INT_WD: GIC watchdog timer interrupt + * @GIC_LOCAL_INT_COMPARE: GIC count/compare interrupt + * @GIC_LOCAL_INT_TIMER: CP0 count/compare interrupt + * @GIC_LOCAL_INT_PERFCTR: Performance counter interrupt + * @GIC_LOCAL_INT_SWINT0: Software interrupt 0 + * @GIC_LOCAL_INT_SWINT1: Software interrupt 1 + * @GIC_LOCAL_INT_FDC: Fast debug channel interrupt + * @GIC_NUM_LOCAL_INTRS: The number of local interrupts + * + * 
Enumerates interrupts provided by the GIC that are local to a VP. + */ +enum mips_gic_local_interrupt { + GIC_LOCAL_INT_WD, + GIC_LOCAL_INT_COMPARE, + GIC_LOCAL_INT_TIMER, + GIC_LOCAL_INT_PERFCTR, + GIC_LOCAL_INT_SWINT0, + GIC_LOCAL_INT_SWINT1, + GIC_LOCAL_INT_FDC, + GIC_NUM_LOCAL_INTRS +}; + +/** + * mips_gic_present() - Determine whether a GIC is present + * + * Determines whether a MIPS Global Interrupt Controller (GIC) is present in + * the system that the kernel is running on. + * + * Return true if a GIC is present, else false. + */ +static inline bool mips_gic_present(void) +{ + return IS_ENABLED(CONFIG_MIPS_GIC) && mips_gic_base; +} + +/** + * gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq + * + * Determine the virq number to use for the coprocessor 0 count/compare + * interrupt, which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_compare_int(void); + +/** + * gic_get_c0_perfcount_int() - Return performance counter interrupt virq + * + * Determine the virq number to use for CPU performance counter interrupts, + * which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_perfcount_int(void); + +/** + * gic_get_c0_fdc_int() - Return fast debug channel interrupt virq + * + * Determine the virq number to use for fast debug channel (FDC) interrupts, + * which may be routed via the GIC. + * + * Returns the virq number or a negative error number. + */ +extern int gic_get_c0_fdc_int(void); + +#endif /* __MIPS_ASM_MIPS_GIC_H__ */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index dbb0eceda2c6..e4ed1bc9a734 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -48,6 +48,7 @@ #define CP0_ENTRYLO0 $2 #define CP0_ENTRYLO1 $3 #define CP0_CONF $3 +#define CP0_GLOBALNUMBER $3, 1 #define CP0_CONTEXT $4 #define CP0_PAGEMASK $5 #define CP0_SEGCTL0 $5, 2 @@ -148,6 +149,16 @@ #define MIPS_ENTRYLO_RI (_ULCAST_(1) << (BITS_PER_LONG - 1)) /* + * MIPSr6+ GlobalNumber register definitions + */ +#define MIPS_GLOBALNUMBER_VP_SHF 0 +#define MIPS_GLOBALNUMBER_VP (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_VP_SHF) +#define MIPS_GLOBALNUMBER_CORE_SHF 8 +#define MIPS_GLOBALNUMBER_CORE (_ULCAST_(0xff) << MIPS_GLOBALNUMBER_CORE_SHF) +#define MIPS_GLOBALNUMBER_CLUSTER_SHF 16 +#define MIPS_GLOBALNUMBER_CLUSTER (_ULCAST_(0xf) << MIPS_GLOBALNUMBER_CLUSTER_SHF) + +/* * Values for PageMask register */ #ifdef CONFIG_CPU_VR41XX @@ -1446,6 +1457,8 @@ do { \ #define read_c0_conf() __read_32bit_c0_register($3, 0) #define write_c0_conf(val) __write_32bit_c0_register($3, 0, val) +#define read_c0_globalnumber() __read_32bit_c0_register($3, 1) + #define read_c0_context() __read_ulong_c0_register($4, 0) #define write_c0_context(val) __write_ulong_c0_register($4, 0, val) diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index e51add184717..06552a965cf4 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h @@ -114,8 +114,6 @@ search_module_dbetables(unsigned long addr) #define MODULE_PROC_FAMILY "R5432 " #elif defined CONFIG_CPU_R5500 #define MODULE_PROC_FAMILY "R5500 " -#elif defined CONFIG_CPU_R6000 -#define MODULE_PROC_FAMILY "R6000 " #elif defined CONFIG_CPU_NEVADA #define MODULE_PROC_FAMILY "NEVADA " #elif defined CONFIG_CPU_R8000 diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h index e0717d10e650..a6e6cbebe046 100644 
--- a/arch/mips/include/asm/netlogic/common.h +++ b/arch/mips/include/asm/netlogic/common.h @@ -84,7 +84,7 @@ nlm_set_nmi_handler(void *handler) */ void nlm_init_boot_cpu(void); unsigned int nlm_get_cpu_frequency(void); -extern struct plat_smp_ops nlm_smp_ops; +extern const struct plat_smp_ops nlm_smp_ops; extern char nlm_reset_entry[], nlm_reset_entry_end[]; /* SWIOTLB */ diff --git a/arch/mips/include/asm/octeon/cvmx-boot-vector.h b/arch/mips/include/asm/octeon/cvmx-boot-vector.h new file mode 100644 index 000000000000..8db08241d53c --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-boot-vector.h @@ -0,0 +1,53 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003-2017 Cavium, Inc. + */ + +#ifndef __CVMX_BOOT_VECTOR_H__ +#define __CVMX_BOOT_VECTOR_H__ + +#include <asm/octeon/octeon.h> + +/* + * The boot vector table is made up of an array of 1024 elements of + * struct cvmx_boot_vector_element. There is one entry for each + * possible MIPS CPUNum, indexed by the CPUNum. + * + * Once cvmx_boot_vector_get() returns a non-NULL value (indicating + * success), NMI to a core will cause execution to transfer to the + * target_ptr location for that core's entry in the vector table. + * + * The struct cvmx_boot_vector_element fields app0, app1, and app2 can + * be used by the application that has set the target_ptr in any + * application specific manner, they are not touched by the vectoring + * code. + * + * The boot vector code clobbers the CP0_DESAVE register, and on + * OCTEON II and later CPUs also clobbers CP0_KScratch2. All GP + * registers are preserved, except on pre-OCTEON II CPUs, where k1 is + * clobbered. + * + */ + + +/* + * Applications install the boot bus code in cvmx-boot-vector.c, which + * uses this magic: + */ +#define OCTEON_BOOT_MOVEABLE_MAGIC1 0xdb00110ad358eacdull + +struct cvmx_boot_vector_element { + /* kseg0 or xkphys address of target code. */ + uint64_t target_ptr; + /* Three application specific arguments. */ + uint64_t app0; + uint64_t app1; + uint64_t app2; +}; + +struct cvmx_boot_vector_element *cvmx_boot_vector_get(void); + +#endif /* __CVMX_BOOT_VECTOR_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h index 374562507d0b..72d2e403a6e4 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootmem.h +++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h @@ -255,6 +255,34 @@ extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name); +/** + * Allocate if needed a block of memory from a specific range of the + * free list that was passed to the application by the bootloader, and + * assign it a name in the global named block table. (part of the + * cvmx_bootmem_descriptor_t structure) Named blocks can later be + * freed. If the requested name block is already allocated, return + * the pointer to block of memory. If request cannot be satisfied + * within the address range specified, NULL is returned + * + * @param size Size in bytes of block to allocate + * @param min_addr minimum address of range + * @param max_addr maximum address of range + * @param align Alignment of memory to be allocated. 
(must be a power of 2) + * @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes + * @param init Initialization function + * + * The initialization function is optional, if omitted the named block + * is initialized to all zeros when it is created, i.e. once. + * + * @return pointer to block of memory, NULL on error + */ +void *cvmx_bootmem_alloc_named_range_once(uint64_t size, + uint64_t min_addr, + uint64_t max_addr, + uint64_t align, + char *name, + void (*init) (void *)); + extern int cvmx_bootmem_free_named(char *name); /** diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h index 0dd0e40c96d4..6e61792d9248 100644 --- a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h @@ -128,6 +128,7 @@ static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset) case OCTEON_CN52XX & OCTEON_FAMILY_MASK: case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: case OCTEON_CN61XX & OCTEON_FAMILY_MASK: + case OCTEON_CN70XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: case OCTEON_CN50XX & OCTEON_FAMILY_MASK: @@ -143,6 +144,10 @@ static inline uint64_t CVMX_CIU_PP_POKEX(unsigned long offset) return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070100100200ull) + (offset) * 8; + case OCTEON_CNF75XX & OCTEON_FAMILY_MASK: + case OCTEON_CN73XX & OCTEON_FAMILY_MASK: + case OCTEON_CN78XX & OCTEON_FAMILY_MASK: + return CVMX_ADD_IO_SEG(0x0001010000030000ull) + (offset) * 8; } return CVMX_ADD_IO_SEG(0x0001070000000580ull) + (offset) * 8; } @@ -180,6 +185,7 @@ static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset) case OCTEON_CN52XX & OCTEON_FAMILY_MASK: case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: case OCTEON_CN61XX & OCTEON_FAMILY_MASK: + case OCTEON_CN70XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; case OCTEON_CN31XX & OCTEON_FAMILY_MASK: case OCTEON_CN50XX & OCTEON_FAMILY_MASK: @@ -195,6 +201,10 @@ static inline uint64_t CVMX_CIU_WDOGX(unsigned long offset) return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; case OCTEON_CN68XX & OCTEON_FAMILY_MASK: return CVMX_ADD_IO_SEG(0x0001070100100000ull) + (offset) * 8; + case OCTEON_CNF75XX & OCTEON_FAMILY_MASK: + case OCTEON_CN73XX & OCTEON_FAMILY_MASK: + case OCTEON_CN78XX & OCTEON_FAMILY_MASK: + return CVMX_ADD_IO_SEG(0x0001010000020000ull) + (offset) * 8; } return CVMX_ADD_IO_SEG(0x0001070000000500ull) + (offset) * 8; } diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index e638735cc3ac..205ab2ce10f8 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -357,6 +357,34 @@ static inline unsigned int cvmx_get_local_core_num(void) return cvmx_get_core_num() & ((1 << CVMX_NODE_NO_SHIFT) - 1); } +#define CVMX_NODE_BITS (2) /* Number of bits to define a node */ +#define CVMX_MAX_NODES (1 << CVMX_NODE_BITS) +#define CVMX_NODE_IO_SHIFT (36) +#define CVMX_NODE_MEM_SHIFT (40) +#define CVMX_NODE_IO_MASK ((uint64_t)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT) + +static inline void cvmx_write_csr_node(uint64_t node, uint64_t csr_addr, + uint64_t val) +{ + uint64_t composite_csr_addr, node_addr; + + node_addr = (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT; + composite_csr_addr = (csr_addr & ~CVMX_NODE_IO_MASK) | node_addr; + + cvmx_write64_uint64(composite_csr_addr, val); + if (((csr_addr >> 40) & 
0x7ffff) == (0x118)) + cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT | node_addr); +} + +static inline uint64_t cvmx_read_csr_node(uint64_t node, uint64_t csr_addr) +{ + uint64_t node_addr; + + node_addr = (csr_addr & ~CVMX_NODE_IO_MASK) | + (node & CVMX_NODE_MASK) << CVMX_NODE_IO_SHIFT; + return cvmx_read_csr(node_addr); +} + /** * Returns the number of bits set in the provided value. * Simple wrapper for POP instruction. diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index 07c0516ef4d5..c99c4b6a79f4 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -362,4 +362,6 @@ extern void octeon_fixup_irqs(void); extern struct semaphore octeon_bootbus_sem; +struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block); + #endif /* __ASM_OCTEON_OCTEON_H */ diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index db7c322f057f..53b2cb8e5966 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -13,7 +13,7 @@ #include <linux/errno.h> -#include <asm/mips-cm.h> +#include <asm/mips-cps.h> #ifdef CONFIG_SMP @@ -26,7 +26,7 @@ struct plat_smp_ops { void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); void (*init_secondary)(void); void (*smp_finish)(void); - void (*boot_secondary)(int cpu, struct task_struct *idle); + int (*boot_secondary)(int cpu, struct task_struct *idle); void (*smp_setup)(void); void (*prepare_cpus)(unsigned int max_cpus); #ifdef CONFIG_HOTPLUG_CPU @@ -35,11 +35,11 @@ struct plat_smp_ops { #endif }; -extern void register_smp_ops(struct plat_smp_ops *ops); +extern void register_smp_ops(const struct plat_smp_ops *ops); static inline void plat_smp_setup(void) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->smp_setup(); } @@ -57,7 +57,7 @@ static inline void plat_smp_setup(void) /* UP, nothing to do ... 
*/ } -static inline void register_smp_ops(struct plat_smp_ops *ops) +static inline void register_smp_ops(const struct plat_smp_ops *ops) { } @@ -66,7 +66,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops) static inline int register_up_smp_ops(void) { #ifdef CONFIG_SMP_UP - extern struct plat_smp_ops up_smp_ops; + extern const struct plat_smp_ops up_smp_ops; register_smp_ops(&up_smp_ops); @@ -79,7 +79,7 @@ static inline int register_up_smp_ops(void) static inline int register_cmp_smp_ops(void) { #ifdef CONFIG_MIPS_CMP - extern struct plat_smp_ops cmp_smp_ops; + extern const struct plat_smp_ops cmp_smp_ops; if (!mips_cm_present()) return -ENODEV; @@ -95,7 +95,7 @@ static inline int register_cmp_smp_ops(void) static inline int register_vsmp_smp_ops(void) { #ifdef CONFIG_MIPS_MT_SMP - extern struct plat_smp_ops vsmp_smp_ops; + extern const struct plat_smp_ops vsmp_smp_ops; register_smp_ops(&vsmp_smp_ops); diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index bab3d41e5987..9e494f8d9c03 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -58,7 +58,7 @@ extern void calculate_cpu_foreign_map(void); */ static inline void smp_send_reschedule(int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); } @@ -66,14 +66,14 @@ static inline void smp_send_reschedule(int cpu) #ifdef CONFIG_HOTPLUG_CPU static inline int __cpu_disable(void) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ return mp_ops->cpu_disable(); } static inline void __cpu_die(unsigned int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->cpu_die(cpu); } @@ -97,14 +97,14 @@ int mips_smp_ipi_free(const struct cpumask *mask); static inline void arch_send_call_function_single_ipi(int cpu) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION); } static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) { - extern struct plat_smp_ops *mp_ops; /* private */ + extern const struct plat_smp_ops *mp_ops; /* private */ mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); } diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index eaa5a4d7d5e5..5d3563c55e0c 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -19,20 +19,43 @@ #include <asm/asm-offsets.h> #include <asm/thread_info.h> +/* Make the addition of cfi info a little easier. 
*/ + .macro cfi_rel_offset reg offset=0 docfi=0 + .if \docfi + .cfi_rel_offset \reg, \offset + .endif + .endm + + .macro cfi_st reg offset=0 docfi=0 + LONG_S \reg, \offset(sp) + cfi_rel_offset \reg, \offset, \docfi + .endm + + .macro cfi_restore reg offset=0 docfi=0 + .if \docfi + .cfi_restore \reg + .endif + .endm + + .macro cfi_ld reg offset=0 docfi=0 + LONG_L \reg, \offset(sp) + cfi_restore \reg \offset \docfi + .endm + #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) #define STATMASK 0x3f #else #define STATMASK 0x1f #endif - .macro SAVE_AT + .macro SAVE_AT docfi=0 .set push .set noat - LONG_S $1, PT_R1(sp) + cfi_st $1, PT_R1, \docfi .set pop .endm - .macro SAVE_TEMP + .macro SAVE_TEMP docfi=0 #ifdef CONFIG_CPU_HAS_SMARTMIPS mflhxu v1 LONG_S v1, PT_LO(sp) @@ -44,20 +67,20 @@ mfhi v1 #endif #ifdef CONFIG_32BIT - LONG_S $8, PT_R8(sp) - LONG_S $9, PT_R9(sp) + cfi_st $8, PT_R8, \docfi + cfi_st $9, PT_R9, \docfi #endif - LONG_S $10, PT_R10(sp) - LONG_S $11, PT_R11(sp) - LONG_S $12, PT_R12(sp) + cfi_st $10, PT_R10, \docfi + cfi_st $11, PT_R11, \docfi + cfi_st $12, PT_R12, \docfi #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_HI(sp) mflo v1 #endif - LONG_S $13, PT_R13(sp) - LONG_S $14, PT_R14(sp) - LONG_S $15, PT_R15(sp) - LONG_S $24, PT_R24(sp) + cfi_st $13, PT_R13, \docfi + cfi_st $14, PT_R14, \docfi + cfi_st $15, PT_R15, \docfi + cfi_st $24, PT_R24, \docfi #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_LO(sp) #endif @@ -71,20 +94,28 @@ #endif .endm - .macro SAVE_STATIC - LONG_S $16, PT_R16(sp) - LONG_S $17, PT_R17(sp) - LONG_S $18, PT_R18(sp) - LONG_S $19, PT_R19(sp) - LONG_S $20, PT_R20(sp) - LONG_S $21, PT_R21(sp) - LONG_S $22, PT_R22(sp) - LONG_S $23, PT_R23(sp) - LONG_S $30, PT_R30(sp) + .macro SAVE_STATIC docfi=0 + cfi_st $16, PT_R16, \docfi + cfi_st $17, PT_R17, \docfi + cfi_st $18, PT_R18, \docfi + cfi_st $19, PT_R19, \docfi + cfi_st $20, PT_R20, \docfi + cfi_st $21, PT_R21, \docfi + cfi_st $22, PT_R22, \docfi + cfi_st $23, PT_R23, \docfi + cfi_st $30, PT_R30, \docfi .endm +/* + * get_saved_sp returns the SP for the current CPU by looking in the + * kernelsp array for it. If tosp is set, it stores the current sp in + * k0 and loads the new value in sp. If not, it clobbers k0 and + * stores the new value in k1, leaving sp unaffected. 
+ */ #ifdef CONFIG_SMP - .macro get_saved_sp /* SMP variation */ + + /* SMP variation */ + .macro get_saved_sp docfi=0 tosp=0 ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(kernelsp) @@ -97,7 +128,15 @@ #endif LONG_SRL k0, SMP_CPUID_PTRSHIFT LONG_ADDU k1, k0 + .if \tosp + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif + LONG_L sp, %lo(kernelsp)(k1) + .else LONG_L k1, %lo(kernelsp)(k1) + .endif .endm .macro set_saved_sp stackp temp temp2 @@ -106,7 +145,8 @@ LONG_S \stackp, kernelsp(\temp) .endm #else /* !CONFIG_SMP */ - .macro get_saved_sp /* Uniprocessor variation */ + /* Uniprocessor variation */ + .macro get_saved_sp docfi=0 tosp=0 #ifdef CONFIG_CPU_JUMP_WORKAROUNDS /* * Clear BTB (branch target buffer), forbid RAS (return address @@ -135,7 +175,15 @@ daddiu k1, %hi(kernelsp) dsll k1, k1, 16 #endif + .if \tosp + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif + LONG_L sp, %lo(kernelsp)(k1) + .else LONG_L k1, %lo(kernelsp)(k1) + .endif .endm .macro set_saved_sp stackp temp temp2 @@ -143,7 +191,7 @@ .endm #endif - .macro SAVE_SOME + .macro SAVE_SOME docfi=0 .set push .set noat .set reorder @@ -151,7 +199,6 @@ sll k0, 3 /* extract cu0 bit */ .set noreorder bltz k0, 8f - move k1, sp #ifdef CONFIG_EVA /* * Flush interAptiv's Return Prediction Stack (RPS) by writing @@ -178,20 +225,26 @@ MTC0 k0, CP0_ENTRYHI #endif .set reorder + move k0, sp + .if \docfi + .cfi_register sp, k0 + .endif /* Called from user mode, new stack. */ - get_saved_sp -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS -8: move k0, sp - PTR_SUBU sp, k1, PT_SIZE -#else - .set at=k0 -8: PTR_SUBU k1, PT_SIZE + get_saved_sp docfi=\docfi tosp=1 +8: +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS + .set at=k1 +#endif + PTR_SUBU sp, PT_SIZE +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS .set noat - move k0, sp - move sp, k1 #endif - LONG_S k0, PT_R29(sp) - LONG_S $3, PT_R3(sp) + .if \docfi + .cfi_def_cfa sp,0 + .endif + cfi_st k0, PT_R29, \docfi + cfi_rel_offset sp, PT_R29, \docfi + cfi_st v1, PT_R3, \docfi /* * You might think that you don't need to save $0, * but the FPU emulator and gdb remote debug stub @@ -199,23 +252,26 @@ */ LONG_S $0, PT_R0(sp) mfc0 v1, CP0_STATUS - LONG_S $2, PT_R2(sp) + cfi_st v0, PT_R2, \docfi LONG_S v1, PT_STATUS(sp) - LONG_S $4, PT_R4(sp) + cfi_st $4, PT_R4, \docfi mfc0 v1, CP0_CAUSE - LONG_S $5, PT_R5(sp) + cfi_st $5, PT_R5, \docfi LONG_S v1, PT_CAUSE(sp) - LONG_S $6, PT_R6(sp) - MFC0 v1, CP0_EPC - LONG_S $7, PT_R7(sp) + cfi_st $6, PT_R6, \docfi + cfi_st ra, PT_R31, \docfi + MFC0 ra, CP0_EPC + cfi_st $7, PT_R7, \docfi #ifdef CONFIG_64BIT - LONG_S $8, PT_R8(sp) - LONG_S $9, PT_R9(sp) + cfi_st $8, PT_R8, \docfi + cfi_st $9, PT_R9, \docfi #endif - LONG_S v1, PT_EPC(sp) - LONG_S $25, PT_R25(sp) - LONG_S $28, PT_R28(sp) - LONG_S $31, PT_R31(sp) + LONG_S ra, PT_EPC(sp) + .if \docfi + .cfi_rel_offset ra, PT_EPC + .endif + cfi_st $25, PT_R25, \docfi + cfi_st $28, PT_R28, \docfi /* Set thread_info if we're coming from user mode */ mfc0 k0, CP0_STATUS @@ -232,21 +288,21 @@ .set pop .endm - .macro SAVE_ALL - SAVE_SOME - SAVE_AT - SAVE_TEMP - SAVE_STATIC + .macro SAVE_ALL docfi=0 + SAVE_SOME \docfi + SAVE_AT \docfi + SAVE_TEMP \docfi + SAVE_STATIC \docfi .endm - .macro RESTORE_AT + .macro RESTORE_AT docfi=0 .set push .set noat - LONG_L $1, PT_R1(sp) + cfi_ld $1, PT_R1, \docfi .set pop .endm - .macro RESTORE_TEMP + .macro RESTORE_TEMP docfi=0 #ifdef CONFIG_CPU_CAVIUM_OCTEON /* Restore the Octeon multiplier state */ jal octeon_mult_restore @@ -265,33 +321,37 @@ mthi $24 
#endif #ifdef CONFIG_32BIT - LONG_L $8, PT_R8(sp) - LONG_L $9, PT_R9(sp) + cfi_ld $8, PT_R8, \docfi + cfi_ld $9, PT_R9, \docfi #endif - LONG_L $10, PT_R10(sp) - LONG_L $11, PT_R11(sp) - LONG_L $12, PT_R12(sp) - LONG_L $13, PT_R13(sp) - LONG_L $14, PT_R14(sp) - LONG_L $15, PT_R15(sp) - LONG_L $24, PT_R24(sp) + cfi_ld $10, PT_R10, \docfi + cfi_ld $11, PT_R11, \docfi + cfi_ld $12, PT_R12, \docfi + cfi_ld $13, PT_R13, \docfi + cfi_ld $14, PT_R14, \docfi + cfi_ld $15, PT_R15, \docfi + cfi_ld $24, PT_R24, \docfi .endm - .macro RESTORE_STATIC - LONG_L $16, PT_R16(sp) - LONG_L $17, PT_R17(sp) - LONG_L $18, PT_R18(sp) - LONG_L $19, PT_R19(sp) - LONG_L $20, PT_R20(sp) - LONG_L $21, PT_R21(sp) - LONG_L $22, PT_R22(sp) - LONG_L $23, PT_R23(sp) - LONG_L $30, PT_R30(sp) + .macro RESTORE_STATIC docfi=0 + cfi_ld $16, PT_R16, \docfi + cfi_ld $17, PT_R17, \docfi + cfi_ld $18, PT_R18, \docfi + cfi_ld $19, PT_R19, \docfi + cfi_ld $20, PT_R20, \docfi + cfi_ld $21, PT_R21, \docfi + cfi_ld $22, PT_R22, \docfi + cfi_ld $23, PT_R23, \docfi + cfi_ld $30, PT_R30, \docfi + .endm + + .macro RESTORE_SP docfi=0 + cfi_ld sp, PT_R29, \docfi .endm #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) - .macro RESTORE_SOME + .macro RESTORE_SOME docfi=0 .set push .set reorder .set noat @@ -306,30 +366,30 @@ and v0, v1 or v0, a0 mtc0 v0, CP0_STATUS - LONG_L $31, PT_R31(sp) - LONG_L $28, PT_R28(sp) - LONG_L $25, PT_R25(sp) - LONG_L $7, PT_R7(sp) - LONG_L $6, PT_R6(sp) - LONG_L $5, PT_R5(sp) - LONG_L $4, PT_R4(sp) - LONG_L $3, PT_R3(sp) - LONG_L $2, PT_R2(sp) + cfi_ld $31, PT_R31, \docfi + cfi_ld $28, PT_R28, \docfi + cfi_ld $25, PT_R25, \docfi + cfi_ld $7, PT_R7, \docfi + cfi_ld $6, PT_R6, \docfi + cfi_ld $5, PT_R5, \docfi + cfi_ld $4, PT_R4, \docfi + cfi_ld $3, PT_R3, \docfi + cfi_ld $2, PT_R2, \docfi .set pop .endm - .macro RESTORE_SP_AND_RET + .macro RESTORE_SP_AND_RET docfi=0 .set push .set noreorder LONG_L k0, PT_EPC(sp) - LONG_L sp, PT_R29(sp) + RESTORE_SP \docfi jr k0 rfe .set pop .endm #else - .macro RESTORE_SOME + .macro RESTORE_SOME docfi=0 .set push .set reorder .set noat @@ -346,24 +406,24 @@ mtc0 v0, CP0_STATUS LONG_L v1, PT_EPC(sp) MTC0 v1, CP0_EPC - LONG_L $31, PT_R31(sp) - LONG_L $28, PT_R28(sp) - LONG_L $25, PT_R25(sp) + cfi_ld $31, PT_R31, \docfi + cfi_ld $28, PT_R28, \docfi + cfi_ld $25, PT_R25, \docfi #ifdef CONFIG_64BIT - LONG_L $8, PT_R8(sp) - LONG_L $9, PT_R9(sp) + cfi_ld $8, PT_R8, \docfi + cfi_ld $9, PT_R9, \docfi #endif - LONG_L $7, PT_R7(sp) - LONG_L $6, PT_R6(sp) - LONG_L $5, PT_R5(sp) - LONG_L $4, PT_R4(sp) - LONG_L $3, PT_R3(sp) - LONG_L $2, PT_R2(sp) + cfi_ld $7, PT_R7, \docfi + cfi_ld $6, PT_R6, \docfi + cfi_ld $5, PT_R5, \docfi + cfi_ld $4, PT_R4, \docfi + cfi_ld $3, PT_R3, \docfi + cfi_ld $2, PT_R2, \docfi .set pop .endm - .macro RESTORE_SP_AND_RET - LONG_L sp, PT_R29(sp) + .macro RESTORE_SP_AND_RET docfi=0 + RESTORE_SP \docfi #ifdef CONFIG_CPU_MIPSR6 eretnc #else @@ -375,16 +435,12 @@ #endif - .macro RESTORE_SP - LONG_L sp, PT_R29(sp) - .endm - - .macro RESTORE_ALL - RESTORE_TEMP - RESTORE_STATIC - RESTORE_AT - RESTORE_SOME - RESTORE_SP + .macro RESTORE_ALL docfi=0 + RESTORE_TEMP \docfi + RESTORE_STATIC \docfi + RESTORE_AT \docfi + RESTORE_SOME \docfi + RESTORE_SP \docfi .endm /* diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h index 780ee2c2a2ac..10c4e9c84448 100644 --- a/arch/mips/include/asm/stacktrace.h +++ b/arch/mips/include/asm/stacktrace.h @@ -2,6 +2,8 @@ #define _ASM_STACKTRACE_H #include <asm/ptrace.h> +#include <asm/asm.h> +#include 
<linux/stringify.h> #ifdef CONFIG_KALLSYMS extern int raw_show_trace; @@ -20,6 +22,14 @@ static inline unsigned long unwind_stack(struct task_struct *task, } #endif +#define STR_PTR_LA __stringify(PTR_LA) +#define STR_LONG_S __stringify(LONG_S) +#define STR_LONG_L __stringify(LONG_L) +#define STR_LONGSIZE __stringify(LONGSIZE) + +#define STORE_ONE_REG(r) \ + STR_LONG_S " $" __stringify(r)",("STR_LONGSIZE"*"__stringify(r)")(%1)\n\t" + static __always_inline void prepare_frametrace(struct pt_regs *regs) { #ifndef CONFIG_KALLSYMS @@ -32,21 +42,47 @@ static __always_inline void prepare_frametrace(struct pt_regs *regs) __asm__ __volatile__( ".set push\n\t" ".set noat\n\t" -#ifdef CONFIG_64BIT - "1: dla $1, 1b\n\t" - "sd $1, %0\n\t" - "sd $29, %1\n\t" - "sd $31, %2\n\t" -#else - "1: la $1, 1b\n\t" - "sw $1, %0\n\t" - "sw $29, %1\n\t" - "sw $31, %2\n\t" -#endif + /* Store $1 so we can use it */ + STR_LONG_S " $1,"STR_LONGSIZE"(%1)\n\t" + /* Store the PC */ + "1: " STR_PTR_LA " $1, 1b\n\t" + STR_LONG_S " $1,%0\n\t" + STORE_ONE_REG(2) + STORE_ONE_REG(3) + STORE_ONE_REG(4) + STORE_ONE_REG(5) + STORE_ONE_REG(6) + STORE_ONE_REG(7) + STORE_ONE_REG(8) + STORE_ONE_REG(9) + STORE_ONE_REG(10) + STORE_ONE_REG(11) + STORE_ONE_REG(12) + STORE_ONE_REG(13) + STORE_ONE_REG(14) + STORE_ONE_REG(15) + STORE_ONE_REG(16) + STORE_ONE_REG(17) + STORE_ONE_REG(18) + STORE_ONE_REG(19) + STORE_ONE_REG(20) + STORE_ONE_REG(21) + STORE_ONE_REG(22) + STORE_ONE_REG(23) + STORE_ONE_REG(24) + STORE_ONE_REG(25) + STORE_ONE_REG(26) + STORE_ONE_REG(27) + STORE_ONE_REG(28) + STORE_ONE_REG(29) + STORE_ONE_REG(30) + STORE_ONE_REG(31) + /* Restore $1 */ + STR_LONG_L " $1,"STR_LONGSIZE"(%1)\n\t" ".set pop\n\t" - : "=m" (regs->cp0_epc), - "=m" (regs->regs[29]), "=m" (regs->regs[31]) - : : "memory"); + : "=m" (regs->cp0_epc) + : "r" (regs->regs) + : "memory"); } #endif /* _ASM_STACKTRACE_H */ diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h index 7afda4150a59..0673d2d0f2e6 100644 --- a/arch/mips/include/asm/topology.h +++ b/arch/mips/include/asm/topology.h @@ -13,7 +13,7 @@ #ifdef CONFIG_SMP #define topology_physical_package_id(cpu) (cpu_data[cpu].package) -#define topology_core_id(cpu) (cpu_data[cpu].core) +#define topology_core_id(cpu) (cpu_core(&cpu_data[cpu])) #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) #define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu]) #endif |
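
A rough usage sketch of the new asm/mips-gic.h accessors (illustrative only, not part of the patch): gic_raise_shared_example() below is a hypothetical caller; read_gic_config() and write_gic_wedge() are the helpers generated by the GIC_ACCESSOR_RW() lines above, assuming the CPS_ACCESSOR_RW() macros from asm/mips-cps.h expand to the usual read_gic_*/write_gic_* pairs, and assuming GIC_SH_CONFIG.NUMINTERRUPTS keeps its usual encoding of (number of shared sources / 8) - 1.

#include <linux/bitfield.h>
#include <linux/errno.h>
#include <asm/mips-cps.h>	/* mips-gic.h must be reached via mips-cps.h */

/* Mark shared interrupt "hwirq" pending by writing an 'edge' to GIC_SH_WEDGE. */
static int gic_raise_shared_example(unsigned int hwirq)
{
	unsigned int cfg, numintrs;

	if (!mips_gic_present())
		return -ENODEV;

	/* Recover the number of shared interrupt sources from GIC_SH_CONFIG */
	cfg = read_gic_config();
	numintrs = (FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, cfg) + 1) * 8;
	if (hwirq >= numintrs)
		return -EINVAL;

	/* GIC_WEDGE_RW set requests a set ("raise"); left clear it requests a clear */
	write_gic_wedge(GIC_WEDGE_RW | hwirq);

	return 0;
}

Note that the bit-per-interrupt accessors generated by the GIC_ACCESSOR_*_INTR_BIT() macros choose 32-bit or 64-bit register accesses at run time based on mips_cm_is64, so callers such as the sketch above never need to know the width of the underlying GIC registers.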