From 55bdd694116597d2f16510b121463cd579ba78da Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Fri, 21 May 2010 18:06:41 +0100 Subject: ARM: Add base support for ARMv7-M MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds the base support for the ARMv7-M architecture. It consists of the corresponding arch/arm/mm/ files and various #ifdef's around the kernel. Exception handling is implemented by a subsequent patch. [ukleinek: squash in some changes originating from commit b5717ba (Cortex-M3: Add support for the Microcontroller Prototyping System) from the v2.6.33-arm1 patch stack, port to post 3.6, drop zImage support, drop reorganisation of pt_regs, assert CONFIG_CPU_V7M doesn't leak into installed headers and a few cosmetic changes] Signed-off-by: Catalin Marinas Reviewed-by: Jonathan Austin Tested-by: Jonathan Austin Signed-off-by: Uwe Kleine-König --- arch/arm/kernel/setup.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel/setup.c') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1cc9e1796415..829124590e4c 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -128,7 +128,9 @@ struct stack { u32 und[3]; } ____cacheline_aligned; +#ifndef CONFIG_CPU_V7M static struct stack stacks[NR_CPUS]; +#endif char elf_platform[ELF_PLATFORM_SIZE]; EXPORT_SYMBOL(elf_platform); @@ -207,7 +209,7 @@ static const char *proc_arch[] = { "5TEJ", "6TEJ", "7", - "?(11)", + "7M", "?(12)", "?(13)", "?(14)", @@ -216,6 +218,12 @@ static const char *proc_arch[] = { "?(17)", }; +#ifdef CONFIG_CPU_V7M +static int __get_cpu_architecture(void) +{ + return CPU_ARCH_ARMv7M; +} +#else static int __get_cpu_architecture(void) { int cpu_arch; @@ -248,6 +256,7 @@ static int __get_cpu_architecture(void) return cpu_arch; } +#endif int __pure cpu_architecture(void) { @@ -293,7 +302,9 @@ static void __init cacheid_init(void) { unsigned int arch = cpu_architecture(); - if (arch >= CPU_ARCH_ARMv6) { + if (arch == CPU_ARCH_ARMv7M) { + cacheid = 0; + } else if (arch >= CPU_ARCH_ARMv6) { unsigned int cachetype = read_cpuid_cachetype(); if ((cachetype & (7 << 29)) == 4 << 29) { /* ARMv7 register format */ @@ -375,6 +386,7 @@ static void __init feat_v6_fixup(void) */ void cpu_init(void) { +#ifndef CONFIG_CPU_V7M unsigned int cpu = smp_processor_id(); struct stack *stk = &stacks[cpu]; @@ -425,6 +437,7 @@ void cpu_init(void) "I" (offsetof(struct stack, und[0])), PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) : "r14"); +#endif } int __cpu_logical_map[NR_CPUS]; -- cgit v1.2.3 From 05774088391c7430f6a4c1d5d18196ef17bb3ba9 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Tue, 21 May 2013 14:24:11 +0000 Subject: arm: introduce psci_smp_ops Rename virt_smp_ops to psci_smp_ops and move them to arch/arm/kernel/psci_smp.c. Remove mach-virt/platsmp.c, now unused. Compile psci_smp if CONFIG_ARM_PSCI and CONFIG_SMP. Add a cpu_die smp_op based on psci_ops.cpu_off. Initialize PSCI before setting smp_ops in setup_arch. If PSCI is available on the platform, prefer psci_smp_ops over the platform smp_ops. 
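For illustration only (not part of the patch, whose full hunks follow below), a condensed sketch of the ops selection order this change establishes in setup_arch(), assuming CONFIG_SMP=y:

    psci_init();                            /* probe PSCI via the device tree */
    if (is_smp()) {
            if (psci_smp_available())       /* i.e. psci_ops.cpu_on is set */
                    smp_set_ops(&psci_smp_ops);
            else if (mdesc->smp)
                    smp_set_ops(mdesc->smp);
            smp_init_cpus();
    }

In short, PSCI is probed first and, when its cpu_on call is present, wins over the platform's statically defined smp_ops; platforms without PSCI keep their existing behaviour.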
Signed-off-by: Stefano Stabellini Acked-by: Will Deacon CC: arnd@arndb.de CC: marc.zyngier@arm.com CC: linux@arm.linux.org.uk CC: nico@linaro.org CC: rob.herring@calxeda.com --- arch/arm/include/asm/psci.h | 9 +++++ arch/arm/kernel/Makefile | 5 ++- arch/arm/kernel/psci.c | 7 ++-- arch/arm/kernel/psci_smp.c | 84 ++++++++++++++++++++++++++++++++++++++++++++ arch/arm/kernel/setup.c | 7 +++- arch/arm/mach-virt/Makefile | 1 - arch/arm/mach-virt/platsmp.c | 50 -------------------------- arch/arm/mach-virt/virt.c | 3 -- 8 files changed, 106 insertions(+), 60 deletions(-) create mode 100644 arch/arm/kernel/psci_smp.c delete mode 100644 arch/arm/mach-virt/platsmp.c (limited to 'arch/arm/kernel/setup.c') diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h index ce0dbe7c1625..c4ae171850f8 100644 --- a/arch/arm/include/asm/psci.h +++ b/arch/arm/include/asm/psci.h @@ -32,5 +32,14 @@ struct psci_operations { }; extern struct psci_operations psci_ops; +extern struct smp_operations psci_smp_ops; + +#ifdef CONFIG_ARM_PSCI +void psci_init(void); +bool psci_smp_available(void); +#else +static inline void psci_init(void) { } +static inline bool psci_smp_available(void) { return false; } +#endif #endif /* __ASM_ARM_PSCI_H */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 5f3338eacad2..dd9d90ab65d0 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -82,6 +82,9 @@ obj-$(CONFIG_DEBUG_LL) += debug.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o -obj-$(CONFIG_ARM_PSCI) += psci.o +ifeq ($(CONFIG_ARM_PSCI),y) +obj-y += psci.o +obj-$(CONFIG_SMP) += psci_smp.o +endif extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c index 36531643cc2c..46931880093d 100644 --- a/arch/arm/kernel/psci.c +++ b/arch/arm/kernel/psci.c @@ -158,7 +158,7 @@ static const struct of_device_id psci_of_match[] __initconst = { {}, }; -static int __init psci_init(void) +void __init psci_init(void) { struct device_node *np; const char *method; @@ -166,7 +166,7 @@ static int __init psci_init(void) np = of_find_matching_node(NULL, psci_of_match); if (!np) - return 0; + return; pr_info("probing function IDs from device-tree\n"); @@ -206,6 +206,5 @@ static int __init psci_init(void) out_put_node: of_node_put(np); - return 0; + return; } -early_initcall(psci_init); diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c new file mode 100644 index 000000000000..23a11424c568 --- /dev/null +++ b/arch/arm/kernel/psci_smp.c @@ -0,0 +1,84 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon + */ + +#include +#include +#include +#include + +#include +#include + +/* + * psci_smp assumes that the following is true about PSCI: + * + * cpu_suspend Suspend the execution on a CPU + * @state we don't currently describe affinity levels, so just pass 0. + * @entry_point the first instruction to be executed on return + * returns 0 success, < 0 on failure + * + * cpu_off Power down a CPU + * @state we don't currently describe affinity levels, so just pass 0. 
+ * no return on successful call + * + * cpu_on Power up a CPU + * @cpuid cpuid of target CPU, as from MPIDR + * @entry_point the first instruction to be executed on return + * returns 0 success, < 0 on failure + * + * migrate Migrate the context to a different CPU + * @cpuid cpuid of target CPU, as from MPIDR + * returns 0 success, < 0 on failure + * + */ + +extern void secondary_startup(void); + +static int __cpuinit psci_boot_secondary(unsigned int cpu, + struct task_struct *idle) +{ + if (psci_ops.cpu_on) + return psci_ops.cpu_on(cpu_logical_map(cpu), + __pa(secondary_startup)); + return -ENODEV; +} + +#ifdef CONFIG_HOTPLUG_CPU +void __ref psci_cpu_die(unsigned int cpu) +{ + const struct psci_power_state ps = { + .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, + }; + + if (psci_ops.cpu_off) + psci_ops.cpu_off(ps); + + /* We should never return */ + panic("psci: cpu %d failed to shutdown\n", cpu); +} +#else +#define psci_cpu_die NULL +#endif + +bool __init psci_smp_available(void) +{ + /* is cpu_on available at least? */ + return (psci_ops.cpu_on != NULL); +} + +struct smp_operations __initdata psci_smp_ops = { + .smp_boot_secondary = psci_boot_secondary, + .cpu_die = psci_cpu_die, +}; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1522c7ae31b0..8f7a6e9926a8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -796,9 +797,13 @@ void __init setup_arch(char **cmdline_p) unflatten_device_tree(); arm_dt_init_cpu_maps(); + psci_init(); #ifdef CONFIG_SMP if (is_smp()) { - smp_set_ops(mdesc->smp); + if (psci_smp_available()) + smp_set_ops(&psci_smp_ops); + else if (mdesc->smp) + smp_set_ops(mdesc->smp); smp_init_cpus(); } #endif diff --git a/arch/arm/mach-virt/Makefile b/arch/arm/mach-virt/Makefile index 042afc1f8c44..7ddbfa60227f 100644 --- a/arch/arm/mach-virt/Makefile +++ b/arch/arm/mach-virt/Makefile @@ -3,4 +3,3 @@ # obj-y := virt.o -obj-$(CONFIG_SMP) += platsmp.o diff --git a/arch/arm/mach-virt/platsmp.c b/arch/arm/mach-virt/platsmp.c deleted file mode 100644 index f4143f5bfa5b..000000000000 --- a/arch/arm/mach-virt/platsmp.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Dummy Virtual Machine - does what it says on the tin. - * - * Copyright (C) 2012 ARM Ltd - * Author: Will Deacon - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include -#include - -extern void secondary_startup(void); - -static void __init virt_smp_init_cpus(void) -{ -} - -static void __init virt_smp_prepare_cpus(unsigned int max_cpus) -{ -} - -static int __cpuinit virt_boot_secondary(unsigned int cpu, - struct task_struct *idle) -{ - if (psci_ops.cpu_on) - return psci_ops.cpu_on(cpu_logical_map(cpu), - __pa(secondary_startup)); - return -ENODEV; -} - -struct smp_operations __initdata virt_smp_ops = { - .smp_init_cpus = virt_smp_init_cpus, - .smp_prepare_cpus = virt_smp_prepare_cpus, - .smp_boot_secondary = virt_boot_secondary, -}; diff --git a/arch/arm/mach-virt/virt.c b/arch/arm/mach-virt/virt.c index 061f283f579e..a67d2dd5bb60 100644 --- a/arch/arm/mach-virt/virt.c +++ b/arch/arm/mach-virt/virt.c @@ -36,11 +36,8 @@ static const char *virt_dt_match[] = { NULL }; -extern struct smp_operations virt_smp_ops; - DT_MACHINE_START(VIRT, "Dummy Virtual Machine") .init_irq = irqchip_init, .init_machine = virt_init, - .smp = smp_ops(virt_smp_ops), .dt_compat = virt_dt_match, MACHINE_END -- cgit v1.2.3 From b382b940f821784107ca22de3455bb90e4512557 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Tue, 21 May 2013 13:40:51 +0000 Subject: ARM: Enable selection of SMP operations at boot time Add a new 'smp_init' hook to machine_desc so platforms can specify a function to be used to setup smp ops instead of having a statically defined value. The hook must return true when smp_ops are initialized. If false the static mdesc->smp_ops will be used by default. Add the definition of "bool" by including the linux/types.h file to asm/mach/arch.h and make it self-contained. Signed-off-by: Jon Medhurst Signed-off-by: Nicolas Pitre Signed-off-by: Stefano Stabellini Signed-off-by: Nicolas Ferre Reviewed-by: Santosh Shilimkar --- arch/arm/include/asm/mach/arch.h | 5 +++++ arch/arm/kernel/setup.c | 10 ++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'arch/arm/kernel/setup.c') diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 308ad7d6f98b..75bf07910b81 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -8,6 +8,8 @@ * published by the Free Software Foundation. 
*/ +#include + #ifndef __ASSEMBLY__ struct tag; @@ -16,8 +18,10 @@ struct pt_regs; struct smp_operations; #ifdef CONFIG_SMP #define smp_ops(ops) (&(ops)) +#define smp_init_ops(ops) (&(ops)) #else #define smp_ops(ops) (struct smp_operations *)NULL +#define smp_init_ops(ops) (bool (*)(void))NULL #endif struct machine_desc { @@ -41,6 +45,7 @@ struct machine_desc { unsigned char reserve_lp2 :1; /* never has lp2 */ char restart_mode; /* default restart mode */ struct smp_operations *smp; /* SMP operations */ + bool (*smp_init)(void); void (*fixup)(struct tag *, char **, struct meminfo *); void (*reserve)(void);/* reserve mem blocks */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 8f7a6e9926a8..2b3ba15408bd 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -800,10 +800,12 @@ void __init setup_arch(char **cmdline_p) psci_init(); #ifdef CONFIG_SMP if (is_smp()) { - if (psci_smp_available()) - smp_set_ops(&psci_smp_ops); - else if (mdesc->smp) - smp_set_ops(mdesc->smp); + if (!mdesc->smp_init || !mdesc->smp_init()) { + if (psci_smp_available()) + smp_set_ops(&psci_smp_ops); + else if (mdesc->smp) + smp_set_ops(mdesc->smp); + } smp_init_cpus(); } #endif -- cgit v1.2.3 From a469abd0f868c902b75532579bf87553dcf1b360 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 8 Apr 2013 17:13:12 +0100 Subject: ARM: elf: add new hwcap for identifying atomic ldrd/strd instructions CPUs implementing LPAE have atomic ldrd/strd instructions, meaning that userspace software can avoid having to use the exclusive variants of these instructions if they wish. This patch advertises the atomicity of these instructions via the hwcaps, so userspace can detect this CPU feature. Reported-by: Vladimir Danushevsky Signed-off-by: Will Deacon --- arch/arm/include/uapi/asm/hwcap.h | 2 +- arch/arm/kernel/setup.c | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel/setup.c') diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h index 3688fd15a32d..6d34d080372a 100644 --- a/arch/arm/include/uapi/asm/hwcap.h +++ b/arch/arm/include/uapi/asm/hwcap.h @@ -25,6 +25,6 @@ #define HWCAP_IDIVT (1 << 18) #define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */ #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) - +#define HWCAP_LPAE (1 << 20) #endif /* _UAPI__ASMARM_HWCAP_H */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1522c7ae31b0..bdcd4dd13230 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -355,7 +355,7 @@ void __init early_print(const char *str, ...) static void __init cpuid_init_hwcaps(void) { - unsigned int divide_instrs; + unsigned int divide_instrs, vmsa; if (cpu_architecture() < CPU_ARCH_ARMv7) return; @@ -368,6 +368,11 @@ static void __init cpuid_init_hwcaps(void) case 1: elf_hwcap |= HWCAP_IDIVT; } + + /* LPAE implies atomic ldrd/strd instructions */ + vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0; + if (vmsa >= 5) + elf_hwcap |= HWCAP_LPAE; } static void __init feat_v6_fixup(void) @@ -872,6 +877,7 @@ static const char *hwcap_str[] = { "vfpv4", "idiva", "idivt", + "lpae", NULL }; -- cgit v1.2.3 From 8cf72172d739639f2699131821a3ebc291287cf2 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Thu, 16 May 2013 10:32:09 +0100 Subject: ARM: kernel: build MPIDR hash function data structure On ARM SMP systems, cores are identified by their MPIDR register. 
The MPIDR guidelines in the ARM ARM do not provide strict enforcement of MPIDR layout, only recommendations that, if followed, split the MPIDR on ARM 32 bit platforms in three affinity levels. In multi-cluster systems like big.LITTLE, if the affinity guidelines are followed, the MPIDR can not be considered an index anymore. This means that the association between logical CPU in the kernel and the HW CPU identifier becomes somewhat more complicated requiring methods like hashing to associate a given MPIDR to a CPU logical index, in order for the look-up to be carried out in an efficient and scalable way. This patch provides a function in the kernel that starting from the cpu_logical_map, implement collision-free hashing of MPIDR values by checking all significative bits of MPIDR affinity level bitfields. The hashing can then be carried out through bits shifting and ORing; the resulting hash algorithm is a collision-free though not minimal hash that can be executed with few assembly instructions. The mpidr is filtered through a mpidr mask that is built by checking all bits that toggle in the set of MPIDRs corresponding to possible CPUs. Bits that do not toggle do not carry information so they do not contribute to the resulting hash. Pseudo code: /* check all bits that toggle, so they are required */ for (i = 1, mpidr_mask = 0; i < num_possible_cpus(); i++) mpidr_mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); /* * Build shifts to be applied to aff0, aff1, aff2 values to hash the mpidr * fls() returns the last bit set in a word, 0 if none * ffs() returns the first bit set in a word, 0 if none */ fs0 = mpidr_mask[7:0] ? ffs(mpidr_mask[7:0]) - 1 : 0; fs1 = mpidr_mask[15:8] ? ffs(mpidr_mask[15:8]) - 1 : 0; fs2 = mpidr_mask[23:16] ? ffs(mpidr_mask[23:16]) - 1 : 0; ls0 = fls(mpidr_mask[7:0]); ls1 = fls(mpidr_mask[15:8]); ls2 = fls(mpidr_mask[23:16]); bits0 = ls0 - fs0; bits1 = ls1 - fs1; bits2 = ls2 - fs2; aff0_shift = fs0; aff1_shift = 8 + fs1 - bits0; aff2_shift = 16 + fs2 - (bits0 + bits1); u32 hash(u32 mpidr) { u32 l0, l1, l2; u32 mpidr_masked = mpidr & mpidr_mask; l0 = mpidr_masked & 0xff; l1 = mpidr_masked & 0xff00; l2 = mpidr_masked & 0xff0000; return (l0 >> aff0_shift | l1 >> aff1_shift | l2 >> aff2_shift); } The hashing algorithm relies on the inherent properties set in the ARM ARM recommendations for the MPIDR. Exotic configurations, where for instance the MPIDR values at a given affinity level have large holes, can end up requiring big hash tables since the compression of values that can be achieved through shifting is somewhat crippled when holes are present. Kernel warns if the number of buckets of the resulting hash table exceeds the number of possible CPUs by a factor of 4, which is a symptom of a very sparse HW MPIDR configuration. The hash algorithm is quite simple and can easily be implemented in assembly code, to be used in code paths where the kernel virtual address space is not set-up (ie cpu_resume) and instruction and data fetches are strongly ordered so code must be compact and must carry out few data accesses. 
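As a worked illustration (not taken from the patch), consider a hypothetical two-cluster system whose possible CPUs have MPIDR values 0x000, 0x001, 0x100 and 0x101. The stand-alone user-space sketch below re-runs the pseudo code above on that layout, using GCC builtins in place of the kernel's ffs()/fls():

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical MPIDRs: two clusters, two CPUs each. */
    static const uint32_t cpu_map[] = { 0x000, 0x001, 0x100, 0x101 };
    #define NR (sizeof(cpu_map) / sizeof(cpu_map[0]))

    static int fls32(uint32_t x) { return x ? 32 - __builtin_clz(x) : 0; }

    int main(void)
    {
            uint32_t mask = 0, fs[3], bits[3], shift[3];
            unsigned int i, lvl;

            /* Bits that never toggle across CPUs carry no information. */
            for (i = 1; i < NR; i++)
                    mask |= cpu_map[i] ^ cpu_map[0];

            /* First/last set bit per 8-bit affinity field of the mask. */
            for (lvl = 0; lvl < 3; lvl++) {
                    uint32_t aff = (mask >> (lvl * 8)) & 0xff;

                    fs[lvl] = aff ? __builtin_ffs(aff) - 1 : 0;
                    bits[lvl] = fls32(aff) - fs[lvl];
            }
            shift[0] = fs[0];
            shift[1] = 8 + fs[1] - bits[0];
            shift[2] = 16 + fs[2] - (bits[0] + bits[1]);

            printf("mask=0x%x shifts=%u/%u/%u buckets=%u\n", mask,
                   shift[0], shift[1], shift[2],
                   1u << (bits[0] + bits[1] + bits[2]));

            /* Hash each MPIDR by masking, shifting and ORing. */
            for (i = 0; i < NR; i++) {
                    uint32_t m = cpu_map[i] & mask;
                    uint32_t hash = ((m & 0xff) >> shift[0]) |
                                    ((m & 0xff00) >> shift[1]) |
                                    ((m & 0xff0000) >> shift[2]);

                    printf("MPIDR 0x%03x -> index %u\n", cpu_map[i], hash);
            }
            return 0;
    }

For this layout the mask comes out as 0x101, the shifts as 0/7/14, and the four MPIDRs map, collision free, onto indices 0..3 of a 4-entry table.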
Cc: Will Deacon Cc: Catalin Marinas Cc: Russell King Cc: Colin Cross Cc: Santosh Shilimkar Cc: Daniel Lezcano Cc: Amit Kucheria Signed-off-by: Lorenzo Pieralisi Reviewed-by: Dave Martin Reviewed-by: Nicolas Pitre Tested-by: Shawn Guo Tested-by: Kevin Hilman Tested-by: Stephen Warren --- arch/arm/include/asm/smp_plat.h | 12 ++++++++ arch/arm/kernel/setup.c | 67 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) (limited to 'arch/arm/kernel/setup.c') diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 1c7b6f8101ae..f75f8a234b3f 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -70,4 +70,16 @@ static inline int get_logical_index(u32 mpidr) return -EINVAL; } +struct mpidr_hash { + u32 mask; + u32 shift_aff[3]; + u32 bits; +}; + +extern struct mpidr_hash mpidr_hash; + +static inline u32 mpidr_hash_size(void) +{ + return 1 << mpidr_hash.bits; +} #endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index ca34224f891f..9048513cbe0d 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -478,6 +478,72 @@ void __init smp_setup_processor_id(void) printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); } +struct mpidr_hash mpidr_hash; +#ifdef CONFIG_SMP +/** + * smp_build_mpidr_hash - Pre-compute shifts required at each affinity + * level in order to build a linear index from an + * MPIDR value. Resulting algorithm is a collision + * free hash carried out through shifting and ORing + */ +static void __init smp_build_mpidr_hash(void) +{ + u32 i, affinity; + u32 fs[3], bits[3], ls, mask = 0; + /* + * Pre-scan the list of MPIDRS and filter out bits that do + * not contribute to affinity levels, ie they never toggle. + */ + for_each_possible_cpu(i) + mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); + pr_debug("mask of set bits 0x%x\n", mask); + /* + * Find and stash the last and first bit set at all affinity levels to + * check how many bits are required to represent them. + */ + for (i = 0; i < 3; i++) { + affinity = MPIDR_AFFINITY_LEVEL(mask, i); + /* + * Find the MSB bit and LSB bits position + * to determine how many bits are required + * to express the affinity level. + */ + ls = fls(affinity); + fs[i] = affinity ? ffs(affinity) - 1 : 0; + bits[i] = ls - fs[i]; + } + /* + * An index can be created from the MPIDR by isolating the + * significant bits at each affinity level and by shifting + * them in order to compress the 24 bits values space to a + * compressed set of values. This is equivalent to hashing + * the MPIDR through shifting and ORing. It is a collision free + * hash though not minimal since some levels might contain a number + * of CPUs that is not an exact power of 2 and their bit + * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}. + */ + mpidr_hash.shift_aff[0] = fs[0]; + mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0]; + mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] - + (bits[1] + bits[0]); + mpidr_hash.mask = mask; + mpidr_hash.bits = bits[2] + bits[1] + bits[0]; + pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n", + mpidr_hash.shift_aff[0], + mpidr_hash.shift_aff[1], + mpidr_hash.shift_aff[2], + mpidr_hash.mask, + mpidr_hash.bits); + /* + * 4x is an arbitrary value used to warn on a hash table much bigger + * than expected on most systems. 
+ */ + if (mpidr_hash_size() > 4 * num_possible_cpus()) + pr_warn("Large number of MPIDR hash buckets detected\n"); + sync_cache_w(&mpidr_hash); +} +#endif + static void __init setup_processor(void) { struct proc_info_list *list; @@ -825,6 +891,7 @@ void __init setup_arch(char **cmdline_p) smp_set_ops(mdesc->smp); } smp_init_cpus(); + smp_build_mpidr_hash(); } #endif -- cgit v1.2.3