author | Christian Krafft <krafft@de.ibm.com> | 2007-04-23 23:35:39 +0400
---|---|---
committer | Arnd Bergmann <arnd@klappe.arndb.de> | 2007-04-23 23:44:38 +0400
commit | 91a69c9646a5b709381d99a171890e77377b1b9c (patch) |
tree | 3827389a195eecd0e010e53d926318328edab25f /arch/powerpc/platforms/cell/cbe_regs.c |
parent | 390cbb56a731546edc0f35fbc4c5045676467581 (diff) |
download | linux-91a69c9646a5b709381d99a171890e77377b1b9c.tar.xz |
[POWERPC] cell: add cbe_node_to_cpu function
This patch adds code to deal with the conversion of
logical CPU numbers to CBE nodes. It removes code that
assumed there were two logical CPUs per CBE.
Signed-off-by: Christian Krafft <krafft@de.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch/powerpc/platforms/cell/cbe_regs.c')
-rw-r--r-- | arch/powerpc/platforms/cell/cbe_regs.c | 53 |
1 file changed, 39 insertions(+), 14 deletions(-)
```diff
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 7c94af4ac439..dca39c0af7ea 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -38,8 +38,13 @@ static struct cbe_thread_map
 {
 	struct device_node *cpu_node;
 	struct cbe_regs_map *regs;
+	unsigned int thread_id;
+	unsigned int cbe_id;
 } cbe_thread_map[NR_CPUS];
 
+static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
+static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
+
 static struct cbe_regs_map *cbe_find_map(struct device_node *np)
 {
 	int i;
@@ -130,31 +135,40 @@ struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
 }
 EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
 
-/* FIXME
- * This is little more than a stub at the moment. It should be
- * fleshed out so that it works for both SMT and non-SMT, no
- * matter if the passed cpu is odd or even.
- * For SMT enabled, returns 0 for even-numbered cpu; otherwise 1.
- * For SMT disabled, returns 0 for all cpus.
- */
 u32 cbe_get_hw_thread_id(int cpu)
 {
-	return (cpu & 1);
+	return cbe_thread_map[cpu].thread_id;
 }
 EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
 
+u32 cbe_cpu_to_node(int cpu)
+{
+	return cbe_thread_map[cpu].cbe_id;
+}
+EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
+
+u32 cbe_node_to_cpu(int node)
+{
+	return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
+}
+EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
+
 void __init cbe_regs_init(void)
 {
 	int i;
+	unsigned int thread_id;
 	struct device_node *cpu;
 
 	/* Build local fast map of CPUs */
-	for_each_possible_cpu(i)
-		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);
+	for_each_possible_cpu(i) {
+		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
+		cbe_thread_map[i].thread_id = thread_id;
+	}
 
 	/* Find maps for each device tree CPU */
 	for_each_node_by_type(cpu, "cpu") {
-		struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];
+		struct cbe_regs_map *map;
+		unsigned int cbe_id;
 
 		/* That hack must die die die ! */
 		const struct address_prop {
@@ -162,6 +176,8 @@ void __init cbe_regs_init(void)
 			unsigned int len;
 		} __attribute__((packed)) *prop;
 
+		cbe_id = cbe_regs_map_count++;
+		map = &cbe_regs_maps[cbe_id];
 
 		if (cbe_regs_map_count > MAX_CBE) {
 			printk(KERN_ERR "cbe_regs: More BE chips than supported"
@@ -170,9 +186,18 @@ void __init cbe_regs_init(void)
 			return;
 		}
 		map->cpu_node = cpu;
-		for_each_possible_cpu(i)
-			if (cbe_thread_map[i].cpu_node == cpu)
-				cbe_thread_map[i].regs = map;
+
+		for_each_possible_cpu(i) {
+			struct cbe_thread_map *thread = &cbe_thread_map[i];
+
+			if (thread->cpu_node == cpu) {
+				thread->regs = map;
+				thread->cbe_id = cbe_id;
+				cpu_set(i, cbe_local_mask[cbe_id]);
+				if(thread->thread_id == 0)
+					cpu_set(i, cbe_first_online_cpu);
+			}
+		}
 
 		prop = of_get_property(cpu, "pervasive", NULL);
 		if (prop != NULL)
```
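For context (not part of the commit), the new exports let a per-node consumer such as a cpufreq or thermal driver translate between logical CPU numbers and CBE nodes. The sketch below is a hypothetical caller: report_cbe_topology() and the direct "cbe_regs.h" include are illustrative assumptions; only cbe_cpu_to_node(), cbe_node_to_cpu(), cbe_get_hw_thread_id() and MAX_CBE come from the patched code, with their prototypes assumed to be exposed via the local cbe_regs.h header.

```c
/*
 * Hypothetical consumer of the new API, not part of this patch.
 * Assumes it is built in arch/powerpc/platforms/cell/ so that the
 * local "cbe_regs.h" (declaring cbe_cpu_to_node(), cbe_node_to_cpu(),
 * cbe_get_hw_thread_id() and MAX_CBE) is visible.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/threads.h>	/* NR_CPUS */

#include "cbe_regs.h"

static void __init report_cbe_topology(void)
{
	int node;

	for (node = 0; node < MAX_CBE; node++) {
		/* lowest-numbered logical CPU belonging to this CBE */
		u32 cpu = cbe_node_to_cpu(node);

		/* crude guard: an empty mask can yield an out-of-range index */
		if (cpu >= NR_CPUS)
			continue;

		/* the mapping should round-trip back to the same node */
		if (cbe_cpu_to_node(cpu) != node)
			continue;

		printk(KERN_INFO "CBE %d: first cpu %u, hw thread %u\n",
		       node, cpu, cbe_get_hw_thread_id(cpu));
	}
}
```

Note that cbe_node_to_cpu() scans cbe_local_mask[node], so it returns the lowest-numbered thread mapped to that node; cbe_first_online_cpu is a separate mask collecting each node's thread 0 and is not consulted by this lookup.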