Diffstat (limited to 'arch/powerpc/platforms/pseries/xics.c')
-rw-r--r-- | arch/powerpc/platforms/pseries/xics.c | 97
1 file changed, 52 insertions(+), 45 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index d80f193cd871..f19d19468393 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -120,14 +120,14 @@ static inline void direct_qirr_info(int n_cpu, u8 value)
 
 /* LPAR low level accessors */
 
-static inline unsigned int lpar_xirr_info_get(void)
+static inline unsigned int lpar_xirr_info_get(unsigned char cppr)
 {
 	unsigned long lpar_rc;
 	unsigned long return_value;
 
-	lpar_rc = plpar_xirr(&return_value);
+	lpar_rc = plpar_xirr(&return_value, cppr);
 	if (lpar_rc != H_SUCCESS)
-		panic(" bad return code xirr - rc = %lx \n", lpar_rc);
+		panic(" bad return code xirr - rc = %lx\n", lpar_rc);
 	return (unsigned int)return_value;
 }
 
@@ -163,39 +163,43 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
 /* Interface to generic irq subsystem */
 
 #ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
 			  unsigned int strict_check)
 {
-	int server;
-	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t tmp = CPU_MASK_NONE;
 
 	if (!distribute_irqs)
 		return default_server;
 
-	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
-		cpus_and(tmp, cpu_online_map, cpumask);
-
-		server = first_cpu(tmp);
+	if (!cpumask_equal(cpumask, cpu_all_mask)) {
+		int server = cpumask_first_and(cpu_online_mask, cpumask);
 
-		if (server < NR_CPUS)
+		if (server < nr_cpu_ids)
 			return get_hard_smp_processor_id(server);
 
 		if (strict_check)
 			return -1;
 	}
 
-	if (cpus_equal(cpu_online_map, cpu_present_map))
+	/*
+	 * Workaround issue with some versions of JS20 firmware that
+	 * deliver interrupts to cpus which haven't been started. This
+	 * happens when using the maxcpus= boot option.
+	 */
+	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
 		return default_distrib_server;
 
 	return default_server;
 }
 #else
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
-			  unsigned int strict_check)
-{
-	return default_server;
-}
+#define get_irq_server(virq, cpumask, strict_check) (default_server)
 #endif
 
 static void xics_unmask_irq(unsigned int virq)
@@ -211,7 +215,7 @@ static void xics_unmask_irq(unsigned int virq)
 	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
 		return;
 
-	server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
+	server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
 
 	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
 				DEFAULT_PRIORITY);
@@ -335,7 +339,8 @@ static unsigned int xics_get_irq_direct(void)
 
 static unsigned int xics_get_irq_lpar(void)
 {
-	unsigned int xirr = lpar_xirr_info_get();
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]);
 	unsigned int vec = xics_xirr_vector(xirr);
 	unsigned int irq;
 
@@ -401,11 +406,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
 		return -1;
 	}
 
-	/*
-	 * For the moment only implement delivery to all cpus or one cpu.
-	 * Get current irq_server for the given irq
-	 */
-	irq_server = get_irq_server(virq, *cpumask, 1);
+	irq_server = get_irq_server(virq, cpumask, 1);
 	if (irq_server == -1) {
 		char cpulist[128];
 		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
@@ -428,7 +429,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
 }
 
 static struct irq_chip xics_pic_direct = {
-	.name = " XICS ",
+	.name = "XICS",
 	.startup = xics_startup,
 	.mask = xics_mask_irq,
 	.unmask = xics_unmask_irq,
@@ -437,7 +438,7 @@ static struct irq_chip xics_pic_direct = {
 };
 
 static struct irq_chip xics_pic_lpar = {
-	.name = " XICS ",
+	.name = "XICS",
 	.startup = xics_startup,
 	.mask = xics_mask_irq,
 	.unmask = xics_unmask_irq,
@@ -514,15 +515,13 @@ static void __init xics_init_host(void)
 /*
  * XICS only has a single IPI, so encode the messages per CPU
  */
-struct xics_ipi_struct {
-	unsigned long value;
-} ____cacheline_aligned;
-
-static struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
+static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message);
 
 static inline void smp_xics_do_message(int cpu, int msg)
 {
-	set_bit(msg, &xics_ipi_message[cpu].value);
+	unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
+
+	set_bit(msg, tgt);
 	mb();
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		lpar_qirr_info(cpu, IPI_PRIORITY);
@@ -548,25 +547,23 @@ void smp_xics_message_pass(int target, int msg)
 
 static irqreturn_t xics_ipi_dispatch(int cpu)
 {
+	unsigned long *tgt = &per_cpu(xics_ipi_message, cpu);
+
 	WARN_ON(cpu_is_offline(cpu));
 
 	mb();	/* order mmio clearing qirr */
-	while (xics_ipi_message[cpu].value) {
-		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
-				       &xics_ipi_message[cpu].value)) {
+	while (*tgt) {
+		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) {
 			smp_message_recv(PPC_MSG_CALL_FUNCTION);
 		}
-		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
-				       &xics_ipi_message[cpu].value)) {
+		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) {
 			smp_message_recv(PPC_MSG_RESCHEDULE);
 		}
-		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE,
-				       &xics_ipi_message[cpu].value)) {
+		if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) {
 			smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
 		}
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
-		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
-				       &xics_ipi_message[cpu].value)) {
+		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) {
 			smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
 		}
 #endif
@@ -618,7 +615,7 @@ int __init smp_xics_probe(void)
 {
 	xics_request_ipi();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -788,9 +785,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
 {
 	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
-	BUG_ON(os_cppr->index != 0);
+	/*
+	 * we only really want to set the priority when there's
+	 * just one cppr value on the stack
+	 */
+	WARN_ON(os_cppr->index != 0);
 
-	os_cppr->stack[os_cppr->index] = cppr;
+	os_cppr->stack[0] = cppr;
 
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		lpar_cppr_info(cppr);
@@ -825,8 +826,14 @@ void xics_setup_cpu(void)
 
 void xics_teardown_cpu(void)
 {
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 	int cpu = smp_processor_id();
 
+	/*
+	 * we have to reset the cppr index to 0 because we're
+	 * not going to return from the IPI
+	 */
+	os_cppr->index = 0;
 	xics_set_cpu_priority(0);
 
 	/* Clear any pending IPI request */
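Two standalone sketches follow, to illustrate the patterns this diff moves to. Both are plain userspace C, not kernel code: constants such as MAX_NUM_PRIORITIES and NR_CPUS, and every helper function name, are assumptions made for the sketch, not the kernel's API.

The first sketch models the per-cpu CPPR (current processor priority register) stack the diff starts consulting: xics_get_irq_lpar() passes the top of the stack to the H_XIRR hcall, xics_set_cpu_priority() warns instead of panicking when more than one value is stacked, and xics_teardown_cpu() forces the index back to 0 because the cpu will not return from its final IPI.

#include <assert.h>
#include <stdio.h>

#define MAX_NUM_PRIORITIES 3	/* assumed depth; the kernel defines its own */

struct xics_cppr {
	unsigned char stack[MAX_NUM_PRIORITIES];
	int index;
};

/* one of these is per-cpu in the kernel; a single instance suffices here */
static struct xics_cppr os_cppr = { .stack = { 0xff }, .index = 0 };

/* accepting an interrupt pushes the priority we are now running at */
static void cppr_push(unsigned char priority)
{
	assert(os_cppr.index < MAX_NUM_PRIORITIES - 1);
	os_cppr.stack[++os_cppr.index] = priority;
}

/* EOI pops back to the priority in effect before the interrupt */
static void cppr_pop(void)
{
	assert(os_cppr.index > 0);
	os_cppr.index--;
}

int main(void)
{
	/* xics_get_irq_lpar() hands the current top of stack to the hcall */
	printf("cppr passed to hcall: 0x%02x\n", os_cppr.stack[os_cppr.index]);

	cppr_push(0x05);	/* took a priority-5 interrupt */
	printf("while in handler:     0x%02x\n", os_cppr.stack[os_cppr.index]);
	cppr_pop();		/* EOI restores the previous priority */

	/*
	 * teardown path: we never return from the final IPI, so reset the
	 * index before setting the base priority, otherwise the WARN_ON in
	 * xics_set_cpu_priority() would trip
	 */
	os_cppr.index = 0;
	printf("after teardown reset: 0x%02x\n", os_cppr.stack[os_cppr.index]);
	return 0;
}

The second sketch models the IPI mailbox conversion: the old NR_CPUS array of cacheline-aligned structs becomes one per-cpu unsigned long of message bits (DEFINE_PER_CPU_SHARED_ALIGNED in the diff), set by the sender and drained by the receiving cpu. C11 atomics stand in for the kernel's set_bit()/test_and_clear_bit(); draining the whole word at once is a simplification of the kernel's per-bit loop.

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4	/* assumed; any small number works for the sketch */

enum { PPC_MSG_CALL_FUNCTION, PPC_MSG_RESCHEDULE, PPC_MSG_CALL_FUNC_SINGLE };

/* stands in for DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message) */
static atomic_ulong xics_ipi_message[NR_CPUS];

/* sender side: set the message bit, then (in the kernel) poke the qirr */
static void smp_do_message(int cpu, int msg)
{
	atomic_fetch_or(&xics_ipi_message[cpu], 1UL << msg);
	/* kernel: mb(); then lpar_qirr_info()/direct_qirr_info() raises the IPI */
}

/* receiver side: drain all pending bits, like the while (*tgt) loop */
static void ipi_dispatch(int cpu)
{
	unsigned long pending;

	while ((pending = atomic_exchange(&xics_ipi_message[cpu], 0)) != 0) {
		if (pending & (1UL << PPC_MSG_CALL_FUNCTION))
			printf("cpu%d: call function\n", cpu);
		if (pending & (1UL << PPC_MSG_RESCHEDULE))
			printf("cpu%d: reschedule\n", cpu);
		if (pending & (1UL << PPC_MSG_CALL_FUNC_SINGLE))
			printf("cpu%d: call function single\n", cpu);
	}
}

int main(void)
{
	smp_do_message(1, PPC_MSG_RESCHEDULE);
	smp_do_message(1, PPC_MSG_CALL_FUNCTION);
	ipi_dispatch(1);
	return 0;
}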