Diffstat (limited to 'arch/mips/mti-malta')
 arch/mips/mti-malta/malta-int.c  |  2 +-
 arch/mips/mti-malta/malta-smtc.c | 12 ++++++------
 arch/mips/mti-malta/malta-time.c |  2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index b79b24afe3a2..9027061f0ead 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -472,7 +472,7 @@ static void __init fill_ipi_map(void)
void __init arch_init_ipiirq(int irq, struct irqaction *action)
{
setup_irq(irq, action);
- set_irq_handler(irq, handle_percpu_irq);
+ irq_set_handler(irq, handle_percpu_irq);
}
void __init arch_init_irq(void)
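
Note: this helper is called once per IPI type with a pre-built irqaction. The sketch below shows that calling shape with the renamed irq_set_handler() path in place; the handler, irqaction and IRQ number are illustrative placeholders, not the exact malta-int.c symbols.

/* Illustrative only: one resched-IPI registration through
 * arch_init_ipiirq() above. Symbol names and the IRQ number
 * are placeholders. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	/* nothing to do here: rescheduling happens on return from interrupt */
	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched",
};

static void __init example_setup_ipis(void)
{
	int resched_irq = MIPS_CPU_IRQ_BASE + 6;	/* placeholder IRQ number */

	arch_init_ipiirq(resched_irq, &irq_resched);
}
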
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 192cfd2a539c..49a38b09a488 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -34,7 +34,6 @@ static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
*/
static void __cpuinit msmtc_init_secondary(void)
{
- void smtc_init_secondary(void);
int myvpe;
/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
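
Note: the rest of this function (outside the context shown) keeps the Malta I/O interrupt line on VPE 0 only. The following is a hedged sketch of that idea, not the verbatim kernel code; the exact mask choices are illustrative, and smtc_init_secondary() is assumed to be declared in a shared header now that the in-function prototype is gone.

/* Sketch: secondary VPEs must not take Malta I/O interrupts (IP2);
 * they keep only the remaining CP0 interrupt lines. */
static void __cpuinit example_init_secondary(void)
{
	int myvpe = read_c0_tcbind() & TCBIND_CURVPE;

	if (myvpe != 0) {
		/* leave IP2 to VPE 0, enable the other lines locally */
		clear_c0_status(STATUSF_IP2);
		set_c0_status(STATUSF_IP0 | STATUSF_IP1 |
			      STATUSF_IP6 | STATUSF_IP7);
	}

	smtc_init_secondary();
}
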
@@ -114,7 +113,8 @@ struct plat_smp_ops msmtc_smp_ops = {
*/
-int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+ bool force)
{
cpumask_t tmask;
int cpu = 0;
@@ -130,7 +130,7 @@ int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
* cleared in the affinity mask, there will never be any
* interrupt forwarding. But as soon as a program or operator
* sets affinity for one of the related IRQs, we need to make
- * sure that we don't ever try to forward across the VPE boundry,
+ * sure that we don't ever try to forward across the VPE boundary,
* at least not until we engineer a system where the interrupt
* _ack() or _end() function can somehow know that it corresponds
* to an interrupt taken on another VPE, and perform the appropriate
@@ -144,7 +144,7 @@ int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
cpu_clear(cpu, tmask);
}
- cpumask_copy(irq_desc[irq].affinity, &tmask);
+ cpumask_copy(d->affinity, &tmask);
if (cpus_empty(tmask))
/*
@@ -155,8 +155,8 @@ int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
"IRQ affinity leaves no legal CPU for IRQ %d\n", irq);
/* Do any generic SMTC IRQ affinity setup */
- smtc_set_irq_affinity(irq, tmask);
+ smtc_set_irq_affinity(d->irq, tmask);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
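
Note: the conversion above follows the genirq change where affinity callbacks receive a struct irq_data instead of a bare IRQ number. The sketch below shows the general callback shape this diff adopts; it is not Malta code, and the hardware-programming step is only a comment. Returning IRQ_SET_MASK_OK_NOCOPY tells the core that the callback has already written its (possibly filtered) mask into d->affinity, so the core must not copy the caller's mask over it again.

/* Minimal sketch of a new-style irq_set_affinity callback. */
static int example_set_affinity(struct irq_data *d,
				const struct cpumask *affinity, bool force)
{
	cpumask_t tmask;

	/* restrict the requested mask, e.g. to online CPUs */
	cpumask_and(&tmask, affinity, cpu_online_mask);

	/* the callback fills in the descriptor's affinity mask itself */
	cpumask_copy(d->affinity, &tmask);

	/* ...program the interrupt controller for d->irq here... */

	return IRQ_SET_MASK_OK_NOCOPY;	/* core won't overwrite d->affinity */
}
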
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 3c6f190aa61c..1620b83cd13e 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -119,7 +119,7 @@ static void __init plat_perf_setup(void)
set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
#ifdef CONFIG_SMP
- set_irq_handler(mips_cpu_perf_irq, handle_percpu_irq);
+ irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq);
#endif
}
}
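
Note: as in malta-int.c, only the helper name changes here. handle_percpu_irq is the right flow handler because the performance-counter overflow interrupt is taken and serviced on the local CPU only, so the usual mask/unmask and descriptor-lock handling of other flow handlers is unnecessary. The sketch below shows that general registration pattern; perf_irq, the handler and the action name are placeholders, not real malta-time.c symbols.

/* Illustrative only: registering a per-CPU interrupt line such as a
 * performance-counter overflow IRQ. */
static irqreturn_t perf_overflow_handler(int irq, void *dev_id)
{
	/* acknowledge and service the local CPU's counter overflow */
	return IRQ_HANDLED;
}

static void __init register_perf_irq(int perf_irq)
{
	irq_set_handler(perf_irq, handle_percpu_irq);
	if (request_irq(perf_irq, perf_overflow_handler, IRQF_PERCPU,
			"perf counters", NULL))
		pr_err("unable to request perf counter IRQ %d\n", perf_irq);
}
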