author		Marc Zyngier <maz@kernel.org>	2020-05-15 19:57:51 +0300
committer	Marc Zyngier <maz@kernel.org>	2020-05-18 12:30:42 +0300
commit		2f13ff1d1d5c0257c97ea76b86a2d9c99c44a4b9
tree		0a265cac9c0366e6e0137e1caeed0b373d4f5362 /drivers/irqchip
parent		337cbeb2c13eb4cab84f576fd402d7ae4ed31ae1
irqchip/gic-v3-its: Track LPI distribution on a per CPU basis
In order to improve the distribution of LPIs among CPUs, let's start by
tracking the number of LPIs assigned to each CPU, both for managed and
non-managed interrupts (as separate counters).
Signed-off-by: Marc Zyngier <maz@kernel.org>
Tested-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/20200515165752.121296-2-maz@kernel.org
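
The bookkeeping added here is a pair of atomic counters per CPU, with the
managed/unmanaged choice driven by irqd_affinity_is_managed(). As a rough,
self-contained model of that pattern (userspace C11; NR_CPUS, cpu_count,
lpi_inc() and lpi_dec() are illustrative names only — the kernel version
uses DEFINE_PER_CPU storage rather than a plain array):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Models struct cpu_lpi_count: one managed/unmanaged pair per CPU. */
struct cpu_lpi_count {
	atomic_int managed;
	atomic_int unmanaged;
};

static struct cpu_lpi_count cpu_count[NR_CPUS];

/* Models its_inc_lpi_count(): bump the counter matching the IRQ class. */
static void lpi_inc(int cpu, bool managed)
{
	if (managed)
		atomic_fetch_add(&cpu_count[cpu].managed, 1);
	else
		atomic_fetch_add(&cpu_count[cpu].unmanaged, 1);
}

/* Models its_dec_lpi_count(). */
static void lpi_dec(int cpu, bool managed)
{
	if (managed)
		atomic_fetch_sub(&cpu_count[cpu].managed, 1);
	else
		atomic_fetch_sub(&cpu_count[cpu].unmanaged, 1);
}

int main(void)
{
	/* Activate an unmanaged LPI on CPU 0, then migrate it to CPU 2:
	 * decrement the old CPU, increment the new one, mirroring what
	 * its_set_affinity() does around the its_send_movi() call. */
	lpi_inc(0, false);
	lpi_dec(0, false);
	lpi_inc(2, false);

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: managed=%d unmanaged=%d\n", cpu,
		       atomic_load(&cpu_count[cpu].managed),
		       atomic_load(&cpu_count[cpu].unmanaged));
	return 0;
}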
Diffstat (limited to 'drivers/irqchip')
-rw-r--r--	drivers/irqchip/irq-gic-v3-its.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 124251b0ccba..4eb8441d0c2b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -174,6 +174,13 @@ static struct {
 	int			next_victim;
 } vpe_proxy;
 
+struct cpu_lpi_count {
+	atomic_t	managed;
+	atomic_t	unmanaged;
+};
+
+static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count);
+
 static LIST_HEAD(its_nodes);
 static DEFINE_RAW_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
@@ -1510,6 +1517,30 @@ static void its_unmask_irq(struct irq_data *d)
 	lpi_update_config(d, 0, LPI_PROP_ENABLED);
 }
 
+static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+
+	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_inc_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+	else
+		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
+static void its_dec_lpi_count(struct irq_data *d, int cpu)
+{
+	if (irqd_affinity_is_managed(d))
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
+	else
+		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
+}
+
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
@@ -1518,34 +1549,44 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_collection *target_col;
 	u32 id = its_get_event_id(d);
+	int prev_cpu;
 
 	/* A forwarded interrupt should use irq_set_vcpu_affinity */
 	if (irqd_is_forwarded_to_vcpu(d))
 		return -EINVAL;
 
+	prev_cpu = its_dev->event_map.col_map[id];
+	its_dec_lpi_count(d, prev_cpu);
+
 	/* lpi cannot be routed to a redistributor that is on a foreign node */
 	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
 		if (its_dev->its->numa_node >= 0) {
 			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 			if (!cpumask_intersects(mask_val, cpu_mask))
-				return -EINVAL;
+				goto err;
 		}
 	}
 
 	cpu = cpumask_any_and(mask_val, cpu_mask);
 	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+		goto err;
 
 	/* don't set the affinity when the target cpu is same as current one */
-	if (cpu != its_dev->event_map.col_map[id]) {
+	if (cpu != prev_cpu) {
 		target_col = &its_dev->its->collections[cpu];
 		its_send_movi(its_dev, target_col, id);
 		its_dev->event_map.col_map[id] = cpu;
 		irq_data_update_effective_affinity(d, cpumask_of(cpu));
 	}
 
+	its_inc_lpi_count(d, cpu);
+
 	return IRQ_SET_MASK_OK_DONE;
+
+err:
+	its_inc_lpi_count(d, prev_cpu);
+	return -EINVAL;
 }
 
 static u64 its_irq_get_msi_base(struct its_device *its_dev)
@@ -3448,6 +3489,7 @@ static int its_irq_domain_activate(struct irq_domain *domain,
 		cpu = cpumask_first(cpu_online_mask);
 	}
 
+	its_inc_lpi_count(d, cpu);
 	its_dev->event_map.col_map[event] = cpu;
 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -3462,6 +3504,7 @@ static void its_irq_domain_deactivate(struct irq_domain *domain,
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
 
+	its_dec_lpi_count(d, its_dev->event_map.col_map[event]);
 	/* Stop the delivery of interrupts */
 	its_send_discard(its_dev, event);
 }
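
Note that its_read_lpi_count() is marked __maybe_unused: this patch only
introduces the counters, and nothing reads them yet; the reader is
presumably there for a follow-up change that picks a target CPU by load. A
sketch of what such a consumer could look like (pick_least_loaded() is a
hypothetical name, not part of this patch):

static int pick_least_loaded(struct irq_data *d, const struct cpumask *mask)
{
	int cpu, best_cpu = nr_cpu_ids;
	u32 best_count = UINT_MAX;

	/* Walk the candidate CPUs and keep the one with the fewest LPIs
	 * of the same class (managed vs. unmanaged) as this interrupt. */
	for_each_cpu(cpu, mask) {
		u32 count = its_read_lpi_count(d, cpu);

		if (count < best_count) {
			best_count = count;
			best_cpu = cpu;
		}
	}

	return best_cpu;
}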