author		Suresh Siddha <suresh.b.siddha@intel.com>	2012-06-26 00:38:28 +0400
committer	Ingo Molnar <mingo@kernel.org>			2012-07-06 13:00:22 +0400
commit		1ac322d0b169c95ce34d55b3ed6d40ce1a5f3a02 (patch)
tree		21a911ca299c18c3c29569af1abf768230c7990a /arch/x86/kernel/apic
parent		b39f25a849d7677a7dbf183f2483fd41c201a5ce (diff)
download	linux-1ac322d0b169c95ce34d55b3ed6d40ce1a5f3a02.tar.xz
x86/apic/x2apic: Limit the vector reservation to the user specified mask
For the x2apic cluster mode, the vector for an interrupt is currently
reserved on all the CPUs that are part of the x2apic cluster. But the
interrupt will be routed only to those cluster members (the cluster being
derived from the first CPU in the mask) that are specified in the mask, so
there is no need to reserve the vector on the unused cluster members.

Modify __assign_irq_vector() to reserve the vectors based on the
user-specified irq destination mask. If the new mask is a proper subset of
the currently used mask, clean up the vector allocation on the CPU members
that are no longer used.

Also, allow the apic driver to tune the vector domain based on the affinity
mask (which in most cases is the user-specified mask).

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: Alexander Gordeev <agordeev@redhat.com>
Acked-by: Cyrill Gorcunov <gorcunov@openvz.org>
Link: http://lkml.kernel.org/r/1340656709-11423-3-git-send-email-suresh.b.siddha@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
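To illustrate the reservation change, a minimal standalone sketch follows (not
kernel code: cpumasks are modelled as plain bit masks, and the toy per-cpu
vector table, the NR_CPUS/NR_VECTORS sizes and the example irq/vector numbers
are made up for illustration). It shows that, once the allocation domain is
narrowed to cluster & user mask, the vector is consumed only on the CPUs that
can actually receive the interrupt:

#include <stdio.h>

#define NR_CPUS		8	/* size of the toy model, not the kernel's NR_CPUS */
#define NR_VECTORS	256

/* Toy per-cpu vector table: which irq owns each vector on each cpu (-1 = free). */
static int per_cpu_vector_irq[NR_CPUS][NR_VECTORS];

typedef unsigned long cpumask_t;	/* bit n == CPU n */

/*
 * Reserve @vector for @irq on the CPUs in @domain only.  After this patch
 * the domain handed to the allocator is the intersection of the cluster
 * and the user mask, so unused cluster members no longer consume the vector.
 */
static void reserve_vector(int irq, int vector, cpumask_t domain)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (domain & (1UL << cpu))
			per_cpu_vector_irq[cpu][vector] = irq;
}

int main(void)
{
	cpumask_t cluster   = 0x0fUL;	/* CPUs 0-3: made-up topology */
	cpumask_t user_mask = 0x05UL;	/* user asked for CPUs 0 and 2 */
	int cpu, vec;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (vec = 0; vec < NR_VECTORS; vec++)
			per_cpu_vector_irq[cpu][vec] = -1;

	reserve_vector(42, 0x41, cluster & user_mask);	/* new behaviour */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: vector 0x41 -> irq %d\n",
		       cpu, per_cpu_vector_irq[cpu][0x41]);
	return 0;
}

Running it shows vector 0x41 claimed for irq 42 on CPUs 0 and 2 only; CPUs 1
and 3, although in the same cluster, keep that vector free.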
Diffstat (limited to 'arch/x86/kernel/apic')
-rw-r--r--	arch/x86/kernel/apic/apic_noop.c	 3
-rw-r--r--	arch/x86/kernel/apic/io_apic.c		31
-rw-r--r--	arch/x86/kernel/apic/x2apic_cluster.c	 6
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 08c337bc49ff..e145f28b4099 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -100,7 +100,8 @@ static unsigned long noop_check_apicid_present(int bit)
 	return physid_isset(bit, phys_cpu_present_map);
 }
 
-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
+					  const struct cpumask *mask)
 {
 	if (cpu != 0)
 		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8a08f09aa505..9684f963befe 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1113,7 +1113,6 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	 */
 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
 	static int current_offset = VECTOR_OFFSET_START % 16;
-	unsigned int old_vector;
 	int cpu, err;
 	cpumask_var_t tmp_mask;
@@ -1123,28 +1122,28 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
 		return -ENOMEM;
 
-	old_vector = cfg->vector;
-	if (old_vector) {
-		cpumask_and(tmp_mask, mask, cpu_online_mask);
-		if (cpumask_subset(tmp_mask, cfg->domain)) {
-			free_cpumask_var(tmp_mask);
-			return 0;
-		}
-	}
-
 	/* Only try and allocate irqs on cpus that are present */
 	err = -ENOSPC;
 	cpumask_clear(cfg->old_domain);
 	cpu = cpumask_first_and(mask, cpu_online_mask);
 	while (cpu < nr_cpu_ids) {
-		int new_cpu;
-		int vector, offset;
+		int new_cpu, vector, offset;
 
-		apic->vector_allocation_domain(cpu, tmp_mask);
+		apic->vector_allocation_domain(cpu, tmp_mask, mask);
 
 		if (cpumask_subset(tmp_mask, cfg->domain)) {
-			free_cpumask_var(tmp_mask);
-			return 0;
+			err = 0;
+			if (cpumask_equal(tmp_mask, cfg->domain))
+				break;
+			/*
+			 * New cpumask using the vector is a proper subset of
+			 * the current in use mask. So cleanup the vector
+			 * allocation for the members that are not used anymore.
+			 */
+			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
+			cfg->move_in_progress = 1;
+			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+			break;
 		}
 
 		vector = current_vector;
@@ -1172,7 +1171,7 @@ next:
 		/* Found one! */
 		current_vector = vector;
 		current_offset = offset;
-		if (old_vector) {
+		if (cfg->vector) {
 			cfg->move_in_progress = 1;
 			cpumask_copy(cfg->old_domain, cfg->domain);
 		}
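The subset-cleanup branch added to __assign_irq_vector() above can be modelled
with a small standalone sketch (plain C; the struct below is only an
illustrative stand-in for the relevant irq_cfg fields, and cpumasks are again
reduced to plain bit masks):

#include <stdio.h>

/* Illustrative stand-in for the relevant irq_cfg fields (not the kernel struct). */
struct cfg_model {
	unsigned long domain;		/* CPUs currently holding the vector */
	unsigned long old_domain;	/* CPUs queued for cleanup */
	int move_in_progress;
};

/*
 * If the newly requested domain is a proper subset of the current one, keep
 * the vector but release it on the members that are no longer needed.
 */
static void shrink_domain(struct cfg_model *cfg, unsigned long new_domain)
{
	if ((new_domain & ~cfg->domain) != 0)
		return;				/* not a subset: full reallocation path */
	if (new_domain == cfg->domain)
		return;				/* same domain: nothing to do */

	cfg->old_domain = cfg->domain & ~new_domain;	/* members to release */
	cfg->move_in_progress = 1;			/* trigger the cleanup path */
	cfg->domain &= new_domain;			/* keep only the used members */
}

int main(void)
{
	struct cfg_model cfg = { .domain = 0x0f, .old_domain = 0, .move_in_progress = 0 };

	shrink_domain(&cfg, 0x05);	/* user now asks for CPUs 0 and 2 only */
	printf("domain=0x%lx old_domain=0x%lx move=%d\n",
	       cfg.domain, cfg.old_domain, cfg.move_in_progress);
	return 0;
}

With domain 0x0f and a new mask of 0x05, the sketch leaves domain at 0x5,
records 0xa in old_domain for cleanup and sets move_in_progress, mirroring the
cpumask_andnot()/cpumask_and() pair in the hunk above.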
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index b5d889b5659a..bde78d0098a4 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -212,10 +212,10 @@ static int x2apic_cluster_probe(void)
 /*
  * Each x2apic cluster is an allocation domain.
  */
-static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
+					     const struct cpumask *mask)
 {
-	cpumask_clear(retmask);
-	cpumask_copy(retmask, per_cpu(cpus_in_cluster, cpu));
+	cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
 }
 
 static struct apic apic_x2apic_cluster = {
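For the cluster hook itself, a before/after comparison as a standalone sketch
(bit-mask model only; cpus_in_cluster here is a made-up constant standing in
for the kernel's per-cpu cpus_in_cluster mask):

#include <stdio.h>

typedef unsigned long cpumask_t;	/* bit n == CPU n */

/* Made-up cluster membership for the CPU that anchors the allocation. */
static const cpumask_t cpus_in_cluster = 0x0fUL;	/* CPUs 0-3 */

/* Before the patch: the whole cluster, whatever the caller asked for. */
static cpumask_t domain_old(const cpumask_t *mask)
{
	(void)mask;
	return cpus_in_cluster;
}

/* After the patch: only the cluster members named in the requested mask. */
static cpumask_t domain_new(const cpumask_t *mask)
{
	return cpus_in_cluster & *mask;
}

int main(void)
{
	cpumask_t mask = 0x03UL;	/* user asked for CPUs 0 and 1 */

	printf("old: 0x%lx  new: 0x%lx\n", domain_old(&mask), domain_new(&mask));
	return 0;
}

The old hook returned the whole cluster regardless of the request; the new one
returns only the requested cluster members, which is exactly what the
cpumask_and() in the hunk above does.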