author		Dou Liyang <douliyangs@gmail.com>	2018-12-04 18:51:20 +0300
committer	Thomas Gleixner <tglx@linutronix.de>	2018-12-19 13:32:08 +0300
commit		bec04037e4e484f41ee4d9409e40616874169d20 (patch)
tree		5fde4bc9e597250dbdac3a4c0741b13766da8d4e /kernel/irq/affinity.c
parent		c2899c3470de04823870b28646981695c0046efe (diff)
download	linux-bec04037e4e484f41ee4d9409e40616874169d20.tar.xz
genirq/core: Introduce struct irq_affinity_desc
The interrupt affinity management uses straight cpumask pointers to convey
the automatically assigned affinity masks for managed interrupts. The core
interrupt descriptor allocation also decides, based on the pointer being
non-NULL, whether an interrupt is managed or not.
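For reference, a paraphrased and abridged sketch of that pre-existing check
in alloc_descs() in kernel/irq/irqdesc.c (not part of this patch; variable
names trimmed for brevity):

	/* A non-NULL affinity pointer alone marks the interrupt managed */
	if (affinity) {
		flags = IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN;
		mask = affinity;
		affinity++;
	}
	desc = alloc_desc(start + i, node, flags, mask, owner);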
Devices which use managed interrupts usually have two classes of
interrupts:
- Interrupts for multiple device queues
- Interrupts for general device management
Currently both classes are treated the same way, i.e. as managed
interrupts. The general interrupts get the default affinity mask assigned
while the device queue interrupts are spread out over the possible CPUs.
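As an illustration (not part of this patch), a typical multiqueue driver
reserves its general/management vector via the pre_vectors member of
struct irq_affinity; pdev and nr_queues here are hypothetical:

	/* Sketch: one management vector, the rest spread over queues */
	struct irq_affinity affd = { .pre_vectors = 1 };

	nr = pci_alloc_irq_vectors_affinity(pdev, 2, nr_queues + 1,
					    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					    &affd);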
Treating the general interrupts as managed is both a limitation and under
certain circumstances a bug. Assume the following situation:
default_irq_affinity = 4..7
So if CPUs 4-7 are offlined, then the core code will shut down the device
management interrupts because the last CPU in their affinity mask went
offline.
It's also a limitation because it's desired to allow manual placement of
the general device interrupts for various reasons. If they are marked
managed then the interrupt affinity setting from both user and kernel space
is disabled.
To remedy that situation it's required to convey more information than the
cpumasks through various interfaces related to interrupt descriptor
allocation.
Instead of adding yet another argument, create a new data structure
'irq_affinity_desc' which for now just contains the cpumask. This struct
can be expanded to convey auxiliary information in the next step.
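Per the changelog, the new structure just wraps the cpumask for now; it
lands in include/linux/interrupt.h, a hunk outside the diffstat below,
which is limited to kernel/irq/affinity.c:

	struct irq_affinity_desc {
		struct cpumask	mask;
	};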
No functional change, just preparatory work.
[ tglx: Simplified logic and clarified changelog ]
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Suggested-by: Bjorn Helgaas <bhelgaas@google.com>
Signed-off-by: Dou Liyang <douliyangs@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-pci@vger.kernel.org
Cc: kashyap.desai@broadcom.com
Cc: shivasharan.srikanteshwara@broadcom.com
Cc: sumit.saxena@broadcom.com
Cc: ming.lei@redhat.com
Cc: hch@lst.de
Cc: douliyang1@huawei.com
Link: https://lkml.kernel.org/r/20181204155122.6327-2-douliyangs@gmail.com
Diffstat (limited to 'kernel/irq/affinity.c')
-rw-r--r--	kernel/irq/affinity.c	22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index e423bff1928c..c0fe591b0dc9 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -99,7 +99,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 				      cpumask_var_t *node_to_cpumask,
 				      const struct cpumask *cpu_mask,
 				      struct cpumask *nmsk,
-				      struct cpumask *masks)
+				      struct irq_affinity_desc *masks)
 {
 	int n, nodes, cpus_per_vec, extra_vecs, done = 0;
 	int last_affv = firstvec + numvecs;
@@ -117,7 +117,9 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 	 */
 	if (numvecs <= nodes) {
 		for_each_node_mask(n, nodemsk) {
-			cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
+			cpumask_or(&masks[curvec].mask,
+				   &masks[curvec].mask,
+				   node_to_cpumask[n]);
 			if (++curvec == last_affv)
 				curvec = firstvec;
 		}
@@ -150,7 +152,8 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 			cpus_per_vec++;
 			--extra_vecs;
 		}
-		irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
+		irq_spread_init_one(&masks[curvec].mask, nmsk,
+				    cpus_per_vec);
 	}
 
 	done += v;
@@ -173,7 +176,7 @@ out:
 static int irq_build_affinity_masks(const struct irq_affinity *affd,
 				    int startvec, int numvecs, int firstvec,
 				    cpumask_var_t *node_to_cpumask,
-				    struct cpumask *masks)
+				    struct irq_affinity_desc *masks)
 {
 	int curvec = startvec, nr_present, nr_others;
 	int ret = -ENOMEM;
@@ -226,15 +229,15 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
  * @nvecs:	The total number of vectors
  * @affd:	Description of the affinity requirements
  *
- * Returns the masks pointer or NULL if allocation failed.
+ * Returns the irq_affinity_desc pointer or NULL if allocation failed.
  */
-struct cpumask *
+struct irq_affinity_desc *
 irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
 	int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
 	int curvec, usedvecs;
 	cpumask_var_t *node_to_cpumask;
-	struct cpumask *masks = NULL;
+	struct irq_affinity_desc *masks = NULL;
 	int i, nr_sets;
 
 	/*
@@ -254,8 +257,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
 	/* Fill out vectors at the beginning that don't need affinity */
 	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
-		cpumask_copy(masks + curvec, irq_default_affinity);
-
+		cpumask_copy(&masks[curvec].mask, irq_default_affinity);
 	/*
 	 * Spread on present CPUs starting from affd->pre_vectors. If we
 	 * have multiple sets, build each sets affinity mask separately.
@@ -285,7 +287,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	else
 		curvec = affd->pre_vectors + usedvecs;
 	for (; curvec < nvecs; curvec++)
-		cpumask_copy(masks + curvec, irq_default_affinity);
+		cpumask_copy(&masks[curvec].mask, irq_default_affinity);
 
 outnodemsk:
 	free_node_to_cpumask(node_to_cpumask);
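For callers of irq_create_affinity_masks(), the visible change is the
return type; a minimal sketch of the adjusted consumption pattern
(hypothetical caller, not taken from this patch):

	struct irq_affinity_desc *masks;

	masks = irq_create_affinity_masks(nvecs, affd);
	if (masks) {
		/* Previously: cpumask_copy(dst, masks + i) */
		cpumask_copy(dst, &masks[i].mask);
		kfree(masks);
	}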