author	Vikas Shivappa <vikas.shivappa@linux.intel.com>	2017-07-26 00:14:44 +0300
committer	Thomas Gleixner <tglx@linutronix.de>	2017-08-01 23:41:28 +0300
commit	895c663ecef16c8138e20a7d5c052e0fcc400241 (patch)
tree	73db0eee0642e938a2eb90e7456542d8354bedf9 /arch/x86/kernel/cpu/intel_rdt.c
parent	748b6b881ccdda8f0663c68605f431279e06f49a (diff)
download	linux-895c663ecef16c8138e20a7d5c052e0fcc400241.tar.xz
x86/intel_rdt/cqm: Add CPU hotplug support
Resource groups have a per domain directory under "mon_data". Add or
remove these directories as and when domains come online and go offline.
Also update the per cpu rmids and cache upon onlining and offlining cpus.

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: davidcc@google.com
Cc: reinette.chatre@intel.com
Link: http://lkml.kernel.org/r/1501017287-28083-26-git-send-email-vikas.shivappa@linux.intel.com
Diffstat (limited to 'arch/x86/kernel/cpu/intel_rdt.c')
-rw-r--r--	arch/x86/kernel/cpu/intel_rdt.c	41
1 file changed, 34 insertions(+), 7 deletions(-)
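
For context (not part of this patch): intel_rdt_online_cpu() and intel_rdt_offline_cpu(), which the hunks below extend, are the CPU hotplug callbacks that intel_rdt.c registers once at init time via cpuhp_setup_state(). A rough sketch of that registration is shown here; the dynamic state name is illustrative and error handling is trimmed:

/* Sketch of the hotplug registration done in the init path of intel_rdt.c */
static int __init rdt_hotplug_sketch(void)
{
	int state;

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/rdt/cat:online:",
				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
	if (state < 0)
		return state;

	return 0;
}

Both callbacks take rdtgroup_mutex themselves, as the hunks below show, so the hotplug path serializes with the resctrl filesystem code on the same lock.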
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 050ea15259ea..f12bb91b8c66 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -479,6 +479,13 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
 
 	cpumask_set_cpu(cpu, &d->cpu_mask);
 	list_add_tail(&d->list, add_pos);
+
+	/*
+	 * If resctrl is mounted, add
+	 * per domain monitor data directories.
+	 */
+	if (static_branch_unlikely(&rdt_mon_enable_key))
+		mkdir_mondata_subdir_allrdtgrp(r, d);
 }
 
 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
@@ -494,6 +501,12 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 	cpumask_clear_cpu(cpu, &d->cpu_mask);
 	if (cpumask_empty(&d->cpu_mask)) {
+		/*
+		 * If resctrl is mounted, remove all the
+		 * per domain monitor data directories.
+		 */
+		if (static_branch_unlikely(&rdt_mon_enable_key))
+			rmdir_mondata_subdir_allrdtgrp(r, d->id);
 		kfree(d->ctrl_val);
 		kfree(d->rmid_busy_llc);
 		list_del(&d->list);
@@ -501,13 +514,14 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 	}
 }
 
-static void clear_closid(int cpu)
+static void clear_closid_rmid(int cpu)
 {
 	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	per_cpu(rdt_cpu_default.closid, cpu) = 0;
 	state->closid = 0;
-	wrmsr(IA32_PQR_ASSOC, state->rmid, 0);
+	state->rmid = 0;
+	wrmsr(IA32_PQR_ASSOC, 0, 0);
 }
 
 static int intel_rdt_online_cpu(unsigned int cpu)
@@ -515,29 +529,42 @@ static int intel_rdt_online_cpu(unsigned int cpu)
 	struct rdt_resource *r;
 
 	mutex_lock(&rdtgroup_mutex);
-	for_each_alloc_capable_rdt_resource(r)
+	for_each_capable_rdt_resource(r)
 		domain_add_cpu(cpu, r);
 	/* The cpu is set in default rdtgroup after online. */
 	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
-	clear_closid(cpu);
+	clear_closid_rmid(cpu);
 	mutex_unlock(&rdtgroup_mutex);
 
 	return 0;
 }
 
+static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
+{
+	struct rdtgroup *cr;
+
+	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
+		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
+			break;
+		}
+	}
+}
+
 static int intel_rdt_offline_cpu(unsigned int cpu)
 {
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
 
 	mutex_lock(&rdtgroup_mutex);
-	for_each_alloc_capable_rdt_resource(r)
+	for_each_capable_rdt_resource(r)
 		domain_remove_cpu(cpu, r);
 	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
-		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
+		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
+			clear_childcpus(rdtgrp, cpu);
 			break;
+		}
 	}
-	clear_closid(cpu);
+	clear_closid_rmid(cpu);
 	mutex_unlock(&rdtgroup_mutex);
 
 	return 0;
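
A note on the clear_closid_rmid() change above (explanatory, not part of the commit): IA32_PQR_ASSOC holds the active RMID in its low 32 bits and the active CLOSID in its high 32 bits, and wrmsr(msr, lo, hi) writes the two halves separately, so wrmsr(IA32_PQR_ASSOC, 0, 0) resets both the monitoring and the allocation association, where the old wrmsr(IA32_PQR_ASSOC, state->rmid, 0) re-wrote whatever RMID happened to be cached. The cached copy in struct intel_pqr_state matters because the context-switch path compares against it to avoid redundant MSR writes; a simplified sketch of that pattern follows, with the helper name pqr_update() being hypothetical:

/*
 * Simplified illustration of why both the cached fields and the MSR are
 * cleared: the fast path only writes IA32_PQR_ASSOC when the cached
 * closid/rmid differ from the requested ones, so a stale cache after a
 * CPU comes back online could wrongly suppress the write.
 */
static inline void pqr_update(u32 closid, u32 rmid)
{
	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);

	if (state->closid == closid && state->rmid == rmid)
		return;				/* nothing changed, skip the wrmsr */

	state->closid = closid;
	state->rmid = rmid;
	wrmsr(IA32_PQR_ASSOC, rmid, closid);	/* low half: RMID, high half: CLOSID */
}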