Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/hw_breakpoint.c |  13
-rw-r--r--   arch/arm/kernel/patch.c         |   2
-rw-r--r--   arch/arm/kernel/smp.c           |   3
-rw-r--r--   arch/arm/kernel/smp_twd.c       |   6
-rw-r--r--   arch/arm/kernel/time.c          |   2
-rw-r--r--   arch/arm/kernel/topology.c      | 221
6 files changed, 19 insertions, 228 deletions
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index be3b3fbd382f..af2a7f1e3103 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1090,7 +1090,7 @@ static int __init arch_hw_breakpoint_init(void)
 	 * driven low on this core and there isn't an architected way to
 	 * determine that.
 	 */
-	get_online_cpus();
+	cpus_read_lock();
 	register_undef_hook(&debug_reg_hook);
 
 	/*
@@ -1098,15 +1098,16 @@ static int __init arch_hw_breakpoint_init(void)
 	 * assume that a halting debugger will leave the world in a nice state
 	 * for us.
	 */
-	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
-				dbg_reset_online, NULL);
+	ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
+					   "arm/hw_breakpoint:online",
+					   dbg_reset_online, NULL);
 	unregister_undef_hook(&debug_reg_hook);
 	if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
 		core_num_brps = 0;
 		core_num_wrps = 0;
 		if (ret > 0)
-			cpuhp_remove_state_nocalls(ret);
-		put_online_cpus();
+			cpuhp_remove_state_nocalls_cpuslocked(ret);
+		cpus_read_unlock();
 		return 0;
 	}
 
@@ -1124,7 +1125,7 @@ static int __init arch_hw_breakpoint_init(void)
 			TRAP_HWBKPT, "watchpoint debug exception");
 	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
 			TRAP_HWBKPT, "breakpoint debug exception");
-	put_online_cpus();
+	cpus_read_unlock();
 
 	/* Register PM notifiers. */
 	pm_init();
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 020560b2dcb7..a1a34722c655 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -124,5 +124,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
 		.insn = insn,
 	};
 
-	stop_machine(patch_text_stop_machine, &patch, NULL);
+	stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
 }
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 572a8df1b766..c9a0a5299827 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -555,8 +555,7 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
  */
 static void ipi_cpu_stop(unsigned int cpu)
 {
-	if (system_state == SYSTEM_BOOTING ||
-	    system_state == SYSTEM_RUNNING) {
+	if (system_state <= SYSTEM_RUNNING) {
 		raw_spin_lock(&stop_lock);
 		pr_crit("CPU%u: stopping\n", cpu);
 		dump_stack();
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 895ae5197159..b30eafeef096 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -403,7 +403,7 @@ out:
 	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
 	return err;
 }
-CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
-CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
-CLOCKSOURCE_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
+TIMER_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
+TIMER_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
+TIMER_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
 #endif
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 97b22fa7cb3a..629f8e9981f1 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -120,6 +120,6 @@ void __init time_init(void)
 #ifdef CONFIG_COMMON_CLK
 		of_clk_init(NULL);
 #endif
-		clocksource_probe();
+		timer_probe();
 	}
 }
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index f8a3ab82e77f..bf949a763dbe 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -11,6 +11,7 @@
  * for more details.
  */
 
+#include <linux/arch_topology.h>
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
@@ -44,77 +45,6 @@
  * to run the rebalance_domains for all idle cores and the cpu_capacity can be
  * updated during this sequence.
  */
-static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-static DEFINE_MUTEX(cpu_scale_mutex);
-
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	return per_cpu(cpu_scale, cpu);
-}
-
-static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
-{
-	per_cpu(cpu_scale, cpu) = capacity;
-}
-
-#ifdef CONFIG_PROC_SYSCTL
-static ssize_t cpu_capacity_show(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-
-	return sprintf(buf, "%lu\n",
-		       arch_scale_cpu_capacity(NULL, cpu->dev.id));
-}
-
-static ssize_t cpu_capacity_store(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf,
-				  size_t count)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	int this_cpu = cpu->dev.id, i;
-	unsigned long new_capacity;
-	ssize_t ret;
-
-	if (count) {
-		ret = kstrtoul(buf, 0, &new_capacity);
-		if (ret)
-			return ret;
-		if (new_capacity > SCHED_CAPACITY_SCALE)
-			return -EINVAL;
-
-		mutex_lock(&cpu_scale_mutex);
-		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
-			set_capacity_scale(i, new_capacity);
-		mutex_unlock(&cpu_scale_mutex);
-	}
-
-	return count;
-}
-
-static DEVICE_ATTR_RW(cpu_capacity);
-
-static int register_cpu_capacity_sysctl(void)
-{
-	int i;
-	struct device *cpu;
-
-	for_each_possible_cpu(i) {
-		cpu = get_cpu_device(i);
-		if (!cpu) {
-			pr_err("%s: too early to get CPU%d device!\n",
-			       __func__, i);
-			continue;
-		}
-		device_create_file(cpu, &dev_attr_cpu_capacity);
-	}
-
-	return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-#endif
 
 #ifdef CONFIG_OF
 struct cpu_efficiency {
@@ -143,145 +73,6 @@
 static unsigned long *__cpu_capacity;
 static unsigned long middle_capacity = 1;
 static bool cap_from_dt = true;
-static u32 *raw_capacity;
-static bool cap_parsing_failed;
-static u32 capacity_scale;
-
-static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
-{
-	int ret = 1;
-	u32 cpu_capacity;
-
-	if (cap_parsing_failed)
-		return !ret;
-
-	ret = of_property_read_u32(cpu_node,
-				   "capacity-dmips-mhz",
-				   &cpu_capacity);
-	if (!ret) {
-		if (!raw_capacity) {
-			raw_capacity = kcalloc(num_possible_cpus(),
-					       sizeof(*raw_capacity),
-					       GFP_KERNEL);
-			if (!raw_capacity) {
-				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
-				cap_parsing_failed = true;
-				return !ret;
-			}
-		}
-		capacity_scale = max(cpu_capacity, capacity_scale);
-		raw_capacity[cpu] = cpu_capacity;
-		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
-			 cpu_node->full_name, raw_capacity[cpu]);
-	} else {
-		if (raw_capacity) {
-			pr_err("cpu_capacity: missing %s raw capacity\n",
-			       cpu_node->full_name);
-			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
-		}
-		cap_parsing_failed = true;
-		kfree(raw_capacity);
-	}
-
-	return !ret;
-}
-
-static void normalize_cpu_capacity(void)
-{
-	u64 capacity;
-	int cpu;
-
-	if (!raw_capacity || cap_parsing_failed)
-		return;
-
-	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
-	mutex_lock(&cpu_scale_mutex);
-	for_each_possible_cpu(cpu) {
-		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
-			/ capacity_scale;
-		set_capacity_scale(cpu, capacity);
-		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
-			 cpu, arch_scale_cpu_capacity(NULL, cpu));
-	}
-	mutex_unlock(&cpu_scale_mutex);
-}
-
-#ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static bool cap_parsing_done;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
-
-static int
-init_cpu_capacity_callback(struct notifier_block *nb,
-			   unsigned long val,
-			   void *data)
-{
-	struct cpufreq_policy *policy = data;
-	int cpu;
-
-	if (cap_parsing_failed || cap_parsing_done)
-		return 0;
-
-	switch (val) {
-	case CPUFREQ_NOTIFY:
-		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
-			 cpumask_pr_args(policy->related_cpus),
-			 cpumask_pr_args(cpus_to_visit));
-		cpumask_andnot(cpus_to_visit,
-			       cpus_to_visit,
-			       policy->related_cpus);
-		for_each_cpu(cpu, policy->related_cpus) {
-			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
-					    policy->cpuinfo.max_freq / 1000UL;
-			capacity_scale = max(raw_capacity[cpu], capacity_scale);
-		}
-		if (cpumask_empty(cpus_to_visit)) {
-			normalize_cpu_capacity();
-			kfree(raw_capacity);
-			pr_debug("cpu_capacity: parsing done\n");
-			cap_parsing_done = true;
-			schedule_work(&parsing_done_work);
-		}
-	}
-	return 0;
-}
-
-static struct notifier_block init_cpu_capacity_notifier = {
-	.notifier_call = init_cpu_capacity_callback,
-};
-
-static int __init register_cpufreq_notifier(void)
-{
-	if (cap_parsing_failed)
-		return -EINVAL;
-
-	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
-		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
-		return -ENOMEM;
-	}
-	cpumask_copy(cpus_to_visit, cpu_possible_mask);
-
-	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
-					 CPUFREQ_POLICY_NOTIFIER);
-}
-core_initcall(register_cpufreq_notifier);
-
-static void parsing_done_workfn(struct work_struct *work)
-{
-	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
-				    CPUFREQ_POLICY_NOTIFIER);
-}
-
-#else
-static int __init free_raw_capacity(void)
-{
-	kfree(raw_capacity);
-
-	return 0;
-}
-core_initcall(free_raw_capacity);
-#endif
 
 /*
  * Iterate all CPUs' descriptor in DT and compute the efficiency
@@ -320,7 +111,7 @@ static void __init parse_dt_topology(void)
 			continue;
 		}
 
-		if (parse_cpu_capacity(cn, cpu)) {
+		if (topology_parse_cpu_capacity(cn, cpu)) {
 			of_node_put(cn);
 			continue;
 		}
@@ -368,8 +159,8 @@ static void __init parse_dt_topology(void)
 		middle_capacity = ((max_capacity / 3)
 				>> (SCHED_CAPACITY_SHIFT-1)) + 1;
 
-	if (cap_from_dt && !cap_parsing_failed)
-		normalize_cpu_capacity();
+	if (cap_from_dt)
+		topology_normalize_cpu_scale();
 }
 
 /*
@@ -382,10 +173,10 @@ static void update_cpu_capacity(unsigned int cpu)
 	if (!cpu_capacity(cpu) || cap_from_dt)
 		return;
 
-	set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
 	pr_info("CPU%u: update cpu_capacity %lu\n",
-		cpu, arch_scale_cpu_capacity(NULL, cpu));
+		cpu, topology_get_cpu_scale(NULL, cpu));
 }
 
 #else
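
The hw_breakpoint.c and patch.c hunks above belong to the tree-wide CPU-hotplug locking rework: the implicit get_online_cpus()/put_online_cpus() pair is replaced by the explicit cpus_read_lock()/cpus_read_unlock() API, and callers that already hold the lock switch to the _cpuslocked variants. A minimal sketch of that pattern follows; the example_cpu_online callback, example_init function, and "example:online" state name are hypothetical and not from this diff:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
	/* per-CPU bring-up work would go here */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	cpus_read_lock();	/* was: get_online_cpus() */

	/* the _cpuslocked variant expects the hotplug lock to already be held */
	ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
					   "example:online",
					   example_cpu_online, NULL);

	cpus_read_unlock();	/* was: put_online_cpus() */

	return ret < 0 ? ret : 0;
}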