author		Dave Martin <dave.martin@arm.com>	2018-03-26 17:12:28 +0300
committer	Will Deacon <will.deacon@arm.com>	2018-03-26 20:00:37 +0300
commit		c0cda3b8ee6b4b6851b2fd8b6db91fd7b0e2524a (patch)
tree		f5f1e50fa87d124d2c3a2f9bb16d6746fa940898 /arch/arm64/include
parent		5043694eb8270a19040fb798f6d6f1dbd86c7a3c (diff)
download	linux-c0cda3b8ee6b4b6851b2fd8b6db91fd7b0e2524a.tar.xz
arm64: capabilities: Update prototype for enable call back
We issue the enable() call back for all CPU hwcaps capabilities
available on the system, on all the CPUs. So far we have ignored
the argument passed to the call back, whose prototype accepted a
"void *" for use with on_each_cpu() and later with stop_machine().
However, since commit 0a0d111d40fd1
("arm64: cpufeature: Pass capability structure to ->enable callback"),
some users of the argument want the matching capability struct
pointer, since there can be multiple matching criteria for a single
capability. Clean up the declaration of the call back to make this
clear (a minimal sketch of the new shape follows the list below):
1) Rename it to cpu_enable(), to imply taking the necessary actions
   on the CPU it is called on for the entry.
2) Pass a const pointer to the capability, so the call back can
   inspect the entry (e.g. to check whether any action is needed on
   this CPU).
3) We don't care about the result of the call back, so turn its
   return type into void.
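
For illustration only (not part of the original commit message): a minimal,
freestanding C sketch of the new hook shape. The struct name, the desc field
and the example handler below are hypothetical stand-ins, not the kernel's
definitions; only the cpu_enable signature mirrors the change.

	#include <stdio.h>

	struct arm64_cpu_capabilities_sketch {
		const char *desc;
		/* New shape: per-CPU action, takes the matching entry, returns void. */
		void (*cpu_enable)(const struct arm64_cpu_capabilities_sketch *cap);
	};

	/* Hypothetical handler: it can inspect the entry it was matched against. */
	static void cpu_enable_example(const struct arm64_cpu_capabilities_sketch *cap)
	{
		printf("enabling '%s' on this CPU\n", cap->desc);
	}

	int main(void)
	{
		const struct arm64_cpu_capabilities_sketch cap = {
			.desc		= "example capability",
			.cpu_enable	= cpu_enable_example,
		};

		if (cap.cpu_enable)
			cap.cpu_enable(&cap);	/* no return value to check any more */
		return 0;
	}

Passing the entry lets one handler serve several capability entries and see
which one matched; dropping the return value reflects that callers never
acted on it.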
Cc: Will Deacon <will.deacon@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Andre Przywara <andre.przywara@arm.com>
Cc: James Morse <james.morse@arm.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Dave Martin <dave.martin@arm.com>
[suzuki: convert more users, rename call back and drop results]
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--	arch/arm64/include/asm/cpufeature.h	7
-rw-r--r--	arch/arm64/include/asm/fpsimd.h		4
-rw-r--r--	arch/arm64/include/asm/processor.h	7
3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index fbf0aab94d67..cba67cb24b22 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -100,7 +100,12 @@ struct arm64_cpu_capabilities {
 	u16 capability;
 	int def_scope;			/* default scope */
 	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
-	int (*enable)(void *);		/* Called on all active CPUs */
+	/*
+	 * Take the appropriate actions to enable this capability for this CPU.
+	 * For each successfully booted CPU, this method is called for each
+	 * globally detected capability.
+	 */
+	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 8857a0f0d0f7..7623762f7fa6 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -83,7 +83,9 @@ extern void sve_save_state(void *state, u32 *pfpsr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
			   unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
-extern int sve_kernel_enable(void *);
+
+struct arm64_cpu_capabilities;
+extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 
 extern int __ro_after_init sve_max_vl;
 
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fce604e3e599..4fc8867fde4d 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -37,6 +37,7 @@
 #include <linux/string.h>
 
 #include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/lse.h>
@@ -227,9 +228,9 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 #endif
 
-int cpu_enable_pan(void *__unused);
-int cpu_enable_cache_maint_trap(void *__unused);
-int cpu_clear_disr(void *__unused);
+void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
+void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
+void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
 
 /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
 #define SVE_SET_VL(arg)	sve_set_current_vl(arg)
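
For illustration only: a freestanding sketch of the calling convention described
by the new comment in cpufeature.h, i.e. each booted CPU invokes cpu_enable(cap)
for every globally detected capability. The capability table, the "detected"
flag and the CPU loop below are invented for this sketch and do not mirror the
kernel's cpufeature code.

	#include <stddef.h>
	#include <stdio.h>

	struct cap_sketch {
		const char *desc;
		int detected;
		void (*cpu_enable)(const struct cap_sketch *cap);
	};

	/* Hypothetical per-CPU action for one capability. */
	static void enable_pan_sketch(const struct cap_sketch *cap)
	{
		printf("  %s: set per-CPU control bits\n", cap->desc);
	}

	static const struct cap_sketch caps[] = {
		{ .desc = "PAN (sketch)",	.detected = 1, .cpu_enable = enable_pan_sketch },
		{ .desc = "no-op (sketch)",	.detected = 1, .cpu_enable = NULL },
	};

	/* Run on each booted CPU: call cpu_enable for every detected capability. */
	static void enable_caps_on_this_cpu(void)
	{
		for (size_t i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
			if (caps[i].detected && caps[i].cpu_enable)
				caps[i].cpu_enable(&caps[i]);
	}

	int main(void)
	{
		for (int cpu = 0; cpu < 2; cpu++) {	/* pretend two CPUs boot */
			printf("cpu%d:\n", cpu);
			enable_caps_on_this_cpu();
		}
		return 0;
	}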