Diffstat (limited to 'arch')
 -rw-r--r--  arch/arm/kernel/traps.c                              | 28
 -rw-r--r--  arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S     | 12
 -rw-r--r--  arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 12
 -rw-r--r--  arch/x86/kernel/cpu/Makefile                         |  2
 -rw-r--r--  arch/x86/kernel/cpu/aperfmperf.c                     | 11
 -rw-r--r--  arch/x86/kernel/cpu/proc.c                           |  4
 -rw-r--r--  arch/x86/mm/mem_encrypt.c                            |  2
 7 files changed, 37 insertions(+), 34 deletions(-)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 948c648fea00..0fcd82f01388 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
set_fs(fs);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * Note that we now dump the code first, just in case the backtrace
+ * kills us.
*/
- fs = get_fs();
- set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
- bad = __get_user(val, &((u16 *)addr)[i]);
+ bad = get_user(val, &((u16 *)addr)[i]);
else
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -188,8 +184,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ mm_segment_t fs;
+
+ if (!user_mode(regs)) {
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 93b945597ecf..7cfba738f104 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
@@ -157,8 +157,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -178,8 +178,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index 8fe6338bcc84..16c4ccb1f154 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -155,8 +155,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -176,8 +176,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 236999c54edc..c60922a66385 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -22,7 +22,7 @@ obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
-obj-y += aperfmperf.o
+obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 957813e0180d..0ee83321a313 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -42,6 +42,10 @@ static void aperfmperf_snapshot_khz(void *dummy)
s64 time_delta = ktime_ms_delta(now, s->time);
unsigned long flags;
+ /* Don't bother re-computing within the cache threshold time. */
+ if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+ return;
+
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
@@ -70,7 +74,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
unsigned int arch_freq_get_on_cpu(int cpu)
{
- s64 time_delta;
unsigned int khz;
if (!cpu_khz)
@@ -79,12 +82,6 @@ unsigned int arch_freq_get_on_cpu(int cpu)
if (!static_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
- /* Don't bother re-computing within the cache threshold time. */
- time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
- khz = per_cpu(samples.khz, cpu);
- if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
- return khz;
-
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
khz = per_cpu(samples.khz, cpu);
if (khz)
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 4378a729b933..6b7e17bf0b71 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -78,11 +78,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
if (cpu_has(c, X86_FEATURE_TSC)) {
- unsigned int freq = arch_freq_get_on_cpu(cpu);
+ unsigned int freq = cpufreq_quick_get(cpu);
if (!freq)
- freq = cpufreq_quick_get(cpu);
- if (!freq)
freq = cpu_khz;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
freq / 1000, (freq % 1000));
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 16c5f37933a2..0286327e65fa 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -40,7 +40,7 @@ static char sme_cmdline_off[] __initdata = "off";
* section is later cleared.
*/
u64 sme_me_mask __section(.data) = 0;
-EXPORT_SYMBOL_GPL(sme_me_mask);
+EXPORT_SYMBOL(sme_me_mask);
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);