Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/cpuidle.c          2
-rw-r--r--  arch/arm/kernel/devtree.c         14
-rw-r--r--  arch/arm/kernel/ftrace.c           2
-rw-r--r--  arch/arm/kernel/head-nommu.S      16
-rw-r--r--  arch/arm/kernel/hyp-stub.S        13
-rw-r--r--  arch/arm/kernel/module-plts.c    243
-rw-r--r--  arch/arm/kernel/module.lds         3
-rw-r--r--  arch/arm/kernel/perf_event_v7.c   47
-rw-r--r--  arch/arm/kernel/setup.c           26
-rw-r--r--  arch/arm/kernel/smp.c              2
-rw-r--r--  arch/arm/kernel/vdso.c            13
11 files changed, 234 insertions, 147 deletions
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 7dccc964d75f..a3308ad1a024 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
__used __section(__cpuidle_method_of_table_end);
-static struct cpuidle_ops cpuidle_ops[NR_CPUS];
+static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
/**
* arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 40ecd5f514a2..f676febbb270 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -88,6 +88,8 @@ void __init arm_dt_init_cpu_maps(void)
return;
for_each_child_of_node(cpus, cpu) {
+ const __be32 *cell;
+ int prop_bytes;
u32 hwid;
if (of_node_cmp(cpu->type, "cpu"))
@@ -99,7 +101,8 @@ void __init arm_dt_init_cpu_maps(void)
* properties is considered invalid to build the
* cpu_logical_map.
*/
- if (of_property_read_u32(cpu, "reg", &hwid)) {
+ cell = of_get_property(cpu, "reg", &prop_bytes);
+ if (!cell || prop_bytes < sizeof(*cell)) {
pr_debug(" * %s missing reg property\n",
cpu->full_name);
of_node_put(cpu);
@@ -107,10 +110,15 @@ void __init arm_dt_init_cpu_maps(void)
}
/*
- * 8 MSBs must be set to 0 in the DT since the reg property
+ * Bits n:24 must be set to 0 in the DT since the reg property
* defines the MPIDR[23:0].
*/
- if (hwid & ~MPIDR_HWID_BITMASK) {
+ do {
+ hwid = be32_to_cpu(*cell++);
+ prop_bytes -= sizeof(*cell);
+ } while (!hwid && prop_bytes > 0);
+
+ if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) {
of_node_put(cpu);
return;
}
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 709ee1d6d4df..3f1759411d51 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -218,7 +218,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
}
err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer);
+ frame_pointer, NULL);
if (err == -EBUSY) {
*parent = old;
return;
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index fb1a69eb49c1..6b4eb27b8758 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -158,7 +158,21 @@ __after_proc_init:
bic r0, r0, #CR_V
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
-#endif /* CONFIG_CPU_CP15 */
+#elif defined (CONFIG_CPU_V7M)
+ /* For V7M systems we want to modify the CCR similarly to the SCTLR */
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+ bic r0, r0, #V7M_SCB_CCR_DC
+#endif
+#ifdef CONFIG_CPU_BPREDICT_DISABLE
+ bic r0, r0, #V7M_SCB_CCR_BP
+#endif
+#ifdef CONFIG_CPU_ICACHE_DISABLE
+ bic r0, r0, #V7M_SCB_CCR_IC
+#endif
+ movw r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
+ movt r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
+ str r0, [r3]
+#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
.ltorg
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 0b1e4a93d67e..15d073ae5da2 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -142,6 +142,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
and r7, #0x1f @ Preserve HPMN
mcr p15, 4, r7, c1, c1, 1 @ HDCR
+ @ Make sure NS-SVC is initialised appropriately
+ mrc p15, 0, r7, c1, c0, 0 @ SCTLR
+ orr r7, #(1 << 5) @ CP15 barriers enabled
+ bic r7, #(3 << 7) @ Clear SED/ITD for v8 (RES0 for v7)
+ bic r7, #(3 << 19) @ WXN and UWXN disabled
+ mcr p15, 0, r7, c1, c0, 0 @ SCTLR
+
+ mrc p15, 0, r7, c0, c0, 0 @ MIDR
+ mcr p15, 4, r7, c0, c0, 0 @ VPIDR
+
+ mrc p15, 0, r7, c0, c0, 5 @ MPIDR
+ mcr p15, 4, r7, c0, c0, 5 @ VMPIDR
+
#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
@ make CNTP_* and CNTPCT accessible from PL1
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 0c7efc3446c0..3a5cba90c971 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -9,6 +9,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sort.h>
#include <asm/cache.h>
#include <asm/opcodes.h>
@@ -30,154 +31,198 @@ struct plt_entries {
u32 lit[PLT_ENT_COUNT];
};
-static bool in_init(const struct module *mod, u32 addr)
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
- return addr - (u32)mod->init_layout.base < mod->init_layout.size;
+ struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+ int idx = 0;
+
+ /*
+ * Look for an existing entry pointing to 'val'. Given that the
+ * relocations are sorted, this will be the last entry we allocated.
+ * (if one exists).
+ */
+ if (mod->arch.plt_count > 0) {
+ plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
+ idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+
+ if (plt->lit[idx] == val)
+ return (u32)&plt->ldr[idx];
+
+ idx = (idx + 1) % PLT_ENT_COUNT;
+ if (!idx)
+ plt++;
+ }
+
+ mod->arch.plt_count++;
+ BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+
+ if (!idx)
+ /* Populate a new set of entries */
+ *plt = (struct plt_entries){
+ { [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
+ { val, }
+ };
+ else
+ plt->lit[idx] = val;
+
+ return (u32)&plt->ldr[idx];
}
-u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rel(const void *a, const void *b)
{
- struct plt_entries *plt, *plt_end;
- int c, *count;
-
- if (in_init(mod, loc)) {
- plt = (void *)mod->arch.init_plt->sh_addr;
- plt_end = (void *)plt + mod->arch.init_plt->sh_size;
- count = &mod->arch.init_plt_count;
- } else {
- plt = (void *)mod->arch.core_plt->sh_addr;
- plt_end = (void *)plt + mod->arch.core_plt->sh_size;
- count = &mod->arch.core_plt_count;
- }
+ const Elf32_Rel *x = a, *y = b;
+ int i;
- /* Look for an existing entry pointing to 'val' */
- for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
- int i;
-
- if (!c) {
- /* Populate a new set of entries */
- *plt = (struct plt_entries){
- { [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
- { val, }
- };
- ++*count;
- return (u32)plt->ldr;
- }
- for (i = 0; i < PLT_ENT_COUNT; i++) {
- if (!plt->lit[i]) {
- plt->lit[i] = val;
- ++*count;
- }
- if (plt->lit[i] == val)
- return (u32)&plt->ldr[i];
- }
+ /* sort by type and symbol index */
+ i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
+ if (i == 0)
+ i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
+ return i;
+}
+
+static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
+{
+ u32 *tval = (u32 *)(base + rel->r_offset);
+
+ /*
+ * Do a bitwise compare on the raw addend rather than fully decoding
+ * the offset and doing an arithmetic comparison.
+ * Note that a zero-addend jump/call relocation is encoded taking the
+ * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
+ */
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ u16 upper, lower;
+
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
+ lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);
+
+ return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;
+
+ case R_ARM_CALL:
+ case R_ARM_PC24:
+ case R_ARM_JUMP24:
+ return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
}
BUG();
}
-static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
- u32 mask)
+static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
- u32 *loc1, *loc2;
- int i;
+ const Elf32_Rel *prev;
- for (i = 0; i < num; i++) {
- if (rel[i].r_info != rel[num].r_info)
- continue;
+ /*
+ * Entries are sorted by type and symbol index. That means that,
+ * if a duplicate entry exists, it must be in the preceding
+ * slot.
+ */
+ if (!num)
+ return false;
- /*
- * Identical relocation types against identical symbols can
- * still result in different PLT entries if the addend in the
- * place is different. So resolve the target of the relocation
- * to compare the values.
- */
- loc1 = (u32 *)(base + rel[i].r_offset);
- loc2 = (u32 *)(base + rel[num].r_offset);
- if (((*loc1 ^ *loc2) & mask) == 0)
- return 1;
- }
- return 0;
+ prev = rel + num - 1;
+ return cmp_rel(rel + num, prev) == 0 &&
+ is_zero_addend_relocation(base, prev);
}
/* Count how many PLT entries we may need */
-static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
+static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
+ const Elf32_Rel *rel, int num)
{
unsigned int ret = 0;
+ const Elf32_Sym *s;
int i;
- /*
- * Sure, this is order(n^2), but it's usually short, and not
- * time critical
- */
- for (i = 0; i < num; i++)
+ for (i = 0; i < num; i++) {
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_ARM_CALL:
case R_ARM_PC24:
case R_ARM_JUMP24:
- if (!duplicate_rel(base, rel, i,
- __opcode_to_mem_arm(0x00ffffff)))
- ret++;
- break;
-#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
- if (!duplicate_rel(base, rel, i,
- __opcode_to_mem_thumb32(0x07ff2fff)))
+ /*
+ * We only have to consider branch targets that resolve
+ * to undefined symbols. This is not simply a heuristic,
+ * it is a fundamental limitation, since the PLT itself
+ * is part of the module, and needs to be within range
+ * as well, so modules can never grow beyond that limit.
+ */
+ s = syms + ELF32_R_SYM(rel[i].r_info);
+ if (s->st_shndx != SHN_UNDEF)
+ break;
+
+ /*
+ * Jump relocations with non-zero addends against
+ * undefined symbols are supported by the ELF spec, but
+ * do not occur in practice (e.g., 'jump n bytes past
+ * the entry point of undefined function symbol f').
+ * So we need to support them, but there is no need to
+ * take them into consideration when trying to optimize
+ * this code. So let's only check for duplicates when
+ * the addend is zero.
+ */
+ if (!is_zero_addend_relocation(base, rel + i) ||
+ !duplicate_rel(base, rel, i))
ret++;
-#endif
}
+ }
return ret;
}
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
- unsigned long core_plts = 0, init_plts = 0;
+ unsigned long plts = 0;
Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
+ Elf32_Sym *syms = NULL;
/*
* To store the PLTs, we expand the .text section for core module code
- * and the .init.text section for initialization code.
+ * and for initialization code.
*/
- for (s = sechdrs; s < sechdrs_end; ++s)
- if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
- mod->arch.core_plt = s;
- else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
- mod->arch.init_plt = s;
-
- if (!mod->arch.core_plt || !mod->arch.init_plt) {
- pr_err("%s: sections missing\n", mod->name);
+ for (s = sechdrs; s < sechdrs_end; ++s) {
+ if (strcmp(".plt", secstrings + s->sh_name) == 0)
+ mod->arch.plt = s;
+ else if (s->sh_type == SHT_SYMTAB)
+ syms = (Elf32_Sym *)s->sh_addr;
+ }
+
+ if (!mod->arch.plt) {
+ pr_err("%s: module PLT section missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!syms) {
+ pr_err("%s: module symtab section missing\n", mod->name);
return -ENOEXEC;
}
for (s = sechdrs + 1; s < sechdrs_end; ++s) {
- const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
+ Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
int numrels = s->sh_size / sizeof(Elf32_Rel);
Elf32_Shdr *dstsec = sechdrs + s->sh_info;
if (s->sh_type != SHT_REL)
continue;
- if (strstr(secstrings + s->sh_name, ".init"))
- init_plts += count_plts(dstsec->sh_addr, rels, numrels);
- else
- core_plts += count_plts(dstsec->sh_addr, rels, numrels);
+ /* ignore relocations that operate on non-exec sections */
+ if (!(dstsec->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ /* sort by type and symbol index */
+ sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
+
+ plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
}
- mod->arch.core_plt->sh_type = SHT_NOBITS;
- mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
- sizeof(struct plt_entries));
- mod->arch.core_plt_count = 0;
-
- mod->arch.init_plt->sh_type = SHT_NOBITS;
- mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
- sizeof(struct plt_entries));
- mod->arch.init_plt_count = 0;
- pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
- mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
+ mod->arch.plt->sh_type = SHT_NOBITS;
+ mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
+ sizeof(struct plt_entries));
+ mod->arch.plt_count = 0;
+
+ pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
return 0;
}
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
index 3682fa107918..05881e2b414c 100644
--- a/arch/arm/kernel/module.lds
+++ b/arch/arm/kernel/module.lds
@@ -1,4 +1,3 @@
SECTIONS {
- .core.plt : { BYTE(0) }
- .init.plt : { BYTE(0) }
+ .plt : { BYTE(0) }
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 15063851cd10..b9423491b9d7 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -596,12 +596,6 @@ static struct attribute_group armv7_pmuv1_events_attr_group = {
.attrs = armv7_pmuv1_event_attrs,
};
-static const struct attribute_group *armv7_pmuv1_attr_groups[] = {
- &armv7_pmuv1_events_attr_group,
- &armv7_pmu_format_attr_group,
- NULL,
-};
-
ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
@@ -653,12 +647,6 @@ static struct attribute_group armv7_pmuv2_events_attr_group = {
.attrs = armv7_pmuv2_event_attrs,
};
-static const struct attribute_group *armv7_pmuv2_attr_groups[] = {
- &armv7_pmuv2_events_attr_group,
- &armv7_pmu_format_attr_group,
- NULL,
-};
-
/*
* Perf Events' indices
*/
@@ -1208,7 +1196,10 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "armv7_cortex_a8";
cpu_pmu->map_event = armv7_a8_map_event;
- cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv1_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
return armv7_probe_num_events(cpu_pmu);
}
@@ -1217,7 +1208,10 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "armv7_cortex_a9";
cpu_pmu->map_event = armv7_a9_map_event;
- cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv1_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
return armv7_probe_num_events(cpu_pmu);
}
@@ -1226,7 +1220,10 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
armv7pmu_init(cpu_pmu);
cpu_pmu->name = "armv7_cortex_a5";
cpu_pmu->map_event = armv7_a5_map_event;
- cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv1_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
return armv7_probe_num_events(cpu_pmu);
}
@@ -1236,7 +1233,10 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->name = "armv7_cortex_a15";
cpu_pmu->map_event = armv7_a15_map_event;
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv2_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
return armv7_probe_num_events(cpu_pmu);
}
@@ -1246,7 +1246,10 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->name = "armv7_cortex_a7";
cpu_pmu->map_event = armv7_a7_map_event;
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv2_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
return armv7_probe_num_events(cpu_pmu);
}
@@ -1256,7 +1259,10 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->name = "armv7_cortex_a12";
cpu_pmu->map_event = armv7_a12_map_event;
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
- cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv2_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
return armv7_probe_num_events(cpu_pmu);
}
@@ -1264,7 +1270,10 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
int ret = armv7_a12_pmu_init(cpu_pmu);
cpu_pmu->name = "armv7_cortex_a17";
- cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv2_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
return ret;
}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index df7f2a75e769..34e3f3c45634 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -114,19 +114,19 @@ EXPORT_SYMBOL(elf_hwcap2);
#ifdef MULTI_CPU
-struct processor processor __read_mostly;
+struct processor processor __ro_after_init;
#endif
#ifdef MULTI_TLB
-struct cpu_tlb_fns cpu_tlb __read_mostly;
+struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
-struct cpu_user_fns cpu_user __read_mostly;
+struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
-struct cpu_cache_fns cpu_cache __read_mostly;
+struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
-struct outer_cache_fns outer_cache __read_mostly;
+struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif
@@ -290,12 +290,9 @@ static int cpu_has_aliasing_icache(unsigned int arch)
/* arch specifies the register format */
switch (arch) {
case CPU_ARCH_ARMv7:
- asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
- : /* No output operands */
- : "r" (1));
+ set_csselr(CSSELR_ICACHE | CSSELR_L1);
isb();
- asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
- : "=r" (id_reg));
+ id_reg = read_ccsidr();
line_size = 4 << ((id_reg & 0x7) + 2);
num_sets = ((id_reg >> 13) & 0x7fff) + 1;
aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
@@ -315,11 +312,12 @@ static void __init cacheid_init(void)
{
unsigned int arch = cpu_architecture();
- if (arch == CPU_ARCH_ARMv7M) {
- cacheid = 0;
- } else if (arch >= CPU_ARCH_ARMv6) {
+ if (arch >= CPU_ARCH_ARMv6) {
unsigned int cachetype = read_cpuid_cachetype();
- if ((cachetype & (7 << 29)) == 4 << 29) {
+
+ if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
+ cacheid = 0;
+ } else if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
arch = CPU_ARCH_ARMv7;
cacheid = CACHEID_VIPT_NONALIASING;
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 861521606c6d..937c8920d741 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -82,7 +82,7 @@ enum ipi_msg_type {
static DECLARE_COMPLETION(cpu_running);
-static struct smp_operations smp_ops;
+static struct smp_operations smp_ops __ro_after_init;
void __init smp_set_ops(const struct smp_operations *ops)
{
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 994e971a8538..53cf86cf2d1a 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cache.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -39,7 +40,7 @@
static struct page **vdso_text_pagelist;
/* Total number of pages needed for the data and text portions of the VDSO. */
-unsigned int vdso_total_pages __read_mostly;
+unsigned int vdso_total_pages __ro_after_init;
/*
* The VDSO data page.
@@ -47,13 +48,13 @@ unsigned int vdso_total_pages __read_mostly;
static union vdso_data_store vdso_data_store __page_aligned_data;
static struct vdso_data *vdso_data = &vdso_data_store.data;
-static struct page *vdso_data_page;
-static struct vm_special_mapping vdso_data_mapping = {
+static struct page *vdso_data_page __ro_after_init;
+static const struct vm_special_mapping vdso_data_mapping = {
.name = "[vvar]",
.pages = &vdso_data_page,
};
-static struct vm_special_mapping vdso_text_mapping = {
+static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
.name = "[vdso]",
};
@@ -67,7 +68,7 @@ struct elfinfo {
/* Cached result of boot-time check for whether the arch timer exists,
* and if so, whether the virtual counter is useable.
*/
-static bool cntvct_ok __read_mostly;
+static bool cntvct_ok __ro_after_init;
static bool __init cntvct_functional(void)
{
@@ -270,7 +271,7 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
return false;
- if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0)
+ if (!tk->tkr_mono.clock->archdata.vdso_direct)
return false;
return true;