Diffstat (limited to 'arch/arm/kernel')
30 files changed, 898 insertions, 604 deletions
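Before the per-file hunks, one illustrative aside: the most self-contained addition in this series is arm_check_condition() in the new arch/arm/kernel/opcodes.c, which replaces open-coded ARM condition-flag checks (for example in kprobes-test.c) with a 16-entry NZCV lookup table. Below is a minimal stand-alone sketch of that lookup; the cc_map values and the shift-by-NZCV test are copied from the opcodes.c hunk further down, while the wrapper function, main() and the example instruction encodings are illustrative assumptions and not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Condition-code lookup table, indexed by the instruction's condition
 * field; each 16-bit entry is indexed by the NZCV flag nibble.  Values
 * taken from cc_map[] in the arch/arm/kernel/opcodes.c hunk below. */
static const uint16_t cc_map[16] = {
	0xF0F0, 0x0F0F, 0xCCCC, 0x3333,		/* EQ, NE, CS, CC */
	0xFF00, 0x00FF, 0xAAAA, 0x5555,		/* MI, PL, VS, VC */
	0x0C0C, 0xF3F3, 0xAA55, 0x55AA,		/* HI, LS, GE, LT */
	0x0A05, 0xF5FA, 0xFFFF, 0x0000,		/* GT, LE, AL, NV */
};

/* Hypothetical stand-alone equivalent of the conditional path of
 * arm_check_condition(): returns non-zero if the opcode's condition
 * passes for the given PSR flags. */
static int cond_passes(uint32_t opcode, uint32_t psr)
{
	uint32_t cc = opcode >> 28;	/* condition field of the opcode */
	uint32_t nzcv = psr >> 28;	/* N, Z, C, V flags */

	return (cc_map[cc] >> nzcv) & 1;
}

int main(void)
{
	uint32_t psr_z = 0x40000000;	/* only the Z flag set */
	uint32_t moveq = 0x01a00000;	/* moveq r0, r0 */
	uint32_t movne = 0x11a00000;	/* movne r0, r0 */

	printf("moveq, Z set: %d\n", cond_passes(moveq, psr_z));	/* prints 1 */
	printf("movne, Z set: %d\n", cond_passes(movne, psr_z));	/* prints 0 */
	return 0;
}

In the kernel patch itself the same test is wrapped with the ARM_OPCODE_CONDTEST_* return values and a special case for the 0xF (unconditional) encoding, as shown in the opcodes.c hunk.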
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 16eed6aebfa4..43b740d0e374 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -13,7 +13,7 @@ CFLAGS_REMOVE_return_address.o = -pg # Object file lists. -obj-y := elf.o entry-armv.o entry-common.o irq.o \ +obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \ process.o ptrace.o return_address.o setup.o signal.o \ sys_arm.o stacktrace.o time.o traps.o diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 9943e9e74a1b..463ff4a0ec8a 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -385,6 +385,8 @@ CALL(sys_syncfs) CALL(sys_sendmmsg) /* 375 */ CALL(sys_setns) + CALL(sys_process_vm_readv) + CALL(sys_process_vm_writev) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 9ad50c4208ae..3a456c6c7005 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -36,12 +36,11 @@ #ifdef CONFIG_MULTI_IRQ_HANDLER ldr r1, =handle_arch_irq mov r0, sp - ldr r1, [r1] adr lr, BSYM(9997f) - teq r1, #0 - movne pc, r1 -#endif + ldr pc, [r1] +#else arch_irq_handler_default +#endif 9997: .endm @@ -497,7 +496,7 @@ ENDPROC(__und_usr) .popsection .pushsection __ex_table,"a" .long 1b, 4b -#if __LINUX_ARM_ARCH__ >= 7 +#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7 .long 2b, 4b .long 3b, 4b #endif diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 566c54c2a1fe..14e277d2ff91 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -39,8 +39,14 @@ #error KERNEL_RAM_VADDR must start at 0xXXXX8000 #endif +#ifdef CONFIG_ARM_LPAE + /* LPAE requires an additional page for the PGD */ +#define PG_DIR_SIZE 0x5000 +#define PMD_ORDER 3 +#else #define PG_DIR_SIZE 0x4000 #define PMD_ORDER 2 +#endif .globl swapper_pg_dir .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE @@ -164,17 +170,36 @@ __create_page_tables: teq r0, r6 bne 1b +#ifdef CONFIG_ARM_LPAE + /* + * Build the PGD table (first level) to point to the PMD table. A PGD + * entry is 64-bit wide. + */ + mov r0, r4 + add r3, r4, #0x1000 @ first PMD table address + orr r3, r3, #3 @ PGD block type + mov r6, #4 @ PTRS_PER_PGD + mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER +1: str r3, [r0], #4 @ set bottom PGD entry bits + str r7, [r0], #4 @ set top PGD entry bits + add r3, r3, #0x1000 @ next PMD table + subs r6, r6, #1 + bne 1b + + add r4, r4, #0x1000 @ point to the PMD tables +#endif + ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags /* * Create identity mapping to cater for __enable_mmu. * This identity mapping will be removed by paging_init(). */ - adr r0, __enable_mmu_loc + adr r0, __turn_mmu_on_loc ldmia r0, {r3, r5, r6} sub r0, r0, r3 @ virt->phys offset - add r5, r5, r0 @ phys __enable_mmu - add r6, r6, r0 @ phys __enable_mmu_end + add r5, r5, r0 @ phys __turn_mmu_on + add r6, r6, r0 @ phys __turn_mmu_on_end mov r5, r5, lsr #SECTION_SHIFT mov r6, r6, lsr #SECTION_SHIFT @@ -219,8 +244,8 @@ __create_page_tables: #endif /* - * Then map boot params address in r2 or - * the first 1MB of ram if boot params address is not specified. + * Then map boot params address in r2 or the first 1MB (2MB with LPAE) + * of ram if boot params address is not specified. 
*/ mov r0, r2, lsr #SECTION_SHIFT movs r0, r0, lsl #SECTION_SHIFT @@ -251,7 +276,15 @@ __create_page_tables: mov r3, r7, lsr #SECTION_SHIFT ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags orr r3, r7, r3, lsl #SECTION_SHIFT +#ifdef CONFIG_ARM_LPAE + mov r7, #1 << (54 - 32) @ XN +#else + orr r3, r3, #PMD_SECT_XN +#endif 1: str r3, [r0], #4 +#ifdef CONFIG_ARM_LPAE + str r7, [r0], #4 +#endif add r3, r3, #1 << SECTION_SHIFT cmp r0, r6 blo 1b @@ -283,14 +316,17 @@ __create_page_tables: str r3, [r0] #endif #endif +#ifdef CONFIG_ARM_LPAE + sub r4, r4, #0x1000 @ point to the PGD table +#endif mov pc, lr ENDPROC(__create_page_tables) .ltorg .align -__enable_mmu_loc: +__turn_mmu_on_loc: .long . - .long __enable_mmu - .long __enable_mmu_end + .long __turn_mmu_on + .long __turn_mmu_on_end #if defined(CONFIG_SMP) __CPUINIT @@ -360,7 +396,7 @@ __secondary_data: * r13 = *virtual* address to jump to upon completion */ __enable_mmu: -#ifdef CONFIG_ALIGNMENT_TRAP +#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6 orr r0, r0, #CR_A #else bic r0, r0, #CR_A @@ -374,12 +410,17 @@ __enable_mmu: #ifdef CONFIG_CPU_ICACHE_DISABLE bic r0, r0, #CR_I #endif +#ifdef CONFIG_ARM_LPAE + mov r5, #0 + mcrr p15, 0, r4, r5, c2 @ load TTBR0 +#else mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT)) mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r4, c2, c0, 0 @ load page table pointer +#endif b __turn_mmu_on ENDPROC(__enable_mmu) @@ -398,15 +439,19 @@ ENDPROC(__enable_mmu) * other registers depend on the function called upon completion */ .align 5 -__turn_mmu_on: + .pushsection .idmap.text, "ax" +ENTRY(__turn_mmu_on) mov r0, r0 + instr_sync mcr p15, 0, r0, c1, c0, 0 @ write control reg mrc p15, 0, r3, c0, c0, 0 @ read id reg + instr_sync mov r3, r3 mov r3, r13 mov pc, r3 -__enable_mmu_end: +__turn_mmu_on_end: ENDPROC(__turn_mmu_on) + .popsection #ifdef CONFIG_SMP_ON_UP diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 814a52a9dc39..d6a95ef9131d 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -1016,10 +1016,10 @@ static int __init arch_hw_breakpoint_init(void) } /* Register debug fault handler. */ - hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, - "watchpoint debug exception"); - hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, - "breakpoint debug exception"); + hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, + TRAP_HWBKPT, "watchpoint debug exception"); + hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, + TRAP_HWBKPT, "breakpoint debug exception"); /* Register hotplug notifier. 
*/ register_cpu_notifier(&dbg_reset_nb); diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c index 9fe8910308af..8a30c89da70e 100644 --- a/arch/arm/kernel/kprobes-arm.c +++ b/arch/arm/kernel/kprobes-arm.c @@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = { static const union decode_item arm_cccc_0001_____1001_table[] = { /* Synchronization primitives */ +#if __LINUX_ARM_ARCH__ < 6 + /* Deprecated on ARMv6 and may be UNDEFINED on v7 */ /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc, REGS(NOPC, NOPC, 0, 0, NOPC)), - +#endif /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ /* And unallocated instructions... */ DECODE_END diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c index fc82de8bdcce..ba32b393b3f0 100644 --- a/arch/arm/kernel/kprobes-test-arm.c +++ b/arch/arm/kernel/kprobes-test-arm.c @@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void) TEST_GROUP("Synchronization primitives") - /* - * Use hard coded constants for SWP instructions to avoid warnings - * about deprecated instructions. - */ - TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]") - TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]") - TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]") +#if __LINUX_ARM_ARCH__ < 6 + TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]") + TEST_R( "swpvs r0, r",1,VAL1,", [sp]") + TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]") +#else + TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]") + TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]") + TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]") +#endif TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]") TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]") TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]") - TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]") - TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]") +#if __LINUX_ARM_ARCH__ < 6 + TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]") + TEST_R( "swpvsb r0, r",1,VAL1,", [sp]") +#else + TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]") + TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]") +#endif TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]") TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */ @@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void) TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]") TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!") - TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"") + TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"") TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"") TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!") diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c index 5e726c31c45a..5d8b85792222 100644 --- a/arch/arm/kernel/kprobes-test-thumb.c +++ b/arch/arm/kernel/kprobes-test-thumb.c @@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void) DONT_TEST_IN_ITBLOCK( TEST_BF_R( "cbnz r",0,0, ", 2f") TEST_BF_R( "cbz r",2,-1,", 2f") - TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20) - TEST_BF_RX( "cbz r",7,0, ", 2f",0x40) + TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20) + TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40) ) TEST_R("sxth r0, r",7, HH1,"") TEST_R("sxth r7, r",0, HH2,"") @@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK( TESTCASE_START(code) \ 
TEST_ARG_PTR(13, offset) \ TEST_ARG_END("") \ - TEST_BRANCH_F(code,0) \ + TEST_BRANCH_F(code) \ TESTCASE_END TEST("push {r0}") @@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8, TEST_BF( "b 2f") TEST_BB( "b 2b") - TEST_BF_X("b 2f", 0x400) - TEST_BB_X("b 2b", 0x400) + TEST_BF_X("b 2f", SPACE_0x400) + TEST_BB_X("b 2b", SPACE_0x400) TEST_GROUP("Testing instructions in IT blocks") @@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22, TEST_BB("bne.w 2b") TEST_BF("bgt.w 2f") TEST_BB("blt.w 2b") - TEST_BF_X("bpl.w 2f",0x1000) + TEST_BF_X("bpl.w 2f", SPACE_0x1000) ) TEST_UNSUPPORTED("msr cpsr, r0") @@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22, TEST_BF( "b.w 2f") TEST_BB( "b.w 2b") - TEST_BF_X("b.w 2f", 0x1000) + TEST_BF_X("b.w 2f", SPACE_0x1000) TEST_BF( "bl.w 2f") TEST_BB( "bl.w 2b") - TEST_BB_X("bl.w 2b", 0x1000) + TEST_BB_X("bl.w 2b", SPACE_0x1000) TEST_X( "blx __dummy_arm_subroutine", ".arm \n\t" diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c index e17cdd6d90d8..1862d8f2fd44 100644 --- a/arch/arm/kernel/kprobes-test.c +++ b/arch/arm/kernel/kprobes-test.c @@ -202,6 +202,8 @@ #include <linux/slab.h> #include <linux/kprobes.h> +#include <asm/opcodes.h> + #include "kprobes.h" #include "kprobes-test.h" @@ -1050,65 +1052,9 @@ static int test_instance; static unsigned long test_check_cc(int cc, unsigned long cpsr) { - unsigned long temp; - - switch (cc) { - case 0x0: /* eq */ - return cpsr & PSR_Z_BIT; - - case 0x1: /* ne */ - return (~cpsr) & PSR_Z_BIT; - - case 0x2: /* cs */ - return cpsr & PSR_C_BIT; - - case 0x3: /* cc */ - return (~cpsr) & PSR_C_BIT; - - case 0x4: /* mi */ - return cpsr & PSR_N_BIT; - - case 0x5: /* pl */ - return (~cpsr) & PSR_N_BIT; - - case 0x6: /* vs */ - return cpsr & PSR_V_BIT; - - case 0x7: /* vc */ - return (~cpsr) & PSR_V_BIT; + int ret = arm_check_condition(cc << 28, cpsr); - case 0x8: /* hi */ - cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ - return cpsr & PSR_C_BIT; - - case 0x9: /* ls */ - cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ - return (~cpsr) & PSR_C_BIT; - - case 0xa: /* ge */ - cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - return (~cpsr) & PSR_N_BIT; - - case 0xb: /* lt */ - cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - return cpsr & PSR_N_BIT; - - case 0xc: /* gt */ - temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ - return (~temp) & PSR_N_BIT; - - case 0xd: /* le */ - temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ - temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ - return temp & PSR_N_BIT; - - case 0xe: /* al */ - case 0xf: /* unconditional */ - return true; - } - BUG(); - return false; + return (ret != ARM_OPCODE_CONDTEST_FAIL); } static int is_last_scenario; @@ -1128,7 +1074,9 @@ static unsigned long test_context_cpsr(int scenario) if (!test_case_is_thumb) { /* Testing ARM code */ - probe_should_run = test_check_cc(current_instruction >> 28, cpsr) != 0; + int cc = current_instruction >> 28; + + probe_should_run = test_check_cc(cc, cpsr) != 0; if (scenario == 15) is_last_scenario = true; diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h index 0dc5d77b9356..e28a869b1ae4 100644 --- a/arch/arm/kernel/kprobes-test.h +++ b/arch/arm/kernel/kprobes-test.h @@ -149,23 +149,31 @@ struct test_arg_end { "1: "instruction" \n\t" \ " nop \n\t" -#define TEST_BRANCH_F(instruction, xtra_dist) \ +#define TEST_BRANCH_F(instruction) \ TEST_INSTRUCTION(instruction) \ - ".if "#xtra_dist" \n\t" \ " b 99f \n\t" \ - ".space "#xtra_dist" 
\n\t" \ - ".endif \n\t" \ + "2: nop \n\t" + +#define TEST_BRANCH_B(instruction) \ + " b 50f \n\t" \ + " b 99f \n\t" \ + "2: nop \n\t" \ + " b 99f \n\t" \ + TEST_INSTRUCTION(instruction) + +#define TEST_BRANCH_FX(instruction, codex) \ + TEST_INSTRUCTION(instruction) \ + " b 99f \n\t" \ + codex" \n\t" \ " b 99f \n\t" \ "2: nop \n\t" -#define TEST_BRANCH_B(instruction, xtra_dist) \ +#define TEST_BRANCH_BX(instruction, codex) \ " b 50f \n\t" \ " b 99f \n\t" \ "2: nop \n\t" \ " b 99f \n\t" \ - ".if "#xtra_dist" \n\t" \ - ".space "#xtra_dist" \n\t" \ - ".endif \n\t" \ + codex" \n\t" \ TEST_INSTRUCTION(instruction) #define TESTCASE_END \ @@ -301,47 +309,60 @@ struct test_arg_end { TESTCASE_START(code1 #reg1 code2) \ TEST_ARG_PTR(reg1, val1) \ TEST_ARG_END("") \ - TEST_BRANCH_F(code1 #reg1 code2, 0) \ + TEST_BRANCH_F(code1 #reg1 code2) \ TESTCASE_END -#define TEST_BF_X(code, xtra_dist) \ +#define TEST_BF(code) \ TESTCASE_START(code) \ TEST_ARG_END("") \ - TEST_BRANCH_F(code, xtra_dist) \ + TEST_BRANCH_F(code) \ TESTCASE_END -#define TEST_BB_X(code, xtra_dist) \ +#define TEST_BB(code) \ TESTCASE_START(code) \ TEST_ARG_END("") \ - TEST_BRANCH_B(code, xtra_dist) \ + TEST_BRANCH_B(code) \ TESTCASE_END -#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \ - TESTCASE_START(code1 #reg code2) \ - TEST_ARG_REG(reg, val) \ - TEST_ARG_END("") \ - TEST_BRANCH_F(code1 #reg code2, xtra_dist) \ +#define TEST_BF_R(code1, reg, val, code2) \ + TESTCASE_START(code1 #reg code2) \ + TEST_ARG_REG(reg, val) \ + TEST_ARG_END("") \ + TEST_BRANCH_F(code1 #reg code2) \ TESTCASE_END -#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \ - TESTCASE_START(code1 #reg code2) \ - TEST_ARG_REG(reg, val) \ - TEST_ARG_END("") \ - TEST_BRANCH_B(code1 #reg code2, xtra_dist) \ +#define TEST_BB_R(code1, reg, val, code2) \ + TESTCASE_START(code1 #reg code2) \ + TEST_ARG_REG(reg, val) \ + TEST_ARG_END("") \ + TEST_BRANCH_B(code1 #reg code2) \ TESTCASE_END -#define TEST_BF(code) TEST_BF_X(code, 0) -#define TEST_BB(code) TEST_BB_X(code, 0) - -#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0) -#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0) - #define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \ TESTCASE_START(code1 #reg1 code2 #reg2 code3) \ TEST_ARG_REG(reg1, val1) \ TEST_ARG_REG(reg2, val2) \ TEST_ARG_END("") \ - TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \ + TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \ + TESTCASE_END + +#define TEST_BF_X(code, codex) \ + TESTCASE_START(code) \ + TEST_ARG_END("") \ + TEST_BRANCH_FX(code, codex) \ + TESTCASE_END + +#define TEST_BB_X(code, codex) \ + TESTCASE_START(code) \ + TEST_ARG_END("") \ + TEST_BRANCH_BX(code, codex) \ + TESTCASE_END + +#define TEST_BF_RX(code1, reg, val, code2, codex) \ + TESTCASE_START(code1 #reg code2) \ + TEST_ARG_REG(reg, val) \ + TEST_ARG_END("") \ + TEST_BRANCH_FX(code1 #reg code2, codex) \ TESTCASE_END #define TEST_X(code, codex) \ @@ -372,6 +393,25 @@ struct test_arg_end { TESTCASE_END +/* + * Macros for defining space directives spread over multiple lines. + * These are required so the compiler guesses better the length of inline asm + * code and will spill the literal pool early enough to avoid generating PC + * relative loads with out of range offsets. 
+ */ +#define TWICE(x) x x +#define SPACE_0x8 TWICE(".space 4\n\t") +#define SPACE_0x10 TWICE(SPACE_0x8) +#define SPACE_0x20 TWICE(SPACE_0x10) +#define SPACE_0x40 TWICE(SPACE_0x20) +#define SPACE_0x80 TWICE(SPACE_0x40) +#define SPACE_0x100 TWICE(SPACE_0x80) +#define SPACE_0x200 TWICE(SPACE_0x100) +#define SPACE_0x400 TWICE(SPACE_0x200) +#define SPACE_0x800 TWICE(SPACE_0x400) +#define SPACE_0x1000 TWICE(SPACE_0x800) + + /* Various values used in test cases... */ #define N(val) (val ^ 0xffffffff) #define VAL1 0x12345678 diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c index 0bcd38341573..1911dae19e4f 100644 --- a/arch/arm/kernel/leds.c +++ b/arch/arm/kernel/leds.c @@ -9,7 +9,7 @@ */ #include <linux/export.h> #include <linux/init.h> -#include <linux/sysdev.h> +#include <linux/device.h> #include <linux/syscore_ops.h> #include <linux/string.h> @@ -34,8 +34,8 @@ static const struct leds_evt_name evt_names[] = { { "red", led_red_on, led_red_off }, }; -static ssize_t leds_store(struct sys_device *dev, - struct sysdev_attribute *attr, +static ssize_t leds_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { int ret = -EINVAL, len = strcspn(buf, " "); @@ -69,15 +69,16 @@ static ssize_t leds_store(struct sys_device *dev, return ret; } -static SYSDEV_ATTR(event, 0200, NULL, leds_store); +static DEVICE_ATTR(event, 0200, NULL, leds_store); -static struct sysdev_class leds_sysclass = { +static struct bus_type leds_subsys = { .name = "leds", + .dev_name = "leds", }; -static struct sys_device leds_device = { +static struct device leds_device = { .id = 0, - .cls = &leds_sysclass, + .bus = &leds_subsys, }; static int leds_suspend(void) @@ -105,11 +106,11 @@ static struct syscore_ops leds_syscore_ops = { static int __init leds_init(void) { int ret; - ret = sysdev_class_register(&leds_sysclass); + ret = subsys_system_register(&leds_subsys, NULL); if (ret == 0) - ret = sysdev_register(&leds_device); + ret = device_register(&leds_device); if (ret == 0) - ret = sysdev_create_file(&leds_device, &attr_event); + ret = device_create_file(&leds_device, &dev_attr_event); if (ret == 0) register_syscore_ops(&leds_syscore_ops); return ret; diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index c1b4463dcc83..764bd456d84f 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -12,12 +12,11 @@ #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/mach-types.h> +#include <asm/system.h> extern const unsigned char relocate_new_kernel[]; extern const unsigned int relocate_new_kernel_size; -extern void setup_mm_for_reboot(char mode); - extern unsigned long kexec_start_address; extern unsigned long kexec_indirection_page; extern unsigned long kexec_mach_type; @@ -32,24 +31,6 @@ static atomic_t waiting_for_crash_ipi; int machine_kexec_prepare(struct kimage *image) { - unsigned long page_list; - void *reboot_code_buffer; - page_list = image->head & PAGE_MASK; - - reboot_code_buffer = page_address(image->control_code_page); - - /* Prepare parameters for reboot_code_buffer*/ - kexec_start_address = image->start; - kexec_indirection_page = page_list; - kexec_mach_type = machine_arch_type; - kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; - - /* copy our kernel relocation code to the control code page */ - memcpy(reboot_code_buffer, - relocate_new_kernel, relocate_new_kernel_size); - - flush_icache_range((unsigned long) reboot_code_buffer, - (unsigned long) reboot_code_buffer 
+ KEXEC_CONTROL_PAGE_SIZE); return 0; } @@ -100,26 +81,35 @@ void (*kexec_reinit)(void); void machine_kexec(struct kimage *image) { + unsigned long page_list; unsigned long reboot_code_buffer_phys; void *reboot_code_buffer; + + page_list = image->head & PAGE_MASK; + /* we need both effective and real address here */ reboot_code_buffer_phys = page_to_pfn(image->control_code_page) << PAGE_SHIFT; reboot_code_buffer = page_address(image->control_code_page); + /* Prepare parameters for reboot_code_buffer*/ + kexec_start_address = image->start; + kexec_indirection_page = page_list; + kexec_mach_type = machine_arch_type; + kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; + + /* copy our kernel relocation code to the control code page */ + memcpy(reboot_code_buffer, + relocate_new_kernel, relocate_new_kernel_size); + + + flush_icache_range((unsigned long) reboot_code_buffer, + (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); if (kexec_reinit) kexec_reinit(); - local_irq_disable(); - local_fiq_disable(); - setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/ - flush_cache_all(); - outer_flush_all(); - outer_disable(); - cpu_proc_fin(); - outer_inv_all(); - flush_cache_all(); - cpu_reset(reboot_code_buffer_phys); + + soft_restart(reboot_code_buffer_phys); } diff --git a/arch/arm/kernel/opcodes.c b/arch/arm/kernel/opcodes.c new file mode 100644 index 000000000000..f8179c6a817f --- /dev/null +++ b/arch/arm/kernel/opcodes.c @@ -0,0 +1,72 @@ +/* + * linux/arch/arm/kernel/opcodes.c + * + * A32 condition code lookup feature moved from nwfpe/fpopcode.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <asm/opcodes.h> + +#define ARM_OPCODE_CONDITION_UNCOND 0xf + +/* + * condition code lookup table + * index into the table is test code: EQ, NE, ... LT, GT, AL, NV + * + * bit position in short is condition code: NZCV + */ +static const unsigned short cc_map[16] = { + 0xF0F0, /* EQ == Z set */ + 0x0F0F, /* NE */ + 0xCCCC, /* CS == C set */ + 0x3333, /* CC */ + 0xFF00, /* MI == N set */ + 0x00FF, /* PL */ + 0xAAAA, /* VS == V set */ + 0x5555, /* VC */ + 0x0C0C, /* HI == C set && Z clear */ + 0xF3F3, /* LS == C clear || Z set */ + 0xAA55, /* GE == (N==V) */ + 0x55AA, /* LT == (N!=V) */ + 0x0A05, /* GT == (!Z && (N==V)) */ + 0xF5FA, /* LE == (Z || (N!=V)) */ + 0xFFFF, /* AL always */ + 0 /* NV */ +}; + +/* + * Returns: + * ARM_OPCODE_CONDTEST_FAIL - if condition fails + * ARM_OPCODE_CONDTEST_PASS - if condition passes (including AL) + * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional + * opcode space from v5 onwards + * + * Code that tests whether a conditional instruction would pass its condition + * check should check that return value == ARM_OPCODE_CONDTEST_PASS. + * + * Code that tests if a condition means that the instruction would be executed + * (regardless of conditional or unconditional) should instead check that the + * return value != ARM_OPCODE_CONDTEST_FAIL. 
+ */ +asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr) +{ + u32 cc_bits = opcode >> 28; + u32 psr_cond = psr >> 28; + unsigned int ret; + + if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) { + if ((cc_map[cc_bits] >> (psr_cond)) & 1) + ret = ARM_OPCODE_CONDTEST_PASS; + else + ret = ARM_OPCODE_CONDTEST_FAIL; + } else { + ret = ARM_OPCODE_CONDTEST_UNCOND; + } + + return ret; +} +EXPORT_SYMBOL_GPL(arm_check_condition); diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 24e2347be6b1..5bb91bf3d47f 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -59,8 +59,7 @@ armpmu_get_pmu_id(void) } EXPORT_SYMBOL_GPL(armpmu_get_pmu_id); -int -armpmu_get_max_events(void) +int perf_num_counters(void) { int max_events = 0; @@ -69,12 +68,6 @@ armpmu_get_max_events(void) return max_events; } -EXPORT_SYMBOL_GPL(armpmu_get_max_events); - -int perf_num_counters(void) -{ - return armpmu_get_max_events(); -} EXPORT_SYMBOL_GPL(perf_num_counters); #define HW_OP_UNSUPPORTED 0xFFFF @@ -343,19 +336,25 @@ validate_group(struct perf_event *event) { struct perf_event *sibling, *leader = event->group_leader; struct pmu_hw_events fake_pmu; + DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); - memset(&fake_pmu, 0, sizeof(fake_pmu)); + /* + * Initialise the fake PMU. We only need to populate the + * used_mask for the purposes of validation. + */ + memset(fake_used_mask, 0, sizeof(fake_used_mask)); + fake_pmu.used_mask = fake_used_mask; if (!validate_event(&fake_pmu, leader)) - return -ENOSPC; + return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { if (!validate_event(&fake_pmu, sibling)) - return -ENOSPC; + return -EINVAL; } if (!validate_event(&fake_pmu, event)) - return -ENOSPC; + return -EINVAL; return 0; } @@ -374,6 +373,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu) { int i, irq, irqs; struct platform_device *pmu_device = armpmu->plat_device; + struct arm_pmu_platdata *plat = + dev_get_platdata(&pmu_device->dev); irqs = min(pmu_device->num_resources, num_possible_cpus()); @@ -381,8 +382,11 @@ armpmu_release_hardware(struct arm_pmu *armpmu) if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) continue; irq = platform_get_irq(pmu_device, i); - if (irq >= 0) + if (irq >= 0) { + if (plat && plat->disable_irq) + plat->disable_irq(irq); free_irq(irq, armpmu); + } } release_pmu(armpmu->type); @@ -396,6 +400,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) int i, err, irq, irqs; struct platform_device *pmu_device = armpmu->plat_device; + if (!pmu_device) + return -ENODEV; + err = reserve_pmu(armpmu->type); if (err) { pr_warning("unable to reserve pmu\n"); @@ -439,7 +446,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) irq); armpmu_release_hardware(armpmu); return err; - } + } else if (plat && plat->enable_irq) + plat->enable_irq(irq); cpumask_set_cpu(i, &armpmu->active_irqs); } @@ -631,6 +639,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = { static int __devinit armpmu_device_probe(struct platform_device *pdev) { + if (!cpu_pmu) + return -ENODEV; + cpu_pmu->plat_device = pdev; return 0; } diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index e63d8115c01b..533be9930ec2 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -65,13 +65,15 @@ enum armv6_counters { * accesses/misses in hardware. 
*/ static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, - [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, - [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, - [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, - [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL, }; static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] @@ -218,13 +220,15 @@ enum armv6mpcore_perf_types { * accesses/misses in hardware. */ static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, - [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, - [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, - [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, - [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL, }; static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 1ef6d0034b85..460bbbb6b885 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -28,165 +28,87 @@ static struct arm_pmu armv7pmu; * they are not available. */ enum armv7_perf_types { - ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, - ARMV7_PERFCTR_IFETCH_MISS = 0x01, - ARMV7_PERFCTR_ITLB_MISS = 0x02, - ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */ - ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */ - ARMV7_PERFCTR_DTLB_REFILL = 0x05, - ARMV7_PERFCTR_DREAD = 0x06, - ARMV7_PERFCTR_DWRITE = 0x07, - ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, - ARMV7_PERFCTR_EXC_TAKEN = 0x09, - ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, - ARMV7_PERFCTR_CID_WRITE = 0x0B, - /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. 
+ ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, + ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01, + ARMV7_PERFCTR_ITLB_REFILL = 0x02, + ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03, + ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04, + ARMV7_PERFCTR_DTLB_REFILL = 0x05, + ARMV7_PERFCTR_MEM_READ = 0x06, + ARMV7_PERFCTR_MEM_WRITE = 0x07, + ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, + ARMV7_PERFCTR_EXC_TAKEN = 0x09, + ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, + ARMV7_PERFCTR_CID_WRITE = 0x0B, + + /* + * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. * It counts: - * - all branch instructions, + * - all (taken) branch instructions, * - instructions that explicitly write the PC, * - exception generating instructions. */ - ARMV7_PERFCTR_PC_WRITE = 0x0C, - ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, - ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, - ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, + ARMV7_PERFCTR_PC_WRITE = 0x0C, + ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, + ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, + ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F, + ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, + ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, + ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12, /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */ - ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, - ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, - ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12, - ARMV7_PERFCTR_MEM_ACCESS = 0x13, - ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14, - ARMV7_PERFCTR_L1_DCACHE_WB = 0x15, - ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16, - ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17, - ARMV7_PERFCTR_L2_DCACHE_WB = 0x18, - ARMV7_PERFCTR_BUS_ACCESS = 0x19, - ARMV7_PERFCTR_MEMORY_ERROR = 0x1A, - ARMV7_PERFCTR_INSTR_SPEC = 0x1B, - ARMV7_PERFCTR_TTBR_WRITE = 0x1C, - ARMV7_PERFCTR_BUS_CYCLES = 0x1D, - - ARMV7_PERFCTR_CPU_CYCLES = 0xFF + ARMV7_PERFCTR_MEM_ACCESS = 0x13, + ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14, + ARMV7_PERFCTR_L1_DCACHE_WB = 0x15, + ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16, + ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17, + ARMV7_PERFCTR_L2_CACHE_WB = 0x18, + ARMV7_PERFCTR_BUS_ACCESS = 0x19, + ARMV7_PERFCTR_MEM_ERROR = 0x1A, + ARMV7_PERFCTR_INSTR_SPEC = 0x1B, + ARMV7_PERFCTR_TTBR_WRITE = 0x1C, + ARMV7_PERFCTR_BUS_CYCLES = 0x1D, + + ARMV7_PERFCTR_CPU_CYCLES = 0xFF }; /* ARMv7 Cortex-A8 specific event types */ enum armv7_a8_perf_types { - ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, - ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, - ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, - ARMV7_PERFCTR_L2_ACCESS = 0x43, - ARMV7_PERFCTR_L2_CACH_MISS = 0x44, - ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45, - ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46, - ARMV7_PERFCTR_MEMORY_REPLAY = 0x47, - ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48, - ARMV7_PERFCTR_L1_DATA_MISS = 0x49, - ARMV7_PERFCTR_L1_INST_MISS = 0x4A, - ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B, - ARMV7_PERFCTR_L1_NEON_DATA = 0x4C, - ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D, - ARMV7_PERFCTR_L2_NEON = 0x4E, - ARMV7_PERFCTR_L2_NEON_HIT = 0x4F, - ARMV7_PERFCTR_L1_INST = 0x50, - ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51, - ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52, - ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53, - ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54, - ARMV7_PERFCTR_OP_EXECUTED = 0x55, - ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56, - ARMV7_PERFCTR_CYCLES_INST = 0x57, - ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58, - ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59, - ARMV7_PERFCTR_NEON_CYCLES = 0x5A, - - ARMV7_PERFCTR_PMU0_EVENTS = 0x70, - ARMV7_PERFCTR_PMU1_EVENTS = 0x71, - ARMV7_PERFCTR_PMU_EVENTS = 0x72, + ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43, + ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44, + 
ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50, + ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56, }; /* ARMv7 Cortex-A9 specific event types */ enum armv7_a9_perf_types { - ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, - ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, - ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, - - ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50, - ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51, - - ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60, - ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61, - ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62, - ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63, - ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64, - ARMV7_PERFCTR_DATA_EVICTION = 0x65, - ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66, - ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67, - ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68, - - ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E, - - ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70, - ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71, - ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72, - ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73, - ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74, - - ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80, - ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81, - ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82, - ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83, - ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84, - ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85, - ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86, - - ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A, - ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B, - - ARMV7_PERFCTR_ISB_INST = 0x90, - ARMV7_PERFCTR_DSB_INST = 0x91, - ARMV7_PERFCTR_DMB_INST = 0x92, - ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93, - - ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0, - ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1, - ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2, - ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3, - ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4, - ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 + ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68, + ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60, + ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66, }; /* ARMv7 Cortex-A5 specific event types */ enum armv7_a5_perf_types { - ARMV7_PERFCTR_IRQ_TAKEN = 0x86, - ARMV7_PERFCTR_FIQ_TAKEN = 0x87, - - ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0, - ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1, - ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2, - ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3, - ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4, - ARMV7_PERFCTR_READ_ALLOC = 0xc5, - - ARMV7_PERFCTR_STALL_SB_FULL = 0xc9, + ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2, + ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3, }; /* ARMv7 Cortex-A15 specific event types */ enum armv7_a15_perf_types { - ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40, - ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41, - ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42, - ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43, + ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40, + ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41, + ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42, + ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43, - ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C, - ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D, + ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C, + ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D, - ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50, - ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51, - ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52, - ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53, + ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50, + ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51, + 
ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52, + ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53, - ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76, + ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76, }; /* @@ -197,13 +119,15 @@ enum armv7_a15_perf_types { * accesses/misses in hardware. */ static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, - [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, - [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, - [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, - [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, }; static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] @@ -217,12 +141,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] * combined. */ [C(OP_READ)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -231,12 +155,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(L1I)] = { [C(OP_READ)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, + [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, + [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -245,12 +169,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(LL)] = { [C(OP_READ)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, + [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, + [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -274,11 +198,11 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, }, 
[C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -287,14 +211,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(BPU)] = { [C(OP_READ)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -321,14 +243,15 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] * Cortex-A9 HW events mapping */ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, - [PERF_COUNT_HW_INSTRUCTIONS] = - ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, - [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS, - [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, - [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, - [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH, }; static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] @@ -342,12 +265,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] * combined. 
*/ [C(OP_READ)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -357,11 +280,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -399,11 +322,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -412,14 +335,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(BPU)] = { [C(OP_READ)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -446,13 +367,15 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] * Cortex-A5 HW events mapping */ static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, - [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, - [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, - [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, - [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, }; static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] @@ -460,42 +383,34 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [C(L1D)] = { [C(OP_READ)] = { - 
[C(RESULT_ACCESS)] - = ARMV7_PERFCTR_DCACHE_ACCESS, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_DCACHE_REFILL, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] - = ARMV7_PERFCTR_DCACHE_ACCESS, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_DCACHE_REFILL, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, }, [C(OP_PREFETCH)] = { - [C(RESULT_ACCESS)] - = ARMV7_PERFCTR_PREFETCH_LINEFILL, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP, + [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, + [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, /* * The prefetch counters don't differentiate between the I * side and the D side. */ [C(OP_PREFETCH)] = { - [C(RESULT_ACCESS)] - = ARMV7_PERFCTR_PREFETCH_LINEFILL, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP, + [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, + [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, }, }, [C(LL)] = { @@ -529,11 +444,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -543,13 +458,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -562,13 +475,15 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] * Cortex-A15 HW events mapping */ static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, - [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, - [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE, - [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, - [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, + 
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, }; static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] @@ -576,16 +491,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { [C(L1D)] = { [C(OP_READ)] = { - [C(RESULT_ACCESS)] - = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL, + [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ, + [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] - = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL, + [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE, + [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -601,11 +512,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] */ [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, - [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -614,16 +525,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(LL)] = { [C(OP_READ)] = { - [C(RESULT_ACCESS)] - = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL, + [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ, + [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] - = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL, + [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE, + [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -633,13 +540,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_L1_DTLB_READ_REFILL, + [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL, + [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -649,11 +554,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -663,13 +568,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, - [C(RESULT_MISS)] - = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, - 
[C(RESULT_MISS)] - = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index e0cca10a8411..3b99d8269829 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -48,13 +48,15 @@ enum xscale_counters { }; static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT, - [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION, - [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH, - [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS, - [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT, + [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH, + [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, }; static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2c3407ee8576..2334bf8a650a 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c @@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type) { clear_bit_unlock(type, pmu_lock); } +EXPORT_SYMBOL_GPL(release_pmu); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 75316f0dd02a..971d65c253a9 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -57,7 +57,7 @@ static const char *isa_modes[] = { "ARM" , "Thumb" , "Jazelle", "ThumbEE" }; -extern void setup_mm_for_reboot(char mode); +extern void setup_mm_for_reboot(void); static volatile int hlt_counter; @@ -92,18 +92,24 @@ static int __init hlt_setup(char *__unused) __setup("nohlt", nohlt_setup); __setup("hlt", hlt_setup); -void arm_machine_restart(char mode, const char *cmd) +extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); +typedef void (*phys_reset_t)(unsigned long); + +/* + * A temporary stack to use for CPU reset. This is static so that we + * don't clobber it with the identity mapping. When running with this + * stack, any references to the current task *will not work* so you + * should really do as little as possible before jumping to your reset + * code. + */ +static u64 soft_restart_stack[16]; + +static void __soft_restart(void *addr) { - /* Disable interrupts first */ - local_irq_disable(); - local_fiq_disable(); + phys_reset_t phys_reset; - /* - * Tell the mm system that we are going to reboot - - * we may need it to insert some 1:1 mappings so that - * soft boot works. - */ - setup_mm_for_reboot(mode); + /* Take out a flat memory mapping. */ + setup_mm_for_reboot(); /* Clean and invalidate caches */ flush_cache_all(); @@ -114,18 +120,35 @@ void arm_machine_restart(char mode, const char *cmd) /* Push out any further dirty data, and ensure cache is empty */ flush_cache_all(); - /* - * Now call the architecture specific reboot code. - */ - arch_reset(mode, cmd); + /* Switch to the identity mapping. 
*/ + phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); + phys_reset((unsigned long)addr); - /* - * Whoops - the architecture was unable to reboot. - * Tell the user! - */ - mdelay(1000); - printk("Reboot failed -- System halted\n"); - while (1); + /* Should never get here. */ + BUG(); +} + +void soft_restart(unsigned long addr) +{ + u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack); + + /* Disable interrupts first */ + local_irq_disable(); + local_fiq_disable(); + + /* Disable the L2 if we're the last man standing. */ + if (num_online_cpus() == 1) + outer_disable(); + + /* Change to the new stack and continue with the reset. */ + call_with_stack(__soft_restart, (void *)addr, (void *)stack); + + /* Should never get here. */ + BUG(); +} + +static void null_restart(char mode, const char *cmd) +{ } /* @@ -134,7 +157,7 @@ void arm_machine_restart(char mode, const char *cmd) void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); -void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart; +void (*arm_pm_restart)(char str, const char *cmd) = null_restart; EXPORT_SYMBOL_GPL(arm_pm_restart); static void do_nothing(void *unused) @@ -183,7 +206,8 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { - tick_nohz_stop_sched_tick(1); + tick_nohz_idle_enter(); + rcu_idle_enter(); leds_event(led_idle_start); while (!need_resched()) { #ifdef CONFIG_HOTPLUG_CPU @@ -192,6 +216,9 @@ void cpu_idle(void) #endif local_irq_disable(); +#ifdef CONFIG_PL310_ERRATA_769419 + wmb(); +#endif if (hlt_counter) { local_irq_enable(); cpu_relax(); @@ -210,7 +237,8 @@ void cpu_idle(void) } } leds_event(led_idle_end); - tick_nohz_restart_sched_tick(); + rcu_idle_exit(); + tick_nohz_idle_exit(); preempt_enable_no_resched(); schedule(); preempt_disable(); @@ -250,7 +278,15 @@ void machine_power_off(void) void machine_restart(char *cmd) { machine_shutdown(); + arm_pm_restart(reboot_mode, cmd); + + /* Give a grace period for failure to restart of 1s */ + mdelay(1000); + + /* Whoops - the platform was unable to reboot. Tell the user! */ + printk("Reboot failed -- System halted\n"); + while (1); } void __show_regs(struct pt_regs *regs) diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index 9a46370fe9da..5416c7c12528 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -14,61 +14,153 @@ #include <asm/sched_clock.h> +struct clock_data { + u64 epoch_ns; + u32 epoch_cyc; + u32 epoch_cyc_copy; + u32 mult; + u32 shift; +}; + static void sched_clock_poll(unsigned long wrap_ticks); static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0); -static void (*sched_clock_update_fn)(void); + +static struct clock_data cd = { + .mult = NSEC_PER_SEC / HZ, +}; + +static u32 __read_mostly sched_clock_mask = 0xffffffff; + +static u32 notrace jiffy_sched_clock_read(void) +{ + return (u32)(jiffies - INITIAL_JIFFIES); +} + +static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read; + +static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift) +{ + return (cyc * mult) >> shift; +} + +static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) +{ + u64 epoch_ns; + u32 epoch_cyc; + + /* + * Load the epoch_cyc and epoch_ns atomically. We do this by + * ensuring that we always write epoch_cyc, epoch_ns and + * epoch_cyc_copy in strict order, and read them in strict order. + * If epoch_cyc and epoch_cyc_copy are not equal, then we're in + * the middle of an update, and we should repeat the load. 
+ */ + do { + epoch_cyc = cd.epoch_cyc; + smp_rmb(); + epoch_ns = cd.epoch_ns; + smp_rmb(); + } while (epoch_cyc != cd.epoch_cyc_copy); + + return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift); +} + +/* + * Atomically update the sched_clock epoch. + */ +static void notrace update_sched_clock(void) +{ + unsigned long flags; + u32 cyc; + u64 ns; + + cyc = read_sched_clock(); + ns = cd.epoch_ns + + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, + cd.mult, cd.shift); + /* + * Write epoch_cyc and epoch_ns in a way that the update is + * detectable in cyc_to_fixed_sched_clock(). + */ + raw_local_irq_save(flags); + cd.epoch_cyc = cyc; + smp_wmb(); + cd.epoch_ns = ns; + smp_wmb(); + cd.epoch_cyc_copy = cyc; + raw_local_irq_restore(flags); +} static void sched_clock_poll(unsigned long wrap_ticks) { mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks)); - sched_clock_update_fn(); + update_sched_clock(); } -void __init init_sched_clock(struct clock_data *cd, void (*update)(void), - unsigned int clock_bits, unsigned long rate) +void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) { unsigned long r, w; u64 res, wrap; char r_unit; - sched_clock_update_fn = update; + BUG_ON(bits > 32); + WARN_ON(!irqs_disabled()); + WARN_ON(read_sched_clock != jiffy_sched_clock_read); + read_sched_clock = read; + sched_clock_mask = (1 << bits) - 1; /* calculate the mult/shift to convert counter ticks to ns. */ - clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0); + clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0); r = rate; if (r >= 4000000) { r /= 1000000; r_unit = 'M'; - } else { + } else if (r >= 1000) { r /= 1000; r_unit = 'k'; - } + } else + r_unit = ' '; /* calculate how many ns until we wrap */ - wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift); + wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift); do_div(wrap, NSEC_PER_MSEC); w = wrap; /* calculate the ns resolution of this counter */ - res = cyc_to_ns(1ULL, cd->mult, cd->shift); + res = cyc_to_ns(1ULL, cd.mult, cd.shift); pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n", - clock_bits, r, r_unit, res, w); + bits, r, r_unit, res, w); /* * Start the timer to keep sched_clock() properly updated and * sets the initial epoch. */ sched_clock_timer.data = msecs_to_jiffies(w - (w / 10)); - update(); + update_sched_clock(); /* * Ensure that sched_clock() starts off at 0ns */ - cd->epoch_ns = 0; + cd.epoch_ns = 0; + + pr_debug("Registered %pF as sched_clock source\n", read); +} + +unsigned long long notrace sched_clock(void) +{ + u32 cyc = read_sched_clock(); + return cyc_to_sched_clock(cyc, sched_clock_mask); } void __init sched_clock_postinit(void) { + /* + * If no sched_clock function has been provided at that point, + * make it the final one one. 
+ */ + if (read_sched_clock == jiffy_sched_clock_read) + setup_sched_clock(jiffy_sched_clock_read, 32, HZ); + sched_clock_poll(sched_clock_timer.data); } diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 7e7977ab994f..129fbd55bde8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -31,6 +31,7 @@ #include <linux/memblock.h> #include <linux/bug.h> #include <linux/compiler.h> +#include <linux/sort.h> #include <asm/unified.h> #include <asm/cpu.h> @@ -52,6 +53,7 @@ #include <asm/mach/time.h> #include <asm/traps.h> #include <asm/unwind.h> +#include <asm/memblock.h> #if defined(CONFIG_DEPRECATED_PARAM_STRUCT) #include "compat.h" @@ -461,8 +463,10 @@ static void __init setup_processor(void) cpu_name, read_cpuid_id(), read_cpuid_id() & 15, proc_arch[cpu_architecture()], cr_alignment); - sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); - sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); + snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", + list->arch_name, ENDIANNESS); + snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", + list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; #ifndef CONFIG_ARM_THUMB elf_hwcap &= ~HWCAP_THUMB; @@ -888,13 +892,17 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr) return mdesc; } +static int __init meminfo_cmp(const void *_a, const void *_b) +{ + const struct membank *a = _a, *b = _b; + long cmp = bank_pfn_start(a) - bank_pfn_start(b); + return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; +} void __init setup_arch(char **cmdline_p) { struct machine_desc *mdesc; - unwind_init(); - setup_processor(); mdesc = setup_machine_fdt(__atags_pointer); if (!mdesc) @@ -902,8 +910,14 @@ void __init setup_arch(char **cmdline_p) machine_desc = mdesc; machine_name = mdesc->name; - if (mdesc->soft_reboot) - reboot_setup("s"); +#ifdef CONFIG_ZONE_DMA + if (mdesc->dma_zone_size) { + extern unsigned long arm_dma_zone_size; + arm_dma_zone_size = mdesc->dma_zone_size; + } +#endif + if (mdesc->restart_mode) + reboot_setup(&mdesc->restart_mode); init_mm.start_code = (unsigned long) _text; init_mm.end_code = (unsigned long) _etext; @@ -916,12 +930,16 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); sanity_check_meminfo(); arm_memblock_init(&meminfo, mdesc); paging_init(mdesc); request_standard_resources(mdesc); + if (mdesc->restart) + arm_pm_restart = mdesc->restart; + unflatten_device_tree(); #ifdef CONFIG_SMP @@ -932,12 +950,6 @@ void __init setup_arch(char **cmdline_p) tcm_init(); -#ifdef CONFIG_ZONE_DMA - if (mdesc->dma_zone_size) { - extern unsigned long arm_dma_zone_size; - arm_dma_zone_size = mdesc->dma_zone_size; - } -#endif #ifdef CONFIG_MULTI_IRQ_HANDLER handle_arch_irq = mdesc->handle_irq; #endif diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 020e99c845e7..1f268bda4552 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -54,14 +54,18 @@ ENDPROC(cpu_suspend_abort) * r0 = control register value */ .align 5 + .pushsection .idmap.text,"ax" ENTRY(cpu_resume_mmu) ldr r3, =cpu_resume_after_mmu + instr_sync mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc mrc p15, 0, r0, c0, c0, 0 @ read id reg + instr_sync mov r0, r0 mov r0, r0 mov pc, r3 @ jump to virtual address ENDPROC(cpu_resume_mmu) + .popsection cpu_resume_after_mmu: bl cpu_init @ restore the und/abt/irq banked regs mov r0, #0 @ return zero on success diff --git a/arch/arm/kernel/smp.c 
b/arch/arm/kernel/smp.c index ef5640b9e218..57db122a4f62 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -31,6 +31,7 @@ #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/exception.h> +#include <asm/idmap.h> #include <asm/topology.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> @@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu) { struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); struct task_struct *idle = ci->idle; - pgd_t *pgd; int ret; /* @@ -84,29 +84,11 @@ int __cpuinit __cpu_up(unsigned int cpu) } /* - * Allocate initial page tables to allow the new CPU to - * enable the MMU safely. This essentially means a set - * of our "standard" page tables, with the addition of - * a 1:1 mapping for the physical address of the kernel. - */ - pgd = pgd_alloc(&init_mm); - if (!pgd) - return -ENOMEM; - - if (PHYS_OFFSET != PAGE_OFFSET) { -#ifndef CONFIG_HOTPLUG_CPU - identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end)); -#endif - identity_mapping_add(pgd, __pa(_stext), __pa(_etext)); - identity_mapping_add(pgd, __pa(_sdata), __pa(_edata)); - } - - /* * We need to tell the secondary core where to find * its stack and the page tables. */ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; - secondary_data.pgdir = virt_to_phys(pgd); + secondary_data.pgdir = virt_to_phys(idmap_pgd); secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); @@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu) secondary_data.stack = NULL; secondary_data.pgdir = 0; - if (PHYS_OFFSET != PAGE_OFFSET) { -#ifndef CONFIG_HOTPLUG_CPU - identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); -#endif - identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); - identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); - } - - pgd_free(&init_mm, pgd); - return ret; } @@ -550,6 +522,10 @@ static void ipi_cpu_stop(unsigned int cpu) local_fiq_disable(); local_irq_disable(); +#ifdef CONFIG_HOTPLUG_CPU + platform_cpu_kill(cpu); +#endif + while (1) cpu_relax(); } diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index a8a6682d6b52..c8e938553d47 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -10,8 +10,11 @@ */ #include <linux/init.h> #include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/err.h> #include <linux/smp.h> #include <linux/jiffies.h> #include <linux/clockchips.h> @@ -25,6 +28,7 @@ /* set up by the platform code */ void __iomem *twd_base; +static struct clk *twd_clk; static unsigned long twd_timer_rate; static struct clock_event_device __percpu **twd_evt; @@ -89,6 +93,52 @@ void twd_timer_stop(struct clock_event_device *clk) disable_percpu_irq(clk->irq); } +#ifdef CONFIG_CPU_FREQ + +/* + * Updates clockevent frequency when the cpu frequency changes. + * Called on the cpu that is changing frequency with interrupts disabled. + */ +static void twd_update_frequency(void *data) +{ + twd_timer_rate = clk_get_rate(twd_clk); + + clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate); +} + +static int twd_cpufreq_transition(struct notifier_block *nb, + unsigned long state, void *data) +{ + struct cpufreq_freqs *freqs = data; + + /* + * The twd clock events must be reprogrammed to account for the new + * frequency. The timer is local to a cpu, so cross-call to the + * changing cpu. 
+ */ + if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE) + smp_call_function_single(freqs->cpu, twd_update_frequency, + NULL, 1); + + return NOTIFY_OK; +} + +static struct notifier_block twd_cpufreq_nb = { + .notifier_call = twd_cpufreq_transition, +}; + +static int twd_cpufreq_init(void) +{ + if (!IS_ERR(twd_clk)) + return cpufreq_register_notifier(&twd_cpufreq_nb, + CPUFREQ_TRANSITION_NOTIFIER); + + return 0; +} +core_initcall(twd_cpufreq_init); + +#endif + static void __cpuinit twd_calibrate_rate(void) { unsigned long count; @@ -140,6 +190,35 @@ static irqreturn_t twd_handler(int irq, void *dev_id) return IRQ_NONE; } +static struct clk *twd_get_clock(void) +{ + struct clk *clk; + int err; + + clk = clk_get_sys("smp_twd", NULL); + if (IS_ERR(clk)) { + pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk)); + return clk; + } + + err = clk_prepare(clk); + if (err) { + pr_err("smp_twd: clock failed to prepare: %d\n", err); + clk_put(clk); + return ERR_PTR(err); + } + + err = clk_enable(clk); + if (err) { + pr_err("smp_twd: clock failed to enable: %d\n", err); + clk_unprepare(clk); + clk_put(clk); + return ERR_PTR(err); + } + + return clk; +} + /* * Setup the local clock events for a CPU. */ @@ -165,7 +244,13 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk) } } - twd_calibrate_rate(); + if (!twd_clk) + twd_clk = twd_get_clock(); + + if (!IS_ERR_OR_NULL(twd_clk)) + twd_timer_rate = clk_get_rate(twd_clk); + else + twd_calibrate_rate(); clk->name = "local_timer"; clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | @@ -173,15 +258,11 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk) clk->rating = 350; clk->set_mode = twd_set_mode; clk->set_next_event = twd_set_next_event; - clk->shift = 20; - clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift); - clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk); - clk->min_delta_ns = clockevent_delta2ns(0xf, clk); this_cpu_clk = __this_cpu_ptr(twd_evt); *this_cpu_clk = clk; - clockevents_register_device(clk); - + clockevents_config_and_register(clk, twd_timer_rate, + 0xf, 0xffffffff); enable_percpu_irq(clk->irq, 0); } diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 93a22d282c16..1794cc3b0f18 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -1,13 +1,12 @@ #include <linux/init.h> +#include <asm/idmap.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/memory.h> #include <asm/suspend.h> #include <asm/tlbflush.h> -static pgd_t *suspend_pgd; - extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); extern void cpu_resume_mmu(void); @@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr) *save_ptr = virt_to_phys(ptr); /* This must correspond to the LDM in cpu_resume() assembly */ - *ptr++ = virt_to_phys(suspend_pgd); + *ptr++ = virt_to_phys(idmap_pgd); *ptr++ = sp; *ptr++ = virt_to_phys(cpu_do_resume); @@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) struct mm_struct *mm = current->active_mm; int ret; - if (!suspend_pgd) + if (!idmap_pgd) return -EINVAL; /* @@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return ret; } - -static int __init cpu_suspend_init(void) -{ - suspend_pgd = pgd_alloc(&init_mm); - if (suspend_pgd) { - unsigned long addr = virt_to_phys(cpu_resume_mmu); - identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE); - } - return suspend_pgd ? 
0 : -ENOMEM; -} -core_initcall(cpu_suspend_init); diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 5f452f8fde05..df745188f5de 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -25,6 +25,7 @@ #include <linux/syscalls.h> #include <linux/perf_event.h> +#include <asm/opcodes.h> #include <asm/traps.h> #include <asm/uaccess.h> @@ -185,6 +186,21 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr) perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc); + res = arm_check_condition(instr, regs->ARM_cpsr); + switch (res) { + case ARM_OPCODE_CONDTEST_PASS: + break; + case ARM_OPCODE_CONDTEST_FAIL: + /* Condition failed - return to next instruction */ + regs->ARM_pc += 4; + return 0; + case ARM_OPCODE_CONDTEST_UNCOND: + /* If unconditional encoding - not a SWP, undef */ + return -EFAULT; + default: + return -EINVAL; + } + if (current->pid != previous_pid) { pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", current->comm, (unsigned long)current->pid); diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 30e302d33e0a..01ec453bb924 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -180,9 +180,9 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, */ void __init tcm_init(void) { - u32 tcm_status = read_cpuid_tcmstatus(); - u8 dtcm_banks = (tcm_status >> 16) & 0x03; - u8 itcm_banks = (tcm_status & 0x03); + u32 tcm_status; + u8 dtcm_banks; + u8 itcm_banks; size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data; size_t itcm_code_sz = &__eitcm_text - &__sitcm_text; char *start; @@ -191,6 +191,22 @@ void __init tcm_init(void) int ret; int i; + /* + * Prior to ARMv5 there is no TCM, and trying to read the status + * register will hang the processor. 
+ */ + if (cpu_architecture() < CPU_ARCH_ARMv5) { + if (dtcm_code_sz || itcm_code_sz) + pr_info("CPU TCM: %u bytes of DTCM and %u bytes of " + "ITCM code compiled in, but no TCM present " + "in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz); + return; + } + + tcm_status = read_cpuid_tcmstatus(); + dtcm_banks = (tcm_status >> 16) & 0x03; + itcm_banks = (tcm_status & 0x03); + /* Values greater than 2 for D/ITCM banks are "reserved" */ if (dtcm_banks > 2) dtcm_banks = 0; diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 1040c00405d0..8200deaa14f6 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -43,7 +43,7 @@ struct cputopo_arm cpu_topology[NR_CPUS]; -const struct cpumask *cpu_coregroup_mask(unsigned int cpu) +const struct cpumask *cpu_coregroup_mask(int cpu) { return &cpu_topology[cpu].core_sibling; } diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index e7e8365795c3..00df012c4678 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2); struct unwind_ctrl_block { unsigned long vrs[16]; /* virtual register set */ - unsigned long *insn; /* pointer to the current instructions word */ + const unsigned long *insn; /* pointer to the current instructions word */ int entries; /* number of entries left to interpret */ int byte; /* current byte number in the instructions word */ }; @@ -83,8 +83,9 @@ enum regs { PC = 15 }; -extern struct unwind_idx __start_unwind_idx[]; -extern struct unwind_idx __stop_unwind_idx[]; +extern const struct unwind_idx __start_unwind_idx[]; +static const struct unwind_idx *__origin_unwind_idx; +extern const struct unwind_idx __stop_unwind_idx[]; static DEFINE_SPINLOCK(unwind_lock); static LIST_HEAD(unwind_tables); @@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables); }) /* - * Binary search in the unwind index. The entries entries are + * Binary search in the unwind index. The entries are * guaranteed to be sorted in ascending order by the linker. + * + * start = first entry + * origin = first entry with positive offset (or stop if there is no such entry) + * stop - 1 = last entry */ -static struct unwind_idx *search_index(unsigned long addr, - struct unwind_idx *first, - struct unwind_idx *last) +static const struct unwind_idx *search_index(unsigned long addr, + const struct unwind_idx *start, + const struct unwind_idx *origin, + const struct unwind_idx *stop) { - pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last); + unsigned long addr_prel31; + + pr_debug("%s(%08lx, %p, %p, %p)\n", + __func__, addr, start, origin, stop); + + /* + * only search in the section with the matching sign. This way the + * prel31 numbers can be compared as unsigned longs. + */ + if (addr < (unsigned long)start) + /* negative offsets: [start; origin) */ + stop = origin; + else + /* positive offsets: [origin; stop) */ + start = origin; + + /* prel31 for address relavive to start */ + addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff; - if (addr < first->addr) { + while (start < stop - 1) { + const struct unwind_idx *mid = start + ((stop - start) >> 1); + + /* + * As addr_prel31 is relative to start an offset is needed to + * make it relative to mid. 
+ */ + if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) < + mid->addr_offset) + stop = mid; + else { + /* keep addr_prel31 relative to start */ + addr_prel31 -= ((unsigned long)mid - + (unsigned long)start); + start = mid; + } + } + + if (likely(start->addr_offset <= addr_prel31)) + return start; + else { pr_warning("unwind: Unknown symbol address %08lx\n", addr); return NULL; - } else if (addr >= last->addr) - return last; + } +} - while (first < last - 1) { - struct unwind_idx *mid = first + ((last - first + 1) >> 1); +static const struct unwind_idx *unwind_find_origin( + const struct unwind_idx *start, const struct unwind_idx *stop) +{ + pr_debug("%s(%p, %p)\n", __func__, start, stop); + while (start < stop) { + const struct unwind_idx *mid = start + ((stop - start) >> 1); - if (addr < mid->addr) - last = mid; + if (mid->addr_offset >= 0x40000000) + /* negative offset */ + start = mid + 1; else - first = mid; + /* positive offset */ + stop = mid; } - - return first; + pr_debug("%s -> %p\n", __func__, stop); + return stop; } -static struct unwind_idx *unwind_find_idx(unsigned long addr) +static const struct unwind_idx *unwind_find_idx(unsigned long addr) { - struct unwind_idx *idx = NULL; + const struct unwind_idx *idx = NULL; unsigned long flags; pr_debug("%s(%08lx)\n", __func__, addr); - if (core_kernel_text(addr)) + if (core_kernel_text(addr)) { + if (unlikely(!__origin_unwind_idx)) + __origin_unwind_idx = + unwind_find_origin(__start_unwind_idx, + __stop_unwind_idx); + /* main unwind table */ idx = search_index(addr, __start_unwind_idx, - __stop_unwind_idx - 1); - else { + __origin_unwind_idx, + __stop_unwind_idx); + } else { /* module unwind tables */ struct unwind_table *table; @@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr) if (addr >= table->begin_addr && addr < table->end_addr) { idx = search_index(addr, table->start, - table->stop - 1); + table->origin, + table->stop); /* Move-to-front to exploit common traces */ list_move(&table->list, &unwind_tables); break; @@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) int unwind_frame(struct stackframe *frame) { unsigned long high, low; - struct unwind_idx *idx; + const struct unwind_idx *idx; struct unwind_ctrl_block ctrl; /* only go to a higher address on the stack */ @@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, unsigned long text_size) { unsigned long flags; - struct unwind_idx *idx; struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL); pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size, @@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, if (!tab) return tab; - tab->start = (struct unwind_idx *)start; - tab->stop = (struct unwind_idx *)(start + size); + tab->start = (const struct unwind_idx *)start; + tab->stop = (const struct unwind_idx *)(start + size); + tab->origin = unwind_find_origin(tab->start, tab->stop); tab->begin_addr = text_addr; tab->end_addr = text_addr + text_size; - /* Convert the symbol addresses to absolute values */ - for (idx = tab->start; idx < tab->stop; idx++) - idx->addr = prel31_to_addr(&idx->addr); - spin_lock_irqsave(&unwind_lock, flags); list_add_tail(&tab->list, &unwind_tables); spin_unlock_irqrestore(&unwind_lock, flags); @@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab) kfree(tab); } - -int __init unwind_init(void) -{ - struct unwind_idx *idx; - - /* Convert the symbol addresses to 
absolute values */ - for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++) - idx->addr = prel31_to_addr(&idx->addr); - - pr_debug("unwind: ARM stack unwinding initialised\n"); - - return 0; -} diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 20b3041e0860..f76e75548670 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -13,6 +13,12 @@ *(.proc.info.init) \ VMLINUX_SYMBOL(__proc_info_end) = .; +#define IDMAP_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__idmap_text_start) = .; \ + *(.idmap.text) \ + VMLINUX_SYMBOL(__idmap_text_end) = .; + #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x) #define ARM_CPU_KEEP(x) x @@ -92,6 +98,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IDMAP_TEXT #ifdef CONFIG_MMU *(.fixup) #endif |
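
A minimal standalone sketch of the lock-free epoch protocol that the sched_clock.c hunk above introduces: the updater writes epoch_cyc, epoch_ns and epoch_cyc_copy in strict order, and the reader retries until both copies of epoch_cyc match, so it never combines an old cycle count with a new nanosecond base. The struct and function names mirror the kernel's, but the barriers are modelled with compiler __atomic fences rather than smp_wmb()/smp_rmb(), and the main() driver and constants are purely illustrative.

#include <stdint.h>
#include <stdio.h>

struct clock_data {
	uint64_t epoch_ns;
	uint32_t epoch_cyc;
	uint32_t epoch_cyc_copy;
	uint32_t mult;
	uint32_t shift;
};

static struct clock_data cd = { .mult = 1, .shift = 0 };

static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

/* Writer: publish a new epoch so the counter can wrap safely. */
static void update_epoch(uint32_t cyc, uint64_t ns)
{
	cd.epoch_cyc = cyc;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* smp_wmb() in the kernel */
	cd.epoch_ns = ns;
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* smp_wmb() in the kernel */
	cd.epoch_cyc_copy = cyc;
}

/* Reader: retry while an update is in flight. */
static uint64_t cyc_to_sched_clock(uint32_t cyc, uint32_t mask)
{
	uint64_t epoch_ns;
	uint32_t epoch_cyc;

	do {
		epoch_cyc = cd.epoch_cyc;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* smp_rmb() in the kernel */
		epoch_ns = cd.epoch_ns;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* smp_rmb() in the kernel */
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}

int main(void)
{
	update_epoch(1000, 0);
	printf("%llu ns\n", (unsigned long long)cyc_to_sched_clock(1250, 0xffffffff));
	return 0;
}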
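
The unwind.c changes above stop converting the exception index to absolute addresses at boot and instead compare the 31-bit place-relative (prel31) offsets directly, splitting the table at the first entry with a positive offset. Below is a small illustrative decoder for a single prel31 field; it reuses the kernel macro's name but is a standalone program, and the example offset is made up.

#include <stdint.h>
#include <stdio.h>

/*
 * A prel31 value is a signed 31-bit offset stored relative to the word
 * that contains it: sign-extend bit 30, then add the word's own address.
 */
static uintptr_t prel31_to_addr(const uint32_t *ptr)
{
	int32_t offset = (int32_t)(*ptr << 1) >> 1;	/* sign-extend bit 30 */

	return (uintptr_t)ptr + offset;
}

int main(void)
{
	static const uint32_t entry = 0x7ffffff0;	/* prel31 encoding of -16 */

	printf("entry at %p points to %p\n",
	       (const void *)&entry, (void *)prel31_to_addr(&entry));
	return 0;
}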
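
The swp_emulate.c hunk above makes the emulation honour the instruction's condition field via arm_check_condition() before performing the swap. As a rough standalone illustration of what such a check involves (not the kernel helper itself), the sketch below decodes bits [31:28] of an ARM instruction and evaluates them against the NZCV flags; the encoded SWPEQ instruction in main() is only an example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSR_N	(1u << 31)
#define PSR_Z	(1u << 30)
#define PSR_C	(1u << 29)
#define PSR_V	(1u << 28)

/* Returns true if the instruction's condition passes for this CPSR. */
static bool cond_passes(uint32_t instr, uint32_t cpsr)
{
	bool n = cpsr & PSR_N, z = cpsr & PSR_Z;
	bool c = cpsr & PSR_C, v = cpsr & PSR_V;

	switch (instr >> 28) {
	case 0x0: return z;		/* EQ */
	case 0x1: return !z;		/* NE */
	case 0x2: return c;		/* CS */
	case 0x3: return !c;		/* CC */
	case 0x4: return n;		/* MI */
	case 0x5: return !n;		/* PL */
	case 0x6: return v;		/* VS */
	case 0x7: return !v;		/* VC */
	case 0x8: return c && !z;	/* HI */
	case 0x9: return !c || z;	/* LS */
	case 0xa: return n == v;	/* GE */
	case 0xb: return n != v;	/* LT */
	case 0xc: return !z && n == v;	/* GT */
	case 0xd: return z || n != v;	/* LE */
	case 0xe: return true;		/* AL */
	default:  return false;		/* 0xf: unconditional space, not a SWP */
	}
}

int main(void)
{
	uint32_t swpeq = 0x01003092;	/* SWPEQ r3, r2, [r0] */

	printf("Z set:   %s\n", cond_passes(swpeq, PSR_Z) ? "executes" : "skipped");
	printf("Z clear: %s\n", cond_passes(swpeq, 0) ? "executes" : "skipped");
	return 0;
}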