Diffstat (limited to 'tools')
70 files changed, 1519 insertions, 295 deletions
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 6ebd3e6a1fd1..5e3c673fa3f4 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -27,6 +27,8 @@ #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_READONLY_MEM +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + #define KVM_REG_SIZE(id) \ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) @@ -114,6 +116,8 @@ struct kvm_debug_exit_arch { }; struct kvm_sync_regs { + /* Used with KVM_CAP_ARM_USER_IRQ */ + __u64 device_irq_level; }; struct kvm_arch_memory_slot { @@ -192,13 +196,17 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 +#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff #define VGIC_LEVEL_INFO_LINE_LEVEL 0 -#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 +#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 +#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index c2860358ae3e..70eea2ecc663 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -39,6 +39,8 @@ #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_READONLY_MEM +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + #define KVM_REG_SIZE(id) \ (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) @@ -143,6 +145,8 @@ struct kvm_debug_exit_arch { #define KVM_GUESTDBG_USE_HW (1 << 17) struct kvm_sync_regs { + /* Used with KVM_CAP_ARM_USER_IRQ */ + __u64 device_irq_level; }; struct kvm_arch_memory_slot { @@ -212,13 +216,17 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 +#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff #define VGIC_LEVEL_INFO_LINE_LEVEL 0 -#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 +#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 +#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 +#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 4edbe4bb0e8b..07fbeb927834 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h @@ -29,6 +29,9 @@ #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_GUEST_DEBUG +/* Not always available, but if it is, this is the correct offset. 
*/ +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + struct kvm_regs { __u64 pc; __u64 cr; diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 7f4fd65e9208..3dd2a1d308dd 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h @@ -26,6 +26,8 @@ #define KVM_DEV_FLIC_ADAPTER_REGISTER 6 #define KVM_DEV_FLIC_ADAPTER_MODIFY 7 #define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 +#define KVM_DEV_FLIC_AISM 9 +#define KVM_DEV_FLIC_AIRQ_INJECT 10 /* * We can have up to 4*64k pending subchannels + 8 adapter interrupts, * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. @@ -41,7 +43,14 @@ struct kvm_s390_io_adapter { __u8 isc; __u8 maskable; __u8 swap; - __u8 pad; + __u8 flags; +}; + +#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01 + +struct kvm_s390_ais_req { + __u8 isc; + __u16 mode; }; #define KVM_S390_IO_ADAPTER_MASK 1 @@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine { #define KVM_S390_VM_CPU_FEAT_CMMA 10 #define KVM_S390_VM_CPU_FEAT_PFMFI 11 #define KVM_S390_VM_CPU_FEAT_SIGPIF 12 +#define KVM_S390_VM_CPU_FEAT_KSS 13 struct kvm_s390_vm_cpu_feat { __u64 feat[16]; }; @@ -198,6 +208,10 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_VRS (1UL << 6) #define KVM_SYNC_RICCB (1UL << 7) #define KVM_SYNC_FPRS (1UL << 8) +#define KVM_SYNC_GSCB (1UL << 9) +/* length and alignment of the sdnx as a power of two */ +#define SDNXC 8 +#define SDNXL (1UL << SDNXC) /* definition of registers in kvm_run */ struct kvm_sync_regs { __u64 prefix; /* prefix register */ @@ -218,8 +232,16 @@ struct kvm_sync_regs { }; __u8 reserved[512]; /* for future vector expansion */ __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ - __u8 padding[52]; /* riccb needs to be 64byte aligned */ + __u8 padding1[52]; /* riccb needs to be 64byte aligned */ __u8 riccb[64]; /* runtime instrumentation controls block */ + __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ + union { + __u8 sdnx[SDNXL]; /* state description annex */ + struct { + __u64 reserved1[2]; + __u64 gscb[4]; + }; + }; }; #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 0fe00446f9ca..2701e5f8145b 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -202,6 +202,8 @@ #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ + /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 85599ad4d024..5dff775af7cd 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -36,6 +36,12 @@ # define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31)) #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ +#ifdef CONFIG_X86_5LEVEL +# define DISABLE_LA57 0 +#else +# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -55,7 +61,7 @@ #define DISABLED_MASK13 0 #define DISABLED_MASK14 0 #define DISABLED_MASK15 0 -#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) +#define DISABLED_MASK16 
(DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) #define DISABLED_MASK17 0 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index fac9a5c0abe9..d91ba04dd007 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -53,6 +53,12 @@ # define NEED_MOVBE 0 #endif +#ifdef CONFIG_X86_5LEVEL +# define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31)) +#else +# define NEED_LA57 0 +#endif + #ifdef CONFIG_X86_64 #ifdef CONFIG_PARAVIRT /* Paravirtualized systems may not have PSE or PGE available */ @@ -98,7 +104,7 @@ #define REQUIRED_MASK13 0 #define REQUIRED_MASK14 0 #define REQUIRED_MASK15 0 -#define REQUIRED_MASK16 0 +#define REQUIRED_MASK16 (NEED_LA57) #define REQUIRED_MASK17 0 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 739c0c594022..c2824d02ba37 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -9,6 +9,9 @@ #include <linux/types.h> #include <linux/ioctl.h> +#define KVM_PIO_PAGE_OFFSET 1 +#define KVM_COALESCED_MMIO_PAGE_OFFSET 2 + #define DE_VECTOR 0 #define DB_VECTOR 1 #define BP_VECTOR 3 diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index 14458658e988..690a2dcf4078 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h @@ -76,7 +76,11 @@ #define EXIT_REASON_WBINVD 54 #define EXIT_REASON_XSETBV 55 #define EXIT_REASON_APIC_WRITE 56 +#define EXIT_REASON_RDRAND 57 #define EXIT_REASON_INVPCID 58 +#define EXIT_REASON_VMFUNC 59 +#define EXIT_REASON_ENCLS 60 +#define EXIT_REASON_RDSEED 61 #define EXIT_REASON_PML_FULL 62 #define EXIT_REASON_XSAVES 63 #define EXIT_REASON_XRSTORS 64 @@ -90,6 +94,7 @@ { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ { EXIT_REASON_CPUID, "CPUID" }, \ { EXIT_REASON_HLT, "HLT" }, \ + { EXIT_REASON_INVD, "INVD" }, \ { EXIT_REASON_INVLPG, "INVLPG" }, \ { EXIT_REASON_RDPMC, "RDPMC" }, \ { EXIT_REASON_RDTSC, "RDTSC" }, \ @@ -108,6 +113,8 @@ { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ { EXIT_REASON_MSR_READ, "MSR_READ" }, \ { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ + { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ + { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \ { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ { EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \ { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ @@ -115,20 +122,24 @@ { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ - { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \ - { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \ + { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ + { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \ + { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \ { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ { EXIT_REASON_INVEPT, "INVEPT" }, \ + { EXIT_REASON_RDTSCP, "RDTSCP" }, \ { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \ + { EXIT_REASON_INVVPID, "INVVPID" }, \ { EXIT_REASON_WBINVD, "WBINVD" }, \ + { EXIT_REASON_XSETBV, "XSETBV" }, \ { EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \ - { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ - { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ - { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \ - { 
EXIT_REASON_INVD, "INVD" }, \ - { EXIT_REASON_INVVPID, "INVVPID" }, \ + { EXIT_REASON_RDRAND, "RDRAND" }, \ { EXIT_REASON_INVPCID, "INVPCID" }, \ + { EXIT_REASON_VMFUNC, "VMFUNC" }, \ + { EXIT_REASON_ENCLS, "ENCLS" }, \ + { EXIT_REASON_RDSEED, "RDSEED" }, \ + { EXIT_REASON_PML_FULL, "PML_FULL" }, \ { EXIT_REASON_XSAVES, "XSAVES" }, \ { EXIT_REASON_XRSTORS, "XRSTORS" } diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c index ebc6dceddb58..7598361ef1f1 100644 --- a/tools/build/feature/test-bpf.c +++ b/tools/build/feature/test-bpf.c @@ -29,6 +29,7 @@ int main(void) attr.log_size = 0; attr.log_level = 0; attr.kern_version = 0; + attr.prog_flags = 0; /* * Test existence of __NR_bpf and BPF_PROG_LOAD. diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h index 390d7c9685fd..4ce25d43e8e3 100644 --- a/tools/include/linux/filter.h +++ b/tools/include/linux/filter.h @@ -208,6 +208,16 @@ .off = OFF, \ .imm = IMM }) +/* Unconditional jumps, goto pc + off16 */ + +#define BPF_JMP_A(OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_JA, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = 0 }) + /* Function call */ #define BPF_EMIT_CALL(FUNC) \ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e553529929f6..94dfa9def355 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -132,6 +132,13 @@ enum bpf_attach_type { */ #define BPF_F_ALLOW_OVERRIDE (1U << 0) +/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the + * verifier will perform strict alignment checking as if the kernel + * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, + * and NET_IP_ALIGN defined to 2. + */ +#define BPF_F_STRICT_ALIGNMENT (1U << 0) + #define BPF_PSEUDO_MAP_FD 1 /* flags for BPF_MAP_UPDATE_ELEM command */ @@ -177,6 +184,7 @@ union bpf_attr { __u32 log_size; /* size of user buffer */ __aligned_u64 log_buf; /* user supplied buffer */ __u32 kern_version; /* checked when prog_type=kprobe */ + __u32 prog_flags; }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -481,8 +489,7 @@ union bpf_attr { * u32 bpf_get_socket_uid(skb) * Get the owner uid of the socket stored inside sk_buff. * @skb: pointer to skb - * Return: uid of the socket owner on success or 0 if the socket pointer - * inside sk_buff is NULL + * Return: uid of the socket owner on success or overflowuid if failed. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h index d538897b8e08..17b10304c393 100644 --- a/tools/include/uapi/linux/stat.h +++ b/tools/include/uapi/linux/stat.h @@ -48,17 +48,13 @@ * tv_sec holds the number of seconds before (negative) or after (positive) * 00:00:00 1st January 1970 UTC. * - * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is - * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time. - * - * Note that if both tv_sec and tv_nsec are non-zero, then the two values must - * either be both positive or both negative. + * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time. * * __reserved is held in case we need a yet finer resolution. 
*/ struct statx_timestamp { __s64 tv_sec; - __s32 tv_nsec; + __u32 tv_nsec; __s32 __reserved; }; diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 4fe444b8092e..6e178987af8e 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -117,6 +117,28 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); } +int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, int strict_alignment, + const char *license, __u32 kern_version, + char *log_buf, size_t log_buf_sz) +{ + union bpf_attr attr; + + bzero(&attr, sizeof(attr)); + attr.prog_type = type; + attr.insn_cnt = (__u32)insns_cnt; + attr.insns = ptr_to_u64(insns); + attr.license = ptr_to_u64(license); + attr.log_buf = ptr_to_u64(log_buf); + attr.log_size = log_buf_sz; + attr.log_level = 2; + log_buf[0] = 0; + attr.kern_version = kern_version; + attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0; + + return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); +} + int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags) { diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index edb4daeff7a5..972bd8333eb7 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -35,6 +35,10 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz); +int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns, + size_t insns_cnt, int strict_alignment, + const char *license, __u32 kern_version, + char *log_buf, size_t log_buf_sz); int bpf_map_update_elem(int fd, const void *key, const void *value, __u64 flags); diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 282a60368b14..5f66697fe1e0 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -192,7 +192,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, "complete_and_exit", "kvm_spurious_fault", "__reiserfs_panic", - "lbug_with_loc" + "lbug_with_loc", + "fortify_panic", }; if (func->bind == STB_WEAK) diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt index e6c9902c6d82..165c2b1d4317 100644 --- a/tools/perf/Documentation/perf-probe.txt +++ b/tools/perf/Documentation/perf-probe.txt @@ -240,9 +240,13 @@ Add a probe on schedule() function 12th line with recording cpu local variable: or ./perf probe --add='schedule:12 cpu' - this will add one or more probes which has the name start with "schedule". +Add one or more probes which has the name start with "schedule". - Add probes on lines in schedule() function which calls update_rq_clock(). + ./perf probe schedule* + or + ./perf probe --add='schedule*' + +Add probes on lines in schedule() function which calls update_rq_clock(). ./perf probe 'schedule;update_rq_clock*' or diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt index dfbb506d2c34..142606c0ec9c 100644 --- a/tools/perf/Documentation/perf-script-perl.txt +++ b/tools/perf/Documentation/perf-script-perl.txt @@ -39,7 +39,7 @@ EVENT HANDLERS When perf script is invoked using a trace script, a user-defined 'handler function' is called for each event in the trace. 
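[Editorial illustration, not part of the patch] The tools/lib/bpf hunk above adds bpf_verify_program(): unlike bpf_load_program(), it always requests a verbose verifier log (log_level = 2) and can set the new BPF_F_STRICT_ALIGNMENT flag through prog_flags. A minimal usage sketch follows; the two-instruction "return 0" program, the log buffer size, and the include setup (uapi linux/bpf.h plus tools/lib/bpf's bpf.h on the include path, as the BPF selftests assume) are illustrative assumptions, not taken from the patch.

```c
#include <stdio.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>          /* tools/lib/bpf/bpf.h: bpf_verify_program() */

int main(void)
{
	/* "return 0" built from uapi opcode macros only, so the sketch does
	 * not depend on the tools/include/linux/filter.h insn helpers. */
	struct bpf_insn prog[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },		/* exit   */
	};
	char log[8192];
	int fd;

	fd = bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
				sizeof(prog) / sizeof(prog[0]),
				1 /* strict alignment */, "GPL",
				0 /* kern_version */, log, sizeof(log));
	printf("prog fd: %d\nverifier log:\n%s", fd, log);
	if (fd >= 0)
		close(fd);
	return fd >= 0 ? 0 : 1;
}
```

Because the helper always captures the full verifier log, a caller can inspect the per-instruction register state (the same lines the test_align matches later in this series key on) even when the program loads successfully.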
If there's no handler function defined for a given event type, the event is -ignored (or passed to a 'trace_handled' function, see below) and the +ignored (or passed to a 'trace_unhandled' function, see below) and the next event is processed. Most of the event's field values are passed as arguments to the diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt index 54acba221558..51ec2d20068a 100644 --- a/tools/perf/Documentation/perf-script-python.txt +++ b/tools/perf/Documentation/perf-script-python.txt @@ -149,10 +149,8 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu, print "id=%d, args=%s\n" % \ (id, args), -def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, - common_pid, common_comm): - print_header(event_name, common_cpu, common_secs, common_nsecs, - common_pid, common_comm) +def trace_unhandled(event_name, context, event_fields_dict): + print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) def print_header(event_name, cpu, secs, nsecs, pid, comm): print "%-20s %5u %05u.%09u %8u %-20s " % \ @@ -321,7 +319,7 @@ So those are the essential steps in writing and running a script. The process can be generalized to any tracepoint or set of tracepoints you're interested in - basically find the tracepoint(s) you're interested in by looking at the list of available events shown by -'perf list' and/or look in /sys/kernel/debug/tracing events for +'perf list' and/or look in /sys/kernel/debug/tracing/events/ for detailed event and field info, record the corresponding trace data using 'perf record', passing it the list of interesting events, generate a skeleton script using 'perf script -g python' and modify the @@ -334,7 +332,7 @@ right place, you can have your script listed alongside the other scripts listed by the 'perf script -l' command e.g.: ---- -root@tropicana:~# perf script -l +# perf script -l List of available trace scripts: wakeup-latency system-wide min/max/avg wakeup latency rw-by-file <comm> r/w activity for a program, by file @@ -383,8 +381,6 @@ source tree: ---- # ls -al kernel-source/tools/perf/scripts/python - -root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python total 32 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 . drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 .. @@ -399,7 +395,7 @@ otherwise your script won't show up at run-time), 'perf script -l' should show a new entry for your script: ---- -root@tropicana:~# perf script -l +# perf script -l List of available trace scripts: wakeup-latency system-wide min/max/avg wakeup latency rw-by-file <comm> r/w activity for a program, by file @@ -437,7 +433,7 @@ EVENT HANDLERS When perf script is invoked using a trace script, a user-defined 'handler function' is called for each event in the trace. If there's no handler function defined for a given event type, the event is -ignored (or passed to a 'trace_handled' function, see below) and the +ignored (or passed to a 'trace_unhandled' function, see below) and the next event is processed. 
Most of the event's field values are passed as arguments to the @@ -532,7 +528,7 @@ can implement a set of optional functions: gives scripts a chance to do setup tasks: ---- -def trace_begin: +def trace_begin(): pass ---- @@ -541,7 +537,7 @@ def trace_begin: as display results: ---- -def trace_end: +def trace_end(): pass ---- @@ -550,8 +546,7 @@ def trace_end: of common arguments are passed into it: ---- -def trace_unhandled(event_name, context, common_cpu, common_secs, - common_nsecs, common_pid, common_comm): +def trace_unhandled(event_name, context, event_fields_dict): pass ---- diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index cb0eda3925e6..3517e204a2b3 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -311,6 +311,10 @@ include::itrace.txt[] Set the maximum number of program blocks to print with brstackasm for each sample. +--inline:: + If a callgraph address belongs to an inlined function, the inline stack + will be printed. Each entry has function name and file/line. + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-script-perl[1], diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 8354d04b392f..1f4fbc9a3292 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS) include $(srctree)/tools/scripts/Makefile.arch -$(call detected_var,ARCH) +$(call detected_var,SRCARCH) NO_PERF_REGS := 1 # Additional ARCH settings for ppc -ifeq ($(ARCH),powerpc) +ifeq ($(SRCARCH),powerpc) NO_PERF_REGS := 0 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 endif # Additional ARCH settings for x86 -ifeq ($(ARCH),x86) +ifeq ($(SRCARCH),x86) $(call detected,CONFIG_X86) ifeq (${IS_64_BIT}, 1) CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated @@ -43,12 +43,12 @@ ifeq ($(ARCH),x86) NO_PERF_REGS := 0 endif -ifeq ($(ARCH),arm) +ifeq ($(SRCARCH),arm) NO_PERF_REGS := 0 LIBUNWIND_LIBS = -lunwind -lunwind-arm endif -ifeq ($(ARCH),arm64) +ifeq ($(SRCARCH),arm64) NO_PERF_REGS := 0 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 endif @@ -61,7 +61,7 @@ endif # Disable it on all other architectures in case libdw unwind # support is detected in system. Add supported architectures # to the check. -ifneq ($(ARCH),$(filter $(ARCH),x86 arm)) +ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm)) NO_LIBDW_DWARF_UNWIND := 1 endif @@ -115,9 +115,9 @@ endif FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf -FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi +FEATURE_CHECK_CFLAGS-bpf = -I. 
-I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi # include ARCH specific config --include $(src-perf)/arch/$(ARCH)/Makefile +-include $(src-perf)/arch/$(SRCARCH)/Makefile ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET @@ -228,12 +228,12 @@ ifeq ($(DEBUG),0) endif INC_FLAGS += -I$(src-perf)/util/include -INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include +INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include INC_FLAGS += -I$(srctree)/tools/include/uapi INC_FLAGS += -I$(srctree)/tools/include/ -INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi -INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/ -INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/ +INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi +INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/ +INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/ # $(obj-perf) for generated common-cmds.h # $(obj-perf)/util for generated bison/flex headers @@ -355,7 +355,7 @@ ifndef NO_LIBELF ifndef NO_DWARF ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) - msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); + msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled); NO_DWARF := 1 else CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) @@ -380,7 +380,7 @@ ifndef NO_LIBELF CFLAGS += -DHAVE_BPF_PROLOGUE $(call detected,CONFIG_BPF_PROLOGUE) else - msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset()); + msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset()); endif else msg := $(warning DWARF support is off, BPF prologue is disabled); @@ -406,7 +406,7 @@ ifdef PERF_HAVE_JITDUMP endif endif -ifeq ($(ARCH),powerpc) +ifeq ($(SRCARCH),powerpc) ifndef NO_DWARF CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX endif @@ -487,7 +487,7 @@ else endif ifndef NO_LOCAL_LIBUNWIND - ifeq ($(ARCH),$(filter $(ARCH),arm arm64)) + ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64)) $(call feature_check,libunwind-debug-frame) ifneq ($(feature-libunwind-debug-frame), 1) msg := $(warning No debug_frame support found in libunwind); @@ -740,7 +740,7 @@ ifeq (${IS_64_BIT}, 1) NO_PERF_READ_VDSO32 := 1 endif endif - ifneq ($(ARCH), x86) + ifneq ($(SRCARCH), x86) NO_PERF_READ_VDSOX32 := 1 endif ifndef NO_PERF_READ_VDSOX32 @@ -769,7 +769,7 @@ ifdef LIBBABELTRACE endif ifndef NO_AUXTRACE - ifeq ($(ARCH),x86) + ifeq ($(SRCARCH),x86) ifeq ($(feature-get_cpuid), 0) msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); NO_AUXTRACE := 1 @@ -872,7 +872,7 @@ sysconfdir = $(prefix)/etc ETC_PERFCONFIG = etc/perfconfig endif ifndef lib -ifeq ($(ARCH)$(IS_64_BIT), x861) +ifeq ($(SRCARCH)$(IS_64_BIT), x861) lib = lib64 else lib = lib diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 79fe31f20a17..5008f51a08a2 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -226,7 +226,7 @@ endif ifeq ($(config),0) include $(srctree)/tools/scripts/Makefile.arch --include arch/$(ARCH)/Makefile +-include arch/$(SRCARCH)/Makefile endif # The FEATURE_DUMP_EXPORT holds location of the actual diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build index 109eb75cf7de..d9b6af837c7d 100644 --- a/tools/perf/arch/Build +++ b/tools/perf/arch/Build @@ -1,2 +1,2 @@ libperf-y += 
common.o -libperf-y += $(ARCH)/ +libperf-y += $(SRCARCH)/ diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c index 837067f48a4c..6b40e9f01740 100644 --- a/tools/perf/arch/common.c +++ b/tools/perf/arch/common.c @@ -26,6 +26,7 @@ const char *const arm64_triplets[] = { const char *const powerpc_triplets[] = { "powerpc-unknown-linux-gnu-", + "powerpc-linux-gnu-", "powerpc64-unknown-linux-gnu-", "powerpc64-linux-gnu-", "powerpc64le-linux-gnu-", diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index d05aec491cff..4761b0d7fcb5 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2494,6 +2494,8 @@ int cmd_script(int argc, const char **argv) "Enable kernel symbol demangling"), OPT_STRING(0, "time", &script.time_str, "str", "Time span of interest (start,stop)"), + OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name, + "Show inline function"), OPT_END() }; const char * const script_subcommands[] = { "record", "report", NULL }; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index a935b5023732..ad9324d1daf9 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1578,6 +1578,7 @@ static void print_header(int argc, const char **argv) static void print_footer(void) { FILE *output = stat_config.output; + int n; if (!null_run) fprintf(output, "\n"); @@ -1590,7 +1591,9 @@ static void print_footer(void) } fprintf(output, "\n\n"); - if (print_free_counters_hint) + if (print_free_counters_hint && + sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 && + n > 0) fprintf(output, "Some events weren't counted. Try disabling the NMI watchdog:\n" " echo 0 > /proc/sys/kernel/nmi_watchdog\n" diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index d014350adc52..4b2a5d298197 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -681,6 +681,10 @@ static struct syscall_fmt { { .name = "mlockall", .errmsg = true, .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, { .name = "mmap", .hexret = true, +/* The standard mmap maps to old_mmap on s390x */ +#if defined(__s390x__) + .alias = "old_mmap", +#endif .arg_scnprintf = { [0] = SCA_HEX, /* addr */ [2] = SCA_MMAP_PROT, /* prot */ [3] = SCA_MMAP_FLAGS, /* flags */ }, }, diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build index 9213a1273697..999a4e878162 100644 --- a/tools/perf/pmu-events/Build +++ b/tools/perf/pmu-events/Build @@ -2,7 +2,7 @@ hostprogs := jevents jevents-y += json.o jsmn.o jevents.o pmu-events-y += pmu-events.o -JDIR = pmu-events/arch/$(ARCH) +JDIR = pmu-events/arch/$(SRCARCH) JSON = $(shell [ -d $(JDIR) ] && \ find $(JDIR) -name '*.json' -o -name 'mapfile.csv') # @@ -10,4 +10,4 @@ JSON = $(shell [ -d $(JDIR) ] && \ # directory and create tables in pmu-events.c. 
# $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS) - $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) + $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index af58ebc243ef..84222bdb8689 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -75,7 +75,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ $(Q)echo ';' >> $@ -ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc)) +ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc)) perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o endif diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c index e7664fe3bd33..8ba2c4618fe9 100644 --- a/tools/perf/tests/bp_signal.c +++ b/tools/perf/tests/bp_signal.c @@ -288,3 +288,17 @@ int test__bp_signal(int subtest __maybe_unused) return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ? TEST_OK : TEST_FAIL; } + +bool test__bp_signal_is_supported(void) +{ +/* + * The powerpc so far does not have support to even create + * instruction breakpoint using the perf event interface. + * Once it's there we can release this. + */ +#ifdef __powerpc__ + return false; +#else + return true; +#endif +} diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 9e08d297f1a9..3ccfd58a8c3c 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -97,10 +97,12 @@ static struct test generic_tests[] = { { .desc = "Breakpoint overflow signal handler", .func = test__bp_signal, + .is_supported = test__bp_signal_is_supported, }, { .desc = "Breakpoint overflow sampling", .func = test__bp_signal_overflow, + .is_supported = test__bp_signal_is_supported, }, { .desc = "Number of exit events of a simple workload", @@ -401,6 +403,11 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist) if (!perf_test__matches(t, curr, argc, argv)) continue; + if (t->is_supported && !t->is_supported()) { + pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc); + continue; + } + pr_info("%2d: %-*s:", i, width, t->desc); if (intlist__find(skiplist, i)) { diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index 1f14e7612cbb..94b7c7b02bde 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -229,6 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, unsigned char buf2[BUFSZ]; size_t ret_len; u64 objdump_addr; + const char *objdump_name; + char decomp_name[KMOD_DECOMP_LEN]; int ret; pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); @@ -289,9 +291,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, state->done[state->done_cnt++] = al.map->start; } + objdump_name = al.map->dso->long_name; + if (dso__needs_decompress(al.map->dso)) { + if (dso__decompress_kmodule_path(al.map->dso, objdump_name, + decomp_name, + sizeof(decomp_name)) < 0) { + pr_debug("decompression failed\n"); + return -1; + } + + objdump_name = decomp_name; + } + /* Read the object code using objdump */ objdump_addr = map__rip_2objdump(al.map, al.addr); - ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); + ret = read_via_objdump(objdump_name, objdump_addr, buf2, len); + + if (dso__needs_decompress(al.map->dso)) + unlink(objdump_name); + if (ret > 0) { /* * The kernel 
maps are inaccurate - assume objdump is right in diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index 32873ec91a4e..cf00ebad2ef5 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c @@ -83,7 +83,7 @@ int test__task_exit(int subtest __maybe_unused) evsel = perf_evlist__first(evlist); evsel->attr.task = 1; - evsel->attr.sample_freq = 0; + evsel->attr.sample_freq = 1; evsel->attr.inherit = 0; evsel->attr.watermark = 0; evsel->attr.wakeup_events = 1; diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index 631859629403..577363809c9b 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -34,6 +34,7 @@ struct test { int (*get_nr)(void); const char *(*get_desc)(int subtest); } subtest; + bool (*is_supported)(void); }; /* Tests */ @@ -99,6 +100,8 @@ const char *test__clang_subtest_get_desc(int subtest); int test__clang_subtest_get_nr(void); int test__unit_number__scnprint(int subtest); +bool test__bp_signal_is_supported(void); + #if defined(__arm__) || defined(__aarch64__) #ifdef HAVE_DWARF_UNWIND_SUPPORT struct thread; diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 59addd52d9cd..ddb2c6fbdf91 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -210,6 +210,8 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, return 0; ret = b->callchain->max_depth - a->callchain->max_depth; + if (callchain_param.order == ORDER_CALLER) + ret = -ret; } return ret; } diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 683f8340460c..ddbd56df9187 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -239,10 +239,20 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op const char *s = strchr(ops->raw, '+'); const char *c = strchr(ops->raw, ','); - if (c++ != NULL) + /* + * skip over possible up to 2 operands to get to address, e.g.: + * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0> + */ + if (c++ != NULL) { ops->target.addr = strtoull(c, NULL, 16); - else + if (!ops->target.addr) { + c = strchr(c, ','); + if (c++ != NULL) + ops->target.addr = strtoull(c, NULL, 16); + } + } else { ops->target.addr = strtoull(ops->raw, NULL, 16); + } if (s++ != NULL) { ops->target.offset = strtoull(s, NULL, 16); @@ -257,10 +267,27 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op static int jump__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops) { + const char *c = strchr(ops->raw, ','); + if (!ops->target.addr || ops->target.offset < 0) return ins__raw_scnprintf(ins, bf, size, ops); - return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset); + if (c != NULL) { + const char *c2 = strchr(c + 1, ','); + + /* check for 3-op insn */ + if (c2 != NULL) + c = c2; + c++; + + /* mirror arch objdump's space-after-comma style */ + if (*c == ' ') + c++; + } + + return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64, + ins->name, c ? 
c - ops->raw : 0, ops->raw, + ops->target.offset); } static struct ins_ops jump_ops = { @@ -1294,6 +1321,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil char linkname[PATH_MAX]; char *build_id_filename; char *build_id_path = NULL; + char *pos; if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) @@ -1313,7 +1341,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil if (!build_id_path) return -1; - dirname(build_id_path); + /* + * old style build-id cache has name of XX/XXXXXXX.. while + * new style has XX/XXXXXXX../{elf,kallsyms,vdso}. + * extract the build-id part of dirname in the new style only. + */ + pos = strrchr(build_id_path, '/'); + if (pos && strlen(pos) < SBUILD_ID_SIZE - 2) + dirname(build_id_path); if (dso__is_kcore(dso) || readlink(build_id_path, linkname, sizeof(linkname)) < 0 || @@ -1396,31 +1431,10 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na sizeof(symfs_filename)); } } else if (dso__needs_decompress(dso)) { - char tmp[PATH_MAX]; - struct kmod_path m; - int fd; - bool ret; - - if (kmod_path__parse_ext(&m, symfs_filename)) - goto out; - - snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX"); - - fd = mkstemp(tmp); - if (fd < 0) { - free(m.ext); - goto out; - } - - ret = decompress_to_file(m.ext, symfs_filename, fd); - - if (ret) - pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename); - - free(m.ext); - close(fd); + char tmp[KMOD_DECOMP_LEN]; - if (!ret) + if (dso__decompress_kmodule_path(dso, symfs_filename, + tmp, sizeof(tmp)) < 0) goto out; strcpy(symfs_filename, tmp); @@ -1429,7 +1443,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na snprintf(command, sizeof(command), "%s %s%s --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 - " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand", + " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand", objdump_path ? objdump_path : "objdump", disassembler_style ? "-M " : "", disassembler_style ? disassembler_style : "", diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index 168cc49654e7..e0148b081bdf 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -278,51 +278,6 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size) return bf; } -bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size) -{ - char *id_name = NULL, *ch; - struct stat sb; - char sbuild_id[SBUILD_ID_SIZE]; - - if (!dso->has_build_id) - goto err; - - build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); - id_name = build_id_cache__linkname(sbuild_id, NULL, 0); - if (!id_name) - goto err; - if (access(id_name, F_OK)) - goto err; - if (lstat(id_name, &sb) == -1) - goto err; - if ((size_t)sb.st_size > size - 1) - goto err; - if (readlink(id_name, bf, size - 1) < 0) - goto err; - - bf[sb.st_size] = '\0'; - - /* - * link should be: - * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92 - */ - ch = strrchr(bf, '/'); - if (!ch) - goto err; - if (ch - 3 < bf) - goto err; - - free(id_name); - return strncmp(".ko", ch - 3, 3) == 0; -err: - pr_err("Invalid build id: %s\n", id_name ? : - dso->long_name ? : - dso->short_name ? 
: - "[unknown]"); - free(id_name); - return false; -} - #define dsos__for_each_with_build_id(pos, head) \ list_for_each_entry(pos, head, node) \ if (!pos->has_build_id) \ diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h index 8a89b195c1fc..96690a55c62c 100644 --- a/tools/perf/util/build-id.h +++ b/tools/perf/util/build-id.h @@ -17,7 +17,6 @@ char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf, size_t size); char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size); -bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size); int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 81fc29ac798f..b4204b43ed58 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -621,14 +621,19 @@ enum match_result { static enum match_result match_chain_srcline(struct callchain_cursor_node *node, struct callchain_list *cnode) { - char *left = get_srcline(cnode->ms.map->dso, + char *left = NULL; + char *right = NULL; + enum match_result ret = MATCH_EQ; + int cmp; + + if (cnode->ms.map) + left = get_srcline(cnode->ms.map->dso, map__rip_2objdump(cnode->ms.map, cnode->ip), cnode->ms.sym, true, false); - char *right = get_srcline(node->map->dso, + if (node->map) + right = get_srcline(node->map->dso, map__rip_2objdump(node->map, node->ip), node->sym, true, false); - enum match_result ret = MATCH_EQ; - int cmp; if (left && right) cmp = strcmp(left, right); diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index a96a99d2369f..4e7ab611377a 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -248,6 +248,64 @@ bool dso__needs_decompress(struct dso *dso) dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; } +static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf) +{ + int fd = -1; + struct kmod_path m; + + if (!dso__needs_decompress(dso)) + return -1; + + if (kmod_path__parse_ext(&m, dso->long_name)) + return -1; + + if (!m.comp) + goto out; + + fd = mkstemp(tmpbuf); + if (fd < 0) { + dso->load_errno = errno; + goto out; + } + + if (!decompress_to_file(m.ext, name, fd)) { + dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; + close(fd); + fd = -1; + } + +out: + free(m.ext); + return fd; +} + +int dso__decompress_kmodule_fd(struct dso *dso, const char *name) +{ + char tmpbuf[] = KMOD_DECOMP_NAME; + int fd; + + fd = decompress_kmodule(dso, name, tmpbuf); + unlink(tmpbuf); + return fd; +} + +int dso__decompress_kmodule_path(struct dso *dso, const char *name, + char *pathname, size_t len) +{ + char tmpbuf[] = KMOD_DECOMP_NAME; + int fd; + + fd = decompress_kmodule(dso, name, tmpbuf); + if (fd < 0) { + unlink(tmpbuf); + return -1; + } + + strncpy(pathname, tmpbuf, len); + close(fd); + return 0; +} + /* * Parses kernel module specified in @path and updates * @m argument like: @@ -335,6 +393,21 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, return 0; } +void dso__set_module_info(struct dso *dso, struct kmod_path *m, + struct machine *machine) +{ + if (machine__is_host(machine)) + dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; + else + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; + + /* _KMODULE_COMP should be next to _KMODULE */ + if (m->kmod && m->comp) + dso->symtab_type++; + + dso__set_short_name(dso, strdup(m->name), true); +} + /* * Global list of open DSOs and the counter. 
*/ @@ -381,7 +454,7 @@ static int do_open(char *name) static int __open_dso(struct dso *dso, struct machine *machine) { - int fd; + int fd = -EINVAL; char *root_dir = (char *)""; char *name = malloc(PATH_MAX); @@ -392,15 +465,30 @@ static int __open_dso(struct dso *dso, struct machine *machine) root_dir = machine->root_dir; if (dso__read_binary_type_filename(dso, dso->binary_type, - root_dir, name, PATH_MAX)) { - free(name); - return -EINVAL; - } + root_dir, name, PATH_MAX)) + goto out; if (!is_regular_file(name)) - return -EINVAL; + goto out; + + if (dso__needs_decompress(dso)) { + char newpath[KMOD_DECOMP_LEN]; + size_t len = sizeof(newpath); + + if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) { + fd = -dso->load_errno; + goto out; + } + + strcpy(name, newpath); + } fd = do_open(name); + + if (dso__needs_decompress(dso)) + unlink(name); + +out: free(name); return fd; } diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index 12350b171727..bd061ba7b47c 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -244,6 +244,12 @@ bool is_supported_compression(const char *ext); bool is_kernel_module(const char *pathname, int cpumode); bool decompress_to_file(const char *ext, const char *filename, int output_fd); bool dso__needs_decompress(struct dso *dso); +int dso__decompress_kmodule_fd(struct dso *dso, const char *name); +int dso__decompress_kmodule_path(struct dso *dso, const char *name, + char *pathname, size_t len); + +#define KMOD_DECOMP_NAME "/tmp/perf-kmod-XXXXXX" +#define KMOD_DECOMP_LEN sizeof(KMOD_DECOMP_NAME) struct kmod_path { char *name; @@ -259,6 +265,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, #define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false) #define kmod_path__parse_ext(__m, __p) __kmod_path__parse(__m, __p, false, true) +void dso__set_module_info(struct dso *dso, struct kmod_path *m, + struct machine *machine); + /* * The dso__data_* external interface provides following functions: * dso__data_get_fd diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index e4f7902d5afa..cda44b0e821c 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -273,8 +273,20 @@ struct perf_evsel *perf_evsel__new_cycles(void) struct perf_evsel *evsel; event_attr_init(&attr); + /* + * Unnamed union member, not supported as struct member named + * initializer in older compilers such as gcc 4.4.7 + * + * Just for probing the precise_ip: + */ + attr.sample_period = 1; perf_event_attr__set_max_precise_ip(&attr); + /* + * Now let the usual logic to set up the perf_event_attr defaults + * to kick in when we return and before perf_evsel__open() is called. + */ + attr.sample_period = 0; evsel = perf_evsel__new(&attr); if (evsel == NULL) diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c index e415aee6a245..583f3a602506 100644 --- a/tools/perf/util/evsel_fprintf.c +++ b/tools/perf/util/evsel_fprintf.c @@ -7,6 +7,7 @@ #include "map.h" #include "strlist.h" #include "symbol.h" +#include "srcline.h" static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) 
{ @@ -168,6 +169,38 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment, if (!print_oneline) printed += fprintf(fp, "\n"); + if (symbol_conf.inline_name && node->map) { + struct inline_node *inode; + + addr = map__rip_2objdump(node->map, node->ip), + inode = dso__parse_addr_inlines(node->map->dso, addr); + + if (inode) { + struct inline_list *ilist; + + list_for_each_entry(ilist, &inode->val, list) { + if (print_arrow) + printed += fprintf(fp, " <-"); + + /* IP is same, just skip it */ + if (print_ip) + printed += fprintf(fp, "%c%16s", + s, ""); + if (print_sym) + printed += fprintf(fp, " %s", + ilist->funcname); + if (print_srcline) + printed += fprintf(fp, "\n %s:%d", + ilist->filename, + ilist->line_nr); + if (!print_oneline) + printed += fprintf(fp, "\n"); + } + + inline_node__delete(inode); + } + } + if (symbol_conf.bt_stop_list && node->sym && strlist__has_entry(symbol_conf.bt_stop_list, diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 314a07151fb7..b5baff3007bb 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -841,7 +841,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused, /* * default get_cpuid(): nothing gets recorded - * actual implementation must be in arch/$(ARCH)/util/header.c + * actual implementation must be in arch/$(SRCARCH)/util/header.c */ int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) { @@ -1469,8 +1469,16 @@ static int __event_process_build_id(struct build_id_event *bev, dso__set_build_id(dso, &bev->build_id); - if (!is_kernel_module(filename, cpumode)) - dso->kernel = dso_type; + if (dso_type != DSO_TYPE_USER) { + struct kmod_path m = { .name = NULL, }; + + if (!kmod_path__parse_name(&m, filename) && m.kmod) + dso__set_module_info(dso, &m, machine); + else + dso->kernel = dso_type; + + free(m.name); + } build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index d97e014c3df3..d7f31cb0a4cb 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -572,16 +572,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine, if (dso == NULL) goto out_unlock; - if (machine__is_host(machine)) - dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; - else - dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; - - /* _KMODULE_COMP should be next to _KMODULE */ - if (m->kmod && m->comp) - dso->symtab_type++; - - dso__set_short_name(dso, strdup(m->name), true); + dso__set_module_info(dso, m, machine); dso__set_long_name(dso, strdup(filename), true); } diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 9d92af7d0718..40de3cb40d21 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -1219,7 +1219,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile) fprintf(ofp, "# be retrieved using Python functions of the form " "common_*(context).\n"); - fprintf(ofp, "# See the perf-trace-python Documentation for the list " + fprintf(ofp, "# See the perf-script-python Documentation for the list " "of available functions.\n\n"); fprintf(ofp, "import os\n"); diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c index df051a52393c..ebc88a74e67b 100644 --- a/tools/perf/util/srcline.c +++ b/tools/perf/util/srcline.c @@ -56,7 +56,10 @@ static int 
inline_list__append(char *filename, char *funcname, int line_nr, } } - list_add_tail(&ilist->list, &node->val); + if (callchain_param.order == ORDER_CALLEE) + list_add_tail(&ilist->list, &node->val); + else + list_add(&ilist->list, &node->val); return 0; } @@ -200,12 +203,14 @@ static void addr2line_cleanup(struct a2l_data *a2l) #define MAX_INLINE_NEST 1024 -static void inline_list__reverse(struct inline_node *node) +static int inline_list__append_dso_a2l(struct dso *dso, + struct inline_node *node) { - struct inline_list *ilist, *n; + struct a2l_data *a2l = dso->a2l; + char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL; + char *filename = a2l->filename ? strdup(a2l->filename) : NULL; - list_for_each_entry_safe_reverse(ilist, n, &node->val, list) - list_move_tail(&ilist->list, &node->val); + return inline_list__append(filename, funcname, a2l->line, node, dso); } static int addr2line(const char *dso_name, u64 addr, @@ -230,36 +235,36 @@ static int addr2line(const char *dso_name, u64 addr, bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); - if (a2l->found && unwind_inlines) { + if (!a2l->found) + return 0; + + if (unwind_inlines) { int cnt = 0; + if (node && inline_list__append_dso_a2l(dso, node)) + return 0; + while (bfd_find_inliner_info(a2l->abfd, &a2l->filename, &a2l->funcname, &a2l->line) && cnt++ < MAX_INLINE_NEST) { if (node != NULL) { - if (inline_list__append(strdup(a2l->filename), - strdup(a2l->funcname), - a2l->line, node, - dso) != 0) + if (inline_list__append_dso_a2l(dso, node)) return 0; + // found at least one inline frame + ret = 1; } } + } - if ((node != NULL) && - (callchain_param.order != ORDER_CALLEE)) { - inline_list__reverse(node); - } + if (file) { + *file = a2l->filename ? strdup(a2l->filename) : NULL; + ret = *file ? 
1 : 0; } - if (a2l->found && a2l->filename) { - *file = strdup(a2l->filename); + if (line) *line = a2l->line; - if (*file) - ret = 1; - } - return ret; } @@ -278,8 +283,6 @@ void dso__free_a2l(struct dso *dso) static struct inline_node *addr2inlines(const char *dso_name, u64 addr, struct dso *dso) { - char *file = NULL; - unsigned int line = 0; struct inline_node *node; node = zalloc(sizeof(*node)); @@ -291,7 +294,7 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr, INIT_LIST_HEAD(&node->val); node->addr = addr; - if (!addr2line(dso_name, addr, &file, &line, dso, TRUE, node)) + if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node)) goto out_free_inline_node; if (list_empty(&node->val)) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index e7ee47f7377a..502505cf236a 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -637,43 +637,6 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata) return 0; } -static int decompress_kmodule(struct dso *dso, const char *name, - enum dso_binary_type type) -{ - int fd = -1; - char tmpbuf[] = "/tmp/perf-kmod-XXXXXX"; - struct kmod_path m; - - if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP && - type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP && - type != DSO_BINARY_TYPE__BUILD_ID_CACHE) - return -1; - - if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE) - name = dso->long_name; - - if (kmod_path__parse_ext(&m, name) || !m.comp) - return -1; - - fd = mkstemp(tmpbuf); - if (fd < 0) { - dso->load_errno = errno; - goto out; - } - - if (!decompress_to_file(m.ext, name, fd)) { - dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; - close(fd); - fd = -1; - } - - unlink(tmpbuf); - -out: - free(m.ext); - return fd; -} - bool symsrc__possibly_runtime(struct symsrc *ss) { return ss->dynsym || ss->opdsec; @@ -705,9 +668,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, int fd; if (dso__needs_decompress(dso)) { - fd = decompress_kmodule(dso, name, type); + fd = dso__decompress_kmodule_fd(dso, name); if (fd < 0) return -1; + + type = dso->symtab_type; } else { fd = open(name, O_RDONLY); if (fd < 0) { diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 8f2b068ff756..e7a98dbd2aed 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1562,10 +1562,6 @@ int dso__load(struct dso *dso, struct map *map) if (!runtime_ss && syms_ss) runtime_ss = syms_ss; - if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE) - if (dso__build_id_is_kmod(dso, name, PATH_MAX)) - kmod = true; - if (syms_ss) ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); else diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index f90e11a555b2..7755a5e0fe5e 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -39,6 +39,14 @@ static int __report_module(struct addr_location *al, u64 ip, return 0; mod = dwfl_addrmodule(ui->dwfl, ip); + if (mod) { + Dwarf_Addr s; + + dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL); + if (s != al->map->start) + mod = 0; + } + if (!mod) mod = dwfl_report_elf(ui->dwfl, dso->short_name, dso->long_name, -1, al->map->start, @@ -168,12 +176,24 @@ frame_callback(Dwfl_Frame *state, void *arg) { struct unwind_info *ui = arg; Dwarf_Addr pc; + bool isactivation; if (!dwfl_frame_pc(state, &pc, NULL)) { pr_err("%s", dwfl_errmsg(-1)); return DWARF_CB_ABORT; } + // report the module before we query for isactivation + report_module(pc, ui); + + if 
(!dwfl_frame_pc(state, &pc, &isactivation)) { + pr_err("%s", dwfl_errmsg(-1)); + return DWARF_CB_ABORT; + } + + if (!isactivation) + --pc; + return entry(pc, ui) || !(--ui->max_stack) ? DWARF_CB_ABORT : DWARF_CB_OK; } @@ -220,7 +240,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui); - if (err && !ui->max_stack) + if (err && ui->max_stack != max_stack) err = 0; /* diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index f8455bed6e65..672c2ada9357 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -692,6 +692,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, while (!ret && (unw_step(&c) > 0) && i < max_stack) { unw_get_reg(&c, UNW_REG_IP, &ips[i]); + + /* + * Decrement the IP for any non-activation frames. + * this is required to properly find the srcline + * for caller frames. + * See also the documentation for dwfl_frame_pc(), + * which this code tries to replicate. + */ + if (unw_is_signal_frame(&c) <= 0) + --ips[i]; + ++i; } diff --git a/tools/power/acpi/.gitignore b/tools/power/acpi/.gitignore new file mode 100644 index 000000000000..cba3d994995c --- /dev/null +++ b/tools/power/acpi/.gitignore @@ -0,0 +1,4 @@ +acpidbg +acpidump +ec +include diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 91edd0566237..f389b02d43a0 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -11,7 +11,8 @@ endif CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include LDLIBS += -lcap -lelf -TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs +TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ + test_align TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o @@ -34,6 +35,7 @@ $(BPFOBJ): force CLANG ?= clang %.o: %.c - $(CLANG) -I. -I../../../include/uapi -I../../../../samples/bpf/ \ + $(CLANG) -I. -I./include/uapi -I../../../include/uapi \ + -I../../../../samples/bpf/ \ -Wno-compare-distinct-pointer-types \ -O2 -target bpf -c $< -o $@ diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h index 19d0604f8694..487cbfb89beb 100644 --- a/tools/testing/selftests/bpf/bpf_endian.h +++ b/tools/testing/selftests/bpf/bpf_endian.h @@ -1,23 +1,42 @@ #ifndef __BPF_ENDIAN__ #define __BPF_ENDIAN__ -#include <asm/byteorder.h> +#include <linux/swab.h> -#if __BYTE_ORDER == __LITTLE_ENDIAN -# define __bpf_ntohs(x) __builtin_bswap16(x) -# define __bpf_htons(x) __builtin_bswap16(x) -#elif __BYTE_ORDER == __BIG_ENDIAN -# define __bpf_ntohs(x) (x) -# define __bpf_htons(x) (x) +/* LLVM's BPF target selects the endianness of the CPU + * it compiles on, or the user specifies (bpfel/bpfeb), + * respectively. The used __BYTE_ORDER__ is defined by + * the compiler, we cannot rely on __BYTE_ORDER from + * libc headers, since it doesn't reflect the actual + * requested byte order. + * + * Note, LLVM's BPF target has different __builtin_bswapX() + * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE + * in bpfel and bpfeb case, which means below, that we map + * to cpu_to_be16(). We could use it unconditionally in BPF + * case, but better not rely on it, so that this header here + * can be used from application and BPF program side, which + * use different targets. 
+ */ +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define __bpf_ntohs(x) __builtin_bswap16(x) +# define __bpf_htons(x) __builtin_bswap16(x) +# define __bpf_constant_ntohs(x) ___constant_swab16(x) +# define __bpf_constant_htons(x) ___constant_swab16(x) +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define __bpf_ntohs(x) (x) +# define __bpf_htons(x) (x) +# define __bpf_constant_ntohs(x) (x) +# define __bpf_constant_htons(x) (x) #else -# error "Fix your __BYTE_ORDER?!" +# error "Fix your compiler's __BYTE_ORDER__?!" #endif #define bpf_htons(x) \ (__builtin_constant_p(x) ? \ - __constant_htons(x) : __bpf_htons(x)) + __bpf_constant_htons(x) : __bpf_htons(x)) #define bpf_ntohs(x) \ (__builtin_constant_p(x) ? \ - __constant_ntohs(x) : __bpf_ntohs(x)) + __bpf_constant_ntohs(x) : __bpf_ntohs(x)) -#endif +#endif /* __BPF_ENDIAN__ */ diff --git a/tools/testing/selftests/bpf/include/uapi/linux/types.h b/tools/testing/selftests/bpf/include/uapi/linux/types.h new file mode 100644 index 000000000000..51841848fbfe --- /dev/null +++ b/tools/testing/selftests/bpf/include/uapi/linux/types.h @@ -0,0 +1,22 @@ +#ifndef _UAPI_LINUX_TYPES_H +#define _UAPI_LINUX_TYPES_H + +#include <asm-generic/int-ll64.h> + +/* copied from linux:include/uapi/linux/types.h */ +#define __bitwise +typedef __u16 __bitwise __le16; +typedef __u16 __bitwise __be16; +typedef __u32 __bitwise __le32; +typedef __u32 __bitwise __be32; +typedef __u64 __bitwise __le64; +typedef __u64 __bitwise __be64; + +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; + +#define __aligned_u64 __u64 __attribute__((aligned(8))) +#define __aligned_be64 __be64 __attribute__((aligned(8))) +#define __aligned_le64 __le64 __attribute__((aligned(8))) + +#endif /* _UAPI_LINUX_TYPES_H */ diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c new file mode 100644 index 000000000000..9644d4e069de --- /dev/null +++ b/tools/testing/selftests/bpf/test_align.c @@ -0,0 +1,453 @@ +#include <asm/types.h> +#include <linux/types.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +#include <errno.h> +#include <string.h> +#include <stddef.h> +#include <stdbool.h> + +#include <linux/unistd.h> +#include <linux/filter.h> +#include <linux/bpf_perf_event.h> +#include <linux/bpf.h> + +#include <bpf/bpf.h> + +#include "../../../include/linux/filter.h" + +#ifndef ARRAY_SIZE +# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define MAX_INSNS 512 +#define MAX_MATCHES 16 + +struct bpf_align_test { + const char *descr; + struct bpf_insn insns[MAX_INSNS]; + enum { + UNDEF, + ACCEPT, + REJECT + } result; + enum bpf_prog_type prog_type; + const char *matches[MAX_MATCHES]; +}; + +static struct bpf_align_test tests[] = { + { + .descr = "mov", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_3, 16), + BPF_MOV64_IMM(BPF_REG_3, 32), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + "1: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp", + "2: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", + "3: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp", + "4: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp", + "5: R1=ctx R3=imm32,min_value=32,max_value=32,min_align=32 R10=fp", + }, + }, + { + .descr = "shift", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + 
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_4, 32), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + "1: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp", + "2: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp", + "3: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", + "4: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp", + "5: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp", + "6: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp", + "7: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm32,min_value=32,max_value=32,min_align=32 R10=fp", + "8: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm16,min_value=16,max_value=16,min_align=16 R10=fp", + "9: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp", + "10: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm4,min_value=4,max_value=4,min_align=4 R10=fp", + "11: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm2,min_value=2,max_value=2,min_align=2 R10=fp", + }, + }, + { + .descr = "addsub", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + "1: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", + "2: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=4 R10=fp", + "3: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R10=fp", + "4: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp", + "5: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm12,min_value=12,max_value=12,min_align=4 R10=fp", + "6: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm14,min_value=14,max_value=14,min_align=2 R10=fp", + }, + }, + { + .descr = "mul", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 7), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + "1: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp", + "2: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp", + "3: R1=ctx R3=imm14,min_value=14,max_value=14,min_align=2 R10=fp", + "4: R1=ctx R3=imm56,min_value=56,max_value=56,min_align=4 R10=fp", + }, + }, + +#define PREP_PKT_POINTERS \ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \ + offsetof(struct __sk_buff, data)), \ + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \ + offsetof(struct __sk_buff, data_end)) + +#define LOAD_UNKNOWN(DST_REG) \ + PREP_PKT_POINTERS, \ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \ + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \ + BPF_EXIT_INSN(), \ + BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0) + + { + .descr = "unknown shift", + .insns = { + LOAD_UNKNOWN(BPF_REG_3), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + 
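Editor's note on the expected-log strings in the mov, shift, addsub and mul cases above: they encode how the verifier propagates its min_align estimate through ALU operations. Roughly, an immediate loaded with MOV gets the alignment of its lowest set bit, an ADD keeps the weaker of the two alignments, and left or right shifts scale the alignment up or down; MUL and the unknown-value ("inv") cases follow their own, more conservative rules, as the strings show. The snippet below is only a rough model inferred from those expected strings; the helper names are made up, and the real logic lives in the kernel's BPF verifier, not in this test:

/* Rough model of the min_align bookkeeping exercised above (editor's sketch). */
#include <stdint.h>

static uint64_t align_of_imm(uint64_t v)		/* MOV imm: lowest set bit */
{
	return v ? (v & -v) : 1;
}

static uint64_t align_after_add(uint64_t a, uint64_t b)
{
	return a < b ? a : b;				/* a sum keeps the weaker alignment */
}

static uint64_t align_after_lsh(uint64_t a, unsigned int k)
{
	return a << k;					/* left shift strengthens alignment */
}

static uint64_t align_after_rsh(uint64_t a, unsigned int k)
{
	uint64_t r = a >> k;				/* right shift weakens it, floor of 1 */
	return r ? r : 1;
}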
BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1), + LOAD_UNKNOWN(BPF_REG_4), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp", + "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv55,min_align=2 R10=fp", + "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv54,min_align=4 R10=fp", + "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv53,min_align=8 R10=fp", + "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv52,min_align=16 R10=fp", + "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv56 R10=fp", + "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv51,min_align=32 R10=fp", + "20: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv52,min_align=16 R10=fp", + "21: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv53,min_align=8 R10=fp", + "22: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv54,min_align=4 R10=fp", + "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv55,min_align=2 R10=fp", + }, + }, + { + .descr = "unknown mul", + .insns = { + LOAD_UNKNOWN(BPF_REG_3), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_3), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp", + "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", + "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv55,min_align=1 R10=fp", + "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", + "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv54,min_align=2 R10=fp", + "12: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", + "13: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv53,min_align=4 R10=fp", + "14: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", + "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv52,min_align=8 R10=fp", + "16: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv50,min_align=8 R10=fp" + }, + }, + { + .descr = "packet const offset", + .insns = { + PREP_PKT_POINTERS, + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + + BPF_MOV64_IMM(BPF_REG_0, 0), + + /* Skip over ethernet header. 
*/ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3), + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0), + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + "4: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=0,r=0) R10=fp", + "5: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=14,r=0) R10=fp", + "6: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R4=pkt(id=0,off=14,r=0) R5=pkt(id=0,off=14,r=0) R10=fp", + "10: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv56 R5=pkt(id=0,off=14,r=18) R10=fp", + "14: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp", + "15: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp", + }, + }, + { + .descr = "packet variable offset", + .insns = { + LOAD_UNKNOWN(BPF_REG_6), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + + /* First, add a constant to the R5 packet pointer, + * then a variable with a known alignment. + */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), + + /* Now, test in the other direction. Adding first + * the variable offset to R5, then the constant. + */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), + + /* Test multiple accumulations of unknown values + * into a packet pointer. + */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + /* Calculated offset in R6 has unknown value, but known + * alignment of 4. + */ + "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R6=inv54,min_align=4 R10=fp", + + /* Offset is added to packet pointer R5, resulting in known + * auxiliary alignment and offset. 
+ */ + "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R5=pkt(id=1,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", + + /* At the time the word size load is performed from R5, + * it's total offset is NET_IP_ALIGN + reg->off (0) + + * reg->aux_off (14) which is 16. Then the variable + * offset is considered using reg->aux_off_align which + * is 4 and meets the load's requirements. + */ + "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=1,off=4,r=4),aux_off=14,aux_off_align=4 R5=pkt(id=1,off=0,r=4),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", + + + /* Variable offset is added to R5 packet pointer, + * resulting in auxiliary alignment of 4. + */ + "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=0,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp", + + /* Constant offset is added to R5, resulting in + * reg->off of 14. + */ + "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=14,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp", + + /* At the time the word size load is performed from R5, + * it's total offset is NET_IP_ALIGN + reg->off (14) which + * is 16. Then the variable offset is considered using + * reg->aux_off_align which is 4 and meets the load's + * requirements. + */ + "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=2,off=18,r=18),aux_off_align=4 R5=pkt(id=2,off=14,r=18),aux_off_align=4 R6=inv54,min_align=4 R10=fp", + + /* Constant offset is added to R5 packet pointer, + * resulting in reg->off value of 14. + */ + "26: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=0,off=14,r=8) R6=inv54,min_align=4 R10=fp", + /* Variable offset is added to R5, resulting in an + * auxiliary offset of 14, and an auxiliary alignment of 4. + */ + "27: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", + /* Constant is added to R5 again, setting reg->off to 4. */ + "28: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=4,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", + /* And once more we add a variable, which causes an accumulation + * of reg->off into reg->aux_off_align, with resulting value of + * 18. The auxiliary alignment stays at 4. + */ + "29: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=4,off=0,r=0),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp", + /* At the time the word size load is performed from R5, + * it's total offset is NET_IP_ALIGN + reg->off (0) + + * reg->aux_off (18) which is 20. Then the variable offset + * is considered using reg->aux_off_align which is 4 and meets + * the load's requirements. 
+ */ + "33: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=4,off=4,r=4),aux_off=18,aux_off_align=4 R5=pkt(id=4,off=0,r=4),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp", + }, + }, +}; + +static int probe_filter_length(const struct bpf_insn *fp) +{ + int len; + + for (len = MAX_INSNS - 1; len > 0; --len) + if (fp[len].code != 0 || fp[len].imm != 0) + break; + return len + 1; +} + +static char bpf_vlog[32768]; + +static int do_test_single(struct bpf_align_test *test) +{ + struct bpf_insn *prog = test->insns; + int prog_type = test->prog_type; + int prog_len, i; + int fd_prog; + int ret; + + prog_len = probe_filter_length(prog); + fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, + prog, prog_len, 1, "GPL", 0, + bpf_vlog, sizeof(bpf_vlog)); + if (fd_prog < 0) { + printf("Failed to load program.\n"); + printf("%s", bpf_vlog); + ret = 1; + } else { + ret = 0; + for (i = 0; i < MAX_MATCHES; i++) { + const char *t, *m = test->matches[i]; + + if (!m) + break; + t = strstr(bpf_vlog, m); + if (!t) { + printf("Failed to find match: %s\n", m); + ret = 1; + printf("%s", bpf_vlog); + break; + } + } + close(fd_prog); + } + return ret; +} + +static int do_test(unsigned int from, unsigned int to) +{ + int all_pass = 0; + int all_fail = 0; + unsigned int i; + + for (i = from; i < to; i++) { + struct bpf_align_test *test = &tests[i]; + int fail; + + printf("Test %3d: %s ... ", + i, test->descr); + fail = do_test_single(test); + if (fail) { + all_fail++; + printf("FAIL\n"); + } else { + all_pass++; + printf("PASS\n"); + } + } + printf("Results: %d pass %d fail\n", + all_pass, all_fail); + return 0; +} + +int main(int argc, char **argv) +{ + unsigned int from = 0, to = ARRAY_SIZE(tests); + + if (argc == 3) { + unsigned int l = atoi(argv[argc - 2]); + unsigned int u = atoi(argv[argc - 1]); + + if (l < to && u < to) { + from = l; + to = u + 1; + } + } else if (argc == 2) { + unsigned int t = atoi(argv[argc - 1]); + + if (t < to) { + from = t; + to = t + 1; + } + } + return do_test(from, to); +} diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/test_pkt_access.c index 39387bb7e08c..6e11ba11709e 100644 --- a/tools/testing/selftests/bpf/test_pkt_access.c +++ b/tools/testing/selftests/bpf/test_pkt_access.c @@ -5,6 +5,7 @@ * License as published by the Free Software Foundation. 
*/ #include <stddef.h> +#include <string.h> #include <linux/bpf.h> #include <linux/if_ether.h> #include <linux/if_packet.h> diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 3773562056da..cabb19b1e371 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -49,6 +49,7 @@ #define MAX_NR_MAPS 4 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) +#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1) struct bpf_test { const char *descr; @@ -2615,6 +2616,30 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { + "direct packet access: test17 (pruning, alignment)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), + BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_A(-6), + }, + .errstr = "misaligned packet access off 2+15+-4 size 4", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, + }, + { "helper access to packet: test1, valid packet_ptr range", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -3341,6 +3366,70 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS }, { + "alu ops on ptr_to_map_value_or_null, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 4 }, + .errstr = "R4 invalid mem access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS + }, + { + "alu ops on ptr_to_map_value_or_null, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 4 }, + .errstr = "R4 invalid mem access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS + }, + { + "alu ops on ptr_to_map_value_or_null, 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 4 }, + .errstr = "R4 invalid mem access", + .result 
= REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS + }, + { "invalid memory access with multiple map_lookup_elem calls", .insns = { BPF_MOV64_IMM(BPF_REG_1, 10), @@ -4937,7 +5026,149 @@ static struct bpf_test tests[] = { .fixup_map_in_map = { 3 }, .errstr = "R1 type=map_value_or_null expected=map_ptr", .result = REJECT, - } + }, + { + "ld_abs: check calling conv, r1", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R1 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r2", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R2 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r3", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .errstr = "R3 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r4", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_EXIT_INSN(), + }, + .errstr = "R4 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r5", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .errstr = "R5 !read_ok", + .result = REJECT, + }, + { + "ld_abs: check calling conv, r7", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_7, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "ld_ind: check calling conv, r1", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R1 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r2", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R2 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r3", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .errstr = "R3 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r4", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_4, 1), + BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_EXIT_INSN(), + }, + .errstr = "R4 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r5", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_5, 1), + BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .errstr = "R5 !read_ok", + .result = REJECT, + }, + { + "ld_ind: check calling conv, r7", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_7, 1), + BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + 
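Editor's note on the ld_abs/ld_ind "check calling conv" cases added in this hunk: the legacy packet-load instructions are executed as an internal helper call, so once a BPF_LD_ABS or BPF_LD_IND has run, the caller-saved registers R1-R5 no longer hold defined values (reading them is what triggers the expected "Rn !read_ok" rejections), while R6, which must carry the skb context for these instructions, and the other callee-saved registers survive; that is why the r7 variants are ACCEPTed. A minimal accepted pattern, mirroring those tests and using the same insn-macro header as test_align.c above (illustrative fragment only):

#include <linux/bpf.h>
#include "../../../include/linux/filter.h"	/* BPF_MOV64_REG(), BPF_LD_ABS(), ... */

static struct bpf_insn ld_abs_ok_pattern[] = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	/* ctx must live in R6 for LD_ABS */
	BPF_MOV64_IMM(BPF_REG_7, 0),		/* callee-saved: survives the load */
	BPF_LD_ABS(BPF_W, 0),			/* result in R0; R1-R5 become unreadable */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),	/* fine: R7 is still readable */
	BPF_EXIT_INSN(),
};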
}, + .result = ACCEPT, + }, }; static int probe_filter_length(const struct bpf_insn *fp) @@ -5059,9 +5290,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv, do_test_fixup(test, prog, map_fds); - fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, - prog, prog_len, "GPL", 0, bpf_vlog, - sizeof(bpf_vlog)); + fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, + prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT, + "GPL", 0, bpf_vlog, sizeof(bpf_vlog)); expected_ret = unpriv && test->result_unpriv != UNDEF ? test->result_unpriv : test->result; diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest index 32e6211e1c6e..717581145cfc 100755 --- a/tools/testing/selftests/ftrace/ftracetest +++ b/tools/testing/selftests/ftrace/ftracetest @@ -58,7 +58,7 @@ parse_opts() { # opts ;; --verbose|-v|-vv) VERBOSE=$((VERBOSE + 1)) - [ $1 == '-vv' ] && VERBOSE=$((VERBOSE + 1)) + [ $1 = '-vv' ] && VERBOSE=$((VERBOSE + 1)) shift 1 ;; --debug|-d) diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc index 07bb3e5930b4..aa31368851c9 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc @@ -48,7 +48,7 @@ test_event_enabled() { e=`cat $EVENT_ENABLE` if [ "$e" != $val ]; then echo "Expected $val but found $e" - exit -1 + exit 1 fi } diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index 9aec6fcb7729..f2019b37370d 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions @@ -34,10 +34,10 @@ reset_ftrace_filter() { # reset all triggers in set_ftrace_filter echo > set_ftrace_filter grep -v '^#' set_ftrace_filter | while read t; do tr=`echo $t | cut -d: -f2` - if [ "$tr" == "" ]; then + if [ "$tr" = "" ]; then continue fi - if [ $tr == "enable_event" -o $tr == "disable_event" ]; then + if [ $tr = "enable_event" -o $tr = "disable_event" ]; then tr=`echo $t | cut -d: -f1-4` limit=`echo $t | cut -d: -f5` else diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc index 4c5a061a5b4e..c73db7863adb 100644 --- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc +++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc @@ -75,9 +75,13 @@ rmdir foo if [ -d foo ]; then fail "foo still exists" fi -exit 0 - +mkdir foo +echo "schedule:enable_event:sched:sched_switch" > foo/set_ftrace_filter +rmdir foo +if [ -d foo ]; then + fail "foo still exists" +fi instance_slam() { diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc new file mode 100644 index 000000000000..f4d1ff785d67 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc @@ -0,0 +1,21 @@ +#!/bin/sh +# description: Register/unregister many kprobe events + +# ftrace fentry skip size depends on the machine architecture. 
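Stepping back to the test_verifier.c change above: swapping bpf_load_program() for bpf_verify_program() is what lets a test request strict-alignment verification, with F_LOAD_WITH_STRICT_ALIGNMENT feeding that extra argument. A minimal sketch of the call pattern, assuming the same build setup as test_align.c; the trivial program and the function name here are purely illustrative:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"	/* BPF_MOV64_IMM(), BPF_EXIT_INSN() */

static char vlog[8192];

static int load_with_strict_alignment(void)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* return 0 */
		BPF_EXIT_INSN(),
	};
	int fd;

	fd = bpf_verify_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
				sizeof(prog) / sizeof(prog[0]),
				1 /* strict alignment */, "GPL", 0,
				vlog, sizeof(vlog));
	if (fd < 0)
		printf("verifier rejected the program:\n%s", vlog);
	return fd;
}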
+# Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc +case `uname -m` in + x86_64|i[3456]86) OFFS=5;; + ppc*) OFFS=4;; + *) OFFS=0;; +esac + +echo "Setup up to 256 kprobes" +grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \ +head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||: + +echo 1 > events/kprobes/enable +echo 0 > events/kprobes/enable +echo > kprobe_events +echo "Waiting for unoptimizing & freeing" +sleep 5 +echo "Done" diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore index 427621792229..2f1f7b013293 100644 --- a/tools/testing/selftests/powerpc/tm/.gitignore +++ b/tools/testing/selftests/powerpc/tm/.gitignore @@ -11,3 +11,4 @@ tm-signal-context-chk-fpu tm-signal-context-chk-gpr tm-signal-context-chk-vmx tm-signal-context-chk-vsx +tm-vmx-unavail diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index 5576ee6a51f2..958c11c14acd 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile @@ -2,7 +2,8 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu tm-signal-context-chk-vmx tm-signal-context-chk-vsx TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \ - tm-vmxcopy tm-fork tm-tar tm-tmspr $(SIGNAL_CONTEXT_CHK_TESTS) + tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail \ + $(SIGNAL_CONTEXT_CHK_TESTS) include ../../lib.mk @@ -13,6 +14,7 @@ CFLAGS += -mhtm $(OUTPUT)/tm-syscall: tm-syscall-asm.S $(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include $(OUTPUT)/tm-tmspr: CFLAGS += -pthread +$(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64 SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS)) $(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c index d9c49f41515e..e79ccd6aada1 100644 --- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c +++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c @@ -42,12 +42,12 @@ int test_body(void) printf("Check DSCR TM context switch: "); fflush(stdout); for (;;) { - rv = 1; asm __volatile__ ( /* set a known value into the DSCR */ "ld 3, %[dscr1];" "mtspr %[sprn_dscr], 3;" + "li %[rv], 1;" /* start and suspend a transaction */ "tbegin.;" "beq 1f;" diff --git a/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c new file mode 100644 index 000000000000..137185ba4937 --- /dev/null +++ b/tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c @@ -0,0 +1,118 @@ +/* + * Copyright 2017, Michael Neuling, IBM Corp. + * Licensed under GPLv2. + * Original: Breno Leitao <brenohl@br.ibm.com> & + * Gustavo Bueno Romero <gromero@br.ibm.com> + * Edited: Michael Neuling + * + * Force VMX unavailable during a transaction and see if it corrupts + * the checkpointed VMX register state after the abort. 
+ */ + +#include <inttypes.h> +#include <htmintrin.h> +#include <string.h> +#include <stdlib.h> +#include <stdio.h> +#include <pthread.h> +#include <sys/mman.h> +#include <unistd.h> +#include <pthread.h> + +#include "tm.h" +#include "utils.h" + +int passed; + +void *worker(void *unused) +{ + __int128 vmx0; + uint64_t texasr; + + asm goto ( + "li 3, 1;" /* Stick non-zero value in VMX0 */ + "std 3, 0(%[vmx0_ptr]);" + "lvx 0, 0, %[vmx0_ptr];" + + /* Wait here a bit so we get scheduled out 255 times */ + "lis 3, 0x3fff;" + "1: ;" + "addi 3, 3, -1;" + "cmpdi 3, 0;" + "bne 1b;" + + /* Kernel will hopefully turn VMX off now */ + + "tbegin. ;" + "beq failure;" + + /* Cause VMX unavail. Any VMX instruction */ + "vaddcuw 0,0,0;" + + "tend. ;" + "b %l[success];" + + /* Check VMX0 sanity after abort */ + "failure: ;" + "lvx 1, 0, %[vmx0_ptr];" + "vcmpequb. 2, 0, 1;" + "bc 4, 24, %l[value_mismatch];" + "b %l[value_match];" + : + : [vmx0_ptr] "r"(&vmx0) + : "r3" + : success, value_match, value_mismatch + ); + + /* HTM aborted and VMX0 is corrupted */ +value_mismatch: + texasr = __builtin_get_texasr(); + + printf("\n\n==============\n\n"); + printf("Failure with error: %lx\n", _TEXASR_FAILURE_CODE(texasr)); + printf("Summary error : %lx\n", _TEXASR_FAILURE_SUMMARY(texasr)); + printf("TFIAR exact : %lx\n\n", _TEXASR_TFIAR_EXACT(texasr)); + + passed = 0; + return NULL; + + /* HTM aborted but VMX0 is correct */ +value_match: +// printf("!"); + return NULL; + +success: +// printf("."); + return NULL; +} + +int tm_vmx_unavail_test() +{ + int threads; + pthread_t *thread; + + SKIP_IF(!have_htm()); + + passed = 1; + + threads = sysconf(_SC_NPROCESSORS_ONLN) * 4; + thread = malloc(sizeof(pthread_t)*threads); + if (!thread) + return EXIT_FAILURE; + + for (uint64_t i = 0; i < threads; i++) + pthread_create(&thread[i], NULL, &worker, NULL); + + for (uint64_t i = 0; i < threads; i++) + pthread_join(thread[i], NULL); + + free(thread); + + return passed ? 
EXIT_SUCCESS : EXIT_FAILURE; +} + + +int main(int argc, char **argv) +{ + return test_harness(tm_vmx_unavail_test, "tm_vmx_unavail_test"); +} diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index f659c146cdc8..9bd2cd71645d 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c @@ -7,6 +7,7 @@ #include <limits.h> #include <netdb.h> #include <libudev.h> +#include <dirent.h> #include "sysfs_utils.h" #undef PROGNAME @@ -35,18 +36,11 @@ err: return NULL; } - - static int parse_status(const char *value) { int ret = 0; char *c; - - for (int i = 0; i < vhci_driver->nports; i++) - memset(&vhci_driver->idev[i], 0, sizeof(vhci_driver->idev[i])); - - /* skip a header line */ c = strchr(value, '\n'); if (!c) @@ -57,9 +51,11 @@ static int parse_status(const char *value) int port, status, speed, devid; unsigned long socket; char lbusid[SYSFS_BUS_ID_SIZE]; + struct usbip_imported_device *idev; + char hub[3]; - ret = sscanf(c, "%d %d %d %x %lx %31s\n", - &port, &status, &speed, + ret = sscanf(c, "%2s %d %d %d %x %lx %31s\n", + hub, &port, &status, &speed, &devid, &socket, lbusid); if (ret < 5) { @@ -67,34 +63,36 @@ static int parse_status(const char *value) BUG(); } - dbg("port %d status %d speed %d devid %x", - port, status, speed, devid); + dbg("hub %s port %d status %d speed %d devid %x", + hub, port, status, speed, devid); dbg("socket %lx lbusid %s", socket, lbusid); - /* if a device is connected, look at it */ - { - struct usbip_imported_device *idev = &vhci_driver->idev[port]; + idev = &vhci_driver->idev[port]; + memset(idev, 0, sizeof(*idev)); + + if (strncmp("hs", hub, 2) == 0) + idev->hub = HUB_SPEED_HIGH; + else /* strncmp("ss", hub, 2) == 0 */ + idev->hub = HUB_SPEED_SUPER; - idev->port = port; - idev->status = status; + idev->port = port; + idev->status = status; - idev->devid = devid; + idev->devid = devid; - idev->busnum = (devid >> 16); - idev->devnum = (devid & 0x0000ffff); + idev->busnum = (devid >> 16); + idev->devnum = (devid & 0x0000ffff); - if (idev->status != VDEV_ST_NULL - && idev->status != VDEV_ST_NOTASSIGNED) { - idev = imported_device_init(idev, lbusid); - if (!idev) { - dbg("imported_device_init failed"); - return -1; - } + if (idev->status != VDEV_ST_NULL + && idev->status != VDEV_ST_NOTASSIGNED) { + idev = imported_device_init(idev, lbusid); + if (!idev) { + dbg("imported_device_init failed"); + return -1; } } - /* go to the next line */ c = strchr(c, '\n'); if (!c) @@ -107,18 +105,33 @@ static int parse_status(const char *value) return 0; } +#define MAX_STATUS_NAME 16 + static int refresh_imported_device_list(void) { const char *attr_status; + char status[MAX_STATUS_NAME+1] = "status"; + int i, ret; - attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device, - "status"); - if (!attr_status) { - err("udev_device_get_sysattr_value failed"); - return -1; + for (i = 0; i < vhci_driver->ncontrollers; i++) { + if (i > 0) + snprintf(status, sizeof(status), "status.%d", i); + + attr_status = udev_device_get_sysattr_value(vhci_driver->hc_device, + status); + if (!attr_status) { + err("udev_device_get_sysattr_value failed"); + return -1; + } + + dbg("controller %d", i); + + ret = parse_status(attr_status); + if (ret != 0) + return ret; } - return parse_status(attr_status); + return 0; } static int get_nports(void) @@ -134,6 +147,33 @@ static int get_nports(void) return (int)strtoul(attr_nports, NULL, 10); } +static int vhci_hcd_filter(const struct dirent *dirent) +{ + return strcmp(dirent->d_name, 
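Editor's note on the vhci_driver.c changes above: with multiple vhci_hcd controllers, refresh_imported_device_list() now walks one status attribute per controller ("status", "status.1", ...), and each line gains a leading hub-speed column ("hs" or "ss") that, further below, usbip_vhci_get_free_port() uses to place SuperSpeed devices only on SuperSpeed ports. To make the new line format concrete, here is a small stand-alone parse using the same sscanf format as parse_status(); the sample line itself is made up:

#include <stdio.h>

int main(void)
{
	/* hypothetical status line: hub port status speed devid sockfd local_busid */
	const char *line = "hs 0000 004 000 00010002 0000000000000003 1-2.3";
	char hub[3], lbusid[32];
	int port, status, speed;
	unsigned int devid;
	unsigned long socket;
	int n;

	n = sscanf(line, "%2s %d %d %d %x %lx %31s\n",
		   hub, &port, &status, &speed, &devid, &socket, lbusid);
	printf("matched %d fields: hub=%s port=%d status=%d devid=%x sockfd=%lx busid=%s\n",
	       n, hub, port, status, devid, socket, lbusid);
	return 0;
}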
"vhci_hcd") >= 0; +} + +static int get_ncontrollers(void) +{ + struct dirent **namelist; + struct udev_device *platform; + int n; + + platform = udev_device_get_parent(vhci_driver->hc_device); + if (platform == NULL) + return -1; + + n = scandir(udev_device_get_syspath(platform), &namelist, vhci_hcd_filter, NULL); + if (n < 0) + err("scandir failed"); + else { + for (int i = 0; i < n; i++) + free(namelist[i]); + free(namelist); + } + + return n; +} + /* * Read the given port's record. * @@ -213,16 +253,31 @@ int usbip_vhci_driver_open(void) vhci_driver->hc_device = udev_device_new_from_subsystem_sysname(udev_context, USBIP_VHCI_BUS_TYPE, - USBIP_VHCI_DRV_NAME); + USBIP_VHCI_DEVICE_NAME); if (!vhci_driver->hc_device) { err("udev_device_new_from_subsystem_sysname failed"); goto err; } vhci_driver->nports = get_nports(); - dbg("available ports: %d", vhci_driver->nports); + if (vhci_driver->nports <= 0) { + err("no available ports"); + goto err; + } else if (vhci_driver->nports > MAXNPORT) { + err("port number exceeds %d", MAXNPORT); + goto err; + } + + vhci_driver->ncontrollers = get_ncontrollers(); + dbg("available controllers: %d", vhci_driver->ncontrollers); + + if (vhci_driver->ncontrollers <=0) { + err("no available usb controllers"); + goto err; + } + if (refresh_imported_device_list()) goto err; @@ -270,11 +325,15 @@ err: } -int usbip_vhci_get_free_port(void) +int usbip_vhci_get_free_port(uint32_t speed) { for (int i = 0; i < vhci_driver->nports; i++) { + if (speed == USB_SPEED_SUPER && + vhci_driver->idev[i].hub != HUB_SPEED_SUPER) + continue; + if (vhci_driver->idev[i].status == VDEV_ST_NULL) - return i; + return vhci_driver->idev[i].port; } return -1; diff --git a/tools/usb/usbip/libsrc/vhci_driver.h b/tools/usb/usbip/libsrc/vhci_driver.h index fa2316cf2cac..4898d3bafb10 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.h +++ b/tools/usb/usbip/libsrc/vhci_driver.h @@ -11,9 +11,16 @@ #include "usbip_common.h" #define USBIP_VHCI_BUS_TYPE "platform" +#define USBIP_VHCI_DEVICE_NAME "vhci_hcd.0" #define MAXNPORT 128 +enum hub_speed { + HUB_SPEED_HIGH = 0, + HUB_SPEED_SUPER, +}; + struct usbip_imported_device { + enum hub_speed hub; uint8_t port; uint32_t status; @@ -31,6 +38,7 @@ struct usbip_vhci_driver { /* /sys/devices/platform/vhci_hcd */ struct udev_device *hc_device; + int ncontrollers; int nports; struct usbip_imported_device idev[MAXNPORT]; }; @@ -44,7 +52,7 @@ void usbip_vhci_driver_close(void); int usbip_vhci_refresh_device_list(void); -int usbip_vhci_get_free_port(void); +int usbip_vhci_get_free_port(uint32_t speed); int usbip_vhci_attach_device2(uint8_t port, int sockfd, uint32_t devid, uint32_t speed); diff --git a/tools/usb/usbip/src/usbip_attach.c b/tools/usb/usbip/src/usbip_attach.c index 70a6b507fb62..6e89768ffe30 100644 --- a/tools/usb/usbip/src/usbip_attach.c +++ b/tools/usb/usbip/src/usbip_attach.c @@ -94,6 +94,7 @@ static int import_device(int sockfd, struct usbip_usb_device *udev) { int rc; int port; + uint32_t speed = udev->speed; rc = usbip_vhci_driver_open(); if (rc < 0) { @@ -101,13 +102,15 @@ static int import_device(int sockfd, struct usbip_usb_device *udev) return -1; } - port = usbip_vhci_get_free_port(); + port = usbip_vhci_get_free_port(speed); if (port < 0) { err("no free port"); usbip_vhci_driver_close(); return -1; } + dbg("got free port %d", port); + rc = usbip_vhci_attach_device(port, sockfd, udev->busnum, udev->devnum, udev->speed); if (rc < 0) { |