author    | Jakub Kicinski <kuba@kernel.org> | 2023-10-05 23:16:31 +0300
committer | Jakub Kicinski <kuba@kernel.org> | 2023-10-05 23:16:47 +0300
commit    | 2606cf059c56bfb86d5d6bd0f41bd7eedefc8b0a (patch)
tree      | 6bd918ad4fc55e677cc6ccb3212eab873c467c7f /tools
parent    | 49e7265fd098fdade2bbdd9331e6b914cda7fa83 (diff)
parent    | f291209eca5eba0b4704fa0832af57b12dbc1a02 (diff)
download  | linux-2606cf059c56bfb86d5d6bd0f41bd7eedefc8b0a.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.
No conflicts (or adjacent changes of note).
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
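The subject line above is the form git typically gives a merge created by pulling the netdev "net" tree directly by URL. A minimal sketch of reproducing this kind of cross-merge locally (an assumption for illustration: a net-next checkout with a clean work tree; the URL is the one in the subject line):

    # fetch and merge the current net tree; git records the URL in the merge subject
    git pull git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net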
Diffstat (limited to 'tools')
41 files changed, 811 insertions, 607 deletions
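The per-file breakdown behind this summary can be regenerated from the commit and first-parent hashes listed in the header (a sketch, assuming both commits are already present in the local clone; the merge is summarized against its first parent):

    # per-file stat of the merge against its first parent, limited to tools/
    git diff --stat 49e7265fd098 2606cf059c56 -- tools/

    # same range as a full patch
    git diff 49e7265fd098 2606cf059c56 -- tools/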
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index a00a53e15ab7..1d111350197f 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -57,6 +57,7 @@ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ +#define PRED_CMD_SBPB BIT(7) /* Selective Branch Prediction Barrier */ #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f @@ -155,6 +156,15 @@ * Not susceptible to Post-Barrier * Return Stack Buffer Predictions. */ +#define ARCH_CAP_GDS_CTRL BIT(25) /* + * CPU is vulnerable to Gather + * Data Sampling (GDS) and + * has controls for mitigation. + */ +#define ARCH_CAP_GDS_NO BIT(26) /* + * CPU is not vulnerable to Gather + * Data Sampling (GDS). + */ #define ARCH_CAP_XAPIC_DISABLE BIT(21) /* * IA32_XAPIC_DISABLE_STATUS MSR @@ -178,6 +188,8 @@ #define RNGDS_MITG_DIS BIT(0) /* SRBDS support */ #define RTM_ALLOW BIT(1) /* TSX development mode */ #define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ +#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */ +#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index fd6c1cb585db..abe087c53b4b 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -820,8 +820,11 @@ __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) #define __NR_cachestat 451 __SYSCALL(__NR_cachestat, sys_cachestat) +#define __NR_fchmodat2 452 +__SYSCALL(__NR_fchmodat2, sys_fchmodat2) + #undef __NR_syscalls -#define __NR_syscalls 452 +#define __NR_syscalls 453 /* * 32 bit systems traditionally used different diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h index a87bbbbca2d4..794c1d857677 100644 --- a/tools/include/uapi/drm/drm.h +++ b/tools/include/uapi/drm/drm.h @@ -673,8 +673,11 @@ struct drm_gem_open { * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT * and &DRM_PRIME_CAP_EXPORT. * - * PRIME buffers are exposed as dma-buf file descriptors. See - * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing". + * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and + * &DRM_PRIME_CAP_EXPORT are always advertised. + * + * PRIME buffers are exposed as dma-buf file descriptors. + * See :ref:`prime_buffer_sharing`. */ #define DRM_CAP_PRIME 0x5 /** @@ -682,6 +685,8 @@ struct drm_gem_open { * * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl. + * + * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME. */ #define DRM_PRIME_CAP_IMPORT 0x1 /** @@ -689,6 +694,8 @@ struct drm_gem_open { * * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl. + * + * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME. */ #define DRM_PRIME_CAP_EXPORT 0x2 /** @@ -756,15 +763,14 @@ struct drm_gem_open { /** * DRM_CAP_SYNCOBJ * - * If set to 1, the driver supports sync objects. See - * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects". + * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`. 
*/ #define DRM_CAP_SYNCOBJ 0x13 /** * DRM_CAP_SYNCOBJ_TIMELINE * * If set to 1, the driver supports timeline operations on sync objects. See - * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects". + * :ref:`drm_sync_objects`. */ #define DRM_CAP_SYNCOBJ_TIMELINE 0x14 @@ -909,6 +915,27 @@ struct drm_syncobj_timeline_wait { __u32 pad; }; +/** + * struct drm_syncobj_eventfd + * @handle: syncobj handle. + * @flags: Zero to wait for the point to be signalled, or + * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be + * available for the point. + * @point: syncobj timeline point (set to zero for binary syncobjs). + * @fd: Existing eventfd to sent events to. + * @pad: Must be zero. + * + * Register an eventfd to be signalled by a syncobj. The eventfd counter will + * be incremented by one. + */ +struct drm_syncobj_eventfd { + __u32 handle; + __u32 flags; + __u64 point; + __s32 fd; + __u32 pad; +}; + struct drm_syncobj_array { __u64 handles; @@ -1169,6 +1196,8 @@ extern "C" { */ #define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2) +#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd) + /* * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f. @@ -1180,25 +1209,50 @@ extern "C" { #define DRM_COMMAND_BASE 0x40 #define DRM_COMMAND_END 0xA0 -/* - * Header for events written back to userspace on the drm fd. The - * type defines the type of event, the length specifies the total - * length of the event (including the header), and user_data is - * typically a 64 bit value passed with the ioctl that triggered the - * event. A read on the drm fd will always only return complete - * events, that is, if for example the read buffer is 100 bytes, and - * there are two 64 byte events pending, only one will be returned. +/** + * struct drm_event - Header for DRM events + * @type: event type. + * @length: total number of payload bytes (including header). * - * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and - * up are chipset specific. + * This struct is a header for events written back to user-space on the DRM FD. + * A read on the DRM FD will always only return complete events: e.g. if the + * read buffer is 100 bytes large and there are two 64 byte events pending, + * only one will be returned. + * + * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and + * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK, + * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE. */ struct drm_event { __u32 type; __u32 length; }; +/** + * DRM_EVENT_VBLANK - vertical blanking event + * + * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the + * &_DRM_VBLANK_EVENT flag set. + * + * The event payload is a struct drm_event_vblank. + */ #define DRM_EVENT_VBLANK 0x01 +/** + * DRM_EVENT_FLIP_COMPLETE - page-flip completion event + * + * This event is sent in response to an atomic commit or legacy page-flip with + * the &DRM_MODE_PAGE_FLIP_EVENT flag set. + * + * The event payload is a struct drm_event_vblank. + */ #define DRM_EVENT_FLIP_COMPLETE 0x02 +/** + * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event + * + * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE. + * + * The event payload is a struct drm_event_crtc_sequence. 
+ */ #define DRM_EVENT_CRTC_SEQUENCE 0x03 struct drm_event_vblank { diff --git a/tools/include/uapi/linux/seccomp.h b/tools/include/uapi/linux/seccomp.h new file mode 100644 index 000000000000..dbfc9b37fcae --- /dev/null +++ b/tools/include/uapi/linux/seccomp.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_SECCOMP_H +#define _UAPI_LINUX_SECCOMP_H + +#include <linux/compiler.h> +#include <linux/types.h> + + +/* Valid values for seccomp.mode and prctl(PR_SET_SECCOMP, <mode>) */ +#define SECCOMP_MODE_DISABLED 0 /* seccomp is not in use. */ +#define SECCOMP_MODE_STRICT 1 /* uses hard-coded filter. */ +#define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */ + +/* Valid operations for seccomp syscall. */ +#define SECCOMP_SET_MODE_STRICT 0 +#define SECCOMP_SET_MODE_FILTER 1 +#define SECCOMP_GET_ACTION_AVAIL 2 +#define SECCOMP_GET_NOTIF_SIZES 3 + +/* Valid flags for SECCOMP_SET_MODE_FILTER */ +#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0) +#define SECCOMP_FILTER_FLAG_LOG (1UL << 1) +#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2) +#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3) +#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4) +/* Received notifications wait in killable state (only respond to fatal signals) */ +#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5) + +/* + * All BPF programs must return a 32-bit value. + * The bottom 16-bits are for optional return data. + * The upper 16-bits are ordered from least permissive values to most, + * as a signed value (so 0x8000000 is negative). + * + * The ordering ensures that a min_t() over composed return values always + * selects the least permissive choice. + */ +#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */ +#define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */ +#define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD +#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ +#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ +#define SECCOMP_RET_USER_NOTIF 0x7fc00000U /* notifies userspace */ +#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ +#define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */ +#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ + +/* Masks for the return value sections. */ +#define SECCOMP_RET_ACTION_FULL 0xffff0000U +#define SECCOMP_RET_ACTION 0x7fff0000U +#define SECCOMP_RET_DATA 0x0000ffffU + +/** + * struct seccomp_data - the format the BPF program executes over. + * @nr: the system call number + * @arch: indicates system call convention as an AUDIT_ARCH_* value + * as defined in <linux/audit.h>. + * @instruction_pointer: at the time of the system call. + * @args: up to 6 system call arguments always stored as 64-bit values + * regardless of the architecture. + */ +struct seccomp_data { + int nr; + __u32 arch; + __u64 instruction_pointer; + __u64 args[6]; +}; + +struct seccomp_notif_sizes { + __u16 seccomp_notif; + __u16 seccomp_notif_resp; + __u16 seccomp_data; +}; + +struct seccomp_notif { + __u64 id; + __u32 pid; + __u32 flags; + struct seccomp_data data; +}; + +/* + * Valid flags for struct seccomp_notif_resp + * + * Note, the SECCOMP_USER_NOTIF_FLAG_CONTINUE flag must be used with caution! + * If set by the process supervising the syscalls of another process the + * syscall will continue. This is problematic because of an inherent TOCTOU. 
+ * An attacker can exploit the time while the supervised process is waiting on + * a response from the supervising process to rewrite syscall arguments which + * are passed as pointers of the intercepted syscall. + * It should be absolutely clear that this means that the seccomp notifier + * _cannot_ be used to implement a security policy! It should only ever be used + * in scenarios where a more privileged process supervises the syscalls of a + * lesser privileged process to get around kernel-enforced security + * restrictions when the privileged process deems this safe. In other words, + * in order to continue a syscall the supervising process should be sure that + * another security mechanism or the kernel itself will sufficiently block + * syscalls if arguments are rewritten to something unsafe. + * + * Similar precautions should be applied when stacking SECCOMP_RET_USER_NOTIF + * or SECCOMP_RET_TRACE. For SECCOMP_RET_USER_NOTIF filters acting on the + * same syscall, the most recently added filter takes precedence. This means + * that the new SECCOMP_RET_USER_NOTIF filter can override any + * SECCOMP_IOCTL_NOTIF_SEND from earlier filters, essentially allowing all + * such filtered syscalls to be executed by sending the response + * SECCOMP_USER_NOTIF_FLAG_CONTINUE. Note that SECCOMP_RET_TRACE can equally + * be overriden by SECCOMP_USER_NOTIF_FLAG_CONTINUE. + */ +#define SECCOMP_USER_NOTIF_FLAG_CONTINUE (1UL << 0) + +struct seccomp_notif_resp { + __u64 id; + __s64 val; + __s32 error; + __u32 flags; +}; + +#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0) + +/* valid flags for seccomp_notif_addfd */ +#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */ +#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */ + +/** + * struct seccomp_notif_addfd + * @id: The ID of the seccomp notification + * @flags: SECCOMP_ADDFD_FLAG_* + * @srcfd: The local fd number + * @newfd: Optional remote FD number if SETFD option is set, otherwise 0. + * @newfd_flags: The O_* flags the remote FD should have applied + */ +struct seccomp_notif_addfd { + __u64 id; + __u32 flags; + __u32 srcfd; + __u32 newfd; + __u32 newfd_flags; +}; + +#define SECCOMP_IOC_MAGIC '!' +#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr) +#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type) +#define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type) +#define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type) + +/* Flags for seccomp notification fd ioctl. 
*/ +#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif) +#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \ + struct seccomp_notif_resp) +#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64) +/* On success, the return value is the remote process's added fd number */ +#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \ + struct seccomp_notif_addfd) + +#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64) + +#endif /* _UAPI_LINUX_SECCOMP_H */ diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl index cfda2511badf..cb5e757f6621 100644 --- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl +++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl @@ -366,3 +366,4 @@ 449 n64 futex_waitv sys_futex_waitv 450 common set_mempolicy_home_node sys_set_mempolicy_home_node 451 n64 cachestat sys_cachestat +452 n64 fchmodat2 sys_fchmodat2 diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 8c0b08b7a80e..20e50586e8a2 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -538,3 +538,4 @@ 449 common futex_waitv sys_futex_waitv 450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node 451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index a6935af2235c..0122cc156952 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -454,3 +454,4 @@ 449 common futex_waitv sys_futex_waitv sys_futex_waitv 450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node 451 common cachestat sys_cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 sys_fchmodat2 diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 227538b0ce80..1d6eee30eceb 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -373,6 +373,8 @@ 449 common futex_waitv sys_futex_waitv 450 common set_mempolicy_home_node sys_set_mempolicy_home_node 451 common cachestat sys_cachestat +452 common fchmodat2 sys_fchmodat2 +453 64 map_shadow_stack sys_map_shadow_stack # # Due to a historical design error, certain syscalls are numbered differently diff --git a/tools/perf/bench/sched-seccomp-notify.c b/tools/perf/bench/sched-seccomp-notify.c index b04ebcde4036..a01c40131493 100644 --- a/tools/perf/bench/sched-seccomp-notify.c +++ b/tools/perf/bench/sched-seccomp-notify.c @@ -9,7 +9,7 @@ #include <sys/syscall.h> #include <sys/ioctl.h> #include <linux/time64.h> -#include <linux/seccomp.h> +#include <uapi/linux/seccomp.h> #include <sys/prctl.h> #include <unistd.h> diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 4314c9197850..e21caadda7c1 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -21,6 +21,7 @@ FILES=( "include/uapi/linux/perf_event.h" "include/uapi/linux/prctl.h" "include/uapi/linux/sched.h" + "include/uapi/linux/seccomp.h" "include/uapi/linux/stat.h" "include/uapi/linux/usbdevice_fs.h" "include/uapi/linux/vhost.h" diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index a7e88332276d..72ba4a9239c6 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ 
-991,7 +991,7 @@ const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu) } } free(cpuid); - if (!pmu) + if (!pmu || !table) return table; for (i = 0; i < table->num_pmus; i++) { diff --git a/tools/perf/pmu-events/metric.py b/tools/perf/pmu-events/metric.py index 0e9ec65d92ae..3e673f25d5fd 100644 --- a/tools/perf/pmu-events/metric.py +++ b/tools/perf/pmu-events/metric.py @@ -413,10 +413,10 @@ def has_event(event: Event) -> Function: # pylint: disable=invalid-name return Function('has_event', event) -def strcmp_cpuid_str(event: str) -> Function: +def strcmp_cpuid_str(cpuid: Event) -> Function: # pylint: disable=redefined-builtin # pylint: disable=invalid-name - return Function('strcmp_cpuid_str', event) + return Function('strcmp_cpuid_str', cpuid) class Metric: """An individual metric that will specifiable on the perf command line.""" diff --git a/tools/perf/util/bpf-prologue.c b/tools/perf/util/bpf-prologue.c deleted file mode 100644 index 9887ae09242d..000000000000 --- a/tools/perf/util/bpf-prologue.c +++ /dev/null @@ -1,508 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * bpf-prologue.c - * - * Copyright (C) 2015 He Kuang <hekuang@huawei.com> - * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> - * Copyright (C) 2015 Huawei Inc. - */ - -#include <bpf/libbpf.h> -#include "debug.h" -#include "bpf-loader.h" -#include "bpf-prologue.h" -#include "probe-finder.h" -#include <errno.h> -#include <stdlib.h> -#include <dwarf-regs.h> -#include <linux/filter.h> - -#define BPF_REG_SIZE 8 - -#define JMP_TO_ERROR_CODE -1 -#define JMP_TO_SUCCESS_CODE -2 -#define JMP_TO_USER_CODE -3 - -struct bpf_insn_pos { - struct bpf_insn *begin; - struct bpf_insn *end; - struct bpf_insn *pos; -}; - -static inline int -pos_get_cnt(struct bpf_insn_pos *pos) -{ - return pos->pos - pos->begin; -} - -static int -append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos) -{ - if (!pos->pos) - return -BPF_LOADER_ERRNO__PROLOGUE2BIG; - - if (pos->pos + 1 >= pos->end) { - pr_err("bpf prologue: prologue too long\n"); - pos->pos = NULL; - return -BPF_LOADER_ERRNO__PROLOGUE2BIG; - } - - *(pos->pos)++ = new_insn; - return 0; -} - -static int -check_pos(struct bpf_insn_pos *pos) -{ - if (!pos->pos || pos->pos >= pos->end) - return -BPF_LOADER_ERRNO__PROLOGUE2BIG; - return 0; -} - -/* - * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see - * Documentation/trace/kprobetrace.rst) to size field of BPF_LDX_MEM - * instruction (BPF_{B,H,W,DW}). - */ -static int -argtype_to_ldx_size(const char *type) -{ - int arg_size = type ? atoi(&type[1]) : 64; - - switch (arg_size) { - case 8: - return BPF_B; - case 16: - return BPF_H; - case 32: - return BPF_W; - case 64: - default: - return BPF_DW; - } -} - -static const char * -insn_sz_to_str(int insn_sz) -{ - switch (insn_sz) { - case BPF_B: - return "BPF_B"; - case BPF_H: - return "BPF_H"; - case BPF_W: - return "BPF_W"; - case BPF_DW: - return "BPF_DW"; - default: - return "UNKNOWN"; - } -} - -/* Give it a shorter name */ -#define ins(i, p) append_insn((i), (p)) - -/* - * Give a register name (in 'reg'), generate instruction to - * load register into an eBPF register rd: - * 'ldd target_reg, offset(ctx_reg)', where: - * ctx_reg is pre initialized to pointer of 'struct pt_regs'. 
- */ -static int -gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg, - const char *reg, int target_reg) -{ - int offset = regs_query_register_offset(reg); - - if (offset < 0) { - pr_err("bpf: prologue: failed to get register %s\n", - reg); - return offset; - } - ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos); - - return check_pos(pos); -} - -/* - * Generate a BPF_FUNC_probe_read function call. - * - * src_base_addr_reg is a register holding base address, - * dst_addr_reg is a register holding dest address (on stack), - * result is: - * - * *[dst_addr_reg] = *([src_base_addr_reg] + offset) - * - * Arguments of BPF_FUNC_probe_read: - * ARG1: ptr to stack (dest) - * ARG2: size (8) - * ARG3: unsafe ptr (src) - */ -static int -gen_read_mem(struct bpf_insn_pos *pos, - int src_base_addr_reg, - int dst_addr_reg, - long offset, - int probeid) -{ - /* mov arg3, src_base_addr_reg */ - if (src_base_addr_reg != BPF_REG_ARG3) - ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos); - /* add arg3, #offset */ - if (offset) - ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos); - - /* mov arg2, #reg_size */ - ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos); - - /* mov arg1, dst_addr_reg */ - if (dst_addr_reg != BPF_REG_ARG1) - ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos); - - /* Call probe_read */ - ins(BPF_EMIT_CALL(probeid), pos); - /* - * Error processing: if read fail, goto error code, - * will be relocated. Target should be the start of - * error processing code. - */ - ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE), - pos); - - return check_pos(pos); -} - -/* - * Each arg should be bare register. Fetch and save them into argument - * registers (r3 - r5). - * - * BPF_REG_1 should have been initialized with pointer to - * 'struct pt_regs'. - */ -static int -gen_prologue_fastpath(struct bpf_insn_pos *pos, - struct probe_trace_arg *args, int nargs) -{ - int i, err = 0; - - for (i = 0; i < nargs; i++) { - err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value, - BPF_PROLOGUE_START_ARG_REG + i); - if (err) - goto errout; - } - - return check_pos(pos); -errout: - return err; -} - -/* - * Slow path: - * At least one argument has the form of 'offset($rx)'. - * - * Following code first stores them into stack, then loads all of then - * to r2 - r5. - * Before final loading, the final result should be: - * - * low address - * BPF_REG_FP - 24 ARG3 - * BPF_REG_FP - 16 ARG2 - * BPF_REG_FP - 8 ARG1 - * BPF_REG_FP - * high address - * - * For each argument (described as: offn(...off2(off1(reg)))), - * generates following code: - * - * r7 <- fp - * r7 <- r7 - stack_offset // Ideal code should initialize r7 using - * // fp before generating args. However, - * // eBPF won't regard r7 as stack pointer - * // if it is generated by minus 8 from - * // another stack pointer except fp. - * // This is why we have to set r7 - * // to fp for each variable. - * r3 <- value of 'reg'-> generated using gen_ldx_reg_from_ctx() - * (r7) <- r3 // skip following instructions for bare reg - * r3 <- r3 + off1 . // skip if off1 == 0 - * r2 <- 8 \ - * r1 <- r7 |-> generated by gen_read_mem() - * call probe_read / - * jnei r0, 0, err ./ - * r3 <- (r7) - * r3 <- r3 + off2 . // skip if off2 == 0 - * r2 <- 8 \ // r2 may be broken by probe_read, so set again - * r1 <- r7 |-> generated by gen_read_mem() - * call probe_read / - * jnei r0, 0, err ./ - * ... 
- */ -static int -gen_prologue_slowpath(struct bpf_insn_pos *pos, - struct probe_trace_arg *args, int nargs) -{ - int err, i, probeid; - - for (i = 0; i < nargs; i++) { - struct probe_trace_arg *arg = &args[i]; - const char *reg = arg->value; - struct probe_trace_arg_ref *ref = NULL; - int stack_offset = (i + 1) * -8; - - pr_debug("prologue: fetch arg %d, base reg is %s\n", - i, reg); - - /* value of base register is stored into ARG3 */ - err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg, - BPF_REG_ARG3); - if (err) { - pr_err("prologue: failed to get offset of register %s\n", - reg); - goto errout; - } - - /* Make r7 the stack pointer. */ - ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos); - /* r7 += -8 */ - ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos); - /* - * Store r3 (base register) onto stack - * Ensure fp[offset] is set. - * fp is the only valid base register when storing - * into stack. We are not allowed to use r7 as base - * register here. - */ - ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3, - stack_offset), pos); - - ref = arg->ref; - probeid = BPF_FUNC_probe_read_kernel; - while (ref) { - pr_debug("prologue: arg %d: offset %ld\n", - i, ref->offset); - - if (ref->user_access) - probeid = BPF_FUNC_probe_read_user; - - err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7, - ref->offset, probeid); - if (err) { - pr_err("prologue: failed to generate probe_read function call\n"); - goto errout; - } - - ref = ref->next; - /* - * Load previous result into ARG3. Use - * BPF_REG_FP instead of r7 because verifier - * allows FP based addressing only. - */ - if (ref) - ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, - BPF_REG_FP, stack_offset), pos); - } - } - - /* Final pass: read to registers */ - for (i = 0; i < nargs; i++) { - int insn_sz = (args[i].ref) ? 
argtype_to_ldx_size(args[i].type) : BPF_DW; - - pr_debug("prologue: load arg %d, insn_sz is %s\n", - i, insn_sz_to_str(insn_sz)); - ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i, - BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos); - } - - ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos); - - return check_pos(pos); -errout: - return err; -} - -static int -prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code, - struct bpf_insn *success_code, struct bpf_insn *user_code) -{ - struct bpf_insn *insn; - - if (check_pos(pos)) - return -BPF_LOADER_ERRNO__PROLOGUE2BIG; - - for (insn = pos->begin; insn < pos->pos; insn++) { - struct bpf_insn *target; - u8 class = BPF_CLASS(insn->code); - u8 opcode; - - if (class != BPF_JMP) - continue; - opcode = BPF_OP(insn->code); - if (opcode == BPF_CALL) - continue; - - switch (insn->off) { - case JMP_TO_ERROR_CODE: - target = error_code; - break; - case JMP_TO_SUCCESS_CODE: - target = success_code; - break; - case JMP_TO_USER_CODE: - target = user_code; - break; - default: - pr_err("bpf prologue: internal error: relocation failed\n"); - return -BPF_LOADER_ERRNO__PROLOGUE; - } - - insn->off = target - (insn + 1); - } - return 0; -} - -int bpf__gen_prologue(struct probe_trace_arg *args, int nargs, - struct bpf_insn *new_prog, size_t *new_cnt, - size_t cnt_space) -{ - struct bpf_insn *success_code = NULL; - struct bpf_insn *error_code = NULL; - struct bpf_insn *user_code = NULL; - struct bpf_insn_pos pos; - bool fastpath = true; - int err = 0, i; - - if (!new_prog || !new_cnt) - return -EINVAL; - - if (cnt_space > BPF_MAXINSNS) - cnt_space = BPF_MAXINSNS; - - pos.begin = new_prog; - pos.end = new_prog + cnt_space; - pos.pos = new_prog; - - if (!nargs) { - ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), - &pos); - - if (check_pos(&pos)) - goto errout; - - *new_cnt = pos_get_cnt(&pos); - return 0; - } - - if (nargs > BPF_PROLOGUE_MAX_ARGS) { - pr_warning("bpf: prologue: %d arguments are dropped\n", - nargs - BPF_PROLOGUE_MAX_ARGS); - nargs = BPF_PROLOGUE_MAX_ARGS; - } - - /* First pass: validation */ - for (i = 0; i < nargs; i++) { - struct probe_trace_arg_ref *ref = args[i].ref; - - if (args[i].value[0] == '@') { - /* TODO: fetch global variable */ - pr_err("bpf: prologue: global %s%+ld not support\n", - args[i].value, ref ? ref->offset : 0); - return -ENOTSUP; - } - - while (ref) { - /* fastpath is true if all args has ref == NULL */ - fastpath = false; - - /* - * Instruction encodes immediate value using - * s32, ref->offset is long. On systems which - * can't fill long in s32, refuse to process if - * ref->offset too large (or small). - */ -#ifdef __LP64__ -#define OFFSET_MAX ((1LL << 31) - 1) -#define OFFSET_MIN ((1LL << 31) * -1) - if (ref->offset > OFFSET_MAX || - ref->offset < OFFSET_MIN) { - pr_err("bpf: prologue: offset out of bound: %ld\n", - ref->offset); - return -BPF_LOADER_ERRNO__PROLOGUEOOB; - } -#endif - ref = ref->next; - } - } - pr_debug("prologue: pass validation\n"); - - if (fastpath) { - /* If all variables are registers... */ - pr_debug("prologue: fast path\n"); - err = gen_prologue_fastpath(&pos, args, nargs); - if (err) - goto errout; - } else { - pr_debug("prologue: slow path\n"); - - /* Initialization: move ctx to a callee saved register. 
*/ - ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos); - - err = gen_prologue_slowpath(&pos, args, nargs); - if (err) - goto errout; - /* - * start of ERROR_CODE (only slow pass needs error code) - * mov r2 <- 1 // r2 is error number - * mov r3 <- 0 // r3, r4... should be touched or - * // verifier would complain - * mov r4 <- 0 - * ... - * goto usercode - */ - error_code = pos.pos; - ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1), - &pos); - - for (i = 0; i < nargs; i++) - ins(BPF_ALU64_IMM(BPF_MOV, - BPF_PROLOGUE_START_ARG_REG + i, - 0), - &pos); - ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE), - &pos); - } - - /* - * start of SUCCESS_CODE: - * mov r2 <- 0 - * goto usercode // skip - */ - success_code = pos.pos; - ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos); - - /* - * start of USER_CODE: - * Restore ctx to r1 - */ - user_code = pos.pos; - if (!fastpath) { - /* - * Only slow path needs restoring of ctx. In fast path, - * register are loaded directly from r1. - */ - ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos); - err = prologue_relocate(&pos, error_code, success_code, - user_code); - if (err) - goto errout; - } - - err = check_pos(&pos); - if (err) - goto errout; - - *new_cnt = pos_get_cnt(&pos); - return 0; -errout: - return err; -} diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c index 90ce22f9c1a9..939ec769bf4a 100644 --- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c +++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c @@ -23,7 +23,9 @@ #define MAX_CPUS 4096 // FIXME: These should come from system headers +#ifndef bool typedef char bool; +#endif typedef int pid_t; typedef long long int __s64; typedef __s64 time64_t; diff --git a/tools/perf/util/hashmap.h b/tools/perf/util/hashmap.h index 0a5bf1937a7c..c12f8320e668 100644 --- a/tools/perf/util/hashmap.h +++ b/tools/perf/util/hashmap.h @@ -80,16 +80,6 @@ struct hashmap { size_t sz; }; -#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \ - .hash_fn = (hash_fn), \ - .equal_fn = (equal_fn), \ - .ctx = (ctx), \ - .buckets = NULL, \ - .cap = 0, \ - .cap_bits = 0, \ - .sz = 0, \ -} - void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, hashmap_equal_fn equal_fn, void *ctx); struct hashmap *hashmap__new(hashmap_hash_fn hash_fn, diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index d85602aa4b9f..8de6f39abd1b 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -520,7 +520,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, pmu_name = pe->pmu; } - alias = malloc(sizeof(*alias)); + alias = zalloc(sizeof(*alias)); if (!alias) return -ENOMEM; diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 42806add0114..1a21d6beebc6 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -92,7 +92,7 @@ endif TARGETS += tmpfs TARGETS += tpm2 TARGETS += tty -TARGETS += uevents +TARGETS += uevent TARGETS += user TARGETS += user_events TARGETS += vDSO diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index a53c254c6058..4aabeaa525d4 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -185,6 +185,8 @@ static void test_cubic(void) do_test("bpf_cubic", NULL); + ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called"); + 
bpf_link__destroy(link); bpf_cubic__destroy(cubic_skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 064cc5e8d9ad..dda7060e86a0 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -475,6 +475,55 @@ out: test_sockmap_drop_prog__destroy(drop); } +static void test_sockmap_skb_verdict_peek(void) +{ + int err, map, verdict, s, c1, p1, zero = 0, sent, recvd, avail; + struct test_sockmap_pass_prog *pass; + char snd[256] = "0123456789"; + char rcv[256] = "0"; + + pass = test_sockmap_pass_prog__open_and_load(); + if (!ASSERT_OK_PTR(pass, "open_and_load")) + return; + verdict = bpf_program__fd(pass->progs.prog_skb_verdict); + map = bpf_map__fd(pass->maps.sock_map_rx); + + err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out; + + s = socket_loopback(AF_INET, SOCK_STREAM); + if (!ASSERT_GT(s, -1, "socket_loopback(s)")) + goto out; + + err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1); + if (!ASSERT_OK(err, "create_pairs(s)")) + goto out; + + err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c1)")) + goto out_close; + + sent = xsend(p1, snd, sizeof(snd), 0); + ASSERT_EQ(sent, sizeof(snd), "xsend(p1)"); + recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK); + ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)"); + err = ioctl(c1, FIONREAD, &avail); + ASSERT_OK(err, "ioctl(FIONREAD) error"); + ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)"); + recvd = recv(c1, rcv, sizeof(rcv), 0); + ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)"); + err = ioctl(c1, FIONREAD, &avail); + ASSERT_OK(err, "ioctl(FIONREAD) error"); + ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)"); + +out_close: + close(c1); + close(p1); +out: + test_sockmap_pass_prog__destroy(pass); +} + void test_sockmap_basic(void) { if (test__start_subtest("sockmap create_update_free")) @@ -515,4 +564,6 @@ void test_sockmap_basic(void) test_sockmap_skb_verdict_fionread(true); if (test__start_subtest("sockmap skb_verdict fionread on drop")) test_sockmap_skb_verdict_fionread(false); + if (test__start_subtest("sockmap skb_verdict msg_f_peek")) + test_sockmap_skb_verdict_peek(); } diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c index 7a2ecd4eca5d..99af79ea21a9 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c @@ -2378,3 +2378,87 @@ void serial_test_tc_opts_chain_mixed(void) test_tc_chain_mixed(BPF_TCX_INGRESS); test_tc_chain_mixed(BPF_TCX_EGRESS); } + +static int generate_dummy_prog(void) +{ + const struct bpf_insn prog_insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, opts); + const size_t log_buf_sz = 256; + char *log_buf; + int fd = -1; + + log_buf = malloc(log_buf_sz); + if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc")) + return fd; + opts.log_buf = log_buf; + opts.log_size = log_buf_sz; + + log_buf[0] = '\0'; + opts.log_level = 0; + fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, "tcx_prog", "GPL", + prog_insns, prog_insn_cnt, &opts); + ASSERT_STREQ(log_buf, "", "log_0"); + ASSERT_GE(fd, 0, "prog_fd"); + free(log_buf); + return fd; +} + +static void test_tc_opts_max_target(int target, int flags, bool relative) +{ + int err, ifindex, 
i, prog_fd, last_fd = -1; + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + const int max_progs = 63; + + ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth"); + ifindex = if_nametoindex("tcx_opts1"); + ASSERT_NEQ(ifindex, 0, "non_zero_ifindex"); + + assert_mprog_count_ifindex(ifindex, target, 0); + + for (i = 0; i < max_progs; i++) { + prog_fd = generate_dummy_prog(); + if (!ASSERT_GE(prog_fd, 0, "dummy_prog")) + goto cleanup; + err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + assert_mprog_count_ifindex(ifindex, target, i + 1); + if (i == max_progs - 1 && relative) + last_fd = prog_fd; + else + close(prog_fd); + } + + prog_fd = generate_dummy_prog(); + if (!ASSERT_GE(prog_fd, 0, "dummy_prog")) + goto cleanup; + opta.flags = flags; + if (last_fd > 0) + opta.relative_fd = last_fd; + err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta); + ASSERT_EQ(err, -ERANGE, "prog_64_attach"); + assert_mprog_count_ifindex(ifindex, target, max_progs); + close(prog_fd); +cleanup: + if (last_fd > 0) + close(last_fd); + ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth"); + ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed"); + ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed"); +} + +void serial_test_tc_opts_max(void) +{ + test_tc_opts_max_target(BPF_TCX_INGRESS, 0, false); + test_tc_opts_max_target(BPF_TCX_EGRESS, 0, false); + + test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_BEFORE, false); + test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_BEFORE, true); + + test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true); + test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false); +} diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c index d9660e7200e2..c997e3e3d3fb 100644 --- a/tools/testing/selftests/bpf/progs/bpf_cubic.c +++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c @@ -490,6 +490,8 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay) } } +int bpf_cubic_acked_called = 0; + void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk, const struct ack_sample *sample) { @@ -497,6 +499,7 @@ void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk, struct bictcp *ca = inet_csk_ca(sk); __u32 delay; + bpf_cubic_acked_called = 1; /* Some calls are for duplicates without timetamps */ if (sample->rtt_us < 0) return; diff --git a/tools/testing/selftests/fchmodat2/Makefile b/tools/testing/selftests/fchmodat2/Makefile index 20839f8e43f2..71ec34bf1501 100644 --- a/tools/testing/selftests/fchmodat2/Makefile +++ b/tools/testing/selftests/fchmodat2/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-or-later -CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined $(KHDR_INCLUDES) +CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan $(KHDR_INCLUDES) TEST_GEN_PROGS := fchmodat2_test include ../lib.mk diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index 3e36019eeb4a..5d7f28b02d73 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -387,7 +387,7 @@ char *strdup_printf(const char *fmt, ...) 
char *str; va_start(ap, fmt); - vasprintf(&str, fmt, ap); + TEST_ASSERT(vasprintf(&str, fmt, ap) >= 0, "vasprintf() failed"); va_end(ap); return str; diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c index d8ecacd03ecf..9f99ea42f45f 100644 --- a/tools/testing/selftests/kvm/riscv/get-reg-list.c +++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c @@ -12,19 +12,37 @@ #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK) +static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX]; + bool filter_reg(__u64 reg) { + switch (reg & ~REG_MASK) { /* - * Some ISA extensions are optional and not present on all host, - * but they can't be disabled through ISA_EXT registers when present. - * So, to make life easy, just filtering out these kind of registers. + * Same set of ISA_EXT registers are not present on all host because + * ISA_EXT registers are visible to the KVM user space based on the + * ISA extensions available on the host. Also, disabling an ISA + * extension using corresponding ISA_EXT register does not affect + * the visibility of the ISA_EXT register itself. + * + * Based on above, we should filter-out all ISA_EXT registers. */ - switch (reg & ~REG_MASK) { + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V: + case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR: @@ -32,6 +50,15 @@ bool filter_reg(__u64 reg) case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI: case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM: return true; + /* AIA registers are always available when Ssaia can't be disabled */ + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h): + case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h): + return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA]; default: break; } @@ -50,24 +77,27 @@ static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext) unsigned long value; ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value); - if (ret) { - printf("Failed to get ext %d", ext); - return false; - } - - return !!value; + return (ret) ? 
false : !!value; } void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c) { + unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 }; struct vcpu_reg_sublist *s; + int rc; + + for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) + __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]); /* * Disable all extensions which were enabled by default * if they were available in the risc-v host. */ - for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) - __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0); + for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) { + rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0); + if (rc && isa_ext_state[i]) + isa_ext_cant_disable[i] = true; + } for_each_sublist(c, s) { if (!s->feature) @@ -506,10 +536,6 @@ static __u64 base_regs[] = { KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time), KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare), KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state), - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A, - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C, - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I, - KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M, KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01, KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME, KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI, diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh index a5cb4b09a46c..0899019a7fcb 100755 --- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh +++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh @@ -25,7 +25,7 @@ if [[ "$1" == "-cgroup-v2" ]]; then fi if [[ $cgroup2 ]]; then - cgroup_path=$(mount -t cgroup2 | head -1 | awk -e '{print $3}') + cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}') if [[ -z "$cgroup_path" ]]; then cgroup_path=/dev/cgroup/memory mount -t cgroup2 none $cgroup_path @@ -33,7 +33,7 @@ if [[ $cgroup2 ]]; then fi echo "+hugetlb" >$cgroup_path/cgroup.subtree_control else - cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}') + cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') if [[ -z "$cgroup_path" ]]; then cgroup_path=/dev/cgroup/memory mount -t cgroup memory,hugetlb $cgroup_path diff --git a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh index bf2d2a684edf..14d26075c863 100755 --- a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh +++ b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh @@ -20,7 +20,7 @@ fi if [[ $cgroup2 ]]; then - CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk -e '{print $3}') + CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}') if [[ -z "$CGROUP_ROOT" ]]; then CGROUP_ROOT=/dev/cgroup/memory mount -t cgroup2 none $CGROUP_ROOT @@ -28,7 +28,7 @@ if [[ $cgroup2 ]]; then fi echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control else - CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}') + CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') if [[ -z "$CGROUP_ROOT" ]]; then CGROUP_ROOT=/dev/cgroup/memory mount -t cgroup 
memory,hugetlb $CGROUP_ROOT diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 321db8850da0..ef90aca4cc96 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile @@ -6,13 +6,14 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \ nft_concat_range.sh nft_conntrack_helper.sh \ nft_queue.sh nft_meta.sh nf_nat_edemux.sh \ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \ - conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh + conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \ + conntrack_sctp_collision.sh HOSTPKG_CONFIG := pkg-config CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null) LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl) -TEST_GEN_FILES = nf-queue connect_close audit_logread +TEST_GEN_FILES = nf-queue connect_close audit_logread sctp_collision include ../lib.mk diff --git a/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh new file mode 100755 index 000000000000..a924e595cfd8 --- /dev/null +++ b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh @@ -0,0 +1,89 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Testing For SCTP COLLISION SCENARIO as Below: +# +# 14:35:47.655279 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT] [init tag: 2017837359] +# 14:35:48.353250 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT] [init tag: 1187206187] +# 14:35:48.353275 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT ACK] [init tag: 2017837359] +# 14:35:48.353283 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [COOKIE ECHO] +# 14:35:48.353977 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [COOKIE ACK] +# 14:35:48.855335 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT ACK] [init tag: 164579970] +# +# TOPO: SERVER_NS (link0)<--->(link1) ROUTER_NS (link2)<--->(link3) CLIENT_NS + +CLIENT_NS=$(mktemp -u client-XXXXXXXX) +CLIENT_IP="198.51.200.1" +CLIENT_PORT=1234 + +SERVER_NS=$(mktemp -u server-XXXXXXXX) +SERVER_IP="198.51.100.1" +SERVER_PORT=1234 + +ROUTER_NS=$(mktemp -u router-XXXXXXXX) +CLIENT_GW="198.51.200.2" +SERVER_GW="198.51.100.2" + +# setup the topo +setup() { + ip net add $CLIENT_NS + ip net add $SERVER_NS + ip net add $ROUTER_NS + ip -n $SERVER_NS link add link0 type veth peer name link1 netns $ROUTER_NS + ip -n $CLIENT_NS link add link3 type veth peer name link2 netns $ROUTER_NS + + ip -n $SERVER_NS link set link0 up + ip -n $SERVER_NS addr add $SERVER_IP/24 dev link0 + ip -n $SERVER_NS route add $CLIENT_IP dev link0 via $SERVER_GW + + ip -n $ROUTER_NS link set link1 up + ip -n $ROUTER_NS link set link2 up + ip -n $ROUTER_NS addr add $SERVER_GW/24 dev link1 + ip -n $ROUTER_NS addr add $CLIENT_GW/24 dev link2 + ip net exec $ROUTER_NS sysctl -wq net.ipv4.ip_forward=1 + + ip -n $CLIENT_NS link set link3 up + ip -n $CLIENT_NS addr add $CLIENT_IP/24 dev link3 + ip -n $CLIENT_NS route add $SERVER_IP dev link3 via $CLIENT_GW + + # simulate the delay on OVS upcall by setting up a delay for INIT_ACK with + # tc on $SERVER_NS side + tc -n $SERVER_NS qdisc add dev link0 root handle 1: htb + tc -n $SERVER_NS class add dev link0 parent 1: classid 1:1 htb rate 100mbit + tc -n $SERVER_NS filter add dev link0 parent 1: protocol ip u32 match ip protocol 132 \ + 0xff match u8 2 0xff at 32 flowid 1:1 + tc -n $SERVER_NS qdisc add dev link0 parent 1:1 handle 10: netem delay 1200ms + + # simulate the ctstate check on OVS nf_conntrack 
+ ip net exec $ROUTER_NS iptables -A FORWARD -m state --state INVALID,UNTRACKED -j DROP + ip net exec $ROUTER_NS iptables -A INPUT -p sctp -j DROP + + # use a smaller number for assoc's max_retrans to reproduce the issue + modprobe sctp + ip net exec $CLIENT_NS sysctl -wq net.sctp.association_max_retrans=3 +} + +cleanup() { + ip net exec $CLIENT_NS pkill sctp_collision 2>&1 >/dev/null + ip net exec $SERVER_NS pkill sctp_collision 2>&1 >/dev/null + ip net del "$CLIENT_NS" + ip net del "$SERVER_NS" + ip net del "$ROUTER_NS" +} + +do_test() { + ip net exec $SERVER_NS ./sctp_collision server \ + $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT & + ip net exec $CLIENT_NS ./sctp_collision client \ + $CLIENT_IP $CLIENT_PORT $SERVER_IP $SERVER_PORT +} + +# NOTE: one way to work around the issue is set a smaller hb_interval +# ip net exec $CLIENT_NS sysctl -wq net.sctp.hb_interval=3500 + +# run the test case +trap cleanup EXIT +setup && \ +echo "Test for SCTP Collision in nf_conntrack:" && \ +do_test && echo "PASS!" +exit $? diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh index 83c271b1c735..bb34329e02a7 100755 --- a/tools/testing/selftests/netfilter/nft_audit.sh +++ b/tools/testing/selftests/netfilter/nft_audit.sh @@ -12,10 +12,11 @@ nft --version >/dev/null 2>&1 || { } logfile=$(mktemp) +rulefile=$(mktemp) echo "logging into $logfile" ./audit_logread >"$logfile" & logread_pid=$! -trap 'kill $logread_pid; rm -f $logfile' EXIT +trap 'kill $logread_pid; rm -f $logfile $rulefile' EXIT exec 3<"$logfile" do_test() { # (cmd, log) @@ -26,12 +27,14 @@ do_test() { # (cmd, log) res=$(diff -a -u <(echo "$2") - <&3) [ $? -eq 0 ] && { echo "OK"; return; } echo "FAIL" - echo "$res" - ((RC++)) + grep -v '^\(---\|+++\|@@\)' <<< "$res" + ((RC--)) } nft flush ruleset +# adding tables, chains and rules + for table in t1 t2; do do_test "nft add table $table" \ "table=$table family=2 entries=1 op=nft_register_table" @@ -62,6 +65,48 @@ for table in t1 t2; do "table=$table family=2 entries=6 op=nft_register_rule" done +for ((i = 0; i < 500; i++)); do + echo "add rule t2 c3 counter accept comment \"rule $i\"" +done >$rulefile +do_test "nft -f $rulefile" \ +'table=t2 family=2 entries=500 op=nft_register_rule' + +# adding sets and elements + +settype='type inet_service; counter' +setelem='{ 22, 80, 443 }' +setblock="{ $settype; elements = $setelem; }" +do_test "nft add set t1 s $setblock" \ +"table=t1 family=2 entries=4 op=nft_register_set" + +do_test "nft add set t1 s2 $setblock; add set t1 s3 { $settype; }" \ +"table=t1 family=2 entries=5 op=nft_register_set" + +do_test "nft add element t1 s3 $setelem" \ +"table=t1 family=2 entries=3 op=nft_register_setelem" + +# adding counters + +do_test 'nft add counter t1 c1' \ +'table=t1 family=2 entries=1 op=nft_register_obj' + +do_test 'nft add counter t2 c1; add counter t2 c2' \ +'table=t2 family=2 entries=2 op=nft_register_obj' + +# adding/updating quotas + +do_test 'nft add quota t1 q1 { 10 bytes }' \ +'table=t1 family=2 entries=1 op=nft_register_obj' + +do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \ +'table=t2 family=2 entries=2 op=nft_register_obj' + +# changing the quota value triggers obj update path +do_test 'nft add quota t1 q1 { 20 bytes }' \ +'table=t1 family=2 entries=1 op=nft_register_obj' + +# resetting rules + do_test 'nft reset rules t1 c2' \ 'table=t1 family=2 entries=3 op=nft_reset_rule' @@ -70,19 +115,6 @@ do_test 'nft reset rules table t1' \ table=t1 family=2 entries=3 
op=nft_reset_rule table=t1 family=2 entries=3 op=nft_reset_rule' -do_test 'nft reset rules' \ -'table=t1 family=2 entries=3 op=nft_reset_rule -table=t1 family=2 entries=3 op=nft_reset_rule -table=t1 family=2 entries=3 op=nft_reset_rule -table=t2 family=2 entries=3 op=nft_reset_rule -table=t2 family=2 entries=3 op=nft_reset_rule -table=t2 family=2 entries=3 op=nft_reset_rule' - -for ((i = 0; i < 500; i++)); do - echo "add rule t2 c3 counter accept comment \"rule $i\"" -done | do_test 'nft -f -' \ -'table=t2 family=2 entries=500 op=nft_register_rule' - do_test 'nft reset rules t2 c3' \ 'table=t2 family=2 entries=189 op=nft_reset_rule table=t2 family=2 entries=188 op=nft_reset_rule @@ -105,4 +137,57 @@ table=t2 family=2 entries=180 op=nft_reset_rule table=t2 family=2 entries=188 op=nft_reset_rule table=t2 family=2 entries=135 op=nft_reset_rule' +# resetting sets and elements + +elem=(22 ,80 ,443) +relem="" +for i in {1..3}; do + relem+="${elem[((i - 1))]}" + do_test "nft reset element t1 s { $relem }" \ + "table=t1 family=2 entries=$i op=nft_reset_setelem" +done + +do_test 'nft reset set t1 s' \ +'table=t1 family=2 entries=3 op=nft_reset_setelem' + +# deleting rules + +readarray -t handles < <(nft -a list chain t1 c1 | \ + sed -n 's/.*counter.* handle \(.*\)$/\1/p') + +do_test "nft delete rule t1 c1 handle ${handles[0]}" \ +'table=t1 family=2 entries=1 op=nft_unregister_rule' + +cmd='delete rule t1 c1 handle' +do_test "nft $cmd ${handles[1]}; $cmd ${handles[2]}" \ +'table=t1 family=2 entries=2 op=nft_unregister_rule' + +do_test 'nft flush chain t1 c2' \ +'table=t1 family=2 entries=3 op=nft_unregister_rule' + +do_test 'nft flush table t2' \ +'table=t2 family=2 entries=509 op=nft_unregister_rule' + +# deleting chains + +do_test 'nft delete chain t2 c2' \ +'table=t2 family=2 entries=1 op=nft_unregister_chain' + +# deleting sets and elements + +do_test 'nft delete element t1 s { 22 }' \ +'table=t1 family=2 entries=1 op=nft_unregister_setelem' + +do_test 'nft delete element t1 s { 80, 443 }' \ +'table=t1 family=2 entries=2 op=nft_unregister_setelem' + +do_test 'nft flush set t1 s2' \ +'table=t1 family=2 entries=3 op=nft_unregister_setelem' + +do_test 'nft delete set t1 s2' \ +'table=t1 family=2 entries=1 op=nft_unregister_set' + +do_test 'nft delete set t1 s3' \ +'table=t1 family=2 entries=1 op=nft_unregister_set' + exit $RC diff --git a/tools/testing/selftests/netfilter/sctp_collision.c b/tools/testing/selftests/netfilter/sctp_collision.c new file mode 100644 index 000000000000..21bb1cfd8a85 --- /dev/null +++ b/tools/testing/selftests/netfilter/sctp_collision.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <arpa/inet.h> + +int main(int argc, char *argv[]) +{ + struct sockaddr_in saddr = {}, daddr = {}; + int sd, ret, len = sizeof(daddr); + struct timeval tv = {25, 0}; + char buf[] = "hello"; + + if (argc != 6 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) { + printf("%s <server|client> <LOCAL_IP> <LOCAL_PORT> <REMOTE_IP> <REMOTE_PORT>\n", + argv[0]); + return -1; + } + + sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP); + if (sd < 0) { + printf("Failed to create sd\n"); + return -1; + } + + saddr.sin_family = AF_INET; + saddr.sin_addr.s_addr = inet_addr(argv[2]); + saddr.sin_port = htons(atoi(argv[3])); + + ret = bind(sd, (struct sockaddr *)&saddr, sizeof(saddr)); + if (ret < 0) { + printf("Failed to bind to address\n"); + goto out; + } + + ret = listen(sd, 5); + if (ret < 0) { + 
printf("Failed to listen on port\n"); + goto out; + } + + daddr.sin_family = AF_INET; + daddr.sin_addr.s_addr = inet_addr(argv[4]); + daddr.sin_port = htons(atoi(argv[5])); + + /* make test shorter than 25s */ + ret = setsockopt(sd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)); + if (ret < 0) { + printf("Failed to setsockopt SO_RCVTIMEO\n"); + goto out; + } + + if (!strcmp(argv[1], "server")) { + sleep(1); /* wait a bit for client's INIT */ + ret = connect(sd, (struct sockaddr *)&daddr, len); + if (ret < 0) { + printf("Failed to connect to peer\n"); + goto out; + } + ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len); + if (ret < 0) { + printf("Failed to recv msg %d\n", ret); + goto out; + } + ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len); + if (ret < 0) { + printf("Failed to send msg %d\n", ret); + goto out; + } + printf("Server: sent! %d\n", ret); + } + + if (!strcmp(argv[1], "client")) { + usleep(300000); /* wait a bit for server's listening */ + ret = connect(sd, (struct sockaddr *)&daddr, len); + if (ret < 0) { + printf("Failed to connect to peer\n"); + goto out; + } + sleep(1); /* wait a bit for server's delayed INIT_ACK to reproduce the issue */ + ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len); + if (ret < 0) { + printf("Failed to send msg %d\n", ret); + goto out; + } + ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len); + if (ret < 0) { + printf("Failed to recv msg %d\n", ret); + goto out; + } + printf("Client: rcvd! %d\n", ret); + } + ret = 0; +out: + close(sd); + return ret; +} diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile index 843ba56d8e49..254d676a2689 100644 --- a/tools/testing/selftests/openat2/Makefile +++ b/tools/testing/selftests/openat2/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-or-later -CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined +CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test include ../lib.mk diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 49f2ad1793fd..7ea42fa02eab 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -59,12 +59,11 @@ override define INSTALL_RULE done; endef -override define EMIT_TESTS +emit_tests: +@for TARGET in $(SUB_DIRS); do \ BUILD_TARGET=$(OUTPUT)/$$TARGET; \ - $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\ + $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET $@;\ done; -endef override define CLEAN +@for TARGET in $(SUB_DIRS); do \ @@ -77,4 +76,4 @@ endef tags: find . 
-name '*.c' -o -name '*.h' | xargs ctags -.PHONY: tags $(SUB_DIRS) +.PHONY: tags $(SUB_DIRS) emit_tests diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile index 2b95e44d20ff..a284fa874a9f 100644 --- a/tools/testing/selftests/powerpc/pmu/Makefile +++ b/tools/testing/selftests/powerpc/pmu/Makefile @@ -30,13 +30,14 @@ override define RUN_TESTS +TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests endef -DEFAULT_EMIT_TESTS := $(EMIT_TESTS) -override define EMIT_TESTS - $(DEFAULT_EMIT_TESTS) +emit_tests: + for TEST in $(TEST_GEN_PROGS); do \ + BASENAME_TEST=`basename $$TEST`; \ + echo "$(COLLECTION):$$BASENAME_TEST"; \ + done +TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests +TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests +TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests -endef DEFAULT_INSTALL_RULE := $(INSTALL_RULE) override define INSTALL_RULE @@ -64,4 +65,4 @@ sampling_tests: event_code_tests: TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all -.PHONY: all run_tests ebb sampling_tests event_code_tests +.PHONY: all run_tests ebb sampling_tests event_code_tests emit_tests diff --git a/tools/testing/selftests/proc/proc-empty-vm.c b/tools/testing/selftests/proc/proc-empty-vm.c index b16c13688b88..ee71ce52cb6a 100644 --- a/tools/testing/selftests/proc/proc-empty-vm.c +++ b/tools/testing/selftests/proc/proc-empty-vm.c @@ -267,6 +267,7 @@ static const char g_smaps_rollup[] = "Private_Dirty: 0 kB\n" "Referenced: 0 kB\n" "Anonymous: 0 kB\n" +"KSM: 0 kB\n" "LazyFree: 0 kB\n" "AnonHugePages: 0 kB\n" "ShmemPmdMapped: 0 kB\n" diff --git a/tools/testing/selftests/user_events/abi_test.c b/tools/testing/selftests/user_events/abi_test.c index 22374d29ffdd..8202f1327c39 100644 --- a/tools/testing/selftests/user_events/abi_test.c +++ b/tools/testing/selftests/user_events/abi_test.c @@ -91,16 +91,18 @@ static int reg_disable(long *enable, int bit) FIXTURE(user) { long check; + bool umount; }; FIXTURE_SETUP(user) { - USER_EVENT_FIXTURE_SETUP(return); + USER_EVENT_FIXTURE_SETUP(return, self->umount); change_event(false); self->check = 0; } FIXTURE_TEARDOWN(user) { + USER_EVENT_FIXTURE_TEARDOWN(self->umount); } TEST_F(user, enablement) { diff --git a/tools/testing/selftests/user_events/dyn_test.c b/tools/testing/selftests/user_events/dyn_test.c index 32c827a52d7d..a85980190bea 100644 --- a/tools/testing/selftests/user_events/dyn_test.c +++ b/tools/testing/selftests/user_events/dyn_test.c @@ -144,13 +144,16 @@ do { \ FIXTURE(user) { int check; + bool umount; }; FIXTURE_SETUP(user) { - USER_EVENT_FIXTURE_SETUP(return); + USER_EVENT_FIXTURE_SETUP(return, self->umount); } FIXTURE_TEARDOWN(user) { + USER_EVENT_FIXTURE_TEARDOWN(self->umount); + wait_for_delete(); } diff --git a/tools/testing/selftests/user_events/ftrace_test.c b/tools/testing/selftests/user_events/ftrace_test.c index 6a260caeeddc..dcd7509fe2e0 100644 --- a/tools/testing/selftests/user_events/ftrace_test.c +++ b/tools/testing/selftests/user_events/ftrace_test.c @@ -204,10 +204,11 @@ FIXTURE(user) { int data_fd; int enable_fd; int check; + bool umount; }; FIXTURE_SETUP(user) { - USER_EVENT_FIXTURE_SETUP(return); + USER_EVENT_FIXTURE_SETUP(return, self->umount); self->status_fd = open(status_file, O_RDONLY); 
ASSERT_NE(-1, self->status_fd); @@ -219,6 +220,8 @@ FIXTURE_SETUP(user) { } FIXTURE_TEARDOWN(user) { + USER_EVENT_FIXTURE_TEARDOWN(self->umount); + close(self->status_fd); close(self->data_fd); diff --git a/tools/testing/selftests/user_events/perf_test.c b/tools/testing/selftests/user_events/perf_test.c index f893398cda05..5288e768b207 100644 --- a/tools/testing/selftests/user_events/perf_test.c +++ b/tools/testing/selftests/user_events/perf_test.c @@ -111,16 +111,19 @@ static int clear(int *check) FIXTURE(user) { int data_fd; int check; + bool umount; }; FIXTURE_SETUP(user) { - USER_EVENT_FIXTURE_SETUP(return); + USER_EVENT_FIXTURE_SETUP(return, self->umount); self->data_fd = open(data_file, O_RDWR); ASSERT_NE(-1, self->data_fd); } FIXTURE_TEARDOWN(user) { + USER_EVENT_FIXTURE_TEARDOWN(self->umount); + close(self->data_fd); if (clear(&self->check) != 0) diff --git a/tools/testing/selftests/user_events/user_events_selftests.h b/tools/testing/selftests/user_events/user_events_selftests.h index 690378942f82..e1c3c063c031 100644 --- a/tools/testing/selftests/user_events/user_events_selftests.h +++ b/tools/testing/selftests/user_events/user_events_selftests.h @@ -11,13 +11,19 @@ #include "../kselftest.h" -static inline bool tracefs_enabled(char **message, bool *fail) +static inline void tracefs_unmount(void) +{ + umount("/sys/kernel/tracing"); +} + +static inline bool tracefs_enabled(char **message, bool *fail, bool *umount) { struct stat buf; int ret; *message = ""; *fail = false; + *umount = false; /* Ensure tracefs is installed */ ret = stat("/sys/kernel/tracing", &buf); @@ -37,6 +43,8 @@ static inline bool tracefs_enabled(char **message, bool *fail) return false; } + *umount = true; + ret = stat("/sys/kernel/tracing/README", &buf); } @@ -49,13 +57,14 @@ static inline bool tracefs_enabled(char **message, bool *fail) return true; } -static inline bool user_events_enabled(char **message, bool *fail) +static inline bool user_events_enabled(char **message, bool *fail, bool *umount) { struct stat buf; int ret; *message = ""; *fail = false; + *umount = false; if (getuid() != 0) { *message = "Must be run as root"; @@ -63,7 +72,7 @@ static inline bool user_events_enabled(char **message, bool *fail) return false; } - if (!tracefs_enabled(message, fail)) + if (!tracefs_enabled(message, fail, umount)) return false; /* Ensure user_events is installed */ @@ -85,10 +94,10 @@ static inline bool user_events_enabled(char **message, bool *fail) return true; } -#define USER_EVENT_FIXTURE_SETUP(statement) do { \ +#define USER_EVENT_FIXTURE_SETUP(statement, umount) do { \ char *message; \ bool fail; \ - if (!user_events_enabled(&message, &fail)) { \ + if (!user_events_enabled(&message, &fail, &(umount))) { \ if (fail) { \ TH_LOG("Setup failed due to: %s", message); \ ASSERT_FALSE(fail); \ @@ -97,4 +106,9 @@ static inline bool user_events_enabled(char **message, bool *fail) } \ } while (0) +#define USER_EVENT_FIXTURE_TEARDOWN(umount) do { \ + if ((umount)) \ + tracefs_unmount(); \ +} while (0) + #endif /* _USER_EVENTS_SELFTESTS_H */ diff --git a/tools/tracing/rtla/src/timerlat_aa.c b/tools/tracing/rtla/src/timerlat_aa.c index e0ffe69c271c..7093fd5333be 100644 --- a/tools/tracing/rtla/src/timerlat_aa.c +++ b/tools/tracing/rtla/src/timerlat_aa.c @@ -159,6 +159,7 @@ static int timerlat_aa_irq_latency(struct timerlat_aa_data *taa_data, taa_data->thread_nmi_sum = 0; taa_data->thread_irq_sum = 0; taa_data->thread_softirq_sum = 0; + taa_data->thread_thread_sum = 0; taa_data->thread_blocking_duration = 0; 
taa_data->timer_irq_start_time = 0; taa_data->timer_irq_duration = 0; @@ -337,7 +338,23 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor taa_data->timer_irq_start_time = start; taa_data->timer_irq_duration = duration; - taa_data->timer_irq_start_delay = taa_data->timer_irq_start_time - expected_start; + /* + * We are dealing with two different clock sources: the + * external clock source that timerlat uses as a reference + * and the clock used by the tracer. There are also two + * moments: the time reading the clock and the timer in + * which the event is placed in the buffer (the trace + * event timestamp). If the processor is slow or there + * is some hardware noise, the difference between the + * timestamp and the external clock read can be longer + * than the IRQ handler delay, resulting in a negative + * time. If so, set IRQ start delay as 0. In the end, + * it is less relevant than the noise. + */ + if (expected_start < taa_data->timer_irq_start_time) + taa_data->timer_irq_start_delay = taa_data->timer_irq_start_time - expected_start; + else + taa_data->timer_irq_start_delay = 0; /* * not exit from idle. @@ -528,7 +545,7 @@ static int timerlat_aa_kworker_start_handler(struct trace_seq *s, struct tep_rec static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu, int irq_thresh, int thread_thresh) { - unsigned long long exp_irq_ts; + long long exp_irq_ts; int total; int irq; @@ -545,12 +562,15 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu, /* * Expected IRQ arrival time using the trace clock as the base. + * + * TODO: Add a list of previous IRQ, and then run the list backwards. */ exp_irq_ts = taa_data->timer_irq_start_time - taa_data->timer_irq_start_delay; - - if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) - printf(" Previous IRQ interference: \t\t up to %9.2f us\n", - ns_to_usf(taa_data->prev_irq_duration)); + if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) { + if (taa_data->prev_irq_timstamp < taa_data->timer_irq_start_time) + printf(" Previous IRQ interference: \t\t up to %9.2f us\n", + ns_to_usf(taa_data->prev_irq_duration)); + } /* * The delay that the IRQ suffered before starting. diff --git a/tools/tracing/rtla/src/timerlat_u.c b/tools/tracing/rtla/src/timerlat_u.c index 05e310696dd5..01dbf9a6b5a5 100644 --- a/tools/tracing/rtla/src/timerlat_u.c +++ b/tools/tracing/rtla/src/timerlat_u.c @@ -45,7 +45,7 @@ static int timerlat_u_main(int cpu, struct timerlat_u_params *params) retval = sched_setaffinity(gettid(), sizeof(set), &set); if (retval == -1) { - err_msg("Error setting user thread affinity\n"); + debug_msg("Error setting user thread affinity %d, is the CPU online?\n", cpu); exit(1); } @@ -193,7 +193,9 @@ void *timerlat_u_dispatcher(void *data) procs_count--; } } - break; + + if (!procs_count) + break; } sleep(1); |
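
The user_events fixture changes above all implement one pattern: tracefs_enabled() now reports, via the new umount flag, whether the test itself had to mount tracefs, and USER_EVENT_FIXTURE_TEARDOWN() unmounts it only in that case, so a host that already had tracefs mounted is left untouched. A minimal standalone sketch of that ownership-tracking idea follows; the helper names ensure_tracefs() and release_tracefs() are illustrative only and are not part of the selftest headers.

/*
 * Sketch of the mount-ownership pattern used by the user_events fixtures:
 * remember whether this process mounted tracefs during setup, and unmount
 * it on teardown only in that case.
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>

static bool ensure_tracefs(bool *we_mounted)
{
	struct stat buf;

	*we_mounted = false;

	/* If the README is visible, tracefs is already mounted and not ours. */
	if (stat("/sys/kernel/tracing/README", &buf) == 0)
		return true;

	if (mount("none", "/sys/kernel/tracing", "tracefs", 0, NULL) != 0) {
		perror("mount tracefs");
		return false;
	}

	*we_mounted = true;
	return stat("/sys/kernel/tracing/README", &buf) == 0;
}

static void release_tracefs(bool we_mounted)
{
	/* Mirrors USER_EVENT_FIXTURE_TEARDOWN(): only undo our own mount. */
	if (we_mounted)
		umount("/sys/kernel/tracing");
}

int main(void)
{
	bool we_mounted;

	if (!ensure_tracefs(&we_mounted))
		return 1;

	/* ... exercise /sys/kernel/tracing/user_events_* here ... */

	release_tracefs(we_mounted);
	return 0;
}

In the diffs above, the same flag is threaded through each fixture as self->umount, which is why every FIXTURE_SETUP() now passes it to USER_EVENT_FIXTURE_SETUP() and every FIXTURE_TEARDOWN() hands it to USER_EVENT_FIXTURE_TEARDOWN().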